From 47fdfa09294c993bc0e3e539ed46f5e5c0298447 Mon Sep 17 00:00:00 2001 From: Evgenii Kniazev Date: Mon, 12 Jan 2026 14:28:06 +0000 Subject: [PATCH 1/4] aitools: Add SDK documentation query tool for MCP server Add a new MCP tool `databricks_query_sdk_docs` that allows LLM agents to search Databricks SDK documentation for methods, types, and examples. This addresses the problem where LLMs struggle with the Databricks SDK because they lack indexed documentation. Instead of guessing API calls, agents can now query for proper method signatures, parameters, and usage. Features: - Fuzzy/keyword search across services, methods, types, and enums - Category and service filtering - Score-based result ranking - LLM-friendly markdown output with signatures and examples Implementation: - New sdkdocs provider with embedded JSON documentation index - Index generator tool that parses annotations_openapi.yml - Generated index includes 7 core services, 277 types, and 3 enums - Full unit test coverage for search and index loading Co-Authored-By: Claude Opus 4.5 --- experimental/aitools/lib/prompts/flow.tmpl | 1 + .../aitools/lib/providers/sdkdocs/index.go | 127 + .../lib/providers/sdkdocs/index_test.go | 94 + .../aitools/lib/providers/sdkdocs/provider.go | 82 + .../lib/providers/sdkdocs/query_sdk_docs.go | 210 + .../lib/providers/sdkdocs/sdk_docs_index.json | 8163 +++++++++++++++++ .../aitools/lib/providers/sdkdocs/search.go | 265 + .../lib/providers/sdkdocs/search_test.go | 274 + experimental/aitools/lib/server/server.go | 22 + tools/gen_sdk_docs_index.go | 664 ++ 10 files changed, 9902 insertions(+) create mode 100644 experimental/aitools/lib/providers/sdkdocs/index.go create mode 100644 experimental/aitools/lib/providers/sdkdocs/index_test.go create mode 100644 experimental/aitools/lib/providers/sdkdocs/provider.go create mode 100644 experimental/aitools/lib/providers/sdkdocs/query_sdk_docs.go create mode 100644 experimental/aitools/lib/providers/sdkdocs/sdk_docs_index.json create mode 100644 experimental/aitools/lib/providers/sdkdocs/search.go create mode 100644 experimental/aitools/lib/providers/sdkdocs/search_test.go create mode 100644 tools/gen_sdk_docs_index.go diff --git a/experimental/aitools/lib/prompts/flow.tmpl b/experimental/aitools/lib/prompts/flow.tmpl index 3a71c91e02..1bd21bdcd4 100644 --- a/experimental/aitools/lib/prompts/flow.tmpl +++ b/experimental/aitools/lib/prompts/flow.tmpl @@ -9,6 +9,7 @@ - **databricks_discover**: MUST call first - returns scaffolding commands - **invoke_databricks_cli**: Execute CLI commands including init-template for scaffolding - **databricks_configure_auth**: Switch workspace profile/host +- **databricks_query_sdk_docs**: Search SDK documentation for methods, types, and examples ## Critical Workflow Rules 1. ALWAYS call databricks_discover FIRST to get scaffolding guidance diff --git a/experimental/aitools/lib/providers/sdkdocs/index.go b/experimental/aitools/lib/providers/sdkdocs/index.go new file mode 100644 index 0000000000..808f37f155 --- /dev/null +++ b/experimental/aitools/lib/providers/sdkdocs/index.go @@ -0,0 +1,127 @@ +package sdkdocs + +import ( + "embed" + "encoding/json" + "fmt" +) + +//go:embed sdk_docs_index.json +var indexFS embed.FS + +// SDKDocsIndex represents the complete SDK documentation index. 
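+//
+// The embedded JSON is keyed roughly as in this abbreviated, illustrative sketch
+// (assumed shape for orientation only; see sdk_docs_index.json for the full generated content):
+//
+//	{
+//	  "version": "1.0",
+//	  "services": {"jobs": {"name": "Jobs", "methods": {"Create": {...}}}},
+//	  "types":    {"jobs.CreateJob": {...}},
+//	  "enums":    {"jobs.RunLifeCycleState": {...}}
+//	}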
+type SDKDocsIndex struct { + Version string `json:"version"` + GeneratedAt string `json:"generated_at"` + Services map[string]*ServiceDoc `json:"services"` + Types map[string]*TypeDoc `json:"types"` + Enums map[string]*EnumDoc `json:"enums"` +} + +// ServiceDoc represents documentation for an API service. +type ServiceDoc struct { + Name string `json:"name"` + Description string `json:"description"` + Package string `json:"package"` + Methods map[string]*MethodDoc `json:"methods"` +} + +// MethodDoc represents documentation for an API method. +type MethodDoc struct { + Name string `json:"name"` + Description string `json:"description"` + Signature string `json:"signature"` + Parameters []ParamDoc `json:"parameters"` + Returns *ReturnDoc `json:"returns,omitempty"` + Example string `json:"example,omitempty"` + HTTPMethod string `json:"http_method,omitempty"` + HTTPPath string `json:"http_path,omitempty"` +} + +// ParamDoc represents documentation for a method parameter. +type ParamDoc struct { + Name string `json:"name"` + Type string `json:"type"` + Description string `json:"description"` + Required bool `json:"required"` +} + +// ReturnDoc represents documentation for a method return type. +type ReturnDoc struct { + Type string `json:"type"` + Description string `json:"description"` +} + +// TypeDoc represents documentation for a data type. +type TypeDoc struct { + Name string `json:"name"` + Package string `json:"package"` + Description string `json:"description"` + Fields map[string]*FieldDoc `json:"fields"` +} + +// FieldDoc represents documentation for a struct field. +type FieldDoc struct { + Name string `json:"name"` + Type string `json:"type"` + Description string `json:"description"` + Required bool `json:"required"` + OutputOnly bool `json:"output_only,omitempty"` + Deprecated bool `json:"deprecated,omitempty"` +} + +// EnumDoc represents documentation for an enum type. +type EnumDoc struct { + Name string `json:"name"` + Package string `json:"package"` + Description string `json:"description"` + Values []string `json:"values"` +} + +// LoadIndex loads the embedded SDK documentation index. +func LoadIndex() (*SDKDocsIndex, error) { + data, err := indexFS.ReadFile("sdk_docs_index.json") + if err != nil { + return nil, fmt.Errorf("failed to read embedded SDK docs index: %w", err) + } + + var index SDKDocsIndex + if err := json.Unmarshal(data, &index); err != nil { + return nil, fmt.Errorf("failed to parse SDK docs index: %w", err) + } + + return &index, nil +} + +// GetMethod retrieves a method by its path (e.g., "jobs.Create"). +func (idx *SDKDocsIndex) GetMethod(serviceName, methodName string) *MethodDoc { + service, ok := idx.Services[serviceName] + if !ok { + return nil + } + return service.Methods[methodName] +} + +// GetType retrieves a type by its full path (e.g., "jobs.CreateJob"). +func (idx *SDKDocsIndex) GetType(typePath string) *TypeDoc { + return idx.Types[typePath] +} + +// GetEnum retrieves an enum by its full path. +func (idx *SDKDocsIndex) GetEnum(enumPath string) *EnumDoc { + return idx.Enums[enumPath] +} + +// GetService retrieves a service by name. +func (idx *SDKDocsIndex) GetService(serviceName string) *ServiceDoc { + return idx.Services[serviceName] +} + +// ListServices returns all service names. 
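+// The order of the returned names is not deterministic, since the implementation
+// ranges over a map; callers that need stable output should sort the slice themselves.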
+func (idx *SDKDocsIndex) ListServices() []string { + names := make([]string, 0, len(idx.Services)) + for name := range idx.Services { + names = append(names, name) + } + return names +} diff --git a/experimental/aitools/lib/providers/sdkdocs/index_test.go b/experimental/aitools/lib/providers/sdkdocs/index_test.go new file mode 100644 index 0000000000..54b5f5912c --- /dev/null +++ b/experimental/aitools/lib/providers/sdkdocs/index_test.go @@ -0,0 +1,94 @@ +package sdkdocs + +import ( + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func TestLoadIndex(t *testing.T) { + index, err := LoadIndex() + require.NoError(t, err) + require.NotNil(t, index) + + // Verify the index has expected structure + assert.NotEmpty(t, index.Version) + assert.NotEmpty(t, index.GeneratedAt) + assert.NotEmpty(t, index.Services) + + // Check that jobs service exists and has methods + jobsService := index.GetService("jobs") + require.NotNil(t, jobsService, "jobs service should exist") + assert.Equal(t, "Jobs", jobsService.Name) + assert.NotEmpty(t, jobsService.Methods) + + // Check that Create method exists + createMethod := index.GetMethod("jobs", "Create") + require.NotNil(t, createMethod, "jobs.Create method should exist") + assert.Equal(t, "Create", createMethod.Name) + assert.NotEmpty(t, createMethod.Description) +} + +func TestGetMethod(t *testing.T) { + index, err := LoadIndex() + require.NoError(t, err) + + t.Run("existing method", func(t *testing.T) { + method := index.GetMethod("jobs", "Create") + require.NotNil(t, method) + assert.Equal(t, "Create", method.Name) + }) + + t.Run("non-existing method", func(t *testing.T) { + method := index.GetMethod("jobs", "NonExistent") + assert.Nil(t, method) + }) + + t.Run("non-existing service", func(t *testing.T) { + method := index.GetMethod("nonexistent", "Create") + assert.Nil(t, method) + }) +} + +func TestGetService(t *testing.T) { + index, err := LoadIndex() + require.NoError(t, err) + + t.Run("existing service", func(t *testing.T) { + service := index.GetService("jobs") + require.NotNil(t, service) + assert.Equal(t, "Jobs", service.Name) + }) + + t.Run("non-existing service", func(t *testing.T) { + service := index.GetService("nonexistent") + assert.Nil(t, service) + }) +} + +func TestListServices(t *testing.T) { + index, err := LoadIndex() + require.NoError(t, err) + + services := index.ListServices() + assert.NotEmpty(t, services) + assert.Contains(t, services, "jobs") +} + +func TestGetEnum(t *testing.T) { + index, err := LoadIndex() + require.NoError(t, err) + + t.Run("existing enum", func(t *testing.T) { + enum := index.GetEnum("jobs.RunLifeCycleState") + require.NotNil(t, enum) + assert.Equal(t, "RunLifeCycleState", enum.Name) + assert.NotEmpty(t, enum.Values) + }) + + t.Run("non-existing enum", func(t *testing.T) { + enum := index.GetEnum("nonexistent.Enum") + assert.Nil(t, enum) + }) +} diff --git a/experimental/aitools/lib/providers/sdkdocs/provider.go b/experimental/aitools/lib/providers/sdkdocs/provider.go new file mode 100644 index 0000000000..a6e4a21c77 --- /dev/null +++ b/experimental/aitools/lib/providers/sdkdocs/provider.go @@ -0,0 +1,82 @@ +package sdkdocs + +import ( + "context" + + mcp "github.com/databricks/cli/experimental/aitools/lib" + mcpsdk "github.com/databricks/cli/experimental/aitools/lib/mcp" + "github.com/databricks/cli/experimental/aitools/lib/providers" + "github.com/databricks/cli/experimental/aitools/lib/session" + "github.com/databricks/cli/libs/log" +) + +func init() { + 
providers.Register("sdkdocs", func(ctx context.Context, cfg *mcp.Config, sess *session.Session) (providers.Provider, error) { + return NewProvider(ctx, cfg, sess) + }, providers.ProviderConfig{ + Always: true, + }) +} + +// Provider provides SDK documentation search capabilities. +type Provider struct { + config *mcp.Config + session *session.Session + ctx context.Context + index *SDKDocsIndex +} + +// NewProvider creates a new SDK docs provider. +func NewProvider(ctx context.Context, cfg *mcp.Config, sess *session.Session) (*Provider, error) { + index, err := LoadIndex() + if err != nil { + log.Warnf(ctx, "Failed to load SDK docs index: %v", err) + // Return a provider with an empty index rather than failing + index = &SDKDocsIndex{ + Services: make(map[string]*ServiceDoc), + Types: make(map[string]*TypeDoc), + Enums: make(map[string]*EnumDoc), + } + } + + log.Infof(ctx, "SDK docs provider initialized: %d services, %d types, %d enums", + len(index.Services), len(index.Types), len(index.Enums)) + + return &Provider{ + config: cfg, + session: sess, + ctx: ctx, + index: index, + }, nil +} + +// Name returns the provider name. +func (p *Provider) Name() string { + return "sdkdocs" +} + +// RegisterTools registers the SDK documentation tools with the MCP server. +func (p *Provider) RegisterTools(server *mcpsdk.Server) error { + log.Info(p.ctx, "Registering SDK docs tools") + + mcpsdk.AddTool(server, + &mcpsdk.Tool{ + Name: "databricks_query_sdk_docs", + Description: `Search Databricks SDK documentation for methods, types, and examples. + +Use this tool to find: +- API methods: "how to create a job", "list clusters", "run pipeline" +- Type definitions: "JobSettings fields", "ClusterSpec parameters" +- Enums: "run lifecycle states", "cluster state values" + +Returns method signatures, parameter descriptions, return types, and usage examples. +This is useful when you need to understand the correct way to call Databricks APIs.`, + }, + func(ctx context.Context, req *mcpsdk.CallToolRequest, args QuerySDKDocsInput) (*mcpsdk.CallToolResult, any, error) { + return p.querySDKDocs(ctx, args) + }, + ) + + log.Infof(p.ctx, "Registered SDK docs tools: count=%d", 1) + return nil +} diff --git a/experimental/aitools/lib/providers/sdkdocs/query_sdk_docs.go b/experimental/aitools/lib/providers/sdkdocs/query_sdk_docs.go new file mode 100644 index 0000000000..0fcdcdb89e --- /dev/null +++ b/experimental/aitools/lib/providers/sdkdocs/query_sdk_docs.go @@ -0,0 +1,210 @@ +package sdkdocs + +import ( + "context" + "fmt" + "strings" + + mcpsdk "github.com/databricks/cli/experimental/aitools/lib/mcp" + "github.com/databricks/cli/libs/log" +) + +// QuerySDKDocsInput represents the input for the databricks_query_sdk_docs tool. +type QuerySDKDocsInput struct { + Query string `json:"query" jsonschema:"required" jsonschema_description:"Search query for SDK documentation (e.g., 'how to create a job', 'cluster configuration', 'JobSettings fields')"` + Category string `json:"category,omitempty" jsonschema_description:"Optional category filter: 'services', 'methods', 'types', or 'enums'"` + Service string `json:"service,omitempty" jsonschema_description:"Optional service filter (e.g., 'jobs', 'clusters', 'pipelines', 'catalog')"` + Limit int `json:"limit,omitempty" jsonschema_description:"Maximum number of results to return (default: 10, max: 50)"` +} + +// querySDKDocs handles the databricks_query_sdk_docs tool invocation. 
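+//
+// A typical invocation (illustrative arguments, not a fixed contract) looks like:
+//
+//	{"query": "create a job", "service": "jobs", "limit": 5}
+//
+// and returns markdown with matching method signatures, parameters, and examples,
+// or a hint listing the available services when nothing matches.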
+func (p *Provider) querySDKDocs(ctx context.Context, args QuerySDKDocsInput) (*mcpsdk.CallToolResult, any, error) { + log.Debugf(ctx, "databricks_query_sdk_docs called: query=%q, category=%q, service=%q, limit=%d", + args.Query, args.Category, args.Service, args.Limit) + + results := p.index.Search(SearchOptions{ + Query: args.Query, + Category: args.Category, + Service: args.Service, + Limit: args.Limit, + }) + + if len(results) == 0 { + return mcpsdk.CreateNewTextContentResult( + fmt.Sprintf("No SDK documentation found for query: %q\n\nTry:\n- Using different keywords\n- Removing filters\n- Checking available services: %s", + args.Query, strings.Join(p.index.ListServices(), ", ")), + ), nil, nil + } + + response := p.formatResponse(results) + return mcpsdk.CreateNewTextContentResult(response), nil, nil +} + +// formatResponse formats search results for LLM consumption. +func (p *Provider) formatResponse(results []SearchResult) string { + var sb strings.Builder + + sb.WriteString("## SDK Documentation Results\n\n") + + for _, result := range results { + switch result.Type { + case "method": + p.formatMethodResult(&sb, result) + case "type": + p.formatTypeResult(&sb, result) + case "service": + p.formatServiceResult(&sb, result) + case "enum": + p.formatEnumResult(&sb, result) + } + sb.WriteString("\n---\n\n") + } + + return sb.String() +} + +// formatMethodResult formats a method search result. +func (p *Provider) formatMethodResult(sb *strings.Builder, result SearchResult) { + method := p.index.GetMethod(result.Service, result.Name) + if method == nil { + sb.WriteString(fmt.Sprintf("### Method: %s\n\n%s\n", result.Name, result.Description)) + return + } + + sb.WriteString(fmt.Sprintf("### Method: %s.%s\n\n", result.Service, method.Name)) + + if method.Signature != "" { + sb.WriteString("**Signature:**\n```go\n") + sb.WriteString(method.Signature) + sb.WriteString("\n```\n\n") + } + + if method.Description != "" { + sb.WriteString("**Description:**\n") + sb.WriteString(method.Description) + sb.WriteString("\n\n") + } + + if len(method.Parameters) > 0 { + sb.WriteString("**Parameters:**\n") + for _, param := range method.Parameters { + required := "" + if param.Required { + required = " (required)" + } + sb.WriteString(fmt.Sprintf("- `%s` (%s)%s: %s\n", param.Name, param.Type, required, param.Description)) + } + sb.WriteString("\n") + } + + if method.Returns != nil { + sb.WriteString("**Returns:**\n") + sb.WriteString(fmt.Sprintf("- `%s`: %s\n\n", method.Returns.Type, method.Returns.Description)) + } + + if method.Example != "" { + sb.WriteString("**Example:**\n```go\n") + sb.WriteString(method.Example) + sb.WriteString("\n```\n") + } +} + +// formatTypeResult formats a type search result. 
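+// Fields are rendered as a markdown table; pipe characters in field descriptions
+// are escaped and long descriptions are truncated so the table stays well-formed.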
+func (p *Provider) formatTypeResult(sb *strings.Builder, result SearchResult) { + typeDoc := p.index.GetType(result.Path) + if typeDoc == nil { + sb.WriteString(fmt.Sprintf("### Type: %s\n\n%s\n", result.Name, result.Description)) + return + } + + sb.WriteString(fmt.Sprintf("### Type: %s\n\n", typeDoc.Name)) + + if typeDoc.Package != "" { + sb.WriteString(fmt.Sprintf("**Package:** `%s`\n\n", typeDoc.Package)) + } + + if typeDoc.Description != "" { + sb.WriteString("**Description:**\n") + sb.WriteString(typeDoc.Description) + sb.WriteString("\n\n") + } + + if len(typeDoc.Fields) > 0 { + sb.WriteString("**Fields:**\n\n") + sb.WriteString("| Field | Type | Required | Description |\n") + sb.WriteString("|-------|------|----------|-------------|\n") + + for _, field := range typeDoc.Fields { + required := "No" + if field.Required { + required = "Yes" + } + desc := field.Description + if field.OutputOnly { + desc = "(output-only) " + desc + } + if field.Deprecated { + desc = "(deprecated) " + desc + } + // Escape pipe characters in descriptions + desc = strings.ReplaceAll(desc, "|", "\\|") + sb.WriteString(fmt.Sprintf("| %s | %s | %s | %s |\n", field.Name, field.Type, required, truncate(desc, 100))) + } + sb.WriteString("\n") + } +} + +// formatServiceResult formats a service search result. +func (p *Provider) formatServiceResult(sb *strings.Builder, result SearchResult) { + service := p.index.GetService(result.Path) + if service == nil { + sb.WriteString(fmt.Sprintf("### Service: %s\n\n%s\n", result.Name, result.Description)) + return + } + + sb.WriteString(fmt.Sprintf("### Service: %s\n\n", service.Name)) + + if service.Description != "" { + sb.WriteString("**Description:**\n") + sb.WriteString(service.Description) + sb.WriteString("\n\n") + } + + if len(service.Methods) > 0 { + sb.WriteString("**Available Methods:**\n") + for methodName, method := range service.Methods { + desc := truncate(method.Description, 80) + sb.WriteString(fmt.Sprintf("- `%s`: %s\n", methodName, desc)) + } + sb.WriteString("\n") + } +} + +// formatEnumResult formats an enum search result. 
+func (p *Provider) formatEnumResult(sb *strings.Builder, result SearchResult) { + enumDoc := p.index.GetEnum(result.Path) + if enumDoc == nil { + sb.WriteString(fmt.Sprintf("### Enum: %s\n\n%s\n", result.Name, result.Description)) + return + } + + sb.WriteString(fmt.Sprintf("### Enum: %s\n\n", enumDoc.Name)) + + if enumDoc.Package != "" { + sb.WriteString(fmt.Sprintf("**Package:** `%s`\n\n", enumDoc.Package)) + } + + if enumDoc.Description != "" { + sb.WriteString("**Description:**\n") + sb.WriteString(enumDoc.Description) + sb.WriteString("\n\n") + } + + if len(enumDoc.Values) > 0 { + sb.WriteString("**Values:**\n") + for _, value := range enumDoc.Values { + sb.WriteString(fmt.Sprintf("- `%s`\n", value)) + } + sb.WriteString("\n") + } +} diff --git a/experimental/aitools/lib/providers/sdkdocs/sdk_docs_index.json b/experimental/aitools/lib/providers/sdkdocs/sdk_docs_index.json new file mode 100644 index 0000000000..140559415c --- /dev/null +++ b/experimental/aitools/lib/providers/sdkdocs/sdk_docs_index.json @@ -0,0 +1,8163 @@ +{ + "version": "1.0", + "generated_at": "2026-01-12T14:13:07Z", + "services": { + "apps": { + "name": "Apps", + "description": "Databricks Apps API for deploying and managing web applications on Databricks.", + "package": "github.com/databricks/databricks-sdk-go/service/apps", + "methods": { + "Create": { + "name": "Create", + "description": "Creates a new app.", + "signature": "Create(ctx context.Context, request CreateAppRequest) (*App, error)", + "parameters": [ + { + "name": "request", + "type": "CreateAppRequest", + "description": "App configuration including name and description", + "required": true + } + ], + "returns": { + "type": "*App", + "description": "The created app details" + } + }, + "Deploy": { + "name": "Deploy", + "description": "Deploys an app to Databricks Apps.", + "signature": "Deploy(ctx context.Context, request CreateAppDeploymentRequest) (*AppDeployment, error)", + "parameters": [ + { + "name": "request", + "type": "CreateAppDeploymentRequest", + "description": "Deployment configuration", + "required": true + } + ], + "returns": { + "type": "*AppDeployment", + "description": "Deployment status and details" + } + }, + "List": { + "name": "List", + "description": "Lists all apps in the workspace.", + "signature": "List(ctx context.Context, request ListAppsRequest) listing.Iterator[App]", + "parameters": null, + "returns": { + "type": "listing.Iterator[App]", + "description": "Iterator over apps" + } + } + } + }, + "catalog": { + "name": "Catalog", + "description": "Unity Catalog APIs for managing catalogs, schemas, tables, and other data assets.", + "package": "github.com/databricks/databricks-sdk-go/service/catalog", + "methods": { + "ListCatalogs": { + "name": "ListCatalogs", + "description": "Lists all catalogs in the metastore.", + "signature": "List(ctx context.Context, request ListCatalogsRequest) listing.Iterator[CatalogInfo]", + "parameters": null, + "returns": { + "type": "listing.Iterator[CatalogInfo]", + "description": "Iterator over catalog information" + } + }, + "ListSchemas": { + "name": "ListSchemas", + "description": "Lists all schemas in a catalog.", + "signature": "List(ctx context.Context, request ListSchemasRequest) listing.Iterator[SchemaInfo]", + "parameters": [ + { + "name": "request", + "type": "ListSchemasRequest", + "description": "Contains catalog_name to list schemas from", + "required": true + } + ], + "returns": { + "type": "listing.Iterator[SchemaInfo]", + "description": "Iterator over schema information" + } + 
}, + "ListTables": { + "name": "ListTables", + "description": "Lists all tables in a schema.", + "signature": "List(ctx context.Context, request ListTablesRequest) listing.Iterator[TableInfo]", + "parameters": [ + { + "name": "request", + "type": "ListTablesRequest", + "description": "Contains catalog_name and schema_name", + "required": true + } + ], + "returns": { + "type": "listing.Iterator[TableInfo]", + "description": "Iterator over table information" + } + } + } + }, + "compute": { + "name": "Clusters", + "description": "The Clusters API allows you to create, start, edit, and terminate clusters. Clusters are managed cloud resources for running Spark workloads.", + "package": "github.com/databricks/databricks-sdk-go/service/compute", + "methods": { + "Create": { + "name": "Create", + "description": "Create a new Spark cluster.", + "signature": "Create(ctx context.Context, request CreateCluster) (*CreateClusterResponse, error)", + "parameters": [ + { + "name": "request", + "type": "CreateCluster", + "description": "Cluster configuration including node types, autoscaling, and Spark version", + "required": true + } + ], + "returns": { + "type": "*CreateClusterResponse", + "description": "Contains cluster_id of the created cluster" + } + }, + "Delete": { + "name": "Delete", + "description": "Permanently deletes a Spark cluster.", + "signature": "Delete(ctx context.Context, request DeleteCluster) error", + "parameters": [ + { + "name": "request", + "type": "DeleteCluster", + "description": "Contains cluster_id to delete", + "required": true + } + ] + }, + "Get": { + "name": "Get", + "description": "Retrieves the information for a cluster given its identifier.", + "signature": "Get(ctx context.Context, request GetClusterRequest) (*ClusterDetails, error)", + "parameters": [ + { + "name": "request", + "type": "GetClusterRequest", + "description": "Contains cluster_id", + "required": true + } + ], + "returns": { + "type": "*ClusterDetails", + "description": "Full cluster configuration and state" + } + }, + "List": { + "name": "List", + "description": "Returns information about all clusters.", + "signature": "List(ctx context.Context, request ListClustersRequest) listing.Iterator[ClusterDetails]", + "parameters": null, + "returns": { + "type": "listing.Iterator[ClusterDetails]", + "description": "Iterator over cluster details" + } + }, + "Start": { + "name": "Start", + "description": "Starts a terminated cluster.", + "signature": "Start(ctx context.Context, request StartCluster) error", + "parameters": [ + { + "name": "request", + "type": "StartCluster", + "description": "Contains cluster_id to start", + "required": true + } + ] + } + } + }, + "jobs": { + "name": "Jobs", + "description": "The Jobs API allows you to create, edit, and delete jobs. 
Jobs are the primary unit of scheduled execution in Databricks.", + "package": "github.com/databricks/databricks-sdk-go/service/jobs", + "methods": { + "Create": { + "name": "Create", + "description": "Create a new job.", + "signature": "Create(ctx context.Context, request CreateJob) (*CreateResponse, error)", + "parameters": [ + { + "name": "request", + "type": "CreateJob", + "description": "Job creation parameters including name, tasks, and schedule", + "required": true + } + ], + "returns": { + "type": "*CreateResponse", + "description": "Contains the job_id of the created job" + }, + "example": "resp, err := w.Jobs.Create(ctx, jobs.CreateJob{\n Name: \"my-job\",\n Tasks: []jobs.Task{{TaskKey: \"main\", ...}},\n})" + }, + "Delete": { + "name": "Delete", + "description": "Deletes a job.", + "signature": "Delete(ctx context.Context, request DeleteJob) error", + "parameters": [ + { + "name": "request", + "type": "DeleteJob", + "description": "Contains job_id to delete", + "required": true + } + ] + }, + "Get": { + "name": "Get", + "description": "Retrieves the details for a single job.", + "signature": "Get(ctx context.Context, request GetJobRequest) (*Job, error)", + "parameters": [ + { + "name": "request", + "type": "GetJobRequest", + "description": "Contains job_id to retrieve", + "required": true + } + ], + "returns": { + "type": "*Job", + "description": "Full job details including settings and run history" + } + }, + "List": { + "name": "List", + "description": "Retrieves a list of jobs.", + "signature": "List(ctx context.Context, request ListJobsRequest) listing.Iterator[BaseJob]", + "parameters": [ + { + "name": "request", + "type": "ListJobsRequest", + "description": "Filter and pagination parameters", + "required": false + } + ], + "returns": { + "type": "listing.Iterator[BaseJob]", + "description": "Iterator over jobs matching the filter" + } + }, + "RunNow": { + "name": "RunNow", + "description": "Triggers an immediate run of a job.", + "signature": "RunNow(ctx context.Context, request RunNow) (*RunNowResponse, error)", + "parameters": [ + { + "name": "request", + "type": "RunNow", + "description": "Job ID and optional parameters for the run", + "required": true + } + ], + "returns": { + "type": "*RunNowResponse", + "description": "Contains run_id of the triggered run" + } + } + } + }, + "pipelines": { + "name": "Pipelines", + "description": "The Delta Live Tables API allows you to create, edit, and run pipelines for data transformation and ingestion.", + "package": "github.com/databricks/databricks-sdk-go/service/pipelines", + "methods": { + "Create": { + "name": "Create", + "description": "Creates a new data processing pipeline.", + "signature": "Create(ctx context.Context, request CreatePipeline) (*CreatePipelineResponse, error)", + "parameters": [ + { + "name": "request", + "type": "CreatePipeline", + "description": "Pipeline configuration including clusters, libraries, and target", + "required": true + } + ], + "returns": { + "type": "*CreatePipelineResponse", + "description": "Contains pipeline_id of the created pipeline" + } + }, + "List": { + "name": "List", + "description": "Lists pipelines defined in the workspace.", + "signature": "List(ctx context.Context, request ListPipelinesRequest) listing.Iterator[PipelineStateInfo]", + "parameters": null, + "returns": { + "type": "listing.Iterator[PipelineStateInfo]", + "description": "Iterator over pipeline info" + } + }, + "StartUpdate": { + "name": "StartUpdate", + "description": "Starts a new update for the pipeline.", + 
"signature": "StartUpdate(ctx context.Context, request StartUpdate) (*StartUpdateResponse, error)", + "parameters": [ + { + "name": "request", + "type": "StartUpdate", + "description": "Pipeline ID and update options", + "required": true + } + ], + "returns": { + "type": "*StartUpdateResponse", + "description": "Contains update_id of the started update" + } + } + } + }, + "sql": { + "name": "SQL", + "description": "Databricks SQL APIs for managing warehouses, queries, and dashboards.", + "package": "github.com/databricks/databricks-sdk-go/service/sql", + "methods": { + "ExecuteStatement": { + "name": "ExecuteStatement", + "description": "Execute a SQL statement and return results.", + "signature": "ExecuteStatement(ctx context.Context, request ExecuteStatementRequest) (*ExecuteStatementResponse, error)", + "parameters": [ + { + "name": "request", + "type": "ExecuteStatementRequest", + "description": "SQL statement, warehouse ID, and execution options", + "required": true + } + ], + "returns": { + "type": "*ExecuteStatementResponse", + "description": "Query results or statement ID for async execution" + } + }, + "ListWarehouses": { + "name": "ListWarehouses", + "description": "Lists all SQL warehouses.", + "signature": "List(ctx context.Context, request ListWarehousesRequest) listing.Iterator[EndpointInfo]", + "parameters": null, + "returns": { + "type": "listing.Iterator[EndpointInfo]", + "description": "Iterator over warehouse information" + } + } + } + }, + "workspace": { + "name": "Workspace", + "description": "Workspace API for managing notebooks, folders, and other workspace objects.", + "package": "github.com/databricks/databricks-sdk-go/service/workspace", + "methods": { + "GetStatus": { + "name": "GetStatus", + "description": "Gets the status of a workspace object.", + "signature": "GetStatus(ctx context.Context, request GetStatusRequest) (*ObjectInfo, error)", + "parameters": [ + { + "name": "request", + "type": "GetStatusRequest", + "description": "Contains path to get status for", + "required": true + } + ], + "returns": { + "type": "*ObjectInfo", + "description": "Object information including type and path" + } + }, + "Import": { + "name": "Import", + "description": "Imports a notebook or file into the workspace.", + "signature": "Import(ctx context.Context, request Import) error", + "parameters": [ + { + "name": "request", + "type": "Import", + "description": "Path, content, and format of the object to import", + "required": true + } + ] + }, + "List": { + "name": "List", + "description": "Lists the contents of a directory.", + "signature": "List(ctx context.Context, request ListWorkspaceRequest) listing.Iterator[ObjectInfo]", + "parameters": [ + { + "name": "request", + "type": "ListWorkspaceRequest", + "description": "Contains path to list", + "required": true + } + ], + "returns": { + "type": "listing.Iterator[ObjectInfo]", + "description": "Iterator over workspace objects" + } + } + } + } + }, + "types": { + "apps.AppDeployment": { + "name": "AppDeployment", + "package": "apps", + "description": "app deployment configuration.", + "fields": { + "command": { + "name": "command", + "type": "any", + "description": "The command with which to run the app. This will override the command specified in the app.yaml file.", + "required": false + }, + "create_time": { + "name": "create_time", + "type": "string (timestamp)", + "description": "The creation time of the deployment. 
Formatted timestamp in ISO 6801.", + "required": false, + "output_only": true + }, + "creator": { + "name": "creator", + "type": "any", + "description": "The email of the user creates the deployment.", + "required": false, + "output_only": true + }, + "deployment_artifacts": { + "name": "deployment_artifacts", + "type": "any", + "description": "The deployment artifacts for an app.", + "required": false, + "output_only": true + }, + "deployment_id": { + "name": "deployment_id", + "type": "string", + "description": "The unique id of the deployment.", + "required": false + }, + "env_vars": { + "name": "env_vars", + "type": "any", + "description": "The environment variables to set in the app runtime environment. This will override the environment variables specified in the app.yaml file.", + "required": false + }, + "git_source": { + "name": "git_source", + "type": "any", + "description": "Git repository to use as the source for the app deployment.", + "required": false + }, + "mode": { + "name": "mode", + "type": "any", + "description": "The mode of which the deployment will manage the source code.", + "required": false + }, + "source_code_path": { + "name": "source_code_path", + "type": "string", + "description": "The workspace file system path of the source code used to create the app deployment. This is different from\n`deployment_artifacts.source_code_path`, which is the path used by the deployed app. The former refers\nto the original source code location of the app in the workspace during deployment creation, whereas\nthe latter provides a system generated stable snapshotted source code path used by the deployment.", + "required": false + }, + "status": { + "name": "status", + "type": "any", + "description": "Status and status message of the deployment", + "required": false, + "output_only": true + }, + "update_time": { + "name": "update_time", + "type": "string (timestamp)", + "description": "The update time of the deployment. 
Formatted timestamp in ISO 6801.", + "required": false, + "output_only": true + } + } + }, + "apps.AppDeploymentArtifacts": { + "name": "AppDeploymentArtifacts", + "package": "apps", + "description": "app deployment artifacts configuration.", + "fields": { + "source_code_path": { + "name": "source_code_path", + "type": "string", + "description": "The snapshotted workspace file system path of the source code loaded by the deployed app.", + "required": false + } + } + }, + "apps.AppDeploymentMode": { + "name": "AppDeploymentMode", + "package": "apps", + "description": "app deployment mode configuration.", + "fields": {} + }, + "apps.AppDeploymentState": { + "name": "AppDeploymentState", + "package": "apps", + "description": "app deployment state configuration.", + "fields": {} + }, + "apps.AppDeploymentStatus": { + "name": "AppDeploymentStatus", + "package": "apps", + "description": "app deployment status configuration.", + "fields": { + "message": { + "name": "message", + "type": "any", + "description": "Message corresponding with the deployment state.", + "required": false, + "output_only": true + }, + "state": { + "name": "state", + "type": "any", + "description": "State of the deployment.", + "required": false, + "output_only": true + } + } + }, + "apps.AppResource": { + "name": "AppResource", + "package": "apps", + "description": "app resource configuration.", + "fields": { + "database": { + "name": "database", + "type": "any", + "description": "", + "required": false + }, + "description": { + "name": "description", + "type": "string", + "description": "Description of the App Resource.", + "required": false + }, + "experiment": { + "name": "experiment", + "type": "any", + "description": "", + "required": false + }, + "genie_space": { + "name": "genie_space", + "type": "any", + "description": "", + "required": false + }, + "job": { + "name": "job", + "type": "any", + "description": "", + "required": false + }, + "name": { + "name": "name", + "type": "any", + "description": "Name of the App Resource.", + "required": false + }, + "secret": { + "name": "secret", + "type": "any", + "description": "", + "required": false + }, + "serving_endpoint": { + "name": "serving_endpoint", + "type": "any", + "description": "", + "required": false + }, + "sql_warehouse": { + "name": "sql_warehouse", + "type": "any", + "description": "", + "required": false + }, + "uc_securable": { + "name": "uc_securable", + "type": "any", + "description": "", + "required": false + } + } + }, + "apps.AppResourceDatabase": { + "name": "AppResourceDatabase", + "package": "apps", + "description": "app resource database configuration.", + "fields": { + "database_name": { + "name": "database_name", + "type": "string", + "description": "", + "required": false + }, + "instance_name": { + "name": "instance_name", + "type": "string", + "description": "", + "required": false + }, + "permission": { + "name": "permission", + "type": "any", + "description": "", + "required": false + } + } + }, + "apps.AppResourceDatabaseDatabasePermission": { + "name": "AppResourceDatabaseDatabasePermission", + "package": "apps", + "description": "app resource database database permission configuration.", + "fields": {} + }, + "apps.AppResourceExperiment": { + "name": "AppResourceExperiment", + "package": "apps", + "description": "app resource experiment configuration.", + "fields": { + "experiment_id": { + "name": "experiment_id", + "type": "string", + "description": "", + "required": false + }, + "permission": { + "name": "permission", + "type": 
"any", + "description": "", + "required": false + } + } + }, + "apps.AppResourceExperimentExperimentPermission": { + "name": "AppResourceExperimentExperimentPermission", + "package": "apps", + "description": "app resource experiment experiment permission configuration.", + "fields": {} + }, + "apps.AppResourceGenieSpace": { + "name": "AppResourceGenieSpace", + "package": "apps", + "description": "app resource genie space configuration.", + "fields": { + "name": { + "name": "name", + "type": "any", + "description": "", + "required": false + }, + "permission": { + "name": "permission", + "type": "any", + "description": "", + "required": false + }, + "space_id": { + "name": "space_id", + "type": "string", + "description": "", + "required": false + } + } + }, + "apps.AppResourceGenieSpaceGenieSpacePermission": { + "name": "AppResourceGenieSpaceGenieSpacePermission", + "package": "apps", + "description": "app resource genie space genie space permission configuration.", + "fields": {} + }, + "apps.AppResourceJob": { + "name": "AppResourceJob", + "package": "apps", + "description": "app resource job configuration.", + "fields": { + "id": { + "name": "id", + "type": "any", + "description": "Id of the job to grant permission on.", + "required": false + }, + "permission": { + "name": "permission", + "type": "any", + "description": "Permissions to grant on the Job. Supported permissions are: \"CAN_MANAGE\", \"IS_OWNER\", \"CAN_MANAGE_RUN\", \"CAN_VIEW\".", + "required": false + } + } + }, + "apps.AppResourceJobJobPermission": { + "name": "AppResourceJobJobPermission", + "package": "apps", + "description": "app resource job job permission configuration.", + "fields": {} + }, + "apps.AppResourceSecret": { + "name": "AppResourceSecret", + "package": "apps", + "description": "app resource secret configuration.", + "fields": { + "key": { + "name": "key", + "type": "any", + "description": "Key of the secret to grant permission on.", + "required": false + }, + "permission": { + "name": "permission", + "type": "any", + "description": "Permission to grant on the secret scope. For secrets, only one permission is allowed. Permission must be one of: \"READ\", \"WRITE\", \"MANAGE\".", + "required": false + }, + "scope": { + "name": "scope", + "type": "any", + "description": "Scope of the secret to grant permission on.", + "required": false + } + } + }, + "apps.AppResourceSecretSecretPermission": { + "name": "AppResourceSecretSecretPermission", + "package": "apps", + "description": "Permission to grant on the secret scope. Supported permissions are: \"READ\", \"WRITE\", \"MANAGE\".", + "fields": {} + }, + "apps.AppResourceServingEndpoint": { + "name": "AppResourceServingEndpoint", + "package": "apps", + "description": "app resource serving endpoint configuration.", + "fields": { + "name": { + "name": "name", + "type": "any", + "description": "Name of the serving endpoint to grant permission on.", + "required": false + }, + "permission": { + "name": "permission", + "type": "any", + "description": "Permission to grant on the serving endpoint. 
Supported permissions are: \"CAN_MANAGE\", \"CAN_QUERY\", \"CAN_VIEW\".", + "required": false + } + } + }, + "apps.AppResourceServingEndpointServingEndpointPermission": { + "name": "AppResourceServingEndpointServingEndpointPermission", + "package": "apps", + "description": "app resource serving endpoint serving endpoint permission configuration.", + "fields": {} + }, + "apps.AppResourceSqlWarehouse": { + "name": "AppResourceSqlWarehouse", + "package": "apps", + "description": "app resource sql warehouse configuration.", + "fields": { + "id": { + "name": "id", + "type": "any", + "description": "Id of the SQL warehouse to grant permission on.", + "required": false + }, + "permission": { + "name": "permission", + "type": "any", + "description": "Permission to grant on the SQL warehouse. Supported permissions are: \"CAN_MANAGE\", \"CAN_USE\", \"IS_OWNER\".", + "required": false + } + } + }, + "apps.AppResourceSqlWarehouseSqlWarehousePermission": { + "name": "AppResourceSqlWarehouseSqlWarehousePermission", + "package": "apps", + "description": "app resource sql warehouse sql warehouse permission configuration.", + "fields": {} + }, + "apps.AppResourceUcSecurable": { + "name": "AppResourceUcSecurable", + "package": "apps", + "description": "app resource uc securable configuration.", + "fields": { + "permission": { + "name": "permission", + "type": "any", + "description": "", + "required": false + }, + "securable_full_name": { + "name": "securable_full_name", + "type": "string", + "description": "", + "required": false + }, + "securable_type": { + "name": "securable_type", + "type": "any", + "description": "", + "required": false + } + } + }, + "apps.AppResourceUcSecurableUcSecurablePermission": { + "name": "AppResourceUcSecurableUcSecurablePermission", + "package": "apps", + "description": "app resource uc securable uc securable permission configuration.", + "fields": {} + }, + "apps.AppResourceUcSecurableUcSecurableType": { + "name": "AppResourceUcSecurableUcSecurableType", + "package": "apps", + "description": "app resource uc securable uc securable type configuration.", + "fields": {} + }, + "apps.ApplicationState": { + "name": "ApplicationState", + "package": "apps", + "description": "application state configuration.", + "fields": {} + }, + "apps.ApplicationStatus": { + "name": "ApplicationStatus", + "package": "apps", + "description": "application status configuration.", + "fields": { + "message": { + "name": "message", + "type": "any", + "description": "Application status message", + "required": false, + "output_only": true + }, + "state": { + "name": "state", + "type": "any", + "description": "State of the application.", + "required": false, + "output_only": true + } + } + }, + "apps.ComputeSize": { + "name": "ComputeSize", + "package": "apps", + "description": "compute size configuration.", + "fields": {} + }, + "apps.ComputeState": { + "name": "ComputeState", + "package": "apps", + "description": "compute state configuration.", + "fields": {} + }, + "apps.ComputeStatus": { + "name": "ComputeStatus", + "package": "apps", + "description": "compute status configuration.", + "fields": { + "message": { + "name": "message", + "type": "any", + "description": "Compute status message", + "required": false, + "output_only": true + }, + "state": { + "name": "state", + "type": "any", + "description": "State of the app compute.", + "required": false, + "output_only": true + } + } + }, + "apps.EnvVar": { + "name": "EnvVar", + "package": "apps", + "description": "env var configuration.", + "fields": { 
+ "name": { + "name": "name", + "type": "any", + "description": "The name of the environment variable.", + "required": false + }, + "value": { + "name": "value", + "type": "any", + "description": "The value for the environment variable.", + "required": false + }, + "value_from": { + "name": "value_from", + "type": "any", + "description": "The name of an external Databricks resource that contains the value, such as a secret or a database table.", + "required": false + } + } + }, + "apps.GitRepository": { + "name": "GitRepository", + "package": "apps", + "description": "Git repository configuration specifying the location of the repository.", + "fields": { + "provider": { + "name": "provider", + "type": "any", + "description": "Git provider. Case insensitive. Supported values: gitHub, gitHubEnterprise, bitbucketCloud,\nbitbucketServer, azureDevOpsServices, gitLab, gitLabEnterpriseEdition, awsCodeCommit.", + "required": false + }, + "url": { + "name": "url", + "type": "any", + "description": "URL of the Git repository.", + "required": false + } + } + }, + "apps.GitSource": { + "name": "GitSource", + "package": "apps", + "description": "Complete git source specification including repository location and reference.", + "fields": { + "branch": { + "name": "branch", + "type": "any", + "description": "Git branch to checkout.", + "required": false + }, + "commit": { + "name": "commit", + "type": "any", + "description": "Git commit SHA to checkout.", + "required": false + }, + "git_repository": { + "name": "git_repository", + "type": "any", + "description": "Git repository configuration. Populated from the app's git_repository configuration.", + "required": false, + "output_only": true + }, + "resolved_commit": { + "name": "resolved_commit", + "type": "any", + "description": "The resolved commit SHA that was actually used for the deployment. This is populated by the\nsystem after resolving the reference (branch, tag, or commit). If commit is specified\ndirectly, this will match commit. If a branch or tag is specified, this contains the\ncommit SHA that the branch or tag pointed to at deployment time.", + "required": false, + "output_only": true + }, + "source_code_path": { + "name": "source_code_path", + "type": "string", + "description": "Relative path to the app source code within the Git repository. If not specified, the root\nof the repository is used.", + "required": false + }, + "tag": { + "name": "tag", + "type": "any", + "description": "Git tag to checkout.", + "required": false + } + } + }, + "bundle.Alert": { + "name": "Alert", + "package": "resources", + "description": "alert configuration.", + "fields": { + "create_time": { + "name": "create_time", + "type": "string (timestamp)", + "description": "The timestamp indicating when the alert was created.", + "required": false, + "output_only": true + }, + "custom_description": { + "name": "custom_description", + "type": "string", + "description": "Custom description for the alert. support mustache template.", + "required": false + }, + "custom_summary": { + "name": "custom_summary", + "type": "any", + "description": "Custom summary for the alert. 
support mustache template.", + "required": false + }, + "display_name": { + "name": "display_name", + "type": "string", + "description": "The display name of the alert.", + "required": false + }, + "effective_run_as": { + "name": "effective_run_as", + "type": "any", + "description": "The actual identity that will be used to execute the alert.\nThis is an output-only field that shows the resolved run-as identity after applying\npermissions and defaults.", + "required": false, + "output_only": true + }, + "evaluation": { + "name": "evaluation", + "type": "any", + "description": "", + "required": false + }, + "id": { + "name": "id", + "type": "any", + "description": "UUID identifying the alert.", + "required": false, + "output_only": true + }, + "lifecycle_state": { + "name": "lifecycle_state", + "type": "any", + "description": "Indicates whether the query is trashed.", + "required": false, + "output_only": true + }, + "owner_user_name": { + "name": "owner_user_name", + "type": "string", + "description": "The owner's username. This field is set to \"Unavailable\" if the user has been deleted.", + "required": false, + "output_only": true + }, + "parent_path": { + "name": "parent_path", + "type": "string", + "description": "The workspace path of the folder containing the alert. Can only be set on create, and cannot be updated.", + "required": false + }, + "query_text": { + "name": "query_text", + "type": "any", + "description": "Text of the query to be run.", + "required": false + }, + "run_as": { + "name": "run_as", + "type": "any", + "description": "Specifies the identity that will be used to run the alert.\nThis field allows you to configure alerts to run as a specific user or service principal.\n- For user identity: Set `user_name` to the email of an active workspace user. Users can only set this to their own email.\n- For service principal: Set `service_principal_name` to the application ID. Requires the `servicePrincipal/user` role.\nIf not specified, the alert will run as the request user.", + "required": false + }, + "run_as_user_name": { + "name": "run_as_user_name", + "type": "string", + "description": "The run as username or application ID of service principal.\nOn Create and Update, this field can be set to application ID of an active service principal. Setting this field requires the servicePrincipal/user role.\nDeprecated: Use `run_as` field instead. This field will be removed in a future release.", + "required": false, + "deprecated": true + }, + "schedule": { + "name": "schedule", + "type": "any", + "description": "", + "required": false + }, + "update_time": { + "name": "update_time", + "type": "string (timestamp)", + "description": "The timestamp indicating when the alert was updated.", + "required": false, + "output_only": true + }, + "warehouse_id": { + "name": "warehouse_id", + "type": "string", + "description": "ID of the SQL warehouse attached to the alert.", + "required": false + } + } + }, + "bundle.App": { + "name": "App", + "package": "resources", + "description": "app configuration.", + "fields": { + "active_deployment": { + "name": "active_deployment", + "type": "any", + "description": "The active deployment of the app. 
A deployment is considered active when it has been deployed\nto the app compute.", + "required": false, + "output_only": true + }, + "app_status": { + "name": "app_status", + "type": "any", + "description": "", + "required": false, + "output_only": true + }, + "budget_policy_id": { + "name": "budget_policy_id", + "type": "string", + "description": "", + "required": false + }, + "compute_size": { + "name": "compute_size", + "type": "int", + "description": "", + "required": false + }, + "compute_status": { + "name": "compute_status", + "type": "any", + "description": "", + "required": false, + "output_only": true + }, + "create_time": { + "name": "create_time", + "type": "string (timestamp)", + "description": "The creation time of the app. Formatted timestamp in ISO 6801.", + "required": false, + "output_only": true + }, + "creator": { + "name": "creator", + "type": "any", + "description": "The email of the user that created the app.", + "required": false, + "output_only": true + }, + "default_source_code_path": { + "name": "default_source_code_path", + "type": "string", + "description": "The default workspace file system path of the source code from which app deployment are\ncreated. This field tracks the workspace source code path of the last active deployment.", + "required": false, + "output_only": true + }, + "description": { + "name": "description", + "type": "string", + "description": "The description of the app.", + "required": false + }, + "effective_budget_policy_id": { + "name": "effective_budget_policy_id", + "type": "string", + "description": "", + "required": false, + "output_only": true + }, + "effective_usage_policy_id": { + "name": "effective_usage_policy_id", + "type": "string", + "description": "", + "required": false, + "output_only": true + }, + "effective_user_api_scopes": { + "name": "effective_user_api_scopes", + "type": "any", + "description": "The effective api scopes granted to the user access token.", + "required": false, + "output_only": true + }, + "git_repository": { + "name": "git_repository", + "type": "any", + "description": "Git repository configuration for app deployments. When specified, deployments can\nreference code from this repository by providing only the git reference (branch, tag, or commit).", + "required": false + }, + "id": { + "name": "id", + "type": "any", + "description": "The unique identifier of the app.", + "required": false, + "output_only": true + }, + "name": { + "name": "name", + "type": "any", + "description": "The name of the app. The name must contain only lowercase alphanumeric characters and hyphens.\nIt must be unique within the workspace.", + "required": false + }, + "oauth2_app_client_id": { + "name": "oauth2_app_client_id", + "type": "string", + "description": "", + "required": false, + "output_only": true + }, + "oauth2_app_integration_id": { + "name": "oauth2_app_integration_id", + "type": "string", + "description": "", + "required": false, + "output_only": true + }, + "pending_deployment": { + "name": "pending_deployment", + "type": "any", + "description": "The pending deployment of the app. 
A deployment is considered pending when it is being prepared\nfor deployment to the app compute.", + "required": false, + "output_only": true + }, + "resources": { + "name": "resources", + "type": "any", + "description": "Resources for the app.", + "required": false + }, + "service_principal_client_id": { + "name": "service_principal_client_id", + "type": "string", + "description": "", + "required": false, + "output_only": true + }, + "service_principal_id": { + "name": "service_principal_id", + "type": "string", + "description": "", + "required": false, + "output_only": true + }, + "service_principal_name": { + "name": "service_principal_name", + "type": "string", + "description": "", + "required": false, + "output_only": true + }, + "update_time": { + "name": "update_time", + "type": "string (timestamp)", + "description": "The update time of the app. Formatted timestamp in ISO 6801.", + "required": false, + "output_only": true + }, + "updater": { + "name": "updater", + "type": "any", + "description": "The email of the user that last updated the app.", + "required": false, + "output_only": true + }, + "url": { + "name": "url", + "type": "any", + "description": "The URL of the app once it is deployed.", + "required": false, + "output_only": true + }, + "usage_policy_id": { + "name": "usage_policy_id", + "type": "string", + "description": "", + "required": false + }, + "user_api_scopes": { + "name": "user_api_scopes", + "type": "any", + "description": "", + "required": false + } + } + }, + "bundle.Cluster": { + "name": "Cluster", + "package": "resources", + "description": "Contains a snapshot of the latest user specified settings that were used to create/edit the cluster.", + "fields": { + "apply_policy_default_values": { + "name": "apply_policy_default_values", + "type": "any", + "description": "When set to true, fixed and default values from the policy will be used for fields that are omitted. When set to false, only fixed values from the policy will be applied.", + "required": false + }, + "autoscale": { + "name": "autoscale", + "type": "any", + "description": "Parameters needed in order to automatically scale clusters up and down based on load.\nNote: autoscaling works best with DB runtime versions 3.0 or later.", + "required": false + }, + "autotermination_minutes": { + "name": "autotermination_minutes", + "type": "int", + "description": "Automatically terminates the cluster after it is inactive for this time in minutes. If not set,\nthis cluster will not be automatically terminated. If specified, the threshold must be between\n10 and 10000 minutes.\nUsers can also set this value to 0 to explicitly disable automatic termination.", + "required": false + }, + "aws_attributes": { + "name": "aws_attributes", + "type": "any", + "description": "Attributes related to clusters running on Amazon Web Services.\nIf not specified at cluster creation, a set of default values will be used.", + "required": false + }, + "azure_attributes": { + "name": "azure_attributes", + "type": "any", + "description": "Attributes related to clusters running on Microsoft Azure.\nIf not specified at cluster creation, a set of default values will be used.", + "required": false + }, + "cluster_log_conf": { + "name": "cluster_log_conf", + "type": "any", + "description": "The configuration for delivering spark logs to a long-term storage destination.\nThree kinds of destinations (DBFS, S3 and Unity Catalog volumes) are supported. Only one destination can be specified\nfor one cluster. 
If the conf is given, the logs will be delivered to the destination every\n`5 mins`. The destination of driver logs is `$destination/$clusterId/driver`, while\nthe destination of executor logs is `$destination/$clusterId/executor`.", + "required": false + }, + "cluster_name": { + "name": "cluster_name", + "type": "string", + "description": "Cluster name requested by the user. This doesn't have to be unique.\nIf not specified at creation, the cluster name will be an empty string.\nFor job clusters, the cluster name is automatically set based on the job and job run IDs.", + "required": false + }, + "custom_tags": { + "name": "custom_tags", + "type": "map[string]string", + "description": "Additional tags for cluster resources. Databricks will tag all cluster resources (e.g., AWS\ninstances and EBS volumes) with these tags in addition to `default_tags`. Notes:\n\n- Currently, Databricks allows at most 45 custom tags\n\n- Clusters can only reuse cloud resources if the resources' tags are a subset of the cluster tags", + "required": false + }, + "data_security_mode": { + "name": "data_security_mode", + "type": "any", + "description": "Data security mode decides what data governance model to use when accessing data\nfrom a cluster.\n\nThe following modes can only be used when `kind = CLASSIC_PREVIEW`.\n* `DATA_SECURITY_MODE_AUTO`: Databricks will choose the most appropriate access mode depending on your compute configuration.\n* `DATA_SECURITY_MODE_STANDARD`: Alias for `USER_ISOLATION`.\n* `DATA_SECURITY_MODE_DEDICATED`: Alias for `SINGLE_USER`.\n\nThe following modes can be used regardless of `kind`.\n* `NONE`: No security isolation for multiple users sharing the cluster. Data governance features are not available in this mode.\n* `SINGLE_USER`: A secure cluster that can only be exclusively used by a single user specified in `single_user_name`. Most programming languages, cluster features and data governance features are available in this mode.\n* `USER_ISOLATION`: A secure cluster that can be shared by multiple users. Cluster users are fully isolated so that they cannot see each other's data and credentials. Most data governance features are supported in this mode. 
But programming languages and cluster features might be limited.\n\nThe following modes are deprecated starting with Databricks Runtime 15.0 and\nwill be removed for future Databricks Runtime versions:\n\n* `LEGACY_TABLE_ACL`: This mode is for users migrating from legacy Table ACL clusters.\n* `LEGACY_PASSTHROUGH`: This mode is for users migrating from legacy Passthrough on high concurrency clusters.\n* `LEGACY_SINGLE_USER`: This mode is for users migrating from legacy Passthrough on standard clusters.\n* `LEGACY_SINGLE_USER_STANDARD`: This mode provides a way that doesn’t have UC nor passthrough enabled.", + "required": false + }, + "docker_image": { + "name": "docker_image", + "type": "any", + "description": "Custom docker image BYOC", + "required": false + }, + "driver_instance_pool_id": { + "name": "driver_instance_pool_id", + "type": "string", + "description": "The optional ID of the instance pool for the driver of the cluster belongs.\nThe pool cluster uses the instance pool with id (instance_pool_id) if the driver pool is not\nassigned.", + "required": false + }, + "driver_node_type_id": { + "name": "driver_node_type_id", + "type": "string", + "description": "The node type of the Spark driver.\nNote that this field is optional; if unset, the driver node type will be set as the same value\nas `node_type_id` defined above.\n\nThis field, along with node_type_id, should not be set if virtual_cluster_size is set.\nIf both driver_node_type_id, node_type_id, and virtual_cluster_size are specified, driver_node_type_id and node_type_id take precedence.", + "required": false + }, + "enable_elastic_disk": { + "name": "enable_elastic_disk", + "type": "bool", + "description": "Autoscaling Local Storage: when enabled, this cluster will dynamically acquire additional disk\nspace when its Spark workers are running low on disk space.", + "required": false + }, + "enable_local_disk_encryption": { + "name": "enable_local_disk_encryption", + "type": "bool", + "description": "Whether to enable LUKS on cluster VMs' local disks", + "required": false + }, + "gcp_attributes": { + "name": "gcp_attributes", + "type": "any", + "description": "Attributes related to clusters running on Google Cloud Platform.\nIf not specified at cluster creation, a set of default values will be used.", + "required": false + }, + "init_scripts": { + "name": "init_scripts", + "type": "any", + "description": "The configuration for storing init scripts. 
Any number of destinations can be specified.\nThe scripts are executed sequentially in the order provided.\nIf `cluster_log_conf` is specified, init script logs are sent to `\u003cdestination\u003e/\u003ccluster-ID\u003e/init_scripts`.", + "required": false + }, + "instance_pool_id": { + "name": "instance_pool_id", + "type": "string", + "description": "The optional ID of the instance pool to which the cluster belongs.", + "required": false + }, + "is_single_node": { + "name": "is_single_node", + "type": "bool", + "description": "This field can only be used when `kind = CLASSIC_PREVIEW`.\n\nWhen set to true, Databricks will automatically set single node related `custom_tags`, `spark_conf`, and `num_workers`", + "required": false + }, + "kind": { + "name": "kind", + "type": "any", + "description": "The kind of compute described by this compute specification.\n\nDepending on `kind`, different validations and default values will be applied.\n\nClusters with `kind = CLASSIC_PREVIEW` support the following fields, whereas clusters with no specified `kind` do not.\n* [is_single_node](/api/workspace/clusters/create#is_single_node)\n* [use_ml_runtime](/api/workspace/clusters/create#use_ml_runtime)\n* [data_security_mode](/api/workspace/clusters/create#data_security_mode) set to `DATA_SECURITY_MODE_AUTO`, `DATA_SECURITY_MODE_DEDICATED`, or `DATA_SECURITY_MODE_STANDARD`\n\nBy using the [simple form](https://docs.databricks.com/compute/simple-form.html), your clusters are automatically using `kind = CLASSIC_PREVIEW`.", + "required": false + }, + "node_type_id": { + "name": "node_type_id", + "type": "string", + "description": "This field encodes, through a single value, the resources available to each of\nthe Spark nodes in this cluster. For example, the Spark nodes can be provisioned\nand optimized for memory or compute intensive workloads. A list of available node\ntypes can be retrieved by using the :method:clusters/listNodeTypes API call.", + "required": false + }, + "num_workers": { + "name": "num_workers", + "type": "any", + "description": "Number of worker nodes that this cluster should have. A cluster has one Spark Driver\nand `num_workers` Executors for a total of `num_workers` + 1 Spark nodes.\n\nNote: When reading the properties of a cluster, this field reflects the desired number\nof workers rather than the actual current number of workers. For instance, if a cluster\nis resized from 5 to 10 workers, this field will immediately be updated to reflect\nthe target size of 10 workers, whereas the workers listed in `spark_info` will gradually\nincrease from 5 to 10 as the new nodes are provisioned.", + "required": false + }, + "policy_id": { + "name": "policy_id", + "type": "string", + "description": "The ID of the cluster policy used to create the cluster if applicable.", + "required": false + }, + "remote_disk_throughput": { + "name": "remote_disk_throughput", + "type": "any", + "description": "If set, what the configurable throughput (in Mb/s) for the remote disk is. 
Currently only supported for GCP HYPERDISK_BALANCED disks.", + "required": false + }, + "runtime_engine": { + "name": "runtime_engine", + "type": "any", + "description": "Determines the cluster's runtime engine, either standard or Photon.\n\nThis field is not compatible with legacy `spark_version` values that contain `-photon-`.\nRemove `-photon-` from the `spark_version` and set `runtime_engine` to `PHOTON`.\n\nIf left unspecified, the runtime engine defaults to standard unless the spark_version\ncontains -photon-, in which case Photon will be used.", + "required": false + }, + "single_user_name": { + "name": "single_user_name", + "type": "string", + "description": "Single user name if data_security_mode is `SINGLE_USER`", + "required": false + }, + "spark_conf": { + "name": "spark_conf", + "type": "any", + "description": "An object containing a set of optional, user-specified Spark configuration key-value pairs.\nUsers can also pass in a string of extra JVM options to the driver and the executors via\n`spark.driver.extraJavaOptions` and `spark.executor.extraJavaOptions` respectively.", + "required": false + }, + "spark_env_vars": { + "name": "spark_env_vars", + "type": "any", + "description": "An object containing a set of optional, user-specified environment variable key-value pairs.\nPlease note that key-value pair of the form (X,Y) will be exported as is (i.e.,\n`export X='Y'`) while launching the driver and workers.\n\nIn order to specify an additional set of `SPARK_DAEMON_JAVA_OPTS`, we recommend appending\nthem to `$SPARK_DAEMON_JAVA_OPTS` as shown in the example below. This ensures that all\ndefault databricks managed environmental variables are included as well.\n\nExample Spark environment variables:\n`{\"SPARK_WORKER_MEMORY\": \"28000m\", \"SPARK_LOCAL_DIRS\": \"/local_disk0\"}` or\n`{\"SPARK_DAEMON_JAVA_OPTS\": \"$SPARK_DAEMON_JAVA_OPTS -Dspark.shuffle.service.enabled=true\"}`", + "required": false + }, + "spark_version": { + "name": "spark_version", + "type": "any", + "description": "The Spark version of the cluster, e.g. `3.3.x-scala2.11`.\nA list of available Spark versions can be retrieved by using\nthe :method:clusters/sparkVersions API call.", + "required": false + }, + "ssh_public_keys": { + "name": "ssh_public_keys", + "type": "any", + "description": "SSH public key contents that will be added to each Spark node in this cluster. The\ncorresponding private keys can be used to login with the user name `ubuntu` on port `2200`.\nUp to 10 keys can be specified.", + "required": false + }, + "total_initial_remote_disk_size": { + "name": "total_initial_remote_disk_size", + "type": "int", + "description": "If set, what the total initial volume size (in GB) of the remote disks should be. 
Currently only supported for GCP HYPERDISK_BALANCED disks.", + "required": false + }, + "use_ml_runtime": { + "name": "use_ml_runtime", + "type": "any", + "description": "This field can only be used when `kind = CLASSIC_PREVIEW`.\n\n`effective_spark_version` is determined by `spark_version` (DBR release), this field `use_ml_runtime`, and whether `node_type_id` is gpu node or not.", + "required": false + }, + "workload_type": { + "name": "workload_type", + "type": "any", + "description": "Cluster Attributes showing for clusters workload types.", + "required": false + } + } + }, + "bundle.DatabaseCatalog": { + "name": "DatabaseCatalog", + "package": "resources", + "description": "database catalog configuration.", + "fields": { + "create_database_if_not_exists": { + "name": "create_database_if_not_exists", + "type": "any", + "description": "", + "required": false + }, + "database_instance_name": { + "name": "database_instance_name", + "type": "string", + "description": "The name of the DatabaseInstance housing the database.", + "required": false + }, + "database_name": { + "name": "database_name", + "type": "string", + "description": "The name of the database (in a instance) associated with the catalog.", + "required": false + }, + "name": { + "name": "name", + "type": "any", + "description": "The name of the catalog in UC.", + "required": false + }, + "uid": { + "name": "uid", + "type": "any", + "description": "", + "required": false, + "output_only": true + } + } + }, + "bundle.DatabaseInstance": { + "name": "DatabaseInstance", + "package": "resources", + "description": "A DatabaseInstance represents a logical Postgres instance, comprised of both compute and storage.", + "fields": { + "capacity": { + "name": "capacity", + "type": "any", + "description": "The sku of the instance. Valid values are \"CU_1\", \"CU_2\", \"CU_4\", \"CU_8\".", + "required": false + }, + "child_instance_refs": { + "name": "child_instance_refs", + "type": "any", + "description": "The refs of the child instances. This is only available if the instance is\nparent instance.", + "required": false, + "output_only": true + }, + "creation_time": { + "name": "creation_time", + "type": "string (timestamp)", + "description": "The timestamp when the instance was created.", + "required": false, + "output_only": true + }, + "creator": { + "name": "creator", + "type": "any", + "description": "The email of the creator of the instance.", + "required": false, + "output_only": true + }, + "custom_tags": { + "name": "custom_tags", + "type": "map[string]string", + "description": "Custom tags associated with the instance. This field is only included on create and update responses.", + "required": false + }, + "effective_capacity": { + "name": "effective_capacity", + "type": "any", + "description": "Deprecated. 
The sku of the instance; this field will always match the value of capacity.", + "required": false, + "output_only": true, + "deprecated": true + }, + "effective_custom_tags": { + "name": "effective_custom_tags", + "type": "map[string]string", + "description": "The recorded custom tags associated with the instance.", + "required": false, + "output_only": true + }, + "effective_enable_pg_native_login": { + "name": "effective_enable_pg_native_login", + "type": "any", + "description": "Whether the instance has PG native password login enabled.", + "required": false, + "output_only": true + }, + "effective_enable_readable_secondaries": { + "name": "effective_enable_readable_secondaries", + "type": "any", + "description": "Whether secondaries serving read-only traffic are enabled. Defaults to false.", + "required": false, + "output_only": true + }, + "effective_node_count": { + "name": "effective_node_count", + "type": "int", + "description": "The number of nodes in the instance, composed of 1 primary and 0 or more secondaries. Defaults to\n1 primary and 0 secondaries.", + "required": false, + "output_only": true + }, + "effective_retention_window_in_days": { + "name": "effective_retention_window_in_days", + "type": "any", + "description": "The retention window for the instance. This is the time window in days\nfor which the historical data is retained.", + "required": false, + "output_only": true + }, + "effective_stopped": { + "name": "effective_stopped", + "type": "any", + "description": "Whether the instance is stopped.", + "required": false, + "output_only": true + }, + "effective_usage_policy_id": { + "name": "effective_usage_policy_id", + "type": "string", + "description": "The policy that is applied to the instance.", + "required": false, + "output_only": true + }, + "enable_pg_native_login": { + "name": "enable_pg_native_login", + "type": "bool", + "description": "Whether to enable PG native password login on the instance. Defaults to false.", + "required": false + }, + "enable_readable_secondaries": { + "name": "enable_readable_secondaries", + "type": "bool", + "description": "Whether to enable secondaries to serve read-only traffic. Defaults to false.", + "required": false + }, + "name": { + "name": "name", + "type": "any", + "description": "The name of the instance. This is the unique identifier for the instance.", + "required": false + }, + "node_count": { + "name": "node_count", + "type": "int", + "description": "The number of nodes in the instance, composed of 1 primary and 0 or more secondaries. Defaults to\n1 primary and 0 secondaries. This field is input only, see effective_node_count for the output.", + "required": false + }, + "parent_instance_ref": { + "name": "parent_instance_ref", + "type": "any", + "description": "The ref of the parent instance. This is only available if the instance is\nchild instance.\nInput: For specifying the parent instance to create a child instance. Optional.\nOutput: Only populated if provided as input to create a child instance.", + "required": false + }, + "pg_version": { + "name": "pg_version", + "type": "any", + "description": "The version of Postgres running on the instance.", + "required": false, + "output_only": true + }, + "read_only_dns": { + "name": "read_only_dns", + "type": "any", + "description": "The DNS endpoint to connect to the instance for read only access. 
This is only available if\nenable_readable_secondaries is true.", + "required": false, + "output_only": true + }, + "read_write_dns": { + "name": "read_write_dns", + "type": "any", + "description": "The DNS endpoint to connect to the instance for read+write access.", + "required": false, + "output_only": true + }, + "retention_window_in_days": { + "name": "retention_window_in_days", + "type": "any", + "description": "The retention window for the instance. This is the time window in days\nfor which the historical data is retained. The default value is 7 days.\nValid values are 2 to 35 days.", + "required": false + }, + "state": { + "name": "state", + "type": "any", + "description": "The current state of the instance.", + "required": false, + "output_only": true + }, + "stopped": { + "name": "stopped", + "type": "any", + "description": "Whether to stop the instance. An input only param, see effective_stopped for the output.", + "required": false + }, + "uid": { + "name": "uid", + "type": "any", + "description": "An immutable UUID identifier for the instance.", + "required": false, + "output_only": true + }, + "usage_policy_id": { + "name": "usage_policy_id", + "type": "string", + "description": "The desired usage policy to associate with the instance.", + "required": false + } + } + }, + "bundle.Job": { + "name": "Job", + "package": "resources", + "description": "job configuration.", + "fields": { + "budget_policy_id": { + "name": "budget_policy_id", + "type": "string", + "description": "The id of the user specified budget policy to use for this job.\nIf not specified, a default budget policy may be applied when creating or modifying the job.\nSee `effective_budget_policy_id` for the budget policy used by this workload.", + "required": false + }, + "continuous": { + "name": "continuous", + "type": "any", + "description": "An optional continuous property for this job. The continuous property will ensure that there is always one run executing. Only one of `schedule` and `continuous` can be used.", + "required": false + }, + "deployment": { + "name": "deployment", + "type": "any", + "description": "Deployment information for jobs managed by external sources.", + "required": false + }, + "description": { + "name": "description", + "type": "string", + "description": "An optional description for the job. The maximum length is 27700 characters in UTF-8 encoding.", + "required": false + }, + "edit_mode": { + "name": "edit_mode", + "type": "any", + "description": "Edit mode of the job.\n\n* `UI_LOCKED`: The job is in a locked UI state and cannot be modified.\n* `EDITABLE`: The job is in an editable state and can be modified.", + "required": false + }, + "email_notifications": { + "name": "email_notifications", + "type": "any", + "description": "An optional set of email addresses that is notified when runs of this job begin or complete as well as when this job is deleted.", + "required": false + }, + "environments": { + "name": "environments", + "type": "any", + "description": "A list of task execution environment specifications that can be referenced by serverless tasks of this job.\nFor serverless notebook tasks, if the environment_key is not specified, the notebook environment will be used if present. 
If a jobs environment is specified, it will override the notebook environment.\nFor other serverless tasks, the task environment is required to be specified using environment_key in the task settings.", + "required": false + }, + "format": { + "name": "format", + "type": "any", + "description": "Used to tell what is the format of the job. This field is ignored in Create/Update/Reset calls. When using the Jobs API 2.1 this value is always set to `\"MULTI_TASK\"`.", + "required": false, + "deprecated": true + }, + "git_source": { + "name": "git_source", + "type": "any", + "description": "An optional specification for a remote Git repository containing the source code used by tasks. Version-controlled source code is supported by notebook, dbt, Python script, and SQL File tasks.\n\nIf `git_source` is set, these tasks retrieve the file from the remote repository by default. However, this behavior can be overridden by setting `source` to `WORKSPACE` on the task.\n\nNote: dbt and SQL File tasks support only version-controlled sources. If dbt or SQL File tasks are used, `git_source` must be defined on the job.", + "required": false + }, + "health": { + "name": "health", + "type": "any", + "description": "An optional set of health rules that can be defined for this job.", + "required": false + }, + "job_clusters": { + "name": "job_clusters", + "type": "any", + "description": "A list of job cluster specifications that can be shared and reused by tasks of this job. Libraries cannot be declared in a shared job cluster. You must declare dependent libraries in task settings.", + "required": false + }, + "max_concurrent_runs": { + "name": "max_concurrent_runs", + "type": "any", + "description": "An optional maximum allowed number of concurrent runs of the job.\nSet this value if you want to be able to execute multiple runs of the same job concurrently.\nThis is useful for example if you trigger your job on a frequent schedule and want to allow consecutive runs to overlap with each other, or if you want to trigger multiple runs which differ by their input parameters.\nThis setting affects only new runs. For example, suppose the job’s concurrency is 4 and there are 4 concurrent active runs. Then setting the concurrency to 3 won’t kill any of the active runs.\nHowever, from then on, new runs are skipped unless there are fewer than 3 active runs.\nThis value cannot exceed 1000. Setting this value to `0` causes all new runs to be skipped.", + "required": false + }, + "name": { + "name": "name", + "type": "any", + "description": "An optional name for the job. The maximum length is 4096 bytes in UTF-8 encoding.", + "required": false + }, + "notification_settings": { + "name": "notification_settings", + "type": "any", + "description": "Optional notification settings that are used when sending notifications to each of the `email_notifications` and `webhook_notifications` for this job.", + "required": false + }, + "parameters": { + "name": "parameters", + "type": "any", + "description": "Job-level parameter definitions", + "required": false + }, + "performance_target": { + "name": "performance_target", + "type": "any", + "description": "The performance mode on a serverless job. 
This field determines the level of compute performance or cost-efficiency for the run.\nThe performance target does not apply to tasks that run on Serverless GPU compute.\n\n* `STANDARD`: Enables cost-efficient execution of serverless workloads.\n* `PERFORMANCE_OPTIMIZED`: Prioritizes fast startup and execution times through rapid scaling and optimized cluster performance.", + "required": false + }, + "queue": { + "name": "queue", + "type": "any", + "description": "The queue settings of the job.", + "required": false + }, + "run_as": { + "name": "run_as", + "type": "any", + "description": "The user or service principal that the job runs as, if specified in the request.\nThis field indicates the explicit configuration of `run_as` for the job.\nTo find the value in all cases, explicit or implicit, use `run_as_user_name`.", + "required": false + }, + "schedule": { + "name": "schedule", + "type": "any", + "description": "An optional periodic schedule for this job. The default behavior is that the job only runs when triggered by clicking “Run Now” in the Jobs UI or sending an API request to `runNow`.", + "required": false + }, + "tags": { + "name": "tags", + "type": "map[string]string", + "description": "A map of tags associated with the job. These are forwarded to the cluster as cluster tags for jobs clusters, and are subject to the same limitations as cluster tags. A maximum of 25 tags can be added to the job.", + "required": false + }, + "tasks": { + "name": "tasks", + "type": "any", + "description": "A list of task specifications to be executed by this job.\nIt supports up to 1000 elements in write endpoints (:method:jobs/create, :method:jobs/reset, :method:jobs/update, :method:jobs/submit).\nRead endpoints return only 100 tasks. If more than 100 tasks are available, you can paginate through them using :method:jobs/get. Use the `next_page_token` field at the object root to determine if more results are available.", + "required": false + }, + "timeout_seconds": { + "name": "timeout_seconds", + "type": "int", + "description": "An optional timeout applied to each run of this job. A value of `0` means no timeout.", + "required": false + }, + "trigger": { + "name": "trigger", + "type": "any", + "description": "A configuration to trigger a run when certain conditions are met. 
The default behavior is that the job runs only when triggered by clicking “Run Now” in the Jobs UI or sending an API request to `runNow`.", + "required": false + }, + "usage_policy_id": { + "name": "usage_policy_id", + "type": "string", + "description": "The id of the user specified usage policy to use for this job.\nIf not specified, a default usage policy may be applied when creating or modifying the job.\nSee `effective_usage_policy_id` for the usage policy used by this workload.", + "required": false + }, + "webhook_notifications": { + "name": "webhook_notifications", + "type": "any", + "description": "A collection of system notification IDs to notify when runs of this job begin or complete.", + "required": false + } + } + }, + "bundle.MlflowExperiment": { + "name": "MlflowExperiment", + "package": "resources", + "description": "mlflow experiment configuration.", + "fields": { + "artifact_location": { + "name": "artifact_location", + "type": "any", + "description": "Location where all artifacts for the experiment are stored.\nIf not provided, the remote server will select an appropriate default.", + "required": false + }, + "name": { + "name": "name", + "type": "any", + "description": "Experiment name.", + "required": false + }, + "tags": { + "name": "tags", + "type": "map[string]string", + "description": "A collection of tags to set on the experiment. Maximum tag size and number of tags per request\ndepends on the storage backend. All storage backends are guaranteed to support tag keys up\nto 250 bytes in size and tag values up to 5000 bytes in size. All storage backends are also\nguaranteed to support up to 20 tags per request.", + "required": false + } + } + }, + "bundle.MlflowModel": { + "name": "MlflowModel", + "package": "resources", + "description": "mlflow model configuration.", + "fields": { + "description": { + "name": "description", + "type": "string", + "description": "Optional description for registered model.", + "required": false + }, + "name": { + "name": "name", + "type": "any", + "description": "Register models under this name", + "required": false + }, + "tags": { + "name": "tags", + "type": "map[string]string", + "description": "Additional metadata for registered model.", + "required": false + } + } + }, + "bundle.ModelServingEndpoint": { + "name": "ModelServingEndpoint", + "package": "resources", + "description": "model serving endpoint configuration.", + "fields": { + "ai_gateway": { + "name": "ai_gateway", + "type": "any", + "description": "The AI Gateway configuration for the serving endpoint. NOTE: External model, provisioned throughput, and pay-per-token endpoints are fully supported; agent endpoints currently only support inference tables.", + "required": false + }, + "budget_policy_id": { + "name": "budget_policy_id", + "type": "string", + "description": "The budget policy to be applied to the serving endpoint.", + "required": false + }, + "config": { + "name": "config", + "type": "any", + "description": "The core config of the serving endpoint.", + "required": false + }, + "description": { + "name": "description", + "type": "string", + "description": "", + "required": false + }, + "email_notifications": { + "name": "email_notifications", + "type": "any", + "description": "Email notification settings.", + "required": false + }, + "name": { + "name": "name", + "type": "any", + "description": "The name of the serving endpoint. 
This field is required and must be unique across a Databricks workspace.\nAn endpoint name can consist of alphanumeric characters, dashes, and underscores.", + "required": false + }, + "rate_limits": { + "name": "rate_limits", + "type": "any", + "description": "Rate limits to be applied to the serving endpoint. NOTE: this field is deprecated, please use AI Gateway to manage rate limits.", + "required": false, + "deprecated": true + }, + "route_optimized": { + "name": "route_optimized", + "type": "any", + "description": "Enable route optimization for the serving endpoint.", + "required": false + }, + "tags": { + "name": "tags", + "type": "map[string]string", + "description": "Tags to be attached to the serving endpoint and automatically propagated to billing logs.", + "required": false + } + } + }, + "bundle.Pipeline": { + "name": "Pipeline", + "package": "resources", + "description": "pipeline configuration.", + "fields": { + "allow_duplicate_names": { + "name": "allow_duplicate_names", + "type": "any", + "description": "If false, deployment will fail if name conflicts with that of another pipeline.", + "required": false + }, + "budget_policy_id": { + "name": "budget_policy_id", + "type": "string", + "description": "Budget policy of this pipeline.", + "required": false + }, + "catalog": { + "name": "catalog", + "type": "any", + "description": "A catalog in Unity Catalog to publish data from this pipeline to. If `target` is specified, tables in this pipeline are published to a `target` schema inside `catalog` (for example, `catalog`.`target`.`table`). If `target` is not specified, no data is published to Unity Catalog.", + "required": false + }, + "channel": { + "name": "channel", + "type": "any", + "description": "DLT Release Channel that specifies which version to use.", + "required": false + }, + "clusters": { + "name": "clusters", + "type": "any", + "description": "Cluster settings for this pipeline deployment.", + "required": false + }, + "configuration": { + "name": "configuration", + "type": "any", + "description": "String-String configuration for this pipeline execution.", + "required": false + }, + "continuous": { + "name": "continuous", + "type": "any", + "description": "Whether the pipeline is continuous or triggered. This replaces `trigger`.", + "required": false + }, + "deployment": { + "name": "deployment", + "type": "any", + "description": "Deployment type of this pipeline.", + "required": false + }, + "development": { + "name": "development", + "type": "any", + "description": "Whether the pipeline is in Development mode. 
Defaults to false.", + "required": false + }, + "dry_run": { + "name": "dry_run", + "type": "any", + "description": "", + "required": false + }, + "edition": { + "name": "edition", + "type": "any", + "description": "Pipeline product edition.", + "required": false + }, + "environment": { + "name": "environment", + "type": "any", + "description": "Environment specification for this pipeline used to install dependencies.", + "required": false + }, + "event_log": { + "name": "event_log", + "type": "any", + "description": "Event log configuration for this pipeline", + "required": false + }, + "filters": { + "name": "filters", + "type": "any", + "description": "Filters on which Pipeline packages to include in the deployed graph.", + "required": false + }, + "gateway_definition": { + "name": "gateway_definition", + "type": "any", + "description": "The definition of a gateway pipeline to support change data capture.", + "required": false + }, + "id": { + "name": "id", + "type": "any", + "description": "Unique identifier for this pipeline.", + "required": false + }, + "ingestion_definition": { + "name": "ingestion_definition", + "type": "any", + "description": "The configuration for a managed ingestion pipeline. These settings cannot be used with the 'libraries', 'schema', 'target', or 'catalog' settings.", + "required": false + }, + "libraries": { + "name": "libraries", + "type": "any", + "description": "Libraries or code needed by this deployment.", + "required": false + }, + "name": { + "name": "name", + "type": "any", + "description": "Friendly identifier for this pipeline.", + "required": false + }, + "notifications": { + "name": "notifications", + "type": "any", + "description": "List of notification settings for this pipeline.", + "required": false + }, + "photon": { + "name": "photon", + "type": "any", + "description": "Whether Photon is enabled for this pipeline.", + "required": false + }, + "restart_window": { + "name": "restart_window", + "type": "any", + "description": "Restart window of this pipeline.", + "required": false + }, + "root_path": { + "name": "root_path", + "type": "string", + "description": "Root path for this pipeline.\nThis is used as the root directory when editing the pipeline in the Databricks user interface and it is\nadded to sys.path when executing Python sources during pipeline execution.", + "required": false + }, + "run_as": { + "name": "run_as", + "type": "any", + "description": "Write-only setting, available only in Create/Update calls. Specifies the user or service principal that the pipeline runs as. If not specified, the pipeline runs as the user who created the pipeline.\n\nOnly `user_name` or `service_principal_name` can be specified. 
If both are specified, an error is thrown.", + "required": false + }, + "schema": { + "name": "schema", + "type": "any", + "description": "The default schema (database) where tables are read from or published to.", + "required": false + }, + "serverless": { + "name": "serverless", + "type": "any", + "description": "Whether serverless compute is enabled for this pipeline.", + "required": false + }, + "storage": { + "name": "storage", + "type": "any", + "description": "DBFS root directory for storing checkpoints and tables.", + "required": false + }, + "tags": { + "name": "tags", + "type": "map[string]string", + "description": "A map of tags associated with the pipeline.\nThese are forwarded to the cluster as cluster tags, and are therefore subject to the same limitations.\nA maximum of 25 tags can be added to the pipeline.", + "required": false + }, + "target": { + "name": "target", + "type": "any", + "description": "Target schema (database) to add tables in this pipeline to. Exactly one of `schema` or `target` must be specified. To publish to Unity Catalog, also specify `catalog`. This legacy field is deprecated for pipeline creation in favor of the `schema` field.", + "required": false, + "deprecated": true + }, + "trigger": { + "name": "trigger", + "type": "any", + "description": "Which pipeline trigger to use. Deprecated: Use `continuous` instead.", + "required": false, + "deprecated": true + }, + "usage_policy_id": { + "name": "usage_policy_id", + "type": "string", + "description": "Usage policy of this pipeline.", + "required": false + } + } + }, + "bundle.QualityMonitor": { + "name": "QualityMonitor", + "package": "resources", + "description": "quality monitor configuration.", + "fields": { + "assets_dir": { + "name": "assets_dir", + "type": "any", + "description": "[Create:REQ Update:IGN] Field for specifying the absolute path to a custom directory to store data-monitoring\nassets. Normally prepopulated to a default user location via UI and Python APIs.", + "required": false + }, + "baseline_table_name": { + "name": "baseline_table_name", + "type": "string", + "description": "[Create:OPT Update:OPT] Baseline table name.\nBaseline data is used to compute drift from the data in the monitored `table_name`.\nThe baseline table and the monitored table shall have the same schema.", + "required": false + }, + "custom_metrics": { + "name": "custom_metrics", + "type": "any", + "description": "[Create:OPT Update:OPT] Custom metrics.", + "required": false + }, + "data_classification_config": { + "name": "data_classification_config", + "type": "any", + "description": "[Create:OPT Update:OPT] Data classification related config.", + "required": false + }, + "inference_log": { + "name": "inference_log", + "type": "any", + "description": "", + "required": false + }, + "latest_monitor_failure_msg": { + "name": "latest_monitor_failure_msg", + "type": "any", + "description": "[Create:ERR Update:IGN] The latest error message for a monitor failure.", + "required": false + }, + "notifications": { + "name": "notifications", + "type": "any", + "description": "[Create:OPT Update:OPT] Field for specifying notification settings.", + "required": false + }, + "output_schema_name": { + "name": "output_schema_name", + "type": "string", + "description": "[Create:REQ Update:REQ] Schema where output tables are created. 
Needs to be in 2-level format {catalog}.{schema}", + "required": false + }, + "schedule": { + "name": "schedule", + "type": "any", + "description": "[Create:OPT Update:OPT] The monitor schedule.", + "required": false + }, + "skip_builtin_dashboard": { + "name": "skip_builtin_dashboard", + "type": "any", + "description": "Whether to skip creating a default dashboard summarizing data quality metrics.", + "required": false + }, + "slicing_exprs": { + "name": "slicing_exprs", + "type": "any", + "description": "[Create:OPT Update:OPT] List of column expressions to slice data with for targeted analysis. The data is grouped by\neach expression independently, resulting in a separate slice for each predicate and its\ncomplements. For example `slicing_exprs=[“col_1”, “col_2 \u003e 10”]` will generate the following\nslices: two slices for `col_2 \u003e 10` (True and False), and one slice per unique value in\n`col1`. For high-cardinality columns, only the top 100 unique values by frequency will\ngenerate slices.", + "required": false + }, + "snapshot": { + "name": "snapshot", + "type": "any", + "description": "Configuration for monitoring snapshot tables.", + "required": false + }, + "time_series": { + "name": "time_series", + "type": "any", + "description": "Configuration for monitoring time series tables.", + "required": false + }, + "warehouse_id": { + "name": "warehouse_id", + "type": "string", + "description": "Optional argument to specify the warehouse for dashboard creation. If not specified, the first running\nwarehouse will be used.", + "required": false + } + } + }, + "bundle.RegisteredModel": { + "name": "RegisteredModel", + "package": "resources", + "description": "registered model configuration.", + "fields": { + "aliases": { + "name": "aliases", + "type": "any", + "description": "List of aliases associated with the registered model", + "required": false + }, + "browse_only": { + "name": "browse_only", + "type": "any", + "description": "Indicates whether the principal is limited to retrieving metadata for the associated object through the BROWSE privilege when include_browse is enabled in the request.", + "required": false + }, + "catalog_name": { + "name": "catalog_name", + "type": "string", + "description": "The name of the catalog where the schema and the registered model reside", + "required": false + }, + "comment": { + "name": "comment", + "type": "any", + "description": "The comment attached to the registered model", + "required": false + }, + "created_at": { + "name": "created_at", + "type": "string (timestamp)", + "description": "Creation timestamp of the registered model in milliseconds since the Unix epoch", + "required": false + }, + "created_by": { + "name": "created_by", + "type": "any", + "description": "The identifier of the user who created the registered model", + "required": false + }, + "full_name": { + "name": "full_name", + "type": "string", + "description": "The three-level (fully qualified) name of the registered model", + "required": false + }, + "metastore_id": { + "name": "metastore_id", + "type": "string", + "description": "The unique identifier of the metastore", + "required": false + }, + "name": { + "name": "name", + "type": "any", + "description": "The name of the registered model", + "required": false + }, + "owner": { + "name": "owner", + "type": "any", + "description": "The identifier of the user who owns the registered model", + "required": false + }, + "schema_name": { + "name": "schema_name", + "type": "string", + "description": "The name of the 
schema where the registered model resides", + "required": false + }, + "storage_location": { + "name": "storage_location", + "type": "any", + "description": "The storage location on the cloud under which model version data files are stored", + "required": false + }, + "updated_at": { + "name": "updated_at", + "type": "string (timestamp)", + "description": "Last-update timestamp of the registered model in milliseconds since the Unix epoch", + "required": false + }, + "updated_by": { + "name": "updated_by", + "type": "any", + "description": "The identifier of the user who updated the registered model last time", + "required": false + } + } + }, + "bundle.Schema": { + "name": "Schema", + "package": "resources", + "description": "schema configuration.", + "fields": { + "catalog_name": { + "name": "catalog_name", + "type": "string", + "description": "Name of parent catalog.", + "required": false + }, + "comment": { + "name": "comment", + "type": "any", + "description": "User-provided free-form text description.", + "required": false + }, + "name": { + "name": "name", + "type": "any", + "description": "Name of schema, relative to parent catalog.", + "required": false + }, + "properties": { + "name": "properties", + "type": "any", + "description": "A map of key-value properties attached to the securable.", + "required": false + }, + "storage_root": { + "name": "storage_root", + "type": "any", + "description": "Storage root URL for managed tables within schema.", + "required": false + } + } + }, + "bundle.SqlWarehouse": { + "name": "SqlWarehouse", + "package": "resources", + "description": "Creates a new SQL warehouse.", + "fields": { + "auto_stop_mins": { + "name": "auto_stop_mins", + "type": "any", + "description": "The amount of time in minutes that a SQL warehouse must be idle (i.e., no\nRUNNING queries) before it is automatically stopped.\n\nSupported values:\n- Must be == 0 or \u003e= 10 mins\n- 0 indicates no autostop.\n\nDefaults to 120 mins", + "required": false + }, + "channel": { + "name": "channel", + "type": "any", + "description": "Channel Details", + "required": false + }, + "cluster_size": { + "name": "cluster_size", + "type": "int", + "description": "Size of the clusters allocated for this warehouse.\nIncreasing the size of a spark cluster allows you to run larger queries on\nit. If you want to increase the number of concurrent queries, please tune\nmax_num_clusters.\n\nSupported values:\n- 2X-Small\n- X-Small\n- Small\n- Medium\n- Large\n- X-Large\n- 2X-Large\n- 3X-Large\n- 4X-Large", + "required": false + }, + "creator_name": { + "name": "creator_name", + "type": "string", + "description": "warehouse creator name", + "required": false + }, + "enable_photon": { + "name": "enable_photon", + "type": "bool", + "description": "Configures whether the warehouse should use Photon optimized clusters.\n\nDefaults to false.", + "required": false + }, + "enable_serverless_compute": { + "name": "enable_serverless_compute", + "type": "bool", + "description": "Configures whether the warehouse should use serverless compute", + "required": false + }, + "instance_profile_arn": { + "name": "instance_profile_arn", + "type": "any", + "description": "Deprecated. 
Instance profile used to pass IAM role to the cluster", + "required": false, + "deprecated": true + }, + "max_num_clusters": { + "name": "max_num_clusters", + "type": "any", + "description": "Maximum number of clusters that the autoscaler will create to handle\nconcurrent queries.\n\nSupported values:\n- Must be \u003e= min_num_clusters\n- Must be \u003c= 40.\n\nDefaults to min_clusters if unset.", + "required": false + }, + "min_num_clusters": { + "name": "min_num_clusters", + "type": "any", + "description": "Minimum number of available clusters that will be maintained for this SQL\nwarehouse. Increasing this will ensure that a larger number of clusters are\nalways running and therefore may reduce the cold start time for new\nqueries. This is similar to reserved vs. revocable cores in a resource\nmanager.\n\nSupported values:\n- Must be \u003e 0\n- Must be \u003c= min(max_num_clusters, 30)\n\nDefaults to 1", + "required": false + }, + "name": { + "name": "name", + "type": "any", + "description": "Logical name for the cluster.\n\nSupported values:\n- Must be unique within an org.\n- Must be less than 100 characters.", + "required": false + }, + "spot_instance_policy": { + "name": "spot_instance_policy", + "type": "any", + "description": "Configurations whether the endpoint should use spot instances.", + "required": false + }, + "tags": { + "name": "tags", + "type": "map[string]string", + "description": "A set of key-value pairs that will be tagged on all resources (e.g., AWS instances and EBS volumes) associated\nwith this SQL warehouse.\n\nSupported values:\n- Number of tags \u003c 45.", + "required": false + }, + "warehouse_type": { + "name": "warehouse_type", + "type": "any", + "description": "Warehouse type: `PRO` or `CLASSIC`. If you want to use serverless compute,\nyou must set to `PRO` and also set the field `enable_serverless_compute` to `true`.", + "required": false + } + } + }, + "bundle.SyncedDatabaseTable": { + "name": "SyncedDatabaseTable", + "package": "resources", + "description": "Next field marker: 18", + "fields": { + "data_synchronization_status": { + "name": "data_synchronization_status", + "type": "any", + "description": "Synced Table data synchronization status", + "required": false, + "output_only": true + }, + "database_instance_name": { + "name": "database_instance_name", + "type": "string", + "description": "Name of the target database instance. This is required when creating synced database tables in standard catalogs.\nThis is optional when creating synced database tables in registered catalogs. If this field is specified\nwhen creating synced database tables in registered catalogs, the database instance name MUST\nmatch that of the registered catalog (or the request will be rejected).", + "required": false + }, + "effective_database_instance_name": { + "name": "effective_database_instance_name", + "type": "string", + "description": "The name of the database instance that this table is registered to. 
This field is always returned, and for\ntables inside database catalogs is inferred database instance associated with the catalog.", + "required": false, + "output_only": true + }, + "effective_logical_database_name": { + "name": "effective_logical_database_name", + "type": "string", + "description": "The name of the logical database that this table is registered to.", + "required": false, + "output_only": true + }, + "logical_database_name": { + "name": "logical_database_name", + "type": "string", + "description": "Target Postgres database object (logical database) name for this table.\n\nWhen creating a synced table in a registered Postgres catalog, the\ntarget Postgres database name is inferred to be that of the registered catalog.\nIf this field is specified in this scenario, the Postgres database name MUST\nmatch that of the registered catalog (or the request will be rejected).\n\nWhen creating a synced table in a standard catalog, this field is required.\nIn this scenario, specifying this field will allow targeting an arbitrary postgres database.\nNote that this has implications for the `create_database_objects_is_missing` field in `spec`.", + "required": false + }, + "name": { + "name": "name", + "type": "any", + "description": "Full three-part (catalog, schema, table) name of the table.", + "required": false + }, + "spec": { + "name": "spec", + "type": "any", + "description": "Specification of a synced database table.", + "required": false + }, + "unity_catalog_provisioning_state": { + "name": "unity_catalog_provisioning_state", + "type": "any", + "description": "The provisioning state of the synced table entity in Unity Catalog. This is distinct from the\nstate of the data synchronization pipeline (i.e. the table may be in \"ACTIVE\" but the pipeline\nmay be in \"PROVISIONING\" as it runs asynchronously).", + "required": false, + "output_only": true + } + } + }, + "bundle.Volume": { + "name": "Volume", + "package": "resources", + "description": "volume configuration.", + "fields": { + "catalog_name": { + "name": "catalog_name", + "type": "string", + "description": "The name of the catalog where the schema and the volume are", + "required": false + }, + "comment": { + "name": "comment", + "type": "any", + "description": "The comment attached to the volume", + "required": false + }, + "name": { + "name": "name", + "type": "any", + "description": "The name of the volume", + "required": false + }, + "schema_name": { + "name": "schema_name", + "type": "string", + "description": "The name of the schema where the volume is", + "required": false + }, + "storage_location": { + "name": "storage_location", + "type": "any", + "description": "The storage location on the cloud", + "required": false + }, + "volume_type": { + "name": "volume_type", + "type": "any", + "description": "The type of the volume. 
An external volume is located in the specified external location.\nA managed volume is located in the default location which is specified by the parent schema, or the parent catalog, or the Metastore.\n[Learn more](https://docs.databricks.com/aws/en/volumes/managed-vs-external)", + "required": false + } + } + }, + "catalog.MonitorCronSchedule": { + "name": "MonitorCronSchedule", + "package": "catalog", + "description": "monitor cron schedule configuration.", + "fields": { + "pause_status": { + "name": "pause_status", + "type": "any", + "description": "Read only field that indicates whether a schedule is paused or not.", + "required": false + }, + "quartz_cron_expression": { + "name": "quartz_cron_expression", + "type": "any", + "description": "The expression that determines when to run the monitor. See [examples](https://www.quartz-scheduler.org/documentation/quartz-2.3.0/tutorials/crontrigger.html).", + "required": false + }, + "timezone_id": { + "name": "timezone_id", + "type": "string", + "description": "The timezone id (e.g., ``PST``) in which to evaluate the quartz expression.", + "required": false + } + } + }, + "catalog.MonitorCronSchedulePauseStatus": { + "name": "MonitorCronSchedulePauseStatus", + "package": "catalog", + "description": "Source link: https://src.dev.databricks.com/databricks/universe/-/blob/elastic-spark-common/api/messages/schedule.proto\nMonitoring workflow schedule pause status.", + "fields": {} + }, + "catalog.MonitorDataClassificationConfig": { + "name": "MonitorDataClassificationConfig", + "package": "catalog", + "description": "Data classification related configuration.", + "fields": { + "enabled": { + "name": "enabled", + "type": "bool", + "description": "Whether to enable data classification.", + "required": false + } + } + }, + "catalog.MonitorDestination": { + "name": "MonitorDestination", + "package": "catalog", + "description": "monitor destination configuration.", + "fields": { + "email_addresses": { + "name": "email_addresses", + "type": "any", + "description": "The list of email addresses to send the notification to. 
A maximum of 5 email addresses is supported.", + "required": false + } + } + }, + "catalog.MonitorInferenceLog": { + "name": "MonitorInferenceLog", + "package": "catalog", + "description": "monitor inference log configuration.", + "fields": { + "granularities": { + "name": "granularities", + "type": "any", + "description": "List of granularities to use when aggregating data into time windows based on their timestamp.", + "required": false + }, + "label_col": { + "name": "label_col", + "type": "any", + "description": "Column for the label.", + "required": false + }, + "model_id_col": { + "name": "model_id_col", + "type": "any", + "description": "Column for the model identifier.", + "required": false + }, + "prediction_col": { + "name": "prediction_col", + "type": "any", + "description": "Column for the prediction.", + "required": false + }, + "prediction_proba_col": { + "name": "prediction_proba_col", + "type": "any", + "description": "Column for prediction probabilities", + "required": false + }, + "problem_type": { + "name": "problem_type", + "type": "any", + "description": "Problem type the model aims to solve.", + "required": false + }, + "timestamp_col": { + "name": "timestamp_col", + "type": "any", + "description": "Column for the timestamp.", + "required": false + } + } + }, + "catalog.MonitorInferenceLogProblemType": { + "name": "MonitorInferenceLogProblemType", + "package": "catalog", + "description": "monitor inference log problem type configuration.", + "fields": {} + }, + "catalog.MonitorMetric": { + "name": "MonitorMetric", + "package": "catalog", + "description": "Custom metric definition.", + "fields": { + "definition": { + "name": "definition", + "type": "any", + "description": "Jinja template for a SQL expression that specifies how to compute the metric. 
See [create metric definition](https://docs.databricks.com/en/lakehouse-monitoring/custom-metrics.html#create-definition).", + "required": false + }, + "input_columns": { + "name": "input_columns", + "type": "any", + "description": "A list of column names in the input table the metric should be computed for.\nCan use ``\":table\"`` to indicate that the metric needs information from multiple columns.", + "required": false + }, + "name": { + "name": "name", + "type": "any", + "description": "Name of the metric in the output tables.", + "required": false + }, + "output_data_type": { + "name": "output_data_type", + "type": "any", + "description": "The output type of the custom metric.", + "required": false + }, + "type": { + "name": "type", + "type": "any", + "description": "Can only be one of ``\"CUSTOM_METRIC_TYPE_AGGREGATE\"``, ``\"CUSTOM_METRIC_TYPE_DERIVED\"``, or ``\"CUSTOM_METRIC_TYPE_DRIFT\"``.\nThe ``\"CUSTOM_METRIC_TYPE_AGGREGATE\"`` and ``\"CUSTOM_METRIC_TYPE_DERIVED\"`` metrics\nare computed on a single table, whereas the ``\"CUSTOM_METRIC_TYPE_DRIFT\"`` compare metrics across\nbaseline and input table, or across the two consecutive time windows.\n- CUSTOM_METRIC_TYPE_AGGREGATE: only depend on the existing columns in your table\n- CUSTOM_METRIC_TYPE_DERIVED: depend on previously computed aggregate metrics\n- CUSTOM_METRIC_TYPE_DRIFT: depend on previously computed aggregate or derived metrics", + "required": false + } + } + }, + "catalog.MonitorMetricType": { + "name": "MonitorMetricType", + "package": "catalog", + "description": "Can only be one of ``\\\"CUSTOM_METRIC_TYPE_AGGREGATE\\\"``, ``\\\"CUSTOM_METRIC_TYPE_DERIVED\\\"``, or ``\\\"CUSTOM_METRIC_TYPE_DRIFT\\\"``.\nThe ``\\\"CUSTOM_METRIC_TYPE_AGGREGATE\\\"`` and ``\\\"CUSTOM_METRIC_TYPE_DERIVED\\\"`` metrics\nare computed on a single table, whereas the ``\\\"CUSTOM_METRIC_TYPE_DRIFT\\\"`` compare metrics across\nbaseline and input table, or across the two consecutive time windows.\n- CUSTOM_METRIC_TYPE_AGGREGATE: only depend on the existing columns in your table\n- CUSTOM_METRIC_TYPE_DERIVED: depend on previously computed aggregate metrics\n- CUSTOM_METRIC_TYPE_DRIFT: depend on previously computed aggregate or derived metrics", + "fields": {} + }, + "catalog.MonitorNotifications": { + "name": "MonitorNotifications", + "package": "catalog", + "description": "monitor notifications configuration.", + "fields": { + "on_failure": { + "name": "on_failure", + "type": "any", + "description": "Destinations to send notifications on failure/timeout.", + "required": false + }, + "on_new_classification_tag_detected": { + "name": "on_new_classification_tag_detected", + "type": "any", + "description": "Destinations to send notifications on new classification tag detected.", + "required": false + } + } + }, + "catalog.MonitorSnapshot": { + "name": "MonitorSnapshot", + "package": "catalog", + "description": "Snapshot analysis configuration", + "fields": {} + }, + "catalog.MonitorTimeSeries": { + "name": "MonitorTimeSeries", + "package": "catalog", + "description": "Time series analysis configuration.", + "fields": { + "granularities": { + "name": "granularities", + "type": "any", + "description": "Granularities for aggregating data into time windows based on their timestamp. 
Currently the following static\ngranularities are supported:\n{``\\\"5 minutes\\\"``, ``\\\"30 minutes\\\"``, ``\\\"1 hour\\\"``, ``\\\"1 day\\\"``, ``\\\"\\u003cn\\u003e week(s)\\\"``, ``\\\"1 month\\\"``, ``\\\"1 year\\\"``}.", + "required": false + }, + "timestamp_col": { + "name": "timestamp_col", + "type": "any", + "description": "Column for the timestamp.", + "required": false + } + } + }, + "catalog.RegisteredModelAlias": { + "name": "RegisteredModelAlias", + "package": "catalog", + "description": "registered model alias configuration.", + "fields": { + "alias_name": { + "name": "alias_name", + "type": "string", + "description": "Name of the alias, e.g. 'champion' or 'latest_stable'", + "required": false + }, + "catalog_name": { + "name": "catalog_name", + "type": "string", + "description": "The name of the catalog containing the model version", + "required": false + }, + "id": { + "name": "id", + "type": "any", + "description": "The unique identifier of the alias", + "required": false + }, + "model_name": { + "name": "model_name", + "type": "string", + "description": "The name of the parent registered model of the model version, relative to parent schema", + "required": false + }, + "schema_name": { + "name": "schema_name", + "type": "string", + "description": "The name of the schema containing the model version, relative to parent catalog", + "required": false + }, + "version_num": { + "name": "version_num", + "type": "any", + "description": "Integer version number of the model version to which this alias points.", + "required": false + } + } + }, + "catalog.VolumeType": { + "name": "VolumeType", + "package": "catalog", + "description": "volume type configuration.", + "fields": {} + }, + "compute.Adlsgen2Info": { + "name": "Adlsgen2Info", + "package": "compute", + "description": "A storage location in Adls Gen2", + "fields": { + "destination": { + "name": "destination", + "type": "any", + "description": "abfss destination, e.g. `abfss://\u003ccontainer-name\u003e@\u003cstorage-account-name\u003e.dfs.core.windows.net/\u003cdirectory-name\u003e`.", + "required": false + } + } + }, + "compute.AutoScale": { + "name": "AutoScale", + "package": "compute", + "description": "auto scale configuration.", + "fields": { + "max_workers": { + "name": "max_workers", + "type": "any", + "description": "The maximum number of workers to which the cluster can scale up when overloaded.\nNote that `max_workers` must be strictly greater than `min_workers`.", + "required": false + }, + "min_workers": { + "name": "min_workers", + "type": "any", + "description": "The minimum number of workers to which the cluster can scale down when underutilized.\nIt is also the initial number of workers the cluster will have after creation.", + "required": false + } + } + }, + "compute.AwsAttributes": { + "name": "AwsAttributes", + "package": "compute", + "description": "Attributes set during cluster creation which are related to Amazon Web Services.", + "fields": { + "availability": { + "name": "availability", + "type": "any", + "description": "Availability type used for all subsequent nodes past the `first_on_demand` ones.\n\nNote: If `first_on_demand` is zero, this availability type will be used for the entire cluster.", + "required": false + }, + "ebs_volume_count": { + "name": "ebs_volume_count", + "type": "int", + "description": "The number of volumes launched for each instance. Users can choose up to 10 volumes.\nThis feature is only enabled for supported node types. 
Legacy node types cannot specify\ncustom EBS volumes.\nFor node types with no instance store, at least one EBS volume needs to be specified;\notherwise, cluster creation will fail.\n\nThese EBS volumes will be mounted at `/ebs0`, `/ebs1`, etc.\nInstance store volumes will be mounted at `/local_disk0`, `/local_disk1`, etc.\n\nIf EBS volumes are attached, Databricks will configure Spark to use only the EBS volumes for\nscratch storage because heterogeneously sized scratch devices can lead to inefficient disk\nutilization. If no EBS volumes are attached, Databricks will configure Spark to use instance\nstore volumes.\n\nPlease note that if EBS volumes are specified, then the Spark configuration `spark.local.dir`\nwill be overridden.", + "required": false + }, + "ebs_volume_iops": { + "name": "ebs_volume_iops", + "type": "any", + "description": "If using gp3 volumes, what IOPS to use for the disk. If this is not set, the maximum performance of a gp2 volume with the same volume size will be used.", + "required": false + }, + "ebs_volume_size": { + "name": "ebs_volume_size", + "type": "int", + "description": "The size of each EBS volume (in GiB) launched for each instance. For general purpose\nSSD, this value must be within the range 100 - 4096. For throughput optimized HDD,\nthis value must be within the range 500 - 4096.", + "required": false + }, + "ebs_volume_throughput": { + "name": "ebs_volume_throughput", + "type": "any", + "description": "If using gp3 volumes, what throughput to use for the disk. If this is not set, the maximum performance of a gp2 volume with the same volume size will be used.", + "required": false + }, + "ebs_volume_type": { + "name": "ebs_volume_type", + "type": "any", + "description": "The type of EBS volumes that will be launched with this cluster.", + "required": false + }, + "first_on_demand": { + "name": "first_on_demand", + "type": "any", + "description": "The first `first_on_demand` nodes of the cluster will be placed on on-demand instances.\nIf this value is greater than 0, the cluster driver node in particular will be placed on an\non-demand instance. If this value is greater than or equal to the current cluster size, all\nnodes will be placed on on-demand instances. If this value is less than the current cluster\nsize, `first_on_demand` nodes will be placed on on-demand instances and the remainder will\nbe placed on `availability` instances. Note that this value does not affect\ncluster size and cannot currently be mutated over the lifetime of a cluster.", + "required": false + }, + "instance_profile_arn": { + "name": "instance_profile_arn", + "type": "any", + "description": "Nodes for this cluster will only be placed on AWS instances with this instance profile. If\nomitted, nodes will be placed on instances without an IAM instance profile. The instance\nprofile must have previously been added to the Databricks environment by an account\nadministrator.\n\nThis feature may only be available to certain customer plans.", + "required": false + }, + "spot_bid_price_percent": { + "name": "spot_bid_price_percent", + "type": "any", + "description": "The bid price for AWS spot instances, as a percentage of the corresponding instance type's\non-demand price.\nFor example, if this field is set to 50, and the cluster needs a new `r3.xlarge` spot\ninstance, then the bid price is half of the price of\non-demand `r3.xlarge` instances. Similarly, if this field is set to 200, the bid price is twice\nthe price of on-demand `r3.xlarge` instances. 
If not specified, the default value is 100.\nWhen spot instances are requested for this cluster, only spot instances whose bid price\npercentage matches this field will be considered.\nNote that, for safety, we enforce this field to be no more than 10000.", + "required": false + }, + "zone_id": { + "name": "zone_id", + "type": "string", + "description": "Identifier for the availability zone/datacenter in which the cluster resides.\nThis string will be of a form like \"us-west-2a\". The provided availability\nzone must be in the same region as the Databricks deployment. For example, \"us-west-2a\"\nis not a valid zone id if the Databricks deployment resides in the \"us-east-1\" region.\nThis is an optional field at cluster creation, and if not specified, the zone \"auto\" will be used.\nIf the zone specified is \"auto\", will try to place cluster in a zone with high availability,\nand will retry placement in a different AZ if there is not enough capacity.\n\nThe list of available zones as well as the default value can be found by using the\n`List Zones` method.", + "required": false + } + } + }, + "compute.AwsAvailability": { + "name": "AwsAvailability", + "package": "compute", + "description": "Availability type used for all subsequent nodes past the `first_on_demand` ones.\n\nNote: If `first_on_demand` is zero, this availability type will be used for the entire cluster.", + "fields": {} + }, + "compute.AzureAttributes": { + "name": "AzureAttributes", + "package": "compute", + "description": "Attributes set during cluster creation which are related to Microsoft Azure.", + "fields": { + "availability": { + "name": "availability", + "type": "any", + "description": "Availability type used for all subsequent nodes past the `first_on_demand` ones.\nNote: If `first_on_demand` is zero, this availability\ntype will be used for the entire cluster.", + "required": false + }, + "first_on_demand": { + "name": "first_on_demand", + "type": "any", + "description": "The first `first_on_demand` nodes of the cluster will be placed on on-demand instances.\nThis value should be greater than 0, to make sure the cluster driver node is placed on an\non-demand instance. If this value is greater than or equal to the current cluster size, all\nnodes will be placed on on-demand instances. If this value is less than the current cluster\nsize, `first_on_demand` nodes will be placed on on-demand instances and the remainder will\nbe placed on `availability` instances. Note that this value does not affect\ncluster size and cannot currently be mutated over the lifetime of a cluster.", + "required": false + }, + "log_analytics_info": { + "name": "log_analytics_info", + "type": "any", + "description": "Defines values necessary to configure and run Azure Log Analytics agent", + "required": false + }, + "spot_bid_max_price": { + "name": "spot_bid_max_price", + "type": "any", + "description": "The max bid price to be used for Azure spot instances.\nThe Max price for the bid cannot be higher than the on-demand price of the instance.\nIf not specified, the default value is -1, which specifies that the instance cannot be evicted\non the basis of price, and only on the basis of availability. 
Further, the value should \u003e 0 or -1.", + "required": false + } + } + }, + "compute.AzureAvailability": { + "name": "AzureAvailability", + "package": "compute", + "description": "Availability type used for all subsequent nodes past the `first_on_demand` ones.\nNote: If `first_on_demand` is zero, this availability type will be used for the entire cluster.", + "fields": {} + }, + "compute.ClientsTypes": { + "name": "ClientsTypes", + "package": "compute", + "description": "clients types configuration.", + "fields": { + "jobs": { + "name": "jobs", + "type": "any", + "description": "With jobs set, the cluster can be used for jobs", + "required": false + }, + "notebooks": { + "name": "notebooks", + "type": "any", + "description": "With notebooks set, this cluster can be used for notebooks", + "required": false + } + } + }, + "compute.ClusterLogConf": { + "name": "ClusterLogConf", + "package": "compute", + "description": "Cluster log delivery config", + "fields": { + "dbfs": { + "name": "dbfs", + "type": "any", + "description": "destination needs to be provided. e.g.\n`{ \"dbfs\" : { \"destination\" : \"dbfs:/home/cluster_log\" } }`", + "required": false + }, + "s3": { + "name": "s3", + "type": "any", + "description": "destination and either the region or endpoint need to be provided. e.g.\n`{ \"s3\": { \"destination\" : \"s3://cluster_log_bucket/prefix\", \"region\" : \"us-west-2\" } }`\nCluster iam role is used to access s3, please make sure the cluster iam role in\n`instance_profile_arn` has permission to write data to the s3 destination.", + "required": false + }, + "volumes": { + "name": "volumes", + "type": "any", + "description": "destination needs to be provided, e.g.\n`{ \"volumes\": { \"destination\": \"/Volumes/catalog/schema/volume/cluster_log\" } }`", + "required": false + } + } + }, + "compute.ClusterSpec": { + "name": "ClusterSpec", + "package": "compute", + "description": "Contains a snapshot of the latest user specified settings that were used to create/edit the cluster.", + "fields": { + "apply_policy_default_values": { + "name": "apply_policy_default_values", + "type": "any", + "description": "When set to true, fixed and default values from the policy will be used for fields that are omitted. When set to false, only fixed values from the policy will be applied.", + "required": false + }, + "autoscale": { + "name": "autoscale", + "type": "any", + "description": "Parameters needed in order to automatically scale clusters up and down based on load.\nNote: autoscaling works best with DB runtime versions 3.0 or later.", + "required": false + }, + "autotermination_minutes": { + "name": "autotermination_minutes", + "type": "int", + "description": "Automatically terminates the cluster after it is inactive for this time in minutes. If not set,\nthis cluster will not be automatically terminated. 
If specified, the threshold must be between\n10 and 10000 minutes.\nUsers can also set this value to 0 to explicitly disable automatic termination.", + "required": false + }, + "aws_attributes": { + "name": "aws_attributes", + "type": "any", + "description": "Attributes related to clusters running on Amazon Web Services.\nIf not specified at cluster creation, a set of default values will be used.", + "required": false + }, + "azure_attributes": { + "name": "azure_attributes", + "type": "any", + "description": "Attributes related to clusters running on Microsoft Azure.\nIf not specified at cluster creation, a set of default values will be used.", + "required": false + }, + "cluster_log_conf": { + "name": "cluster_log_conf", + "type": "any", + "description": "The configuration for delivering spark logs to a long-term storage destination.\nThree kinds of destinations (DBFS, S3 and Unity Catalog volumes) are supported. Only one destination can be specified\nfor one cluster. If the conf is given, the logs will be delivered to the destination every\n`5 mins`. The destination of driver logs is `$destination/$clusterId/driver`, while\nthe destination of executor logs is `$destination/$clusterId/executor`.", + "required": false + }, + "cluster_name": { + "name": "cluster_name", + "type": "string", + "description": "Cluster name requested by the user. This doesn't have to be unique.\nIf not specified at creation, the cluster name will be an empty string.\nFor job clusters, the cluster name is automatically set based on the job and job run IDs.", + "required": false + }, + "custom_tags": { + "name": "custom_tags", + "type": "map[string]string", + "description": "Additional tags for cluster resources. Databricks will tag all cluster resources (e.g., AWS\ninstances and EBS volumes) with these tags in addition to `default_tags`. Notes:\n\n- Currently, Databricks allows at most 45 custom tags\n\n- Clusters can only reuse cloud resources if the resources' tags are a subset of the cluster tags", + "required": false + }, + "data_security_mode": { + "name": "data_security_mode", + "type": "any", + "description": "Data security mode decides what data governance model to use when accessing data\nfrom a cluster.\n\nThe following modes can only be used when `kind = CLASSIC_PREVIEW`.\n* `DATA_SECURITY_MODE_AUTO`: Databricks will choose the most appropriate access mode depending on your compute configuration.\n* `DATA_SECURITY_MODE_STANDARD`: Alias for `USER_ISOLATION`.\n* `DATA_SECURITY_MODE_DEDICATED`: Alias for `SINGLE_USER`.\n\nThe following modes can be used regardless of `kind`.\n* `NONE`: No security isolation for multiple users sharing the cluster. Data governance features are not available in this mode.\n* `SINGLE_USER`: A secure cluster that can only be exclusively used by a single user specified in `single_user_name`. Most programming languages, cluster features and data governance features are available in this mode.\n* `USER_ISOLATION`: A secure cluster that can be shared by multiple users. Cluster users are fully isolated so that they cannot see each other's data and credentials. Most data governance features are supported in this mode. 
But programming languages and cluster features might be limited.\n\nThe following modes are deprecated starting with Databricks Runtime 15.0 and\nwill be removed for future Databricks Runtime versions:\n\n* `LEGACY_TABLE_ACL`: This mode is for users migrating from legacy Table ACL clusters.\n* `LEGACY_PASSTHROUGH`: This mode is for users migrating from legacy Passthrough on high concurrency clusters.\n* `LEGACY_SINGLE_USER`: This mode is for users migrating from legacy Passthrough on standard clusters.\n* `LEGACY_SINGLE_USER_STANDARD`: This mode provides a way that doesn’t have UC nor passthrough enabled.", + "required": false + }, + "docker_image": { + "name": "docker_image", + "type": "any", + "description": "Custom docker image BYOC", + "required": false + }, + "driver_instance_pool_id": { + "name": "driver_instance_pool_id", + "type": "string", + "description": "The optional ID of the instance pool for the driver of the cluster belongs.\nThe pool cluster uses the instance pool with id (instance_pool_id) if the driver pool is not\nassigned.", + "required": false + }, + "driver_node_type_id": { + "name": "driver_node_type_id", + "type": "string", + "description": "The node type of the Spark driver.\nNote that this field is optional; if unset, the driver node type will be set as the same value\nas `node_type_id` defined above.\n\nThis field, along with node_type_id, should not be set if virtual_cluster_size is set.\nIf both driver_node_type_id, node_type_id, and virtual_cluster_size are specified, driver_node_type_id and node_type_id take precedence.", + "required": false + }, + "enable_elastic_disk": { + "name": "enable_elastic_disk", + "type": "bool", + "description": "Autoscaling Local Storage: when enabled, this cluster will dynamically acquire additional disk\nspace when its Spark workers are running low on disk space.", + "required": false + }, + "enable_local_disk_encryption": { + "name": "enable_local_disk_encryption", + "type": "bool", + "description": "Whether to enable LUKS on cluster VMs' local disks", + "required": false + }, + "gcp_attributes": { + "name": "gcp_attributes", + "type": "any", + "description": "Attributes related to clusters running on Google Cloud Platform.\nIf not specified at cluster creation, a set of default values will be used.", + "required": false + }, + "init_scripts": { + "name": "init_scripts", + "type": "any", + "description": "The configuration for storing init scripts. 
Any number of destinations can be specified.\nThe scripts are executed sequentially in the order provided.\nIf `cluster_log_conf` is specified, init script logs are sent to `\u003cdestination\u003e/\u003ccluster-ID\u003e/init_scripts`.", + "required": false + }, + "instance_pool_id": { + "name": "instance_pool_id", + "type": "string", + "description": "The optional ID of the instance pool to which the cluster belongs.", + "required": false + }, + "is_single_node": { + "name": "is_single_node", + "type": "bool", + "description": "This field can only be used when `kind = CLASSIC_PREVIEW`.\n\nWhen set to true, Databricks will automatically set single node related `custom_tags`, `spark_conf`, and `num_workers`", + "required": false + }, + "kind": { + "name": "kind", + "type": "any", + "description": "The kind of compute described by this compute specification.\n\nDepending on `kind`, different validations and default values will be applied.\n\nClusters with `kind = CLASSIC_PREVIEW` support the following fields, whereas clusters with no specified `kind` do not.\n* [is_single_node](/api/workspace/clusters/create#is_single_node)\n* [use_ml_runtime](/api/workspace/clusters/create#use_ml_runtime)\n* [data_security_mode](/api/workspace/clusters/create#data_security_mode) set to `DATA_SECURITY_MODE_AUTO`, `DATA_SECURITY_MODE_DEDICATED`, or `DATA_SECURITY_MODE_STANDARD`\n\nBy using the [simple form](https://docs.databricks.com/compute/simple-form.html), your clusters are automatically using `kind = CLASSIC_PREVIEW`.", + "required": false + }, + "node_type_id": { + "name": "node_type_id", + "type": "string", + "description": "This field encodes, through a single value, the resources available to each of\nthe Spark nodes in this cluster. For example, the Spark nodes can be provisioned\nand optimized for memory or compute intensive workloads. A list of available node\ntypes can be retrieved by using the :method:clusters/listNodeTypes API call.", + "required": false + }, + "num_workers": { + "name": "num_workers", + "type": "any", + "description": "Number of worker nodes that this cluster should have. A cluster has one Spark Driver\nand `num_workers` Executors for a total of `num_workers` + 1 Spark nodes.\n\nNote: When reading the properties of a cluster, this field reflects the desired number\nof workers rather than the actual current number of workers. For instance, if a cluster\nis resized from 5 to 10 workers, this field will immediately be updated to reflect\nthe target size of 10 workers, whereas the workers listed in `spark_info` will gradually\nincrease from 5 to 10 as the new nodes are provisioned.", + "required": false + }, + "policy_id": { + "name": "policy_id", + "type": "string", + "description": "The ID of the cluster policy used to create the cluster if applicable.", + "required": false + }, + "remote_disk_throughput": { + "name": "remote_disk_throughput", + "type": "any", + "description": "If set, what the configurable throughput (in Mb/s) for the remote disk is. 
Currently only supported for GCP HYPERDISK_BALANCED disks.", + "required": false + }, + "runtime_engine": { + "name": "runtime_engine", + "type": "any", + "description": "Determines the cluster's runtime engine, either standard or Photon.\n\nThis field is not compatible with legacy `spark_version` values that contain `-photon-`.\nRemove `-photon-` from the `spark_version` and set `runtime_engine` to `PHOTON`.\n\nIf left unspecified, the runtime engine defaults to standard unless the spark_version\ncontains -photon-, in which case Photon will be used.", + "required": false + }, + "single_user_name": { + "name": "single_user_name", + "type": "string", + "description": "Single user name if data_security_mode is `SINGLE_USER`", + "required": false + }, + "spark_conf": { + "name": "spark_conf", + "type": "any", + "description": "An object containing a set of optional, user-specified Spark configuration key-value pairs.\nUsers can also pass in a string of extra JVM options to the driver and the executors via\n`spark.driver.extraJavaOptions` and `spark.executor.extraJavaOptions` respectively.", + "required": false + }, + "spark_env_vars": { + "name": "spark_env_vars", + "type": "any", + "description": "An object containing a set of optional, user-specified environment variable key-value pairs.\nPlease note that key-value pair of the form (X,Y) will be exported as is (i.e.,\n`export X='Y'`) while launching the driver and workers.\n\nIn order to specify an additional set of `SPARK_DAEMON_JAVA_OPTS`, we recommend appending\nthem to `$SPARK_DAEMON_JAVA_OPTS` as shown in the example below. This ensures that all\ndefault databricks managed environmental variables are included as well.\n\nExample Spark environment variables:\n`{\"SPARK_WORKER_MEMORY\": \"28000m\", \"SPARK_LOCAL_DIRS\": \"/local_disk0\"}` or\n`{\"SPARK_DAEMON_JAVA_OPTS\": \"$SPARK_DAEMON_JAVA_OPTS -Dspark.shuffle.service.enabled=true\"}`", + "required": false + }, + "spark_version": { + "name": "spark_version", + "type": "any", + "description": "The Spark version of the cluster, e.g. `3.3.x-scala2.11`.\nA list of available Spark versions can be retrieved by using\nthe :method:clusters/sparkVersions API call.", + "required": false + }, + "ssh_public_keys": { + "name": "ssh_public_keys", + "type": "any", + "description": "SSH public key contents that will be added to each Spark node in this cluster. The\ncorresponding private keys can be used to login with the user name `ubuntu` on port `2200`.\nUp to 10 keys can be specified.", + "required": false + }, + "total_initial_remote_disk_size": { + "name": "total_initial_remote_disk_size", + "type": "int", + "description": "If set, what the total initial volume size (in GB) of the remote disks should be. 
Currently only supported for GCP HYPERDISK_BALANCED disks.", + "required": false + }, + "use_ml_runtime": { + "name": "use_ml_runtime", + "type": "any", + "description": "This field can only be used when `kind = CLASSIC_PREVIEW`.\n\n`effective_spark_version` is determined by `spark_version` (DBR release), this field `use_ml_runtime`, and whether `node_type_id` is gpu node or not.", + "required": false + }, + "workload_type": { + "name": "workload_type", + "type": "any", + "description": "Cluster Attributes showing for clusters workload types.", + "required": false + } + } + }, + "compute.DataSecurityMode": { + "name": "DataSecurityMode", + "package": "compute", + "description": "Data security mode decides what data governance model to use when accessing data\nfrom a cluster.\n\nThe following modes can only be used when `kind = CLASSIC_PREVIEW`.\n* `DATA_SECURITY_MODE_AUTO`: Databricks will choose the most appropriate access mode depending on your compute configuration.\n* `DATA_SECURITY_MODE_STANDARD`: Alias for `USER_ISOLATION`.\n* `DATA_SECURITY_MODE_DEDICATED`: Alias for `SINGLE_USER`.\n\nThe following modes can be used regardless of `kind`.\n* `NONE`: No security isolation for multiple users sharing the cluster. Data governance features are not available in this mode.\n* `SINGLE_USER`: A secure cluster that can only be exclusively used by a single user specified in `single_user_name`. Most programming languages, cluster features and data governance features are available in this mode.\n* `USER_ISOLATION`: A secure cluster that can be shared by multiple users. Cluster users are fully isolated so that they cannot see each other's data and credentials. Most data governance features are supported in this mode. But programming languages and cluster features might be limited.\n\nThe following modes are deprecated starting with Databricks Runtime 15.0 and\nwill be removed for future Databricks Runtime versions:\n\n* `LEGACY_TABLE_ACL`: This mode is for users migrating from legacy Table ACL clusters.\n* `LEGACY_PASSTHROUGH`: This mode is for users migrating from legacy Passthrough on high concurrency clusters.\n* `LEGACY_SINGLE_USER`: This mode is for users migrating from legacy Passthrough on standard clusters.\n* `LEGACY_SINGLE_USER_STANDARD`: This mode provides a way that doesn’t have UC nor passthrough enabled.", + "fields": {} + }, + "compute.DbfsStorageInfo": { + "name": "DbfsStorageInfo", + "package": "compute", + "description": "A storage location in DBFS", + "fields": { + "destination": { + "name": "destination", + "type": "any", + "description": "dbfs destination, e.g. 
`dbfs:/my/path`", + "required": false + } + } + }, + "compute.DockerBasicAuth": { + "name": "DockerBasicAuth", + "package": "compute", + "description": "docker basic auth configuration.", + "fields": { + "password": { + "name": "password", + "type": "any", + "description": "Password of the user", + "required": false + }, + "username": { + "name": "username", + "type": "any", + "description": "Name of the user", + "required": false + } + } + }, + "compute.DockerImage": { + "name": "DockerImage", + "package": "compute", + "description": "docker image configuration.", + "fields": { + "basic_auth": { + "name": "basic_auth", + "type": "any", + "description": "Basic auth with username and password", + "required": false + }, + "url": { + "name": "url", + "type": "any", + "description": "URL of the docker image.", + "required": false + } + } + }, + "compute.EbsVolumeType": { + "name": "EbsVolumeType", + "package": "compute", + "description": "All EBS volume types that Databricks supports.\nSee https://aws.amazon.com/ebs/details/ for details.", + "fields": {} + }, + "compute.Environment": { + "name": "Environment", + "package": "compute", + "description": "The environment entity used to preserve serverless environment side panel, jobs' environment for non-notebook task, and DLT's environment for classic and serverless pipelines.\nIn this minimal environment spec, only pip dependencies are supported.", + "fields": { + "client": { + "name": "client", + "type": "any", + "description": "Use `environment_version` instead.", + "required": false, + "deprecated": true + }, + "dependencies": { + "name": "dependencies", + "type": "any", + "description": "List of pip dependencies, as supported by the version of pip in this environment.\nEach dependency is a valid pip requirements file line per https://pip.pypa.io/en/stable/reference/requirements-file-format/.\nAllowed dependencies include a requirement specifier, an archive URL, a local project path (such as WSFS or UC Volumes in Databricks), or a VCS project URL.", + "required": false + }, + "environment_version": { + "name": "environment_version", + "type": "any", + "description": "Required. Environment version used by the environment.\nEach version comes with a specific Python version and a set of Python packages.\nThe version is a string, consisting of an integer.", + "required": false + }, + "java_dependencies": { + "name": "java_dependencies", + "type": "any", + "description": "List of java dependencies. Each dependency is a string representing a java library path. For example: `/Volumes/path/to/test.jar`.", + "required": false + } + } + }, + "compute.GcpAttributes": { + "name": "GcpAttributes", + "package": "compute", + "description": "Attributes set during cluster creation which are related to GCP.", + "fields": { + "availability": { + "name": "availability", + "type": "any", + "description": "This field determines whether the spark executors will be scheduled to run on preemptible\nVMs, on-demand VMs, or preemptible VMs with a fallback to on-demand VMs if the former is unavailable.", + "required": false + }, + "boot_disk_size": { + "name": "boot_disk_size", + "type": "int", + "description": "Boot disk size in GB", + "required": false + }, + "first_on_demand": { + "name": "first_on_demand", + "type": "any", + "description": "The first `first_on_demand` nodes of the cluster will be placed on on-demand instances.\nThis value should be greater than 0, to make sure the cluster driver node is placed on an\non-demand instance. 
If this value is greater than or equal to the current cluster size, all\nnodes will be placed on on-demand instances. If this value is less than the current cluster\nsize, `first_on_demand` nodes will be placed on on-demand instances and the remainder will\nbe placed on `availability` instances. Note that this value does not affect\ncluster size and cannot currently be mutated over the lifetime of a cluster.", + "required": false + }, + "google_service_account": { + "name": "google_service_account", + "type": "any", + "description": "If provided, the cluster will impersonate the google service account when accessing\ngcloud services (like GCS). The google service account\nmust have previously been added to the Databricks environment by an account\nadministrator.", + "required": false + }, + "local_ssd_count": { + "name": "local_ssd_count", + "type": "int", + "description": "If provided, each node (workers and driver) in the cluster will have this number of local SSDs attached.\nEach local SSD is 375GB in size.\nRefer to [GCP documentation](https://cloud.google.com/compute/docs/disks/local-ssd#choose_number_local_ssds)\nfor the supported number of local SSDs for each instance type.", + "required": false + }, + "use_preemptible_executors": { + "name": "use_preemptible_executors", + "type": "any", + "description": "This field determines whether the spark executors will be scheduled to run on preemptible\nVMs (when set to true) versus standard compute engine VMs (when set to false; default).\nNote: Soon to be deprecated, use the 'availability' field instead.", + "required": false, + "deprecated": true + }, + "zone_id": { + "name": "zone_id", + "type": "string", + "description": "Identifier for the availability zone in which the cluster resides.\nThis can be one of the following:\n- \"HA\" =\u003e High availability, spread nodes across availability zones for a Databricks deployment region [default].\n- \"AUTO\" =\u003e Databricks picks an availability zone to schedule the cluster on.\n- A GCP availability zone =\u003e Pick One of the available zones for (machine type + region) from\nhttps://cloud.google.com/compute/docs/regions-zones.", + "required": false + } + } + }, + "compute.GcpAvailability": { + "name": "GcpAvailability", + "package": "compute", + "description": "This field determines whether the instance pool will contain preemptible\nVMs, on-demand VMs, or preemptible VMs with a fallback to on-demand VMs if the former is unavailable.", + "fields": {} + }, + "compute.GcsStorageInfo": { + "name": "GcsStorageInfo", + "package": "compute", + "description": "A storage location in Google Cloud Platform's GCS", + "fields": { + "destination": { + "name": "destination", + "type": "any", + "description": "GCS destination/URI, e.g. `gs://my-bucket/some-prefix`", + "required": false + } + } + }, + "compute.InitScriptInfo": { + "name": "InitScriptInfo", + "package": "compute", + "description": "Config for an individual init script\nNext ID: 11", + "fields": { + "abfss": { + "name": "abfss", + "type": "any", + "description": "destination needs to be provided, e.g.\n`abfss://\u003ccontainer-name\u003e@\u003cstorage-account-name\u003e.dfs.core.windows.net/\u003cdirectory-name\u003e`", + "required": false + }, + "dbfs": { + "name": "dbfs", + "type": "any", + "description": "destination needs to be provided. 
e.g.\n`{ \"dbfs\": { \"destination\" : \"dbfs:/home/cluster_log\" } }`", + "required": false, + "deprecated": true + }, + "file": { + "name": "file", + "type": "any", + "description": "destination needs to be provided, e.g.\n`{ \"file\": { \"destination\": \"file:/my/local/file.sh\" } }`", + "required": false + }, + "gcs": { + "name": "gcs", + "type": "any", + "description": "destination needs to be provided, e.g.\n`{ \"gcs\": { \"destination\": \"gs://my-bucket/file.sh\" } }`", + "required": false + }, + "s3": { + "name": "s3", + "type": "any", + "description": "destination and either the region or endpoint need to be provided. e.g.\n`{ \\\"s3\\\": { \\\"destination\\\": \\\"s3://cluster_log_bucket/prefix\\\", \\\"region\\\": \\\"us-west-2\\\" } }`\nCluster iam role is used to access s3, please make sure the cluster iam role in\n`instance_profile_arn` has permission to write data to the s3 destination.", + "required": false + }, + "volumes": { + "name": "volumes", + "type": "any", + "description": "destination needs to be provided. e.g.\n`{ \\\"volumes\\\" : { \\\"destination\\\" : \\\"/Volumes/my-init.sh\\\" } }`", + "required": false + }, + "workspace": { + "name": "workspace", + "type": "any", + "description": "destination needs to be provided, e.g.\n`{ \"workspace\": { \"destination\": \"/cluster-init-scripts/setup-datadog.sh\" } }`", + "required": false + } + } + }, + "compute.Library": { + "name": "Library", + "package": "compute", + "description": "library configuration.", + "fields": { + "cran": { + "name": "cran", + "type": "any", + "description": "Specification of a CRAN library to be installed as part of the library", + "required": false + }, + "egg": { + "name": "egg", + "type": "any", + "description": "Deprecated. URI of the egg library to install. Installing Python egg files is deprecated and is not supported in Databricks Runtime 14.0 and above.", + "required": false, + "deprecated": true + }, + "jar": { + "name": "jar", + "type": "any", + "description": "URI of the JAR library to install. Supported URIs include Workspace paths, Unity Catalog Volumes paths, and S3 URIs.\nFor example: `{ \"jar\": \"/Workspace/path/to/library.jar\" }`, `{ \"jar\" : \"/Volumes/path/to/library.jar\" }` or\n`{ \"jar\": \"s3://my-bucket/library.jar\" }`.\nIf S3 is used, please make sure the cluster has read access on the library. You may need to\nlaunch the cluster with an IAM role to access the S3 URI.", + "required": false + }, + "maven": { + "name": "maven", + "type": "any", + "description": "Specification of a maven library to be installed. For example:\n`{ \"coordinates\": \"org.jsoup:jsoup:1.7.2\" }`", + "required": false + }, + "pypi": { + "name": "pypi", + "type": "any", + "description": "Specification of a PyPi library to be installed. For example:\n`{ \"package\": \"simplejson\" }`", + "required": false + }, + "requirements": { + "name": "requirements", + "type": "any", + "description": "URI of the requirements.txt file to install. Only Workspace paths and Unity Catalog Volumes paths are supported.\nFor example: `{ \"requirements\": \"/Workspace/path/to/requirements.txt\" }` or `{ \"requirements\" : \"/Volumes/path/to/requirements.txt\" }`", + "required": false + }, + "whl": { + "name": "whl", + "type": "any", + "description": "URI of the wheel library to install. 
Supported URIs include Workspace paths, Unity Catalog Volumes paths, and S3 URIs.\nFor example: `{ \"whl\": \"/Workspace/path/to/library.whl\" }`, `{ \"whl\" : \"/Volumes/path/to/library.whl\" }` or\n`{ \"whl\": \"s3://my-bucket/library.whl\" }`.\nIf S3 is used, please make sure the cluster has read access on the library. You may need to\nlaunch the cluster with an IAM role to access the S3 URI.", + "required": false + } + } + }, + "compute.LocalFileInfo": { + "name": "LocalFileInfo", + "package": "compute", + "description": "local file info configuration.", + "fields": { + "destination": { + "name": "destination", + "type": "any", + "description": "local file destination, e.g. `file:/my/local/file.sh`", + "required": false + } + } + }, + "compute.LogAnalyticsInfo": { + "name": "LogAnalyticsInfo", + "package": "compute", + "description": "log analytics info configuration.", + "fields": { + "log_analytics_primary_key": { + "name": "log_analytics_primary_key", + "type": "any", + "description": "", + "required": false + }, + "log_analytics_workspace_id": { + "name": "log_analytics_workspace_id", + "type": "string", + "description": "", + "required": false + } + } + }, + "compute.MavenLibrary": { + "name": "MavenLibrary", + "package": "compute", + "description": "maven library configuration.", + "fields": { + "coordinates": { + "name": "coordinates", + "type": "any", + "description": "Gradle-style maven coordinates. For example: \"org.jsoup:jsoup:1.7.2\".", + "required": false + }, + "exclusions": { + "name": "exclusions", + "type": "any", + "description": "List of dependences to exclude. For example: `[\"slf4j:slf4j\", \"*:hadoop-client\"]`.\n\nMaven dependency exclusions:\nhttps://maven.apache.org/guides/introduction/introduction-to-optional-and-excludes-dependencies.html.", + "required": false + }, + "repo": { + "name": "repo", + "type": "any", + "description": "Maven repo to install the Maven package from. If omitted, both Maven Central Repository\nand Spark Packages are searched.", + "required": false + } + } + }, + "compute.PythonPyPiLibrary": { + "name": "PythonPyPiLibrary", + "package": "compute", + "description": "python py pi library configuration.", + "fields": { + "package": { + "name": "package", + "type": "any", + "description": "The name of the pypi package to install. An optional exact version specification is also\nsupported. Examples: \"simplejson\" and \"simplejson==3.8.0\".", + "required": false + }, + "repo": { + "name": "repo", + "type": "any", + "description": "The repository where the package can be found. If not specified, the default pip index is\nused.", + "required": false + } + } + }, + "compute.RCranLibrary": { + "name": "RCranLibrary", + "package": "compute", + "description": "r cran library configuration.", + "fields": { + "package": { + "name": "package", + "type": "any", + "description": "The name of the CRAN package to install.", + "required": false + }, + "repo": { + "name": "repo", + "type": "any", + "description": "The repository where the package can be found. 
If not specified, the default CRAN repo is used.", + "required": false + } + } + }, + "compute.RuntimeEngine": { + "name": "RuntimeEngine", + "package": "compute", + "description": "runtime engine configuration.", + "fields": {} + }, + "compute.S3StorageInfo": { + "name": "S3StorageInfo", + "package": "compute", + "description": "A storage location in Amazon S3", + "fields": { + "canned_acl": { + "name": "canned_acl", + "type": "any", + "description": "(Optional) Set canned access control list for the logs, e.g. `bucket-owner-full-control`.\nIf `canned_acl` is set, please make sure the cluster iam role has `s3:PutObjectAcl` permission on\nthe destination bucket and prefix. The full list of possible canned acl can be found at\nhttp://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html#canned-acl.\nPlease also note that by default only the object owner gets full controls. If you are using cross account\nrole for writing data, you may want to set `bucket-owner-full-control` to make bucket owner able to\nread the logs.", + "required": false + }, + "destination": { + "name": "destination", + "type": "any", + "description": "S3 destination, e.g. `s3://my-bucket/some-prefix` Note that logs will be delivered using\ncluster iam role, please make sure you set cluster iam role and the role has write access to the\ndestination. Please also note that you cannot use AWS keys to deliver logs.", + "required": false + }, + "enable_encryption": { + "name": "enable_encryption", + "type": "bool", + "description": "(Optional) Flag to enable server side encryption, `false` by default.", + "required": false + }, + "encryption_type": { + "name": "encryption_type", + "type": "any", + "description": "(Optional) The encryption type, it could be `sse-s3` or `sse-kms`. It will be used only when\nencryption is enabled and the default type is `sse-s3`.", + "required": false + }, + "endpoint": { + "name": "endpoint", + "type": "any", + "description": "S3 endpoint, e.g. `https://s3-us-west-2.amazonaws.com`. Either region or endpoint needs to be set.\nIf both are set, endpoint will be used.", + "required": false + }, + "kms_key": { + "name": "kms_key", + "type": "any", + "description": "(Optional) Kms key which will be used if encryption is enabled and encryption type is set to `sse-kms`.", + "required": false + }, + "region": { + "name": "region", + "type": "any", + "description": "S3 region, e.g. `us-west-2`. Either region or endpoint needs to be set. If both are set,\nendpoint will be used.", + "required": false + } + } + }, + "compute.VolumesStorageInfo": { + "name": "VolumesStorageInfo", + "package": "compute", + "description": "A storage location backed by UC Volumes.", + "fields": { + "destination": { + "name": "destination", + "type": "any", + "description": "UC Volumes destination, e.g. `/Volumes/catalog/schema/vol1/init-scripts/setup-datadog.sh`\nor `dbfs:/Volumes/catalog/schema/vol1/init-scripts/setup-datadog.sh`", + "required": false + } + } + }, + "compute.WorkloadType": { + "name": "WorkloadType", + "package": "compute", + "description": "Cluster Attributes showing for clusters workload types.", + "fields": { + "clients": { + "name": "clients", + "type": "any", + "description": "Defines what type of clients can use the cluster. E.g. 
Notebooks, Jobs", + "required": false + } + } + }, + "compute.WorkspaceStorageInfo": { + "name": "WorkspaceStorageInfo", + "package": "compute", + "description": "A storage location in Workspace Filesystem (WSFS)", + "fields": { + "destination": { + "name": "destination", + "type": "any", + "description": "wsfs destination, e.g. `workspace:/cluster-init-scripts/setup-datadog.sh`", + "required": false + } + } + }, + "dashboards.LifecycleState": { + "name": "LifecycleState", + "package": "dashboards", + "description": "lifecycle state configuration.", + "fields": {} + }, + "database.CustomTag": { + "name": "CustomTag", + "package": "database", + "description": "custom tag configuration.", + "fields": { + "key": { + "name": "key", + "type": "any", + "description": "The key of the custom tag.", + "required": false + }, + "value": { + "name": "value", + "type": "any", + "description": "The value of the custom tag.", + "required": false + } + } + }, + "database.DatabaseInstanceRef": { + "name": "DatabaseInstanceRef", + "package": "database", + "description": "DatabaseInstanceRef is a reference to a database instance. It is used in the\nDatabaseInstance object to refer to the parent instance of an instance and\nto refer the child instances of an instance.\nTo specify as a parent instance during creation of an instance,\nthe lsn and branch_time fields are optional. If not specified, the child\ninstance will be created from the latest lsn of the parent.\nIf both lsn and branch_time are specified, the lsn will be used to create\nthe child instance.", + "fields": { + "branch_time": { + "name": "branch_time", + "type": "string (timestamp)", + "description": "Branch time of the ref database instance.\nFor a parent ref instance, this is the point in time on the parent instance from which the\ninstance was created.\nFor a child ref instance, this is the point in time on the instance from which the child\ninstance was created.\nInput: For specifying the point in time to create a child instance. Optional.\nOutput: Only populated if provided as input to create a child instance.", + "required": false + }, + "effective_lsn": { + "name": "effective_lsn", + "type": "any", + "description": "For a parent ref instance, this is the LSN on the parent instance from which the\ninstance was created.\nFor a child ref instance, this is the LSN on the instance from which the child instance\nwas created.", + "required": false, + "output_only": true + }, + "lsn": { + "name": "lsn", + "type": "any", + "description": "User-specified WAL LSN of the ref database instance.\n\nInput: For specifying the WAL LSN to create a child instance. 
Optional.\nOutput: Only populated if provided as input to create a child instance.", + "required": false + }, + "name": { + "name": "name", + "type": "any", + "description": "Name of the ref database instance.", + "required": false + }, + "uid": { + "name": "uid", + "type": "any", + "description": "Id of the ref database instance.", + "required": false, + "output_only": true + } + } + }, + "database.DatabaseInstanceState": { + "name": "DatabaseInstanceState", + "package": "database", + "description": "database instance state configuration.", + "fields": {} + }, + "database.DeltaTableSyncInfo": { + "name": "DeltaTableSyncInfo", + "package": "database", + "description": "delta table sync info configuration.", + "fields": { + "delta_commit_timestamp": { + "name": "delta_commit_timestamp", + "type": "any", + "description": "The timestamp when the above Delta version was committed in the source Delta table.\nNote: This is the Delta commit time, not the time the data was written to the synced table.", + "required": false, + "output_only": true + }, + "delta_commit_version": { + "name": "delta_commit_version", + "type": "any", + "description": "The Delta Lake commit version that was last successfully synced.", + "required": false, + "output_only": true + } + } + }, + "database.NewPipelineSpec": { + "name": "NewPipelineSpec", + "package": "database", + "description": "Custom fields that user can set for pipeline while creating SyncedDatabaseTable.\nNote that other fields of pipeline are still inferred by table def internally", + "fields": { + "budget_policy_id": { + "name": "budget_policy_id", + "type": "string", + "description": "Budget policy to set on the newly created pipeline.", + "required": false + }, + "storage_catalog": { + "name": "storage_catalog", + "type": "any", + "description": "This field needs to be specified if the destination catalog is a managed postgres catalog.\n\nUC catalog for the pipeline to store intermediate files (checkpoints, event logs etc).\nThis needs to be a standard catalog where the user has permissions to create Delta tables.", + "required": false + }, + "storage_schema": { + "name": "storage_schema", + "type": "any", + "description": "This field needs to be specified if the destination catalog is a managed postgres catalog.\n\nUC schema for the pipeline to store intermediate files (checkpoints, event logs etc).\nThis needs to be in the standard catalog where the user has permissions to create Delta tables.", + "required": false + } + } + }, + "database.ProvisioningInfoState": { + "name": "ProvisioningInfoState", + "package": "database", + "description": "provisioning info state configuration.", + "fields": {} + }, + "database.ProvisioningPhase": { + "name": "ProvisioningPhase", + "package": "database", + "description": "provisioning phase configuration.", + "fields": {} + }, + "database.SyncedTableContinuousUpdateStatus": { + "name": "SyncedTableContinuousUpdateStatus", + "package": "database", + "description": "Detailed status of a synced table. 
Shown if the synced table is in the SYNCED_CONTINUOUS_UPDATE\nor the SYNCED_UPDATING_PIPELINE_RESOURCES state.", + "fields": { + "initial_pipeline_sync_progress": { + "name": "initial_pipeline_sync_progress", + "type": "any", + "description": "Progress of the initial data synchronization.", + "required": false, + "output_only": true + }, + "last_processed_commit_version": { + "name": "last_processed_commit_version", + "type": "any", + "description": "The last source table Delta version that was successfully synced to the synced table.", + "required": false, + "output_only": true + }, + "timestamp": { + "name": "timestamp", + "type": "any", + "description": "The end timestamp of the last time any data was synchronized from the source table to the synced\ntable. This is when the data is available in the synced table.", + "required": false, + "output_only": true + } + } + }, + "database.SyncedTableFailedStatus": { + "name": "SyncedTableFailedStatus", + "package": "database", + "description": "Detailed status of a synced table. Shown if the synced table is in the OFFLINE_FAILED or the\nSYNCED_PIPELINE_FAILED state.", + "fields": { + "last_processed_commit_version": { + "name": "last_processed_commit_version", + "type": "any", + "description": "The last source table Delta version that was successfully synced to the synced table.\nThe last source table Delta version that was synced to the synced table.\nOnly populated if the table is still\nsynced and available for serving.", + "required": false, + "output_only": true + }, + "timestamp": { + "name": "timestamp", + "type": "any", + "description": "The end timestamp of the last time any data was synchronized from the source table to the synced\ntable. Only populated if the table is still synced and available for serving.", + "required": false, + "output_only": true + } + } + }, + "database.SyncedTablePipelineProgress": { + "name": "SyncedTablePipelineProgress", + "package": "database", + "description": "Progress information of the Synced Table data synchronization pipeline.", + "fields": { + "estimated_completion_time_seconds": { + "name": "estimated_completion_time_seconds", + "type": "int", + "description": "The estimated time remaining to complete this update in seconds.", + "required": false, + "output_only": true + }, + "latest_version_currently_processing": { + "name": "latest_version_currently_processing", + "type": "any", + "description": "The source table Delta version that was last processed by the pipeline. The pipeline may not\nhave completely processed this version yet.", + "required": false, + "output_only": true + }, + "provisioning_phase": { + "name": "provisioning_phase", + "type": "any", + "description": "The current phase of the data synchronization pipeline.", + "required": false, + "output_only": true + }, + "sync_progress_completion": { + "name": "sync_progress_completion", + "type": "any", + "description": "The completion ratio of this update. This is a number between 0 and 1.", + "required": false, + "output_only": true + }, + "synced_row_count": { + "name": "synced_row_count", + "type": "int", + "description": "The number of rows that have been synced in this update.", + "required": false, + "output_only": true + }, + "total_row_count": { + "name": "total_row_count", + "type": "int", + "description": "The total number of rows that need to be synced in this update. 
This number may be an estimate.", + "required": false, + "output_only": true + } + } + }, + "database.SyncedTablePosition": { + "name": "SyncedTablePosition", + "package": "database", + "description": "synced table position configuration.", + "fields": { + "delta_table_sync_info": { + "name": "delta_table_sync_info", + "type": "any", + "description": "", + "required": false, + "output_only": true + }, + "sync_end_timestamp": { + "name": "sync_end_timestamp", + "type": "any", + "description": "The end timestamp of the most recent successful synchronization.\nThis is the time when the data is available in the synced table.", + "required": false, + "output_only": true + }, + "sync_start_timestamp": { + "name": "sync_start_timestamp", + "type": "any", + "description": "The starting timestamp of the most recent successful synchronization from the source table\nto the destination (synced) table.\nNote this is the starting timestamp of the sync operation, not the end time.\nE.g., for a batch, this is the time when the sync operation started.", + "required": false, + "output_only": true + } + } + }, + "database.SyncedTableProvisioningStatus": { + "name": "SyncedTableProvisioningStatus", + "package": "database", + "description": "Detailed status of a synced table. Shown if the synced table is in the\nPROVISIONING_PIPELINE_RESOURCES or the PROVISIONING_INITIAL_SNAPSHOT state.", + "fields": { + "initial_pipeline_sync_progress": { + "name": "initial_pipeline_sync_progress", + "type": "any", + "description": "Details about initial data synchronization. Only populated when in the\nPROVISIONING_INITIAL_SNAPSHOT state.", + "required": false, + "output_only": true + } + } + }, + "database.SyncedTableSchedulingPolicy": { + "name": "SyncedTableSchedulingPolicy", + "package": "database", + "description": "synced table scheduling policy configuration.", + "fields": {} + }, + "database.SyncedTableSpec": { + "name": "SyncedTableSpec", + "package": "database", + "description": "Specification of a synced database table.", + "fields": { + "create_database_objects_if_missing": { + "name": "create_database_objects_if_missing", + "type": "any", + "description": "If true, the synced table's logical database and schema resources in PG\nwill be created if they do not already exist.", + "required": false + }, + "existing_pipeline_id": { + "name": "existing_pipeline_id", + "type": "string", + "description": "At most one of existing_pipeline_id and new_pipeline_spec should be defined.\n\nIf existing_pipeline_id is defined, the synced table will be bin packed into the existing pipeline\nreferenced. This avoids creating a new pipeline and allows sharing existing compute.\nIn this case, the scheduling_policy of this synced table must match the scheduling policy of the existing pipeline.", + "required": false + }, + "new_pipeline_spec": { + "name": "new_pipeline_spec", + "type": "any", + "description": "At most one of existing_pipeline_id and new_pipeline_spec should be defined.\n\nIf new_pipeline_spec is defined, a new pipeline is created for this synced table. The location pointed to is used\nto store intermediate files (checkpoints, event logs etc). The caller must have write permissions to create Delta\ntables in the specified catalog and schema. 
Again, note this requires write permissions, whereas the source table\nonly requires read permissions.", + "required": false + }, + "primary_key_columns": { + "name": "primary_key_columns", + "type": "any", + "description": "Primary Key columns to be used for data insert/update in the destination.", + "required": false + }, + "scheduling_policy": { + "name": "scheduling_policy", + "type": "any", + "description": "Scheduling policy of the underlying pipeline.", + "required": false + }, + "source_table_full_name": { + "name": "source_table_full_name", + "type": "string", + "description": "Three-part (catalog, schema, table) name of the source Delta table.", + "required": false + }, + "timeseries_key": { + "name": "timeseries_key", + "type": "any", + "description": "Time series key to deduplicate (tie-break) rows with the same primary key.", + "required": false + } + } + }, + "database.SyncedTableState": { + "name": "SyncedTableState", + "package": "database", + "description": "The state of a synced table.", + "fields": {} + }, + "database.SyncedTableStatus": { + "name": "SyncedTableStatus", + "package": "database", + "description": "Status of a synced table.", + "fields": { + "continuous_update_status": { + "name": "continuous_update_status", + "type": "any", + "description": "Detailed status of a synced table. Shown if the synced table is in the SYNCED_CONTINUOUS_UPDATE\nor the SYNCED_UPDATING_PIPELINE_RESOURCES state.", + "required": false + }, + "detailed_state": { + "name": "detailed_state", + "type": "any", + "description": "The state of the synced table.", + "required": false, + "output_only": true + }, + "failed_status": { + "name": "failed_status", + "type": "any", + "description": "Detailed status of a synced table. Shown if the synced table is in the OFFLINE_FAILED or the\nSYNCED_PIPELINE_FAILED state.", + "required": false + }, + "last_sync": { + "name": "last_sync", + "type": "any", + "description": "Summary of the last successful synchronization from source to destination.\n\nWill always be present if there has been a successful sync. Even if the most recent syncs have failed.\n\nLimitation:\nThe only exception is if the synced table is doing a FULL REFRESH, then the last sync information\nwill not be available until the full refresh is complete. This limitation will be addressed in a future version.\n\nThis top-level field is a convenience for consumers who want easy access to last sync information\nwithout having to traverse detailed_status.", + "required": false, + "output_only": true + }, + "message": { + "name": "message", + "type": "any", + "description": "A text description of the current state of the synced table.", + "required": false, + "output_only": true + }, + "pipeline_id": { + "name": "pipeline_id", + "type": "string", + "description": "ID of the associated pipeline. The pipeline ID may have been provided by the client\n(in the case of bin packing), or generated by the server (when creating a new pipeline).", + "required": false, + "output_only": true + }, + "provisioning_status": { + "name": "provisioning_status", + "type": "any", + "description": "Detailed status of a synced table. Shown if the synced table is in the\nPROVISIONING_PIPELINE_RESOURCES or the PROVISIONING_INITIAL_SNAPSHOT state.", + "required": false + }, + "triggered_update_status": { + "name": "triggered_update_status", + "type": "any", + "description": "Detailed status of a synced table. 
Shown if the synced table is in the SYNCED_TRIGGERED_UPDATE\nor the SYNCED_NO_PENDING_UPDATE state.", + "required": false + } + } + }, + "database.SyncedTableTriggeredUpdateStatus": { + "name": "SyncedTableTriggeredUpdateStatus", + "package": "database", + "description": "Detailed status of a synced table. Shown if the synced table is in the SYNCED_TRIGGERED_UPDATE\nor the SYNCED_NO_PENDING_UPDATE state.", + "fields": { + "last_processed_commit_version": { + "name": "last_processed_commit_version", + "type": "any", + "description": "The last source table Delta version that was successfully synced to the synced table.", + "required": false, + "output_only": true + }, + "timestamp": { + "name": "timestamp", + "type": "any", + "description": "The end timestamp of the last time any data was synchronized from the source table to the synced\ntable. This is when the data is available in the synced table.", + "required": false, + "output_only": true + }, + "triggered_update_progress": { + "name": "triggered_update_progress", + "type": "any", + "description": "Progress of the active data synchronization pipeline.", + "required": false, + "output_only": true + } + } + }, + "jobs.AuthenticationMethod": { + "name": "AuthenticationMethod", + "package": "jobs", + "description": "authentication method configuration.", + "fields": {} + }, + "jobs.CleanRoomsNotebookTask": { + "name": "CleanRoomsNotebookTask", + "package": "jobs", + "description": "Clean Rooms notebook task for V1 Clean Room service (GA).\nReplaces the deprecated CleanRoomNotebookTask (defined above) which was for V0 service.", + "fields": { + "clean_room_name": { + "name": "clean_room_name", + "type": "string", + "description": "The clean room that the notebook belongs to.", + "required": false + }, + "etag": { + "name": "etag", + "type": "any", + "description": "Checksum to validate the freshness of the notebook resource (i.e. the notebook being run is the latest version).\nIt can be fetched by calling the :method:cleanroomassets/get API.", + "required": false + }, + "notebook_base_parameters": { + "name": "notebook_base_parameters", + "type": "any", + "description": "Base parameters to be used for the clean room notebook job.", + "required": false + }, + "notebook_name": { + "name": "notebook_name", + "type": "string", + "description": "Name of the notebook being run.", + "required": false + } + } + }, + "jobs.ComputeConfig": { + "name": "ComputeConfig", + "package": "jobs", + "description": "compute config configuration.", + "fields": { + "gpu_node_pool_id": { + "name": "gpu_node_pool_id", + "type": "string", + "description": "IDof the GPU pool to use.", + "required": false + }, + "gpu_type": { + "name": "gpu_type", + "type": "any", + "description": "GPU type.", + "required": false + }, + "num_gpus": { + "name": "num_gpus", + "type": "any", + "description": "Number of GPUs.", + "required": false + } + } + }, + "jobs.Condition": { + "name": "Condition", + "package": "jobs", + "description": "condition configuration.", + "fields": {} + }, + "jobs.ConditionTask": { + "name": "ConditionTask", + "package": "jobs", + "description": "condition task configuration.", + "fields": { + "left": { + "name": "left", + "type": "any", + "description": "The left operand of the condition task. Can be either a string value or a job state or parameter reference.", + "required": false + }, + "op": { + "name": "op", + "type": "any", + "description": "* `EQUAL_TO`, `NOT_EQUAL` operators perform string comparison of their operands. 
This means that `“12.0” == “12”` will evaluate to `false`.\n* `GREATER_THAN`, `GREATER_THAN_OR_EQUAL`, `LESS_THAN`, `LESS_THAN_OR_EQUAL` operators perform numeric comparison of their operands. `“12.0” \u003e= “12”` will evaluate to `true`, `“10.0” \u003e= “12”` will evaluate to `false`.\n\nThe boolean comparison to task values can be implemented with operators `EQUAL_TO`, `NOT_EQUAL`. If a task value was set to a boolean value, it will be serialized to `“true”` or `“false”` for the comparison.", + "required": false + }, + "right": { + "name": "right", + "type": "any", + "description": "The right operand of the condition task. Can be either a string value or a job state or parameter reference.", + "required": false + } + } + }, + "jobs.ConditionTaskOp": { + "name": "ConditionTaskOp", + "package": "jobs", + "description": "* `EQUAL_TO`, `NOT_EQUAL` operators perform string comparison of their operands. This means that `“12.0” == “12”` will evaluate to `false`.\n* `GREATER_THAN`, `GREATER_THAN_OR_EQUAL`, `LESS_THAN`, `LESS_THAN_OR_EQUAL` operators perform numeric comparison of their operands. `“12.0” \u003e= “12”` will evaluate to `true`, `“10.0” \u003e= “12”` will evaluate to `false`.\n\nThe boolean comparison to task values can be implemented with operators `EQUAL_TO`, `NOT_EQUAL`. If a task value was set to a boolean value, it will be serialized to `“true”` or `“false”` for the comparison.", + "fields": {} + }, + "jobs.Continuous": { + "name": "Continuous", + "package": "jobs", + "description": "continuous configuration.", + "fields": { + "pause_status": { + "name": "pause_status", + "type": "any", + "description": "Indicate whether the continuous execution of the job is paused or not. Defaults to UNPAUSED.", + "required": false + }, + "task_retry_mode": { + "name": "task_retry_mode", + "type": "any", + "description": "Indicate whether the continuous job is applying task level retries or not. Defaults to NEVER.", + "required": false + } + } + }, + "jobs.CronSchedule": { + "name": "CronSchedule", + "package": "jobs", + "description": "cron schedule configuration.", + "fields": { + "pause_status": { + "name": "pause_status", + "type": "any", + "description": "Indicate whether this schedule is paused or not.", + "required": false + }, + "quartz_cron_expression": { + "name": "quartz_cron_expression", + "type": "any", + "description": "A Cron expression using Quartz syntax that describes the schedule for a job. See [Cron Trigger](http://www.quartz-scheduler.org/documentation/quartz-2.3.0/tutorials/crontrigger.html) for details. This field is required.", + "required": false + }, + "timezone_id": { + "name": "timezone_id", + "type": "string", + "description": "A Java timezone ID. The schedule for a job is resolved with respect to this timezone. See [Java TimeZone](https://docs.oracle.com/javase/7/docs/api/java/util/TimeZone.html) for details. 
This field is required.", + "required": false + } + } + }, + "jobs.DashboardTask": { + "name": "DashboardTask", + "package": "jobs", + "description": "Configures the Lakeview Dashboard job task type.", + "fields": { + "dashboard_id": { + "name": "dashboard_id", + "type": "string", + "description": "The identifier of the dashboard to refresh.", + "required": false + }, + "subscription": { + "name": "subscription", + "type": "any", + "description": "Optional: subscription configuration for sending the dashboard snapshot.", + "required": false + }, + "warehouse_id": { + "name": "warehouse_id", + "type": "string", + "description": "Optional: The warehouse id to execute the dashboard with for the schedule.\nIf not specified, the default warehouse of the dashboard will be used.", + "required": false + } + } + }, + "jobs.DbtCloudTask": { + "name": "DbtCloudTask", + "package": "jobs", + "description": "Deprecated in favor of DbtPlatformTask", + "fields": { + "connection_resource_name": { + "name": "connection_resource_name", + "type": "string", + "description": "The resource name of the UC connection that authenticates the dbt Cloud for this task", + "required": false + }, + "dbt_cloud_job_id": { + "name": "dbt_cloud_job_id", + "type": "string", + "description": "Id of the dbt Cloud job to be triggered", + "required": false + } + } + }, + "jobs.DbtPlatformTask": { + "name": "DbtPlatformTask", + "package": "jobs", + "description": "dbt platform task configuration.", + "fields": { + "connection_resource_name": { + "name": "connection_resource_name", + "type": "string", + "description": "The resource name of the UC connection that authenticates the dbt platform for this task", + "required": false + }, + "dbt_platform_job_id": { + "name": "dbt_platform_job_id", + "type": "string", + "description": "Id of the dbt platform job to be triggered. Specified as a string for maximum compatibility with clients.", + "required": false + } + } + }, + "jobs.DbtTask": { + "name": "DbtTask", + "package": "jobs", + "description": "dbt task configuration.", + "fields": { + "catalog": { + "name": "catalog", + "type": "any", + "description": "Optional name of the catalog to use. The value is the top level in the 3-level namespace of Unity Catalog (catalog / schema / relation). The catalog value can only be specified if a warehouse_id is specified. Requires dbt-databricks \u003e= 1.1.1.", + "required": false + }, + "commands": { + "name": "commands", + "type": "any", + "description": "A list of dbt commands to execute. All commands must start with `dbt`. This parameter must not be empty. A maximum of up to 10 commands can be provided.", + "required": false + }, + "profiles_directory": { + "name": "profiles_directory", + "type": "any", + "description": "Optional (relative) path to the profiles directory. Can only be specified if no warehouse_id is specified. If no warehouse_id is specified and this folder is unset, the root directory is used.", + "required": false + }, + "project_directory": { + "name": "project_directory", + "type": "any", + "description": "Path to the project directory. Optional for Git sourced tasks, in which\ncase if no value is provided, the root of the Git repository is used.", + "required": false + }, + "schema": { + "name": "schema", + "type": "any", + "description": "Optional schema to write to. This parameter is only used when a warehouse_id is also provided. 
If not provided, the `default` schema is used.", + "required": false + }, + "source": { + "name": "source", + "type": "any", + "description": "Optional location type of the project directory. When set to `WORKSPACE`, the project will be retrieved\nfrom the local Databricks workspace. When set to `GIT`, the project will be retrieved from a Git repository\ndefined in `git_source`. If the value is empty, the task will use `GIT` if `git_source` is defined and `WORKSPACE` otherwise.\n\n* `WORKSPACE`: Project is located in Databricks workspace.\n* `GIT`: Project is located in cloud Git provider.", + "required": false + }, + "warehouse_id": { + "name": "warehouse_id", + "type": "string", + "description": "ID of the SQL warehouse to connect to. If provided, we automatically generate and provide the profile and connection details to dbt. It can be overridden on a per-command basis by using the `--profiles-dir` command line argument.", + "required": false + } + } + }, + "jobs.FileArrivalTriggerConfiguration": { + "name": "FileArrivalTriggerConfiguration", + "package": "jobs", + "description": "file arrival trigger configuration configuration.", + "fields": { + "min_time_between_triggers_seconds": { + "name": "min_time_between_triggers_seconds", + "type": "int", + "description": "If set, the trigger starts a run only after the specified amount of time passed since\nthe last time the trigger fired. The minimum allowed value is 60 seconds", + "required": false + }, + "url": { + "name": "url", + "type": "any", + "description": "URL to be monitored for file arrivals. The path must point to the root or a subpath of the external location.", + "required": false + }, + "wait_after_last_change_seconds": { + "name": "wait_after_last_change_seconds", + "type": "int", + "description": "If set, the trigger starts a run only after no file activity has occurred for the specified amount of time.\nThis makes it possible to wait for a batch of incoming files to arrive before triggering a run. The\nminimum allowed value is 60 seconds.", + "required": false + } + } + }, + "jobs.ForEachTask": { + "name": "ForEachTask", + "package": "jobs", + "description": "for each task configuration.", + "fields": { + "concurrency": { + "name": "concurrency", + "type": "any", + "description": "An optional maximum allowed number of concurrent runs of the task.\nSet this value if you want to be able to execute multiple runs of the task concurrently.", + "required": false + }, + "inputs": { + "name": "inputs", + "type": "any", + "description": "Array for task to iterate on. This can be a JSON string or a reference to\nan array parameter.", + "required": false + }, + "task": { + "name": "task", + "type": "any", + "description": "Configuration for the task that will be run for each element in the array", + "required": false + } + } + }, + "jobs.Format": { + "name": "Format", + "package": "jobs", + "description": "format configuration.", + "fields": {} + }, + "jobs.GenAiComputeTask": { + "name": "GenAiComputeTask", + "package": "jobs", + "description": "gen ai compute task configuration.", + "fields": { + "command": { + "name": "command", + "type": "any", + "description": "Command launcher to run the actual script, e.g. 
bash, python etc.", + "required": false + }, + "compute": { + "name": "compute", + "type": "any", + "description": "", + "required": false + }, + "dl_runtime_image": { + "name": "dl_runtime_image", + "type": "any", + "description": "Runtime image", + "required": false + }, + "mlflow_experiment_name": { + "name": "mlflow_experiment_name", + "type": "string", + "description": "Optional string containing the name of the MLflow experiment to log the run to. If name is not\nfound, backend will create the mlflow experiment using the name.", + "required": false + }, + "source": { + "name": "source", + "type": "any", + "description": "Optional location type of the training script. When set to `WORKSPACE`, the script will be retrieved from the local Databricks workspace. When set to `GIT`, the script will be retrieved from a Git repository\ndefined in `git_source`. If the value is empty, the task will use `GIT` if `git_source` is defined and `WORKSPACE` otherwise.\n* `WORKSPACE`: Script is located in Databricks workspace.\n* `GIT`: Script is located in cloud Git provider.", + "required": false + }, + "training_script_path": { + "name": "training_script_path", + "type": "string", + "description": "The training script file path to be executed. Cloud file URIs (such as dbfs:/, s3:/, adls:/, gcs:/) and workspace paths are supported. For python files stored in the Databricks workspace, the path must be absolute and begin with `/`. For files stored in a remote repository, the path must be relative. This field is required.", + "required": false + }, + "yaml_parameters": { + "name": "yaml_parameters", + "type": "any", + "description": "Optional string containing model parameters passed to the training script in yaml format.\nIf present, then the content in yaml_parameters_file_path will be ignored.", + "required": false + }, + "yaml_parameters_file_path": { + "name": "yaml_parameters_file_path", + "type": "string", + "description": "Optional path to a YAML file containing model parameters passed to the training script.", + "required": false + } + } + }, + "jobs.GitProvider": { + "name": "GitProvider", + "package": "jobs", + "description": "git provider configuration.", + "fields": {} + }, + "jobs.GitSnapshot": { + "name": "GitSnapshot", + "package": "jobs", + "description": "Read-only state of the remote repository at the time the job was run. This field is only included on job runs.", + "fields": { + "used_commit": { + "name": "used_commit", + "type": "any", + "description": "Commit that was used to execute the run. If git_branch was specified, this points to the HEAD of the branch at the time of the run; if git_tag was specified, this points to the commit the tag points to.", + "required": false + } + } + }, + "jobs.GitSource": { + "name": "GitSource", + "package": "jobs", + "description": "An optional specification for a remote Git repository containing the source code used by tasks. Version-controlled source code is supported by notebook, dbt, Python script, and SQL File tasks.\n\nIf `git_source` is set, these tasks retrieve the file from the remote repository by default. However, this behavior can be overridden by setting `source` to `WORKSPACE` on the task.\n\nNote: dbt and SQL File tasks support only version-controlled sources. If dbt or SQL File tasks are used, `git_source` must be defined on the job.", + "fields": { + "git_branch": { + "name": "git_branch", + "type": "any", + "description": "Name of the branch to be checked out and used by this job. 
This field cannot be specified in conjunction with git_tag or git_commit.", + "required": false + }, + "git_commit": { + "name": "git_commit", + "type": "any", + "description": "Commit to be checked out and used by this job. This field cannot be specified in conjunction with git_branch or git_tag.", + "required": false + }, + "git_provider": { + "name": "git_provider", + "type": "any", + "description": "Unique identifier of the service used to host the Git repository. The value is case insensitive.", + "required": false + }, + "git_snapshot": { + "name": "git_snapshot", + "type": "any", + "description": "Read-only state of the remote repository at the time the job was run. This field is only included on job runs.", + "required": false + }, + "git_tag": { + "name": "git_tag", + "type": "any", + "description": "Name of the tag to be checked out and used by this job. This field cannot be specified in conjunction with git_branch or git_commit.", + "required": false + }, + "git_url": { + "name": "git_url", + "type": "string", + "description": "URL of the repository to be cloned by this job.", + "required": false + }, + "job_source": { + "name": "job_source", + "type": "any", + "description": "The source of the job specification in the remote repository when the job is source controlled.", + "required": false, + "deprecated": true + } + } + }, + "jobs.JobCluster": { + "name": "JobCluster", + "package": "jobs", + "description": "job cluster configuration.", + "fields": { + "job_cluster_key": { + "name": "job_cluster_key", + "type": "any", + "description": "A unique name for the job cluster. This field is required and must be unique within the job.\n`JobTaskSettings` may refer to this field to determine which cluster to launch for the task execution.", + "required": false + }, + "new_cluster": { + "name": "new_cluster", + "type": "any", + "description": "If new_cluster, a description of a cluster that is created for each task.", + "required": false + } + } + }, + "jobs.JobDeployment": { + "name": "JobDeployment", + "package": "jobs", + "description": "job deployment configuration.", + "fields": { + "kind": { + "name": "kind", + "type": "any", + "description": "The kind of deployment that manages the job.\n\n* `BUNDLE`: The job is managed by Databricks Asset Bundle.", + "required": false + }, + "metadata_file_path": { + "name": "metadata_file_path", + "type": "string", + "description": "Path of the file that contains deployment metadata.", + "required": false + } + } + }, + "jobs.JobDeploymentKind": { + "name": "JobDeploymentKind", + "package": "jobs", + "description": "* `BUNDLE`: The job is managed by Databricks Asset Bundle.", + "fields": {} + }, + "jobs.JobEditMode": { + "name": "JobEditMode", + "package": "jobs", + "description": "Edit mode of the job.\n\n* `UI_LOCKED`: The job is in a locked UI state and cannot be modified.\n* `EDITABLE`: The job is in an editable state and can be modified.", + "fields": {} + }, + "jobs.JobEmailNotifications": { + "name": "JobEmailNotifications", + "package": "jobs", + "description": "job email notifications configuration.", + "fields": { + "no_alert_for_skipped_runs": { + "name": "no_alert_for_skipped_runs", + "type": "any", + "description": "If true, do not send email to recipients specified in `on_failure` if the run is skipped.\nThis field is `deprecated`. 
Please use the `notification_settings.no_alert_for_skipped_runs` field.", + "required": false, + "deprecated": true + }, + "on_duration_warning_threshold_exceeded": { + "name": "on_duration_warning_threshold_exceeded", + "type": "any", + "description": "A list of email addresses to be notified when the duration of a run exceeds the threshold specified for the `RUN_DURATION_SECONDS` metric in the `health` field. If no rule for the `RUN_DURATION_SECONDS` metric is specified in the `health` field for the job, notifications are not sent.", + "required": false + }, + "on_failure": { + "name": "on_failure", + "type": "any", + "description": "A list of email addresses to be notified when a run unsuccessfully completes. A run is considered to have completed unsuccessfully if it ends with an `INTERNAL_ERROR` `life_cycle_state` or a `FAILED`, or `TIMED_OUT` result_state. If this is not specified on job creation, reset, or update the list is empty, and notifications are not sent.", + "required": false + }, + "on_start": { + "name": "on_start", + "type": "any", + "description": "A list of email addresses to be notified when a run begins. If not specified on job creation, reset, or update, the list is empty, and notifications are not sent.", + "required": false + }, + "on_streaming_backlog_exceeded": { + "name": "on_streaming_backlog_exceeded", + "type": "any", + "description": "A list of email addresses to notify when any streaming backlog thresholds are exceeded for any stream.\nStreaming backlog thresholds can be set in the `health` field using the following metrics: `STREAMING_BACKLOG_BYTES`, `STREAMING_BACKLOG_RECORDS`, `STREAMING_BACKLOG_SECONDS`, or `STREAMING_BACKLOG_FILES`.\nAlerting is based on the 10-minute average of these metrics. If the issue persists, notifications are resent every 30 minutes.", + "required": false + }, + "on_success": { + "name": "on_success", + "type": "any", + "description": "A list of email addresses to be notified when a run successfully completes. A run is considered to have completed successfully if it ends with a `TERMINATED` `life_cycle_state` and a `SUCCESS` result_state. If not specified on job creation, reset, or update, the list is empty, and notifications are not sent.", + "required": false + } + } + }, + "jobs.JobEnvironment": { + "name": "JobEnvironment", + "package": "jobs", + "description": "job environment configuration.", + "fields": { + "environment_key": { + "name": "environment_key", + "type": "any", + "description": "The key of an environment. 
It has to be unique within a job.", + "required": false + }, + "spec": { + "name": "spec", + "type": "any", + "description": "The environment entity used to preserve serverless environment side panel, jobs' environment for non-notebook task, and DLT's environment for classic and serverless pipelines.\nIn this minimal environment spec, only pip dependencies are supported.", + "required": false + } + } + }, + "jobs.JobNotificationSettings": { + "name": "JobNotificationSettings", + "package": "jobs", + "description": "Configuration settings for job notification.", + "fields": { + "no_alert_for_canceled_runs": { + "name": "no_alert_for_canceled_runs", + "type": "any", + "description": "If true, do not send notifications to recipients specified in `on_failure` if the run is canceled.", + "required": false + }, + "no_alert_for_skipped_runs": { + "name": "no_alert_for_skipped_runs", + "type": "any", + "description": "If true, do not send notifications to recipients specified in `on_failure` if the run is skipped.", + "required": false + } + } + }, + "jobs.JobParameterDefinition": { + "name": "JobParameterDefinition", + "package": "jobs", + "description": "job parameter definition configuration.", + "fields": { + "default": { + "name": "default", + "type": "any", + "description": "Default value of the parameter.", + "required": false + }, + "name": { + "name": "name", + "type": "any", + "description": "The name of the defined parameter. May only contain alphanumeric characters, `_`, `-`, and `.`", + "required": false + } + } + }, + "jobs.JobRunAs": { + "name": "JobRunAs", + "package": "jobs", + "description": "Write-only setting. Specifies the user or service principal that the job runs as. If not specified, the job runs as the user who created the job.\n\nEither `user_name` or `service_principal_name` should be specified. If not, an error is thrown.", + "fields": { + "group_name": { + "name": "group_name", + "type": "string", + "description": "Group name of an account group assigned to the workspace. Setting this field requires being a member of the group.", + "required": false + }, + "service_principal_name": { + "name": "service_principal_name", + "type": "string", + "description": "Application ID of an active service principal. Setting this field requires the `servicePrincipal/user` role.", + "required": false + }, + "user_name": { + "name": "user_name", + "type": "string", + "description": "The email of an active workspace user. Non-admin users can only set this field to their own email.", + "required": false + } + } + }, + "jobs.JobSource": { + "name": "JobSource", + "package": "jobs", + "description": "The source of the job specification in the remote repository when the job is source controlled.", + "fields": { + "dirty_state": { + "name": "dirty_state", + "type": "any", + "description": "Dirty state indicates the job is not fully synced with the job specification in the remote repository.\n\nPossible values are:\n* `NOT_SYNCED`: The job is not yet synced with the remote job specification. Import the remote job specification from UI to make the job fully synced.\n* `DISCONNECTED`: The job is temporary disconnected from the remote job specification and is allowed for live edit. 
Import the remote job specification again from UI to make the job fully synced.", + "required": false + }, + "import_from_git_branch": { + "name": "import_from_git_branch", + "type": "any", + "description": "Name of the branch which the job is imported from.", + "required": false + }, + "job_config_path": { + "name": "job_config_path", + "type": "string", + "description": "Path of the job YAML file that contains the job specification.", + "required": false + } + } + }, + "jobs.JobSourceDirtyState": { + "name": "JobSourceDirtyState", + "package": "jobs", + "description": "Dirty state indicates the job is not fully synced with the job specification\nin the remote repository.\n\nPossible values are:\n* `NOT_SYNCED`: The job is not yet synced with the remote job specification. Import the remote job specification from UI to make the job fully synced.\n* `DISCONNECTED`: The job is temporary disconnected from the remote job specification and is allowed for live edit. Import the remote job specification again from UI to make the job fully synced.", + "fields": {} + }, + "jobs.JobsHealthMetric": { + "name": "JobsHealthMetric", + "package": "jobs", + "description": "Specifies the health metric that is being evaluated for a particular health rule.\n\n* `RUN_DURATION_SECONDS`: Expected total time for a run in seconds.\n* `STREAMING_BACKLOG_BYTES`: An estimate of the maximum bytes of data waiting to be consumed across all streams. This metric is in Public Preview.\n* `STREAMING_BACKLOG_RECORDS`: An estimate of the maximum offset lag across all streams. This metric is in Public Preview.\n* `STREAMING_BACKLOG_SECONDS`: An estimate of the maximum consumer delay across all streams. This metric is in Public Preview.\n* `STREAMING_BACKLOG_FILES`: An estimate of the maximum number of outstanding files across all streams. This metric is in Public Preview.", + "fields": {} + }, + "jobs.JobsHealthOperator": { + "name": "JobsHealthOperator", + "package": "jobs", + "description": "Specifies the operator used to compare the health metric value with the specified threshold.", + "fields": {} + }, + "jobs.JobsHealthRule": { + "name": "JobsHealthRule", + "package": "jobs", + "description": "jobs health rule configuration.", + "fields": { + "metric": { + "name": "metric", + "type": "any", + "description": "Specifies the health metric that is being evaluated for a particular health rule.\n\n* `RUN_DURATION_SECONDS`: Expected total time for a run in seconds.\n* `STREAMING_BACKLOG_BYTES`: An estimate of the maximum bytes of data waiting to be consumed across all streams. This metric is in Public Preview.\n* `STREAMING_BACKLOG_RECORDS`: An estimate of the maximum offset lag across all streams. This metric is in Public Preview.\n* `STREAMING_BACKLOG_SECONDS`: An estimate of the maximum consumer delay across all streams. This metric is in Public Preview.\n* `STREAMING_BACKLOG_FILES`: An estimate of the maximum number of outstanding files across all streams. 
This metric is in Public Preview.", + "required": false + }, + "op": { + "name": "op", + "type": "any", + "description": "Specifies the operator used to compare the health metric value with the specified threshold.", + "required": false + }, + "value": { + "name": "value", + "type": "any", + "description": "Specifies the threshold value that the health metric should obey to satisfy the health rule.", + "required": false + } + } + }, + "jobs.JobsHealthRules": { + "name": "JobsHealthRules", + "package": "jobs", + "description": "An optional set of health rules that can be defined for this job.", + "fields": { + "rules": { + "name": "rules", + "type": "any", + "description": "", + "required": false + } + } + }, + "jobs.ModelTriggerConfiguration": { + "name": "ModelTriggerConfiguration", + "package": "jobs", + "description": "model trigger configuration configuration.", + "fields": { + "aliases": { + "name": "aliases", + "type": "any", + "description": "Aliases of the model versions to monitor. Can only be used in conjunction with condition MODEL_ALIAS_SET.", + "required": false + }, + "condition": { + "name": "condition", + "type": "any", + "description": "The condition based on which to trigger a job run.", + "required": false + }, + "min_time_between_triggers_seconds": { + "name": "min_time_between_triggers_seconds", + "type": "int", + "description": "If set, the trigger starts a run only after the specified amount of time has passed since\nthe last time the trigger fired. The minimum allowed value is 60 seconds.", + "required": false + }, + "securable_name": { + "name": "securable_name", + "type": "string", + "description": "Name of the securable to monitor (\"mycatalog.myschema.mymodel\" in the case of model-level triggers,\n\"mycatalog.myschema\" in the case of schema-level triggers) or empty in the case of metastore-level triggers.", + "required": false + }, + "wait_after_last_change_seconds": { + "name": "wait_after_last_change_seconds", + "type": "int", + "description": "If set, the trigger starts a run only after no model updates have occurred for the specified time\nand can be used to wait for a series of model updates before triggering a run. The\nminimum allowed value is 60 seconds.", + "required": false + } + } + }, + "jobs.ModelTriggerConfigurationCondition": { + "name": "ModelTriggerConfigurationCondition", + "package": "jobs", + "description": "model trigger configuration condition configuration.", + "fields": {} + }, + "jobs.NotebookTask": { + "name": "NotebookTask", + "package": "jobs", + "description": "notebook task configuration.", + "fields": { + "base_parameters": { + "name": "base_parameters", + "type": "any", + "description": "Base parameters to be used for each run of this job. If the run is initiated by a call to :method:jobs/run\nNow with parameters specified, the two parameters maps are merged. 
If the same key is specified in\n`base_parameters` and in `run-now`, the value from `run-now` is used.\nUse [Task parameter variables](https://docs.databricks.com/jobs.html#parameter-variables) to set parameters containing information about job runs.\n\nIf the notebook takes a parameter that is not specified in the job’s `base_parameters` or the `run-now` override parameters,\nthe default value from the notebook is used.\n\nRetrieve these parameters in a notebook using [dbutils.widgets.get](https://docs.databricks.com/dev-tools/databricks-utils.html#dbutils-widgets).\n\nThe JSON representation of this field cannot exceed 1MB.", + "required": false + }, + "notebook_path": { + "name": "notebook_path", + "type": "string", + "description": "The path of the notebook to be run in the Databricks workspace or remote repository.\nFor notebooks stored in the Databricks workspace, the path must be absolute and begin with a slash.\nFor notebooks stored in a remote repository, the path must be relative. This field is required.", + "required": false + }, + "source": { + "name": "source", + "type": "any", + "description": "Optional location type of the notebook. When set to `WORKSPACE`, the notebook will be retrieved from the local Databricks workspace. When set to `GIT`, the notebook will be retrieved from a Git repository\ndefined in `git_source`. If the value is empty, the task will use `GIT` if `git_source` is defined and `WORKSPACE` otherwise.\n* `WORKSPACE`: Notebook is located in Databricks workspace.\n* `GIT`: Notebook is located in cloud Git provider.", + "required": false + }, + "warehouse_id": { + "name": "warehouse_id", + "type": "string", + "description": "Optional `warehouse_id` to run the notebook on a SQL warehouse. Classic SQL warehouses are NOT supported, please use serverless or pro SQL warehouses.\n\nNote that SQL warehouses only support SQL cells; if the notebook contains non-SQL cells, the run will fail.", + "required": false + } + } + }, + "jobs.PauseStatus": { + "name": "PauseStatus", + "package": "jobs", + "description": "pause status configuration.", + "fields": {} + }, + "jobs.PerformanceTarget": { + "name": "PerformanceTarget", + "package": "jobs", + "description": "PerformanceTarget defines how performant (lower latency) or cost efficient the execution of run on serverless compute should be.\nThe performance mode on the job or pipeline should map to a performance setting that is passed to Cluster Manager\n(see cluster-common PerformanceTarget).", + "fields": {} + }, + "jobs.PeriodicTriggerConfiguration": { + "name": "PeriodicTriggerConfiguration", + "package": "jobs", + "description": "periodic trigger configuration configuration.", + "fields": { + "interval": { + "name": "interval", + "type": "any", + "description": "The interval at which the trigger should run.", + "required": false + }, + "unit": { + "name": "unit", + "type": "any", + "description": "The unit of time for the interval.", + "required": false + } + } + }, + "jobs.PeriodicTriggerConfigurationTimeUnit": { + "name": "PeriodicTriggerConfigurationTimeUnit", + "package": "jobs", + "description": "periodic trigger configuration time unit configuration.", + "fields": {} + }, + "jobs.PipelineParams": { + "name": "PipelineParams", + "package": "jobs", + "description": "pipeline params configuration.", + "fields": { + "full_refresh": { + "name": "full_refresh", + "type": "any", + "description": "If true, triggers a full refresh on the delta live table.", + "required": false + } + } + }, + "jobs.PipelineTask": { + 
"name": "PipelineTask", + "package": "jobs", + "description": "pipeline task configuration.", + "fields": { + "full_refresh": { + "name": "full_refresh", + "type": "any", + "description": "If true, triggers a full refresh on the delta live table.", + "required": false + }, + "pipeline_id": { + "name": "pipeline_id", + "type": "string", + "description": "The full name of the pipeline task to execute.", + "required": false + } + } + }, + "jobs.PowerBiModel": { + "name": "PowerBiModel", + "package": "jobs", + "description": "power bi model configuration.", + "fields": { + "authentication_method": { + "name": "authentication_method", + "type": "any", + "description": "How the published Power BI model authenticates to Databricks", + "required": false + }, + "model_name": { + "name": "model_name", + "type": "string", + "description": "The name of the Power BI model", + "required": false + }, + "overwrite_existing": { + "name": "overwrite_existing", + "type": "any", + "description": "Whether to overwrite existing Power BI models", + "required": false + }, + "storage_mode": { + "name": "storage_mode", + "type": "any", + "description": "The default storage mode of the Power BI model", + "required": false + }, + "workspace_name": { + "name": "workspace_name", + "type": "string", + "description": "The name of the Power BI workspace of the model", + "required": false + } + } + }, + "jobs.PowerBiTable": { + "name": "PowerBiTable", + "package": "jobs", + "description": "power bi table configuration.", + "fields": { + "catalog": { + "name": "catalog", + "type": "any", + "description": "The catalog name in Databricks", + "required": false + }, + "name": { + "name": "name", + "type": "any", + "description": "The table name in Databricks", + "required": false + }, + "schema": { + "name": "schema", + "type": "any", + "description": "The schema name in Databricks", + "required": false + }, + "storage_mode": { + "name": "storage_mode", + "type": "any", + "description": "The Power BI storage mode of the table", + "required": false + } + } + }, + "jobs.PowerBiTask": { + "name": "PowerBiTask", + "package": "jobs", + "description": "power bi task configuration.", + "fields": { + "connection_resource_name": { + "name": "connection_resource_name", + "type": "string", + "description": "The resource name of the UC connection to authenticate from Databricks to Power BI", + "required": false + }, + "power_bi_model": { + "name": "power_bi_model", + "type": "any", + "description": "The semantic model to update", + "required": false + }, + "refresh_after_update": { + "name": "refresh_after_update", + "type": "any", + "description": "Whether the model should be refreshed after the update", + "required": false + }, + "tables": { + "name": "tables", + "type": "any", + "description": "The tables to be exported to Power BI", + "required": false + }, + "warehouse_id": { + "name": "warehouse_id", + "type": "string", + "description": "The SQL warehouse ID to use as the Power BI data source", + "required": false + } + } + }, + "jobs.PythonWheelTask": { + "name": "PythonWheelTask", + "package": "jobs", + "description": "python wheel task configuration.", + "fields": { + "entry_point": { + "name": "entry_point", + "type": "any", + "description": "Named entry point to use, if it does not exist in the metadata of the package it executes the function from the package directly using `$packageName.$entryPoint()`", + "required": false + }, + "named_parameters": { + "name": "named_parameters", + "type": "any", + "description": "Command-line 
parameters passed to Python wheel task in the form of `[\"--name=task\", \"--data=dbfs:/path/to/data.json\"]`. Leave it empty if `parameters` is not null.", + "required": false + }, + "package_name": { + "name": "package_name", + "type": "string", + "description": "Name of the package to execute", + "required": false + }, + "parameters": { + "name": "parameters", + "type": "any", + "description": "Command-line parameters passed to Python wheel task. Leave it empty if `named_parameters` is not null.", + "required": false + } + } + }, + "jobs.QueueSettings": { + "name": "QueueSettings", + "package": "jobs", + "description": "Configuration settings for queue.", + "fields": { + "enabled": { + "name": "enabled", + "type": "bool", + "description": "If true, enable queueing for the job. This is a required field.", + "required": false + } + } + }, + "jobs.RunIf": { + "name": "RunIf", + "package": "jobs", + "description": "An optional value indicating the condition that determines whether the task should be run once its dependencies have been completed. When omitted, defaults to `ALL_SUCCESS`.\n\nPossible values are:\n* `ALL_SUCCESS`: All dependencies have executed and succeeded\n* `AT_LEAST_ONE_SUCCESS`: At least one dependency has succeeded\n* `NONE_FAILED`: None of the dependencies have failed and at least one was executed\n* `ALL_DONE`: All dependencies have been completed\n* `AT_LEAST_ONE_FAILED`: At least one dependency failed\n* `ALL_FAILED`: ALl dependencies have failed", + "fields": {} + }, + "jobs.RunJobTask": { + "name": "RunJobTask", + "package": "jobs", + "description": "run job task configuration.", + "fields": { + "dbt_commands": { + "name": "dbt_commands", + "type": "any", + "description": "An array of commands to execute for jobs with the dbt task, for example `\"dbt_commands\": [\"dbt deps\", \"dbt seed\", \"dbt deps\", \"dbt seed\", \"dbt run\"]`\n\n⚠ **Deprecation note** Use [job parameters](https://docs.databricks.com/jobs/job-parameters.html#job-parameter-pushdown) to pass information down to tasks.", + "required": false, + "deprecated": true + }, + "jar_params": { + "name": "jar_params", + "type": "any", + "description": "A list of parameters for jobs with Spark JAR tasks, for example `\"jar_params\": [\"john doe\", \"35\"]`.\nThe parameters are used to invoke the main function of the main class specified in the Spark JAR task.\nIf not specified upon `run-now`, it defaults to an empty list.\njar_params cannot be specified in conjunction with notebook_params.\nThe JSON representation of this field (for example `{\"jar_params\":[\"john doe\",\"35\"]}`) cannot exceed 10,000 bytes.\n\n⚠ **Deprecation note** Use [job parameters](https://docs.databricks.com/jobs/job-parameters.html#job-parameter-pushdown) to pass information down to tasks.", + "required": false, + "deprecated": true + }, + "job_id": { + "name": "job_id", + "type": "string", + "description": "ID of the job to trigger.", + "required": false + }, + "job_parameters": { + "name": "job_parameters", + "type": "any", + "description": "Job-level parameters used to trigger the job.", + "required": false + }, + "notebook_params": { + "name": "notebook_params", + "type": "any", + "description": "A map from keys to values for jobs with notebook task, for example `\"notebook_params\": {\"name\": \"john doe\", \"age\": \"35\"}`.\nThe map is passed to the notebook and is accessible through the [dbutils.widgets.get](https://docs.databricks.com/dev-tools/databricks-utils.html) function.\n\nIf not specified upon `run-now`, the 
triggered run uses the job’s base parameters.\n\nnotebook_params cannot be specified in conjunction with jar_params.\n\n⚠ **Deprecation note** Use [job parameters](https://docs.databricks.com/jobs/job-parameters.html#job-parameter-pushdown) to pass information down to tasks.\n\nThe JSON representation of this field (for example `{\"notebook_params\":{\"name\":\"john doe\",\"age\":\"35\"}}`) cannot exceed 10,000 bytes.", + "required": false, + "deprecated": true + }, + "pipeline_params": { + "name": "pipeline_params", + "type": "any", + "description": "Controls whether the pipeline should perform a full refresh", + "required": false + }, + "python_named_params": { + "name": "python_named_params", + "type": "any", + "description": "", + "required": false, + "deprecated": true + }, + "python_params": { + "name": "python_params", + "type": "any", + "description": "A list of parameters for jobs with Python tasks, for example `\"python_params\": [\"john doe\", \"35\"]`.\nThe parameters are passed to Python file as command-line parameters. If specified upon `run-now`, it would overwrite\nthe parameters specified in job setting. The JSON representation of this field (for example `{\"python_params\":[\"john doe\",\"35\"]}`)\ncannot exceed 10,000 bytes.\n\n⚠ **Deprecation note** Use [job parameters](https://docs.databricks.com/jobs/job-parameters.html#job-parameter-pushdown) to pass information down to tasks.\n\nImportant\n\nThese parameters accept only Latin characters (ASCII character set). Using non-ASCII characters returns an error.\nExamples of invalid, non-ASCII characters are Chinese, Japanese kanjis, and emojis.", + "required": false, + "deprecated": true + }, + "spark_submit_params": { + "name": "spark_submit_params", + "type": "any", + "description": "A list of parameters for jobs with spark submit task, for example `\"spark_submit_params\": [\"--class\", \"org.apache.spark.examples.SparkPi\"]`.\nThe parameters are passed to spark-submit script as command-line parameters. If specified upon `run-now`, it would overwrite the\nparameters specified in job setting. The JSON representation of this field (for example `{\"python_params\":[\"john doe\",\"35\"]}`)\ncannot exceed 10,000 bytes.\n\n⚠ **Deprecation note** Use [job parameters](https://docs.databricks.com/jobs/job-parameters.html#job-parameter-pushdown) to pass information down to tasks.\n\nImportant\n\nThese parameters accept only Latin characters (ASCII character set). Using non-ASCII characters returns an error.\nExamples of invalid, non-ASCII characters are Chinese, Japanese kanjis, and emojis.", + "required": false, + "deprecated": true + }, + "sql_params": { + "name": "sql_params", + "type": "any", + "description": "A map from keys to values for jobs with SQL task, for example `\"sql_params\": {\"name\": \"john doe\", \"age\": \"35\"}`. The SQL alert task does not support custom parameters.\n\n⚠ **Deprecation note** Use [job parameters](https://docs.databricks.com/jobs/job-parameters.html#job-parameter-pushdown) to pass information down to tasks.", + "required": false, + "deprecated": true + } + } + }, + "jobs.Source": { + "name": "Source", + "package": "jobs", + "description": "Optional location type of the SQL file. When set to `WORKSPACE`, the SQL file will be retrieved\\\nfrom the local Databricks workspace. When set to `GIT`, the SQL file will be retrieved from a Git repository\ndefined in `git_source`. 
If the value is empty, the task will use `GIT` if `git_source` is defined and `WORKSPACE` otherwise.\n\n* `WORKSPACE`: SQL file is located in Databricks workspace.\n* `GIT`: SQL file is located in cloud Git provider.", + "fields": {} + }, + "jobs.SparkJarTask": { + "name": "SparkJarTask", + "package": "jobs", + "description": "spark jar task configuration.", + "fields": { + "jar_uri": { + "name": "jar_uri", + "type": "any", + "description": "Deprecated since 04/2016. For classic compute, provide a `jar` through the `libraries` field instead. For serverless compute, provide a `jar` though the `java_dependencies` field inside the `environments` list.\n\nSee the examples of classic and serverless compute usage at the top of the page.", + "required": false, + "deprecated": true + }, + "main_class_name": { + "name": "main_class_name", + "type": "string", + "description": "The full name of the class containing the main method to be executed. This class must be contained in a JAR provided as a library.\n\nThe code must use `SparkContext.getOrCreate` to obtain a Spark context; otherwise, runs of the job fail.", + "required": false + }, + "parameters": { + "name": "parameters", + "type": "any", + "description": "Parameters passed to the main method.\n\nUse [Task parameter variables](https://docs.databricks.com/jobs.html#parameter-variables) to set parameters containing information about job runs.", + "required": false + }, + "run_as_repl": { + "name": "run_as_repl", + "type": "any", + "description": "Deprecated. A value of `false` is no longer supported.", + "required": false, + "deprecated": true + } + } + }, + "jobs.SparkPythonTask": { + "name": "SparkPythonTask", + "package": "jobs", + "description": "spark python task configuration.", + "fields": { + "parameters": { + "name": "parameters", + "type": "any", + "description": "Command line parameters passed to the Python file.\n\nUse [Task parameter variables](https://docs.databricks.com/jobs.html#parameter-variables) to set parameters containing information about job runs.", + "required": false + }, + "python_file": { + "name": "python_file", + "type": "any", + "description": "The Python file to be executed. Cloud file URIs (such as dbfs:/, s3:/, adls:/, gcs:/) and workspace paths are supported. For python files stored in the Databricks workspace, the path must be absolute and begin with `/`. For files stored in a remote repository, the path must be relative. This field is required.", + "required": false + }, + "source": { + "name": "source", + "type": "any", + "description": "Optional location type of the Python file. When set to `WORKSPACE` or not specified, the file will be retrieved from the local\nDatabricks workspace or cloud location (if the `python_file` has a URI format). 
When set to `GIT`,\nthe Python file will be retrieved from a Git repository defined in `git_source`.\n\n* `WORKSPACE`: The Python file is located in a Databricks workspace or at a cloud filesystem URI.\n* `GIT`: The Python file is located in a remote Git repository.", + "required": false + } + } + }, + "jobs.SparkSubmitTask": { + "name": "SparkSubmitTask", + "package": "jobs", + "description": "spark submit task configuration.", + "fields": { + "parameters": { + "name": "parameters", + "type": "any", + "description": "Command-line parameters passed to spark submit.\n\nUse [Task parameter variables](https://docs.databricks.com/jobs.html#parameter-variables) to set parameters containing information about job runs.", + "required": false + } + } + }, + "jobs.SqlTask": { + "name": "SqlTask", + "package": "jobs", + "description": "sql task configuration.", + "fields": { + "alert": { + "name": "alert", + "type": "any", + "description": "If alert, indicates that this job must refresh a SQL alert.", + "required": false + }, + "dashboard": { + "name": "dashboard", + "type": "any", + "description": "If dashboard, indicates that this job must refresh a SQL dashboard.", + "required": false + }, + "file": { + "name": "file", + "type": "any", + "description": "If file, indicates that this job runs a SQL file in a remote Git repository.", + "required": false + }, + "parameters": { + "name": "parameters", + "type": "any", + "description": "Parameters to be used for each run of this job. The SQL alert task does not support custom parameters.", + "required": false + }, + "query": { + "name": "query", + "type": "any", + "description": "If query, indicates that this job must execute a SQL query.", + "required": false + }, + "warehouse_id": { + "name": "warehouse_id", + "type": "string", + "description": "The canonical identifier of the SQL warehouse. Recommended to use with serverless or pro SQL warehouses. 
Classic SQL warehouses are only supported for SQL alert, dashboard and query tasks and are limited to scheduled single-task jobs.", + "required": false + } + } + }, + "jobs.SqlTaskAlert": { + "name": "SqlTaskAlert", + "package": "jobs", + "description": "sql task alert configuration.", + "fields": { + "alert_id": { + "name": "alert_id", + "type": "string", + "description": "The canonical identifier of the SQL alert.", + "required": false + }, + "pause_subscriptions": { + "name": "pause_subscriptions", + "type": "any", + "description": "If true, the alert notifications are not sent to subscribers.", + "required": false + }, + "subscriptions": { + "name": "subscriptions", + "type": "any", + "description": "If specified, alert notifications are sent to subscribers.", + "required": false + } + } + }, + "jobs.SqlTaskDashboard": { + "name": "SqlTaskDashboard", + "package": "jobs", + "description": "sql task dashboard configuration.", + "fields": { + "custom_subject": { + "name": "custom_subject", + "type": "any", + "description": "Subject of the email sent to subscribers of this task.", + "required": false + }, + "dashboard_id": { + "name": "dashboard_id", + "type": "string", + "description": "The canonical identifier of the SQL dashboard.", + "required": false + }, + "pause_subscriptions": { + "name": "pause_subscriptions", + "type": "any", + "description": "If true, the dashboard snapshot is not taken, and emails are not sent to subscribers.", + "required": false + }, + "subscriptions": { + "name": "subscriptions", + "type": "any", + "description": "If specified, dashboard snapshots are sent to subscriptions.", + "required": false + } + } + }, + "jobs.SqlTaskFile": { + "name": "SqlTaskFile", + "package": "jobs", + "description": "sql task file configuration.", + "fields": { + "path": { + "name": "path", + "type": "any", + "description": "Path of the SQL file. Must be relative if the source is a remote Git repository and absolute for workspace paths.", + "required": false + }, + "source": { + "name": "source", + "type": "any", + "description": "Optional location type of the SQL file. When set to `WORKSPACE`, the SQL file will be retrieved\nfrom the local Databricks workspace. When set to `GIT`, the SQL file will be retrieved from a Git repository\ndefined in `git_source`. If the value is empty, the task will use `GIT` if `git_source` is defined and `WORKSPACE` otherwise.\n\n* `WORKSPACE`: SQL file is located in Databricks workspace.\n* `GIT`: SQL file is located in cloud Git provider.", + "required": false + } + } + }, + "jobs.SqlTaskQuery": { + "name": "SqlTaskQuery", + "package": "jobs", + "description": "sql task query configuration.", + "fields": { + "query_id": { + "name": "query_id", + "type": "string", + "description": "The canonical identifier of the SQL query.", + "required": false + } + } + }, + "jobs.SqlTaskSubscription": { + "name": "SqlTaskSubscription", + "package": "jobs", + "description": "sql task subscription configuration.", + "fields": { + "destination_id": { + "name": "destination_id", + "type": "string", + "description": "The canonical identifier of the destination to receive email notification. This parameter is mutually exclusive with user_name. You cannot set both destination_id and user_name for subscription notifications.", + "required": false + }, + "user_name": { + "name": "user_name", + "type": "string", + "description": "The user name to receive the subscription email. This parameter is mutually exclusive with destination_id. 
You cannot set both destination_id and user_name for subscription notifications.", + "required": false + } + } + }, + "jobs.StorageMode": { + "name": "StorageMode", + "package": "jobs", + "description": "storage mode configuration.", + "fields": {} + }, + "jobs.Subscription": { + "name": "Subscription", + "package": "jobs", + "description": "subscription configuration.", + "fields": { + "custom_subject": { + "name": "custom_subject", + "type": "any", + "description": "Optional: Allows users to specify a custom subject line on the email sent\nto subscribers.", + "required": false + }, + "paused": { + "name": "paused", + "type": "any", + "description": "When true, the subscription will not send emails.", + "required": false + }, + "subscribers": { + "name": "subscribers", + "type": "any", + "description": "The list of subscribers to send the snapshot of the dashboard to.", + "required": false + } + } + }, + "jobs.SubscriptionSubscriber": { + "name": "SubscriptionSubscriber", + "package": "jobs", + "description": "subscription subscriber configuration.", + "fields": { + "destination_id": { + "name": "destination_id", + "type": "string", + "description": "A snapshot of the dashboard will be sent to the destination when the `destination_id` field is present.", + "required": false + }, + "user_name": { + "name": "user_name", + "type": "string", + "description": "A snapshot of the dashboard will be sent to the user's email when the `user_name` field is present.", + "required": false + } + } + }, + "jobs.TableUpdateTriggerConfiguration": { + "name": "TableUpdateTriggerConfiguration", + "package": "jobs", + "description": "table update trigger configuration configuration.", + "fields": { + "condition": { + "name": "condition", + "type": "any", + "description": "The table(s) condition based on which to trigger a job run.", + "required": false + }, + "min_time_between_triggers_seconds": { + "name": "min_time_between_triggers_seconds", + "type": "int", + "description": "If set, the trigger starts a run only after the specified amount of time has passed since\nthe last time the trigger fired. The minimum allowed value is 60 seconds.", + "required": false + }, + "table_names": { + "name": "table_names", + "type": "any", + "description": "A list of tables to monitor for changes. The table name must be in the format `catalog_name.schema_name.table_name`.", + "required": false + }, + "wait_after_last_change_seconds": { + "name": "wait_after_last_change_seconds", + "type": "int", + "description": "If set, the trigger starts a run only after no table updates have occurred for the specified time\nand can be used to wait for a series of table updates before triggering a run. 
The\nminimum allowed value is 60 seconds.", + "required": false + } + } + }, + "jobs.Task": { + "name": "Task", + "package": "jobs", + "description": "task configuration.", + "fields": { + "clean_rooms_notebook_task": { + "name": "clean_rooms_notebook_task", + "type": "any", + "description": "The task runs a [clean rooms](https://docs.databricks.com/clean-rooms/index.html) notebook\nwhen the `clean_rooms_notebook_task` field is present.", + "required": false + }, + "condition_task": { + "name": "condition_task", + "type": "any", + "description": "The task evaluates a condition that can be used to control the execution of other tasks when the `condition_task` field is present.\nThe condition task does not require a cluster to execute and does not support retries or notifications.", + "required": false + }, + "dashboard_task": { + "name": "dashboard_task", + "type": "any", + "description": "The task refreshes a dashboard and sends a snapshot to subscribers.", + "required": false + }, + "dbt_cloud_task": { + "name": "dbt_cloud_task", + "type": "any", + "description": "Task type for dbt cloud, deprecated in favor of the new name dbt_platform_task", + "required": false, + "deprecated": true + }, + "dbt_platform_task": { + "name": "dbt_platform_task", + "type": "any", + "description": "", + "required": false + }, + "dbt_task": { + "name": "dbt_task", + "type": "any", + "description": "The task runs one or more dbt commands when the `dbt_task` field is present. The dbt task requires both Databricks SQL and the ability to use a serverless or a pro SQL warehouse.", + "required": false + }, + "depends_on": { + "name": "depends_on", + "type": "any", + "description": "An optional array of objects specifying the dependency graph of the task. All tasks specified in this field must complete before executing this task. The task will run only if the `run_if` condition is true.\nThe key is `task_key`, and the value is the name assigned to the dependent task.", + "required": false + }, + "description": { + "name": "description", + "type": "string", + "description": "An optional description for this task.", + "required": false + }, + "disable_auto_optimization": { + "name": "disable_auto_optimization", + "type": "any", + "description": "An option to disable auto optimization in serverless", + "required": false + }, + "disabled": { + "name": "disabled", + "type": "any", + "description": "An optional flag to disable the task. If set to true, the task will not run even if it is part of a job.", + "required": false + }, + "email_notifications": { + "name": "email_notifications", + "type": "any", + "description": "An optional set of email addresses that is notified when runs of this task begin or complete as well as when this task is deleted. The default behavior is to not send any emails.", + "required": false + }, + "environment_key": { + "name": "environment_key", + "type": "any", + "description": "The key that references an environment spec in a job. This field is required for Python script, Python wheel and dbt tasks when using serverless compute.", + "required": false + }, + "existing_cluster_id": { + "name": "existing_cluster_id", + "type": "string", + "description": "If existing_cluster_id, the ID of an existing cluster that is used for all runs.\nWhen running jobs or tasks on an existing cluster, you may need to manually restart\nthe cluster if it stops responding. 
We suggest running jobs and tasks on new clusters for\ngreater reliability", + "required": false + }, + "for_each_task": { + "name": "for_each_task", + "type": "any", + "description": "The task executes a nested task for every input provided when the `for_each_task` field is present.", + "required": false + }, + "gen_ai_compute_task": { + "name": "gen_ai_compute_task", + "type": "any", + "description": "", + "required": false + }, + "health": { + "name": "health", + "type": "any", + "description": "An optional set of health rules that can be defined for this job.", + "required": false + }, + "job_cluster_key": { + "name": "job_cluster_key", + "type": "any", + "description": "If job_cluster_key, this task is executed reusing the cluster specified in `job.settings.job_clusters`.", + "required": false + }, + "libraries": { + "name": "libraries", + "type": "any", + "description": "An optional list of libraries to be installed on the cluster.\nThe default value is an empty list.", + "required": false + }, + "max_retries": { + "name": "max_retries", + "type": "any", + "description": "An optional maximum number of times to retry an unsuccessful run. A run is considered to be unsuccessful if it completes with the `FAILED` result_state or `INTERNAL_ERROR` `life_cycle_state`. The value `-1` means to retry indefinitely and the value `0` means to never retry.", + "required": false + }, + "min_retry_interval_millis": { + "name": "min_retry_interval_millis", + "type": "any", + "description": "An optional minimal interval in milliseconds between the start of the failed run and the subsequent retry run. The default behavior is that unsuccessful runs are immediately retried.", + "required": false + }, + "new_cluster": { + "name": "new_cluster", + "type": "any", + "description": "If new_cluster, a description of a new cluster that is created for each run.", + "required": false + }, + "notebook_task": { + "name": "notebook_task", + "type": "any", + "description": "The task runs a notebook when the `notebook_task` field is present.", + "required": false + }, + "notification_settings": { + "name": "notification_settings", + "type": "any", + "description": "Optional notification settings that are used when sending notifications to each of the `email_notifications` and `webhook_notifications` for this task.", + "required": false + }, + "pipeline_task": { + "name": "pipeline_task", + "type": "any", + "description": "The task triggers a pipeline update when the `pipeline_task` field is present. Only pipelines configured to use triggered more are supported.", + "required": false + }, + "power_bi_task": { + "name": "power_bi_task", + "type": "any", + "description": "The task triggers a Power BI semantic model update when the `power_bi_task` field is present.", + "required": false + }, + "python_wheel_task": { + "name": "python_wheel_task", + "type": "any", + "description": "The task runs a Python wheel when the `python_wheel_task` field is present.", + "required": false + }, + "retry_on_timeout": { + "name": "retry_on_timeout", + "type": "any", + "description": "An optional policy to specify whether to retry a job when it times out. 
The default behavior\nis to not retry on timeout.", + "required": false + }, + "run_if": { + "name": "run_if", + "type": "any", + "description": "An optional value specifying the condition determining whether the task is run once its dependencies have been completed.\n\n* `ALL_SUCCESS`: All dependencies have executed and succeeded\n* `AT_LEAST_ONE_SUCCESS`: At least one dependency has succeeded\n* `NONE_FAILED`: None of the dependencies have failed and at least one was executed\n* `ALL_DONE`: All dependencies have been completed\n* `AT_LEAST_ONE_FAILED`: At least one dependency failed\n* `ALL_FAILED`: ALl dependencies have failed", + "required": false + }, + "run_job_task": { + "name": "run_job_task", + "type": "any", + "description": "The task triggers another job when the `run_job_task` field is present.", + "required": false + }, + "spark_jar_task": { + "name": "spark_jar_task", + "type": "any", + "description": "The task runs a JAR when the `spark_jar_task` field is present.", + "required": false + }, + "spark_python_task": { + "name": "spark_python_task", + "type": "any", + "description": "The task runs a Python file when the `spark_python_task` field is present.", + "required": false + }, + "spark_submit_task": { + "name": "spark_submit_task", + "type": "any", + "description": "(Legacy) The task runs the spark-submit script when the spark_submit_task field is present. Databricks recommends using the spark_jar_task instead; see [Spark Submit task for jobs](/jobs/spark-submit).", + "required": false, + "deprecated": true + }, + "sql_task": { + "name": "sql_task", + "type": "any", + "description": "The task runs a SQL query or file, or it refreshes a SQL alert or a legacy SQL dashboard when the `sql_task` field is present.", + "required": false + }, + "task_key": { + "name": "task_key", + "type": "any", + "description": "A unique name for the task. This field is used to refer to this task from other tasks.\nThis field is required and must be unique within its parent job.\nOn Update or Reset, this field is used to reference the tasks to be updated or reset.", + "required": false + }, + "timeout_seconds": { + "name": "timeout_seconds", + "type": "int", + "description": "An optional timeout applied to each run of this job task. A value of `0` means no timeout.", + "required": false + }, + "webhook_notifications": { + "name": "webhook_notifications", + "type": "any", + "description": "A collection of system notification IDs to notify when runs of this task begin or complete. The default behavior is to not send any system notifications.", + "required": false + } + } + }, + "jobs.TaskDependency": { + "name": "TaskDependency", + "package": "jobs", + "description": "task dependency configuration.", + "fields": { + "outcome": { + "name": "outcome", + "type": "any", + "description": "Can only be specified on condition task dependencies. The outcome of the dependent task that must be met for this task to run.", + "required": false + }, + "task_key": { + "name": "task_key", + "type": "any", + "description": "The name of the task this task depends on.", + "required": false + } + } + }, + "jobs.TaskEmailNotifications": { + "name": "TaskEmailNotifications", + "package": "jobs", + "description": "task email notifications configuration.", + "fields": { + "no_alert_for_skipped_runs": { + "name": "no_alert_for_skipped_runs", + "type": "any", + "description": "If true, do not send email to recipients specified in `on_failure` if the run is skipped.\nThis field is `deprecated`. 
Please use the `notification_settings.no_alert_for_skipped_runs` field.", + "required": false, + "deprecated": true + }, + "on_duration_warning_threshold_exceeded": { + "name": "on_duration_warning_threshold_exceeded", + "type": "any", + "description": "A list of email addresses to be notified when the duration of a run exceeds the threshold specified for the `RUN_DURATION_SECONDS` metric in the `health` field. If no rule for the `RUN_DURATION_SECONDS` metric is specified in the `health` field for the job, notifications are not sent.", + "required": false + }, + "on_failure": { + "name": "on_failure", + "type": "any", + "description": "A list of email addresses to be notified when a run unsuccessfully completes. A run is considered to have completed unsuccessfully if it ends with an `INTERNAL_ERROR` `life_cycle_state` or a `FAILED`, or `TIMED_OUT` result_state. If this is not specified on job creation, reset, or update the list is empty, and notifications are not sent.", + "required": false + }, + "on_start": { + "name": "on_start", + "type": "any", + "description": "A list of email addresses to be notified when a run begins. If not specified on job creation, reset, or update, the list is empty, and notifications are not sent.", + "required": false + }, + "on_streaming_backlog_exceeded": { + "name": "on_streaming_backlog_exceeded", + "type": "any", + "description": "A list of email addresses to notify when any streaming backlog thresholds are exceeded for any stream.\nStreaming backlog thresholds can be set in the `health` field using the following metrics: `STREAMING_BACKLOG_BYTES`, `STREAMING_BACKLOG_RECORDS`, `STREAMING_BACKLOG_SECONDS`, or `STREAMING_BACKLOG_FILES`.\nAlerting is based on the 10-minute average of these metrics. If the issue persists, notifications are resent every 30 minutes.", + "required": false + }, + "on_success": { + "name": "on_success", + "type": "any", + "description": "A list of email addresses to be notified when a run successfully completes. A run is considered to have completed successfully if it ends with a `TERMINATED` `life_cycle_state` and a `SUCCESS` result_state. 
If not specified on job creation, reset, or update, the list is empty, and notifications are not sent.", + "required": false + } + } + }, + "jobs.TaskNotificationSettings": { + "name": "TaskNotificationSettings", + "package": "jobs", + "description": "Configuration settings for task notification.", + "fields": { + "alert_on_last_attempt": { + "name": "alert_on_last_attempt", + "type": "any", + "description": "If true, do not send notifications to recipients specified in `on_start` for the retried runs and do not send notifications to recipients specified in `on_failure` until the last retry of the run.", + "required": false + }, + "no_alert_for_canceled_runs": { + "name": "no_alert_for_canceled_runs", + "type": "any", + "description": "If true, do not send notifications to recipients specified in `on_failure` if the run is canceled.", + "required": false + }, + "no_alert_for_skipped_runs": { + "name": "no_alert_for_skipped_runs", + "type": "any", + "description": "If true, do not send notifications to recipients specified in `on_failure` if the run is skipped.", + "required": false + } + } + }, + "jobs.TaskRetryMode": { + "name": "TaskRetryMode", + "package": "jobs", + "description": "task retry mode of the continuous job\n* NEVER: The failed task will not be retried.\n* ON_FAILURE: Retry a failed task if at least one other task in the job is still running its first attempt.\nWhen this condition is no longer met or the retry limit is reached, the job run is cancelled and a new run is started.", + "fields": {} + }, + "jobs.TriggerSettings": { + "name": "TriggerSettings", + "package": "jobs", + "description": "Configuration settings for trigger.", + "fields": { + "file_arrival": { + "name": "file_arrival", + "type": "any", + "description": "File arrival trigger settings.", + "required": false + }, + "model": { + "name": "model", + "type": "any", + "description": "", + "required": false + }, + "pause_status": { + "name": "pause_status", + "type": "any", + "description": "Whether this trigger is paused or not.", + "required": false + }, + "periodic": { + "name": "periodic", + "type": "any", + "description": "Periodic trigger settings.", + "required": false + }, + "table_update": { + "name": "table_update", + "type": "any", + "description": "", + "required": false + } + } + }, + "jobs.Webhook": { + "name": "Webhook", + "package": "jobs", + "description": "webhook configuration.", + "fields": { + "id": { + "name": "id", + "type": "any", + "description": "", + "required": false + } + } + }, + "jobs.WebhookNotifications": { + "name": "WebhookNotifications", + "package": "jobs", + "description": "webhook notifications configuration.", + "fields": { + "on_duration_warning_threshold_exceeded": { + "name": "on_duration_warning_threshold_exceeded", + "type": "any", + "description": "An optional list of system notification IDs to call when the duration of a run exceeds the threshold specified for the `RUN_DURATION_SECONDS` metric in the `health` field. A maximum of 3 destinations can be specified for the `on_duration_warning_threshold_exceeded` property.", + "required": false + }, + "on_failure": { + "name": "on_failure", + "type": "any", + "description": "An optional list of system notification IDs to call when the run fails. A maximum of 3 destinations can be specified for the `on_failure` property.", + "required": false + }, + "on_start": { + "name": "on_start", + "type": "any", + "description": "An optional list of system notification IDs to call when the run starts. 
A maximum of 3 destinations can be specified for the `on_start` property.", + "required": false + }, + "on_streaming_backlog_exceeded": { + "name": "on_streaming_backlog_exceeded", + "type": "any", + "description": "An optional list of system notification IDs to call when any streaming backlog thresholds are exceeded for any stream.\nStreaming backlog thresholds can be set in the `health` field using the following metrics: `STREAMING_BACKLOG_BYTES`, `STREAMING_BACKLOG_RECORDS`, `STREAMING_BACKLOG_SECONDS`, or `STREAMING_BACKLOG_FILES`.\nAlerting is based on the 10-minute average of these metrics. If the issue persists, notifications are resent every 30 minutes.\nA maximum of 3 destinations can be specified for the `on_streaming_backlog_exceeded` property.", + "required": false + }, + "on_success": { + "name": "on_success", + "type": "any", + "description": "An optional list of system notification IDs to call when the run completes successfully. A maximum of 3 destinations can be specified for the `on_success` property.", + "required": false + } + } + }, + "ml.ExperimentTag": { + "name": "ExperimentTag", + "package": "ml", + "description": "A tag for an experiment.", + "fields": { + "key": { + "name": "key", + "type": "any", + "description": "The tag key.", + "required": false + }, + "value": { + "name": "value", + "type": "any", + "description": "The tag value.", + "required": false + } + } + }, + "ml.ModelTag": { + "name": "ModelTag", + "package": "ml", + "description": "Tag for a registered model", + "fields": { + "key": { + "name": "key", + "type": "any", + "description": "The tag key.", + "required": false + }, + "value": { + "name": "value", + "type": "any", + "description": "The tag value.", + "required": false + } + } + }, + "pipelines.ConnectionParameters": { + "name": "ConnectionParameters", + "package": "pipelines", + "description": "connection parameters configuration.", + "fields": { + "source_catalog": { + "name": "source_catalog", + "type": "any", + "description": "Source catalog for initial connection.\nThis is necessary for schema exploration in some database systems like Oracle, and optional but nice-to-have\nin some other database systems like Postgres.\nFor Oracle databases, this maps to a service name.", + "required": false + } + } + }, + "pipelines.CronTrigger": { + "name": "CronTrigger", + "package": "pipelines", + "description": "cron trigger configuration.", + "fields": { + "quartz_cron_schedule": { + "name": "quartz_cron_schedule", + "type": "any", + "description": "", + "required": false + }, + "timezone_id": { + "name": "timezone_id", + "type": "string", + "description": "", + "required": false + } + } + }, + "pipelines.DayOfWeek": { + "name": "DayOfWeek", + "package": "pipelines", + "description": "Days of week in which the window is allowed to happen.\nIf not specified all days of the week will be used.", + "fields": {} + }, + "pipelines.DeploymentKind": { + "name": "DeploymentKind", + "package": "pipelines", + "description": "The deployment method that manages the pipeline:\n- BUNDLE: The pipeline is managed by a Databricks Asset Bundle.", + "fields": {} + }, + "pipelines.EventLogSpec": { + "name": "EventLogSpec", + "package": "pipelines", + "description": "Configurable event log parameters.", + "fields": { + "catalog": { + "name": "catalog", + "type": "any", + "description": "The UC catalog the event log is published under.", + "required": false + }, + "name": { + "name": "name", + "type": "any", + "description": "The name the event log is published to in 
UC.", + "required": false + }, + "schema": { + "name": "schema", + "type": "any", + "description": "The UC schema the event log is published under.", + "required": false + } + } + }, + "pipelines.FileLibrary": { + "name": "FileLibrary", + "package": "pipelines", + "description": "file library configuration.", + "fields": { + "path": { + "name": "path", + "type": "any", + "description": "The absolute path of the source code.", + "required": false + } + } + }, + "pipelines.Filters": { + "name": "Filters", + "package": "pipelines", + "description": "filters configuration.", + "fields": { + "exclude": { + "name": "exclude", + "type": "any", + "description": "Paths to exclude.", + "required": false + }, + "include": { + "name": "include", + "type": "any", + "description": "Paths to include.", + "required": false + } + } + }, + "pipelines.IngestionConfig": { + "name": "IngestionConfig", + "package": "pipelines", + "description": "ingestion config configuration.", + "fields": { + "report": { + "name": "report", + "type": "any", + "description": "Select a specific source report.", + "required": false + }, + "schema": { + "name": "schema", + "type": "any", + "description": "Select all tables from a specific source schema.", + "required": false + }, + "table": { + "name": "table", + "type": "any", + "description": "Select a specific source table.", + "required": false + } + } + }, + "pipelines.IngestionGatewayPipelineDefinition": { + "name": "IngestionGatewayPipelineDefinition", + "package": "pipelines", + "description": "ingestion gateway pipeline definition configuration.", + "fields": { + "connection_id": { + "name": "connection_id", + "type": "string", + "description": "[Deprecated, use connection_name instead] Immutable. The Unity Catalog connection that this gateway pipeline uses to communicate with the source.", + "required": false, + "deprecated": true + }, + "connection_name": { + "name": "connection_name", + "type": "string", + "description": "Immutable. The Unity Catalog connection that this gateway pipeline uses to communicate with the source.", + "required": false + }, + "connection_parameters": { + "name": "connection_parameters", + "type": "any", + "description": "Optional, Internal. Parameters required to establish an initial connection with the source.", + "required": false + }, + "gateway_storage_catalog": { + "name": "gateway_storage_catalog", + "type": "any", + "description": "Required, Immutable. The name of the catalog for the gateway pipeline's storage location.", + "required": false + }, + "gateway_storage_name": { + "name": "gateway_storage_name", + "type": "string", + "description": "Optional. The Unity Catalog-compatible name for the gateway storage location.\nThis is the destination to use for the data that is extracted by the gateway.\nSpark Declarative Pipelines system will automatically create the storage location under the catalog and schema.", + "required": false + }, + "gateway_storage_schema": { + "name": "gateway_storage_schema", + "type": "any", + "description": "Required, Immutable. The name of the schema for the gateway pipelines's storage location.", + "required": false + } + } + }, + "pipelines.IngestionPipelineDefinition": { + "name": "IngestionPipelineDefinition", + "package": "pipelines", + "description": "ingestion pipeline definition configuration.", + "fields": { + "connection_name": { + "name": "connection_name", + "type": "string", + "description": "Immutable. 
The Unity Catalog connection that this ingestion pipeline uses to communicate with the source. This is used with connectors for applications like Salesforce, Workday, and so on.", + "required": false + }, + "ingest_from_uc_foreign_catalog": { + "name": "ingest_from_uc_foreign_catalog", + "type": "any", + "description": "Immutable. If set to true, the pipeline will ingest tables from the\nUC foreign catalogs directly without the need to specify a UC connection or ingestion gateway.\nThe `source_catalog` fields in objects of IngestionConfig are interpreted as\nthe UC foreign catalogs to ingest from.", + "required": false + }, + "ingestion_gateway_id": { + "name": "ingestion_gateway_id", + "type": "string", + "description": "Immutable. Identifier for the gateway that is used by this ingestion pipeline to communicate with the source database. This is used with connectors to databases like SQL Server.", + "required": false + }, + "netsuite_jar_path": { + "name": "netsuite_jar_path", + "type": "string", + "description": "Netsuite only configuration. When the field is set for a netsuite connector,\nthe jar stored in the field will be validated and added to the classpath of\npipeline's cluster.", + "required": false + }, + "objects": { + "name": "objects", + "type": "any", + "description": "Required. Settings specifying tables to replicate and the destination for the replicated tables.", + "required": false + }, + "source_configurations": { + "name": "source_configurations", + "type": "any", + "description": "Top-level source configurations", + "required": false + }, + "source_type": { + "name": "source_type", + "type": "any", + "description": "The type of the foreign source.\nThe source type will be inferred from the source connection or ingestion gateway.\nThis field is output only and will be ignored if provided.", + "required": false, + "output_only": true + }, + "table_configuration": { + "name": "table_configuration", + "type": "any", + "description": "Configuration settings to control the ingestion of tables. These settings are applied to all tables in the pipeline.", + "required": false + } + } + }, + "pipelines.IngestionPipelineDefinitionTableSpecificConfigQueryBasedConnectorConfig": { + "name": "IngestionPipelineDefinitionTableSpecificConfigQueryBasedConnectorConfig", + "package": "pipelines", + "description": "Configurations that are only applicable for query-based ingestion connectors.", + "fields": { + "cursor_columns": { + "name": "cursor_columns", + "type": "any", + "description": "The names of the monotonically increasing columns in the source table that are used to enable\nthe table to be read and ingested incrementally through structured streaming.\nThe columns are allowed to have repeated values but have to be non-decreasing.\nIf the source data is merged into the destination (e.g., using SCD Type 1 or Type 2), these\ncolumns will implicitly define the `sequence_by` behavior. 
You can still explicitly set\n`sequence_by` to override this default.", + "required": false + }, + "deletion_condition": { + "name": "deletion_condition", + "type": "any", + "description": "Specifies a SQL WHERE condition that specifies that the source row has been deleted.\nThis is sometimes referred to as \"soft-deletes\".\nFor example: \"Operation = 'DELETE'\" or \"is_deleted = true\".\nThis field is orthogonal to `hard_deletion_sync_interval_in_seconds`,\none for soft-deletes and the other for hard-deletes.\nSee also the hard_deletion_sync_min_interval_in_seconds field for\nhandling of \"hard deletes\" where the source rows are physically removed from the table.", + "required": false + }, + "hard_deletion_sync_min_interval_in_seconds": { + "name": "hard_deletion_sync_min_interval_in_seconds", + "type": "int", + "description": "Specifies the minimum interval (in seconds) between snapshots on primary keys\nfor detecting and synchronizing hard deletions—i.e., rows that have been\nphysically removed from the source table.\nThis interval acts as a lower bound. If ingestion runs less frequently than\nthis value, hard deletion synchronization will align with the actual ingestion\nfrequency instead of happening more often.\nIf not set, hard deletion synchronization via snapshots is disabled.\nThis field is mutable and can be updated without triggering a full snapshot.", + "required": false + } + } + }, + "pipelines.IngestionPipelineDefinitionWorkdayReportParameters": { + "name": "IngestionPipelineDefinitionWorkdayReportParameters", + "package": "pipelines", + "description": "ingestion pipeline definition workday report parameters configuration.", + "fields": { + "incremental": { + "name": "incremental", + "type": "any", + "description": "(Optional) Marks the report as incremental.\nThis field is deprecated and should not be used. Use `parameters` instead. The incremental behavior is now\ncontrolled by the `parameters` field.", + "required": false, + "deprecated": true + }, + "parameters": { + "name": "parameters", + "type": "any", + "description": "Parameters for the Workday report. Each key represents the parameter name (e.g., \"start_date\", \"end_date\"),\nand the corresponding value is a SQL-like expression used to compute the parameter value at runtime.\nExample:\n{\n\"start_date\": \"{ coalesce(current_offset(), date(\\\"2025-02-01\\\")) }\",\n\"end_date\": \"{ current_date() - INTERVAL 1 DAY }\"\n}", + "required": false + }, + "report_parameters": { + "name": "report_parameters", + "type": "any", + "description": "(Optional) Additional custom parameters for Workday Report\nThis field is deprecated and should not be used. Use `parameters` instead.", + "required": false, + "deprecated": true + } + } + }, + "pipelines.IngestionPipelineDefinitionWorkdayReportParametersQueryKeyValue": { + "name": "IngestionPipelineDefinitionWorkdayReportParametersQueryKeyValue", + "package": "pipelines", + "description": "ingestion pipeline definition workday report parameters query key value configuration.", + "fields": { + "key": { + "name": "key", + "type": "any", + "description": "Key for the report parameter, can be a column name or other metadata", + "required": false + }, + "value": { + "name": "value", + "type": "any", + "description": "Value for the report parameter.\nPossible values it can take are these sql functions:\n1. coalesce(current_offset(), date(\"YYYY-MM-DD\")) -\u003e if current_offset() is null, then the passed date, else current_offset()\n2. current_date()\n3. 
date_sub(current_date(), x) -\u003e subtract x (some non-negative integer) days from current date", + "required": false + } + } + }, + "pipelines.IngestionSourceType": { + "name": "IngestionSourceType", + "package": "pipelines", + "description": "ingestion source type configuration.", + "fields": {} + }, + "pipelines.ManualTrigger": { + "name": "ManualTrigger", + "package": "pipelines", + "description": "manual trigger configuration.", + "fields": {} + }, + "pipelines.NotebookLibrary": { + "name": "NotebookLibrary", + "package": "pipelines", + "description": "notebook library configuration.", + "fields": { + "path": { + "name": "path", + "type": "any", + "description": "The absolute path of the source code.", + "required": false + } + } + }, + "pipelines.Notifications": { + "name": "Notifications", + "package": "pipelines", + "description": "notifications configuration.", + "fields": { + "alerts": { + "name": "alerts", + "type": "any", + "description": "A list of alerts that trigger the sending of notifications to the configured\ndestinations. The supported alerts are:\n\n* `on-update-success`: A pipeline update completes successfully.\n* `on-update-failure`: Each time a pipeline update fails.\n* `on-update-fatal-failure`: A pipeline update fails with a non-retryable (fatal) error.\n* `on-flow-failure`: A single data flow fails.", + "required": false + }, + "email_recipients": { + "name": "email_recipients", + "type": "any", + "description": "A list of email addresses notified when a configured alert is triggered.", + "required": false + } + } + }, + "pipelines.PathPattern": { + "name": "PathPattern", + "package": "pipelines", + "description": "path pattern configuration.", + "fields": { + "include": { + "name": "include", + "type": "any", + "description": "The source code to include for pipelines", + "required": false + } + } + }, + "pipelines.PipelineCluster": { + "name": "PipelineCluster", + "package": "pipelines", + "description": "pipeline cluster configuration.", + "fields": { + "apply_policy_default_values": { + "name": "apply_policy_default_values", + "type": "any", + "description": "Note: This field won't be persisted. Only API users will check this field.", + "required": false + }, + "autoscale": { + "name": "autoscale", + "type": "any", + "description": "Parameters needed in order to automatically scale clusters up and down based on load.\nNote: autoscaling works best with DB runtime versions 3.0 or later.", + "required": false + }, + "aws_attributes": { + "name": "aws_attributes", + "type": "any", + "description": "Attributes related to clusters running on Amazon Web Services.\nIf not specified at cluster creation, a set of default values will be used.", + "required": false + }, + "azure_attributes": { + "name": "azure_attributes", + "type": "any", + "description": "Attributes related to clusters running on Microsoft Azure.\nIf not specified at cluster creation, a set of default values will be used.", + "required": false + }, + "cluster_log_conf": { + "name": "cluster_log_conf", + "type": "any", + "description": "The configuration for delivering spark logs to a long-term storage destination.\nOnly dbfs destinations are supported. Only one destination can be specified\nfor one cluster. If the conf is given, the logs will be delivered to the destination every\n`5 mins`. 
The destination of driver logs is `$destination/$clusterId/driver`, while\nthe destination of executor logs is `$destination/$clusterId/executor`.", + "required": false + }, + "custom_tags": { + "name": "custom_tags", + "type": "map[string]string", + "description": "Additional tags for cluster resources. Databricks will tag all cluster resources (e.g., AWS\ninstances and EBS volumes) with these tags in addition to `default_tags`. Notes:\n\n- Currently, Databricks allows at most 45 custom tags\n\n- Clusters can only reuse cloud resources if the resources' tags are a subset of the cluster tags", + "required": false + }, + "driver_instance_pool_id": { + "name": "driver_instance_pool_id", + "type": "string", + "description": "The optional ID of the instance pool for the driver of the cluster belongs.\nThe pool cluster uses the instance pool with id (instance_pool_id) if the driver pool is not\nassigned.", + "required": false + }, + "driver_node_type_id": { + "name": "driver_node_type_id", + "type": "string", + "description": "The node type of the Spark driver.\nNote that this field is optional; if unset, the driver node type will be set as the same value\nas `node_type_id` defined above.", + "required": false + }, + "enable_local_disk_encryption": { + "name": "enable_local_disk_encryption", + "type": "bool", + "description": "Whether to enable local disk encryption for the cluster.", + "required": false + }, + "gcp_attributes": { + "name": "gcp_attributes", + "type": "any", + "description": "Attributes related to clusters running on Google Cloud Platform.\nIf not specified at cluster creation, a set of default values will be used.", + "required": false + }, + "init_scripts": { + "name": "init_scripts", + "type": "any", + "description": "The configuration for storing init scripts. Any number of destinations can be specified. The scripts are executed sequentially in the order provided. If `cluster_log_conf` is specified, init script logs are sent to `\u003cdestination\u003e/\u003ccluster-ID\u003e/init_scripts`.", + "required": false + }, + "instance_pool_id": { + "name": "instance_pool_id", + "type": "string", + "description": "The optional ID of the instance pool to which the cluster belongs.", + "required": false + }, + "label": { + "name": "label", + "type": "any", + "description": "A label for the cluster specification, either `default` to configure the default cluster, or `maintenance` to configure the maintenance cluster. This field is optional. The default value is `default`.", + "required": false + }, + "node_type_id": { + "name": "node_type_id", + "type": "string", + "description": "This field encodes, through a single value, the resources available to each of\nthe Spark nodes in this cluster. For example, the Spark nodes can be provisioned\nand optimized for memory or compute intensive workloads. A list of available node\ntypes can be retrieved by using the :method:clusters/listNodeTypes API call.", + "required": false + }, + "num_workers": { + "name": "num_workers", + "type": "any", + "description": "Number of worker nodes that this cluster should have. A cluster has one Spark Driver\nand `num_workers` Executors for a total of `num_workers` + 1 Spark nodes.\n\nNote: When reading the properties of a cluster, this field reflects the desired number\nof workers rather than the actual current number of workers. 
For instance, if a cluster\nis resized from 5 to 10 workers, this field will immediately be updated to reflect\nthe target size of 10 workers, whereas the workers listed in `spark_info` will gradually\nincrease from 5 to 10 as the new nodes are provisioned.", + "required": false + }, + "policy_id": { + "name": "policy_id", + "type": "string", + "description": "The ID of the cluster policy used to create the cluster if applicable.", + "required": false + }, + "spark_conf": { + "name": "spark_conf", + "type": "any", + "description": "An object containing a set of optional, user-specified Spark configuration key-value pairs.\nSee :method:clusters/create for more details.", + "required": false + }, + "spark_env_vars": { + "name": "spark_env_vars", + "type": "any", + "description": "An object containing a set of optional, user-specified environment variable key-value pairs.\nPlease note that key-value pair of the form (X,Y) will be exported as is (i.e.,\n`export X='Y'`) while launching the driver and workers.\n\nIn order to specify an additional set of `SPARK_DAEMON_JAVA_OPTS`, we recommend appending\nthem to `$SPARK_DAEMON_JAVA_OPTS` as shown in the example below. This ensures that all\ndefault databricks managed environmental variables are included as well.\n\nExample Spark environment variables:\n`{\"SPARK_WORKER_MEMORY\": \"28000m\", \"SPARK_LOCAL_DIRS\": \"/local_disk0\"}` or\n`{\"SPARK_DAEMON_JAVA_OPTS\": \"$SPARK_DAEMON_JAVA_OPTS -Dspark.shuffle.service.enabled=true\"}`", + "required": false + }, + "ssh_public_keys": { + "name": "ssh_public_keys", + "type": "any", + "description": "SSH public key contents that will be added to each Spark node in this cluster. The\ncorresponding private keys can be used to login with the user name `ubuntu` on port `2200`.\nUp to 10 keys can be specified.", + "required": false + } + } + }, + "pipelines.PipelineClusterAutoscale": { + "name": "PipelineClusterAutoscale", + "package": "pipelines", + "description": "pipeline cluster autoscale configuration.", + "fields": { + "max_workers": { + "name": "max_workers", + "type": "any", + "description": "The maximum number of workers to which the cluster can scale up when overloaded. `max_workers` must be strictly greater than `min_workers`.", + "required": false + }, + "min_workers": { + "name": "min_workers", + "type": "any", + "description": "The minimum number of workers the cluster can scale down to when underutilized.\nIt is also the initial number of workers the cluster will have after creation.", + "required": false + }, + "mode": { + "name": "mode", + "type": "any", + "description": "Databricks Enhanced Autoscaling optimizes cluster utilization by automatically\nallocating cluster resources based on workload volume, with minimal impact to\nthe data processing latency of your pipelines. Enhanced Autoscaling is available\nfor `updates` clusters only. The legacy autoscaling feature is used for `maintenance`\nclusters.", + "required": false + } + } + }, + "pipelines.PipelineClusterAutoscaleMode": { + "name": "PipelineClusterAutoscaleMode", + "package": "pipelines", + "description": "Databricks Enhanced Autoscaling optimizes cluster utilization by automatically\nallocating cluster resources based on workload volume, with minimal impact to\nthe data processing latency of your pipelines. Enhanced Autoscaling is available\nfor `updates` clusters only. 
The legacy autoscaling feature is used for `maintenance`\nclusters.", + "fields": {} + }, + "pipelines.PipelineDeployment": { + "name": "PipelineDeployment", + "package": "pipelines", + "description": "pipeline deployment configuration.", + "fields": { + "kind": { + "name": "kind", + "type": "any", + "description": "The deployment method that manages the pipeline.", + "required": false + }, + "metadata_file_path": { + "name": "metadata_file_path", + "type": "string", + "description": "The path to the file containing metadata about the deployment.", + "required": false + } + } + }, + "pipelines.PipelineLibrary": { + "name": "PipelineLibrary", + "package": "pipelines", + "description": "pipeline library configuration.", + "fields": { + "file": { + "name": "file", + "type": "any", + "description": "The path to a file that defines a pipeline and is stored in the Databricks Repos.", + "required": false + }, + "glob": { + "name": "glob", + "type": "any", + "description": "The unified field to include source codes.\nEach entry can be a notebook path, a file path, or a folder path that ends `/**`.\nThis field cannot be used together with `notebook` or `file`.", + "required": false + }, + "jar": { + "name": "jar", + "type": "any", + "description": "URI of the jar to be installed. Currently only DBFS is supported.", + "required": false + }, + "maven": { + "name": "maven", + "type": "any", + "description": "Specification of a maven library to be installed.", + "required": false + }, + "notebook": { + "name": "notebook", + "type": "any", + "description": "The path to a notebook that defines a pipeline and is stored in the Databricks workspace.", + "required": false + }, + "whl": { + "name": "whl", + "type": "any", + "description": "URI of the whl to be installed.", + "required": false, + "deprecated": true + } + } + }, + "pipelines.PipelineTrigger": { + "name": "PipelineTrigger", + "package": "pipelines", + "description": "pipeline trigger configuration.", + "fields": { + "cron": { + "name": "cron", + "type": "any", + "description": "", + "required": false + }, + "manual": { + "name": "manual", + "type": "any", + "description": "", + "required": false + } + } + }, + "pipelines.PipelinesEnvironment": { + "name": "PipelinesEnvironment", + "package": "pipelines", + "description": "The environment entity used to preserve serverless environment side panel, jobs' environment for non-notebook task, and DLT's environment for classic and serverless pipelines.\nIn this minimal environment spec, only pip dependencies are supported.", + "fields": { + "dependencies": { + "name": "dependencies", + "type": "any", + "description": "List of pip dependencies, as supported by the version of pip in this environment.\nEach dependency is a pip requirement file line https://pip.pypa.io/en/stable/reference/requirements-file-format/\nAllowed dependency could be \u003crequirement specifier\u003e, \u003carchive url/path\u003e, \u003clocal project path\u003e(WSFS or Volumes in Databricks), \u003cvcs project url\u003e", + "required": false + } + } + }, + "pipelines.PostgresCatalogConfig": { + "name": "PostgresCatalogConfig", + "package": "pipelines", + "description": "PG-specific catalog-level configuration parameters", + "fields": { + "slot_config": { + "name": "slot_config", + "type": "any", + "description": "Optional. 
The Postgres slot configuration to use for logical replication", + "required": false + } + } + }, + "pipelines.PostgresSlotConfig": { + "name": "PostgresSlotConfig", + "package": "pipelines", + "description": "PostgresSlotConfig contains the configuration for a Postgres logical replication slot", + "fields": { + "publication_name": { + "name": "publication_name", + "type": "string", + "description": "The name of the publication to use for the Postgres source", + "required": false + }, + "slot_name": { + "name": "slot_name", + "type": "string", + "description": "The name of the logical replication slot to use for the Postgres source", + "required": false + } + } + }, + "pipelines.ReportSpec": { + "name": "ReportSpec", + "package": "pipelines", + "description": "Specification for report.", + "fields": { + "destination_catalog": { + "name": "destination_catalog", + "type": "any", + "description": "Required. Destination catalog to store table.", + "required": false + }, + "destination_schema": { + "name": "destination_schema", + "type": "any", + "description": "Required. Destination schema to store table.", + "required": false + }, + "destination_table": { + "name": "destination_table", + "type": "any", + "description": "Required. Destination table name. The pipeline fails if a table with that name already exists.", + "required": false + }, + "source_url": { + "name": "source_url", + "type": "string", + "description": "Required. Report URL in the source system.", + "required": false + }, + "table_configuration": { + "name": "table_configuration", + "type": "any", + "description": "Configuration settings to control the ingestion of tables. These settings override the table_configuration defined in the IngestionPipelineDefinition object.", + "required": false + } + } + }, + "pipelines.RestartWindow": { + "name": "RestartWindow", + "package": "pipelines", + "description": "restart window configuration.", + "fields": { + "days_of_week": { + "name": "days_of_week", + "type": "any", + "description": "Days of week in which the restart is allowed to happen (within a five-hour window starting at start_hour).\nIf not specified all days of the week will be used.", + "required": false + }, + "start_hour": { + "name": "start_hour", + "type": "any", + "description": "An integer between 0 and 23 denoting the start hour for the restart window in the 24-hour day.\nContinuous pipeline restart is triggered only within a five-hour window starting at this hour.", + "required": false + }, + "time_zone_id": { + "name": "time_zone_id", + "type": "string", + "description": "Time zone id of restart window. See https://docs.databricks.com/sql/language-manual/sql-ref-syntax-aux-conf-mgmt-set-timezone.html for details.\nIf not specified, UTC will be used.", + "required": false + } + } + }, + "pipelines.RunAs": { + "name": "RunAs", + "package": "pipelines", + "description": "Write-only setting, available only in Create/Update calls. Specifies the user or service principal that the pipeline runs as. If not specified, the pipeline runs as the user who created the pipeline.\n\nOnly `user_name` or `service_principal_name` can be specified. If both are specified, an error is thrown.", + "fields": { + "service_principal_name": { + "name": "service_principal_name", + "type": "string", + "description": "Application ID of an active service principal. 
Setting this field requires the `servicePrincipal/user` role.", + "required": false + }, + "user_name": { + "name": "user_name", + "type": "string", + "description": "The email of an active workspace user. Users can only set this field to their own email.", + "required": false + } + } + }, + "pipelines.SchemaSpec": { + "name": "SchemaSpec", + "package": "pipelines", + "description": "Specification for schema.", + "fields": { + "destination_catalog": { + "name": "destination_catalog", + "type": "any", + "description": "Required. Destination catalog to store tables.", + "required": false + }, + "destination_schema": { + "name": "destination_schema", + "type": "any", + "description": "Required. Destination schema to store tables in. Tables with the same name as the source tables are created in this destination schema. The pipeline fails If a table with the same name already exists.", + "required": false + }, + "source_catalog": { + "name": "source_catalog", + "type": "any", + "description": "The source catalog name. Might be optional depending on the type of source.", + "required": false + }, + "source_schema": { + "name": "source_schema", + "type": "any", + "description": "Required. Schema name in the source database.", + "required": false + }, + "table_configuration": { + "name": "table_configuration", + "type": "any", + "description": "Configuration settings to control the ingestion of tables. These settings are applied to all tables in this schema and override the table_configuration defined in the IngestionPipelineDefinition object.", + "required": false + } + } + }, + "pipelines.SourceCatalogConfig": { + "name": "SourceCatalogConfig", + "package": "pipelines", + "description": "SourceCatalogConfig contains catalog-level custom configuration parameters for each source", + "fields": { + "postgres": { + "name": "postgres", + "type": "any", + "description": "Postgres-specific catalog-level configuration parameters", + "required": false + }, + "source_catalog": { + "name": "source_catalog", + "type": "any", + "description": "Source catalog name", + "required": false + } + } + }, + "pipelines.SourceConfig": { + "name": "SourceConfig", + "package": "pipelines", + "description": "source config configuration.", + "fields": { + "catalog": { + "name": "catalog", + "type": "any", + "description": "Catalog-level source configuration parameters", + "required": false + } + } + }, + "pipelines.TableSpec": { + "name": "TableSpec", + "package": "pipelines", + "description": "Specification for table.", + "fields": { + "destination_catalog": { + "name": "destination_catalog", + "type": "any", + "description": "Required. Destination catalog to store table.", + "required": false + }, + "destination_schema": { + "name": "destination_schema", + "type": "any", + "description": "Required. Destination schema to store table.", + "required": false + }, + "destination_table": { + "name": "destination_table", + "type": "any", + "description": "Optional. Destination table name. The pipeline fails if a table with that name already exists. If not set, the source table name is used.", + "required": false + }, + "source_catalog": { + "name": "source_catalog", + "type": "any", + "description": "Source catalog name. Might be optional depending on the type of source.", + "required": false + }, + "source_schema": { + "name": "source_schema", + "type": "any", + "description": "Schema name in the source database. 
Might be optional depending on the type of source.", + "required": false + }, + "source_table": { + "name": "source_table", + "type": "any", + "description": "Required. Table name in the source database.", + "required": false + }, + "table_configuration": { + "name": "table_configuration", + "type": "any", + "description": "Configuration settings to control the ingestion of tables. These settings override the table_configuration defined in the IngestionPipelineDefinition object and the SchemaSpec.", + "required": false + } + } + }, + "pipelines.TableSpecificConfig": { + "name": "TableSpecificConfig", + "package": "pipelines", + "description": "table specific config configuration.", + "fields": { + "exclude_columns": { + "name": "exclude_columns", + "type": "any", + "description": "A list of column names to be excluded for the ingestion.\nWhen not specified, include_columns fully controls what columns to be ingested.\nWhen specified, all other columns including future ones will be automatically included for ingestion.\nThis field in mutually exclusive with `include_columns`.", + "required": false + }, + "include_columns": { + "name": "include_columns", + "type": "any", + "description": "A list of column names to be included for the ingestion.\nWhen not specified, all columns except ones in exclude_columns will be included. Future\ncolumns will be automatically included.\nWhen specified, all other future columns will be automatically excluded from ingestion.\nThis field in mutually exclusive with `exclude_columns`.", + "required": false + }, + "primary_keys": { + "name": "primary_keys", + "type": "any", + "description": "The primary key of the table used to apply changes.", + "required": false + }, + "query_based_connector_config": { + "name": "query_based_connector_config", + "type": "any", + "description": "Configurations that are only applicable for query-based ingestion connectors.", + "required": false + }, + "row_filter": { + "name": "row_filter", + "type": "any", + "description": "(Optional, Immutable) The row filter condition to be applied to the table.\nIt must not contain the WHERE keyword, only the actual filter condition.\nIt must be in DBSQL format.", + "required": false + }, + "salesforce_include_formula_fields": { + "name": "salesforce_include_formula_fields", + "type": "any", + "description": "If true, formula fields defined in the table are included in the ingestion. This setting is only valid for the Salesforce connector", + "required": false + }, + "scd_type": { + "name": "scd_type", + "type": "any", + "description": "The SCD type to use to ingest the table.", + "required": false + }, + "sequence_by": { + "name": "sequence_by", + "type": "any", + "description": "The column names specifying the logical order of events in the source data. 
Spark Declarative Pipelines uses this sequencing to handle change events that arrive out of order.", + "required": false + }, + "workday_report_parameters": { + "name": "workday_report_parameters", + "type": "any", + "description": "(Optional) Additional custom parameters for Workday Report", + "required": false + } + } + }, + "pipelines.TableSpecificConfigScdType": { + "name": "TableSpecificConfigScdType", + "package": "pipelines", + "description": "The SCD type to use to ingest the table.", + "fields": {} + }, + "serving.Ai21LabsConfig": { + "name": "Ai21LabsConfig", + "package": "serving", + "description": "ai21 labs config configuration.", + "fields": { + "ai21labs_api_key": { + "name": "ai21labs_api_key", + "type": "any", + "description": "The Databricks secret key reference for an AI21 Labs API key. If you\nprefer to paste your API key directly, see `ai21labs_api_key_plaintext`.\nYou must provide an API key using one of the following fields:\n`ai21labs_api_key` or `ai21labs_api_key_plaintext`.", + "required": false + }, + "ai21labs_api_key_plaintext": { + "name": "ai21labs_api_key_plaintext", + "type": "any", + "description": "An AI21 Labs API key provided as a plaintext string. If you prefer to\nreference your key using Databricks Secrets, see `ai21labs_api_key`. You\nmust provide an API key using one of the following fields:\n`ai21labs_api_key` or `ai21labs_api_key_plaintext`.", + "required": false + } + } + }, + "serving.AiGatewayConfig": { + "name": "AiGatewayConfig", + "package": "serving", + "description": "ai gateway config configuration.", + "fields": { + "fallback_config": { + "name": "fallback_config", + "type": "any", + "description": "Configuration for traffic fallback which auto fallbacks to other served entities if the request to a served\nentity fails with certain error codes, to increase availability.", + "required": false + }, + "guardrails": { + "name": "guardrails", + "type": "any", + "description": "Configuration for AI Guardrails to prevent unwanted data and unsafe data in requests and responses.", + "required": false + }, + "inference_table_config": { + "name": "inference_table_config", + "type": "any", + "description": "Configuration for payload logging using inference tables.\nUse these tables to monitor and audit data being sent to and received from model APIs and to improve model quality.", + "required": false + }, + "rate_limits": { + "name": "rate_limits", + "type": "any", + "description": "Configuration for rate limits which can be set to limit endpoint traffic.", + "required": false + }, + "usage_tracking_config": { + "name": "usage_tracking_config", + "type": "any", + "description": "Configuration to enable usage tracking using system tables.\nThese tables allow you to monitor operational usage on endpoints and their associated costs.", + "required": false + } + } + }, + "serving.AiGatewayGuardrailParameters": { + "name": "AiGatewayGuardrailParameters", + "package": "serving", + "description": "ai gateway guardrail parameters configuration.", + "fields": { + "invalid_keywords": { + "name": "invalid_keywords", + "type": "any", + "description": "List of invalid keywords.\nAI guardrail uses keyword or string matching to decide if the keyword exists in the request or response content.", + "required": false, + "deprecated": true + }, + "pii": { + "name": "pii", + "type": "any", + "description": "Configuration for guardrail PII filter.", + "required": false + }, + "safety": { + "name": "safety", + "type": "any", + "description": "Indicates whether the safety 
filter is enabled.", + "required": false + }, + "valid_topics": { + "name": "valid_topics", + "type": "any", + "description": "The list of allowed topics.\nGiven a chat request, this guardrail flags the request if its topic is not in the allowed topics.", + "required": false, + "deprecated": true + } + } + }, + "serving.AiGatewayGuardrailPiiBehavior": { + "name": "AiGatewayGuardrailPiiBehavior", + "package": "serving", + "description": "ai gateway guardrail pii behavior configuration.", + "fields": { + "behavior": { + "name": "behavior", + "type": "any", + "description": "Configuration for input guardrail filters.", + "required": false + } + } + }, + "serving.AiGatewayGuardrailPiiBehaviorBehavior": { + "name": "AiGatewayGuardrailPiiBehaviorBehavior", + "package": "serving", + "description": "ai gateway guardrail pii behavior behavior configuration.", + "fields": {} + }, + "serving.AiGatewayGuardrails": { + "name": "AiGatewayGuardrails", + "package": "serving", + "description": "ai gateway guardrails configuration.", + "fields": { + "input": { + "name": "input", + "type": "any", + "description": "Configuration for input guardrail filters.", + "required": false + }, + "output": { + "name": "output", + "type": "any", + "description": "Configuration for output guardrail filters.", + "required": false + } + } + }, + "serving.AiGatewayInferenceTableConfig": { + "name": "AiGatewayInferenceTableConfig", + "package": "serving", + "description": "ai gateway inference table config configuration.", + "fields": { + "catalog_name": { + "name": "catalog_name", + "type": "string", + "description": "The name of the catalog in Unity Catalog. Required when enabling inference tables.\nNOTE: On update, you have to disable inference table first in order to change the catalog name.", + "required": false + }, + "enabled": { + "name": "enabled", + "type": "bool", + "description": "Indicates whether the inference table is enabled.", + "required": false + }, + "schema_name": { + "name": "schema_name", + "type": "string", + "description": "The name of the schema in Unity Catalog. Required when enabling inference tables.\nNOTE: On update, you have to disable inference table first in order to change the schema name.", + "required": false + }, + "table_name_prefix": { + "name": "table_name_prefix", + "type": "any", + "description": "The prefix of the table in Unity Catalog.\nNOTE: On update, you have to disable inference table first in order to change the prefix name.", + "required": false + } + } + }, + "serving.AiGatewayRateLimit": { + "name": "AiGatewayRateLimit", + "package": "serving", + "description": "ai gateway rate limit configuration.", + "fields": { + "calls": { + "name": "calls", + "type": "any", + "description": "Used to specify how many calls are allowed for a key within the renewal_period.", + "required": false + }, + "key": { + "name": "key", + "type": "any", + "description": "Key field for a rate limit. Currently, 'user', 'user_group, 'service_principal', and 'endpoint' are supported,\nwith 'endpoint' being the default if not specified.", + "required": false + }, + "principal": { + "name": "principal", + "type": "any", + "description": "Principal field for a user, user group, or service principal to apply rate limiting to. Accepts a user email, group name, or service principal application ID.", + "required": false + }, + "renewal_period": { + "name": "renewal_period", + "type": "any", + "description": "Renewal period field for a rate limit. 
Currently, only 'minute' is supported.", + "required": false + }, + "tokens": { + "name": "tokens", + "type": "any", + "description": "Used to specify how many tokens are allowed for a key within the renewal_period.", + "required": false + } + } + }, + "serving.AiGatewayRateLimitKey": { + "name": "AiGatewayRateLimitKey", + "package": "serving", + "description": "ai gateway rate limit key configuration.", + "fields": {} + }, + "serving.AiGatewayRateLimitRenewalPeriod": { + "name": "AiGatewayRateLimitRenewalPeriod", + "package": "serving", + "description": "ai gateway rate limit renewal period configuration.", + "fields": {} + }, + "serving.AiGatewayUsageTrackingConfig": { + "name": "AiGatewayUsageTrackingConfig", + "package": "serving", + "description": "ai gateway usage tracking config configuration.", + "fields": { + "enabled": { + "name": "enabled", + "type": "bool", + "description": "Whether to enable usage tracking.", + "required": false + } + } + }, + "serving.AmazonBedrockConfig": { + "name": "AmazonBedrockConfig", + "package": "serving", + "description": "amazon bedrock config configuration.", + "fields": { + "aws_access_key_id": { + "name": "aws_access_key_id", + "type": "string", + "description": "The Databricks secret key reference for an AWS access key ID with\npermissions to interact with Bedrock services. If you prefer to paste\nyour API key directly, see `aws_access_key_id_plaintext`. You must provide an API\nkey using one of the following fields: `aws_access_key_id` or\n`aws_access_key_id_plaintext`.", + "required": false + }, + "aws_access_key_id_plaintext": { + "name": "aws_access_key_id_plaintext", + "type": "any", + "description": "An AWS access key ID with permissions to interact with Bedrock services\nprovided as a plaintext string. If you prefer to reference your key using\nDatabricks Secrets, see `aws_access_key_id`. You must provide an API key\nusing one of the following fields: `aws_access_key_id` or\n`aws_access_key_id_plaintext`.", + "required": false + }, + "aws_region": { + "name": "aws_region", + "type": "any", + "description": "The AWS region to use. Bedrock has to be enabled there.", + "required": false + }, + "aws_secret_access_key": { + "name": "aws_secret_access_key", + "type": "any", + "description": "The Databricks secret key reference for an AWS secret access key paired\nwith the access key ID, with permissions to interact with Bedrock\nservices. If you prefer to paste your API key directly, see\n`aws_secret_access_key_plaintext`. You must provide an API key using one\nof the following fields: `aws_secret_access_key` or\n`aws_secret_access_key_plaintext`.", + "required": false + }, + "aws_secret_access_key_plaintext": { + "name": "aws_secret_access_key_plaintext", + "type": "any", + "description": "An AWS secret access key paired with the access key ID, with permissions\nto interact with Bedrock services provided as a plaintext string. If you\nprefer to reference your key using Databricks Secrets, see\n`aws_secret_access_key`. You must provide an API key using one of the\nfollowing fields: `aws_secret_access_key` or\n`aws_secret_access_key_plaintext`.", + "required": false + }, + "bedrock_provider": { + "name": "bedrock_provider", + "type": "any", + "description": "The underlying provider in Amazon Bedrock. 
Supported values (case\ninsensitive) include: Anthropic, Cohere, AI21Labs, Amazon.", + "required": false + }, + "instance_profile_arn": { + "name": "instance_profile_arn", + "type": "any", + "description": "ARN of the instance profile that the external model will use to access AWS resources.\nYou must authenticate using an instance profile or access keys.\nIf you prefer to authenticate using access keys, see `aws_access_key_id`,\n`aws_access_key_id_plaintext`, `aws_secret_access_key` and `aws_secret_access_key_plaintext`.", + "required": false + } + } + }, + "serving.AmazonBedrockConfigBedrockProvider": { + "name": "AmazonBedrockConfigBedrockProvider", + "package": "serving", + "description": "amazon bedrock config bedrock provider configuration.", + "fields": {} + }, + "serving.AnthropicConfig": { + "name": "AnthropicConfig", + "package": "serving", + "description": "anthropic config configuration.", + "fields": { + "anthropic_api_key": { + "name": "anthropic_api_key", + "type": "any", + "description": "The Databricks secret key reference for an Anthropic API key. If you\nprefer to paste your API key directly, see `anthropic_api_key_plaintext`.\nYou must provide an API key using one of the following fields:\n`anthropic_api_key` or `anthropic_api_key_plaintext`.", + "required": false + }, + "anthropic_api_key_plaintext": { + "name": "anthropic_api_key_plaintext", + "type": "any", + "description": "The Anthropic API key provided as a plaintext string. If you prefer to\nreference your key using Databricks Secrets, see `anthropic_api_key`. You\nmust provide an API key using one of the following fields:\n`anthropic_api_key` or `anthropic_api_key_plaintext`.", + "required": false + } + } + }, + "serving.ApiKeyAuth": { + "name": "ApiKeyAuth", + "package": "serving", + "description": "api key auth configuration.", + "fields": { + "key": { + "name": "key", + "type": "any", + "description": "The name of the API key parameter used for authentication.", + "required": false + }, + "value": { + "name": "value", + "type": "any", + "description": "The Databricks secret key reference for an API Key.\nIf you prefer to paste your token directly, see `value_plaintext`.", + "required": false + }, + "value_plaintext": { + "name": "value_plaintext", + "type": "any", + "description": "The API Key provided as a plaintext string. If you prefer to reference your\ntoken using Databricks Secrets, see `value`.", + "required": false + } + } + }, + "serving.AutoCaptureConfigInput": { + "name": "AutoCaptureConfigInput", + "package": "serving", + "description": "auto capture config input configuration.", + "fields": { + "catalog_name": { + "name": "catalog_name", + "type": "string", + "description": "The name of the catalog in Unity Catalog. NOTE: On update, you cannot change the catalog name if the inference table is already enabled.", + "required": false + }, + "enabled": { + "name": "enabled", + "type": "bool", + "description": "Indicates whether the inference table is enabled.", + "required": false + }, + "schema_name": { + "name": "schema_name", + "type": "string", + "description": "The name of the schema in Unity Catalog. NOTE: On update, you cannot change the schema name if the inference table is already enabled.", + "required": false + }, + "table_name_prefix": { + "name": "table_name_prefix", + "type": "any", + "description": "The prefix of the table in Unity Catalog. 
NOTE: On update, you cannot change the prefix name if the inference table is already enabled.", + "required": false + } + } + }, + "serving.BearerTokenAuth": { + "name": "BearerTokenAuth", + "package": "serving", + "description": "bearer token auth configuration.", + "fields": { + "token": { + "name": "token", + "type": "any", + "description": "The Databricks secret key reference for a token.\nIf you prefer to paste your token directly, see `token_plaintext`.", + "required": false + }, + "token_plaintext": { + "name": "token_plaintext", + "type": "any", + "description": "The token provided as a plaintext string. If you prefer to reference your\ntoken using Databricks Secrets, see `token`.", + "required": false + } + } + }, + "serving.CohereConfig": { + "name": "CohereConfig", + "package": "serving", + "description": "cohere config configuration.", + "fields": { + "cohere_api_base": { + "name": "cohere_api_base", + "type": "any", + "description": "This is an optional field to provide a customized base URL for the Cohere\nAPI. If left unspecified, the standard Cohere base URL is used.", + "required": false + }, + "cohere_api_key": { + "name": "cohere_api_key", + "type": "any", + "description": "The Databricks secret key reference for a Cohere API key. If you prefer\nto paste your API key directly, see `cohere_api_key_plaintext`. You must\nprovide an API key using one of the following fields: `cohere_api_key` or\n`cohere_api_key_plaintext`.", + "required": false + }, + "cohere_api_key_plaintext": { + "name": "cohere_api_key_plaintext", + "type": "any", + "description": "The Cohere API key provided as a plaintext string. If you prefer to\nreference your key using Databricks Secrets, see `cohere_api_key`. You\nmust provide an API key using one of the following fields:\n`cohere_api_key` or `cohere_api_key_plaintext`.", + "required": false + } + } + }, + "serving.CustomProviderConfig": { + "name": "CustomProviderConfig", + "package": "serving", + "description": "Configs needed to create a custom provider model route.", + "fields": { + "api_key_auth": { + "name": "api_key_auth", + "type": "any", + "description": "This is a field to provide API key authentication for the custom provider API.\nYou can only specify one authentication method.", + "required": false + }, + "bearer_token_auth": { + "name": "bearer_token_auth", + "type": "any", + "description": "This is a field to provide bearer token authentication for the custom provider API.\nYou can only specify one authentication method.", + "required": false + }, + "custom_provider_url": { + "name": "custom_provider_url", + "type": "string", + "description": "This is a field to provide the URL of the custom provider API.", + "required": false + } + } + }, + "serving.DatabricksModelServingConfig": { + "name": "DatabricksModelServingConfig", + "package": "serving", + "description": "databricks model serving config configuration.", + "fields": { + "databricks_api_token": { + "name": "databricks_api_token", + "type": "any", + "description": "The Databricks secret key reference for a Databricks API token that\ncorresponds to a user or service principal with Can Query access to the\nmodel serving endpoint pointed to by this external model. If you prefer\nto paste your API key directly, see `databricks_api_token_plaintext`. 
You\nmust provide an API key using one of the following fields:\n`databricks_api_token` or `databricks_api_token_plaintext`.", + "required": false + }, + "databricks_api_token_plaintext": { + "name": "databricks_api_token_plaintext", + "type": "any", + "description": "The Databricks API token that corresponds to a user or service principal\nwith Can Query access to the model serving endpoint pointed to by this\nexternal model provided as a plaintext string. If you prefer to reference\nyour key using Databricks Secrets, see `databricks_api_token`. You must\nprovide an API key using one of the following fields:\n`databricks_api_token` or `databricks_api_token_plaintext`.", + "required": false + }, + "databricks_workspace_url": { + "name": "databricks_workspace_url", + "type": "string", + "description": "The URL of the Databricks workspace containing the model serving endpoint\npointed to by this external model.", + "required": false + } + } + }, + "serving.EmailNotifications": { + "name": "EmailNotifications", + "package": "serving", + "description": "email notifications configuration.", + "fields": { + "on_update_failure": { + "name": "on_update_failure", + "type": "any", + "description": "A list of email addresses to be notified when an endpoint fails to update its configuration or state.", + "required": false + }, + "on_update_success": { + "name": "on_update_success", + "type": "any", + "description": "A list of email addresses to be notified when an endpoint successfully updates its configuration or state.", + "required": false + } + } + }, + "serving.EndpointCoreConfigInput": { + "name": "EndpointCoreConfigInput", + "package": "serving", + "description": "endpoint core config input configuration.", + "fields": { + "auto_capture_config": { + "name": "auto_capture_config", + "type": "any", + "description": "Configuration for Inference Tables which automatically logs requests and responses to Unity Catalog.\nNote: this field is deprecated for creating new provisioned throughput endpoints,\nor updating existing provisioned throughput endpoints that never have inference table configured;\nin these cases please use AI Gateway to manage inference tables.", + "required": false + }, + "served_entities": { + "name": "served_entities", + "type": "any", + "description": "The list of served entities under the serving endpoint config.", + "required": false + }, + "served_models": { + "name": "served_models", + "type": "any", + "description": "(Deprecated, use served_entities instead) The list of served models under the serving endpoint config.", + "required": false + }, + "traffic_config": { + "name": "traffic_config", + "type": "any", + "description": "The traffic configuration associated with the serving endpoint config.", + "required": false + } + } + }, + "serving.EndpointTag": { + "name": "EndpointTag", + "package": "serving", + "description": "endpoint tag configuration.", + "fields": { + "key": { + "name": "key", + "type": "any", + "description": "Key field for a serving endpoint tag.", + "required": false + }, + "value": { + "name": "value", + "type": "any", + "description": "Optional value field for a serving endpoint tag.", + "required": false + } + } + }, + "serving.ExternalModel": { + "name": "ExternalModel", + "package": "serving", + "description": "external model configuration.", + "fields": { + "ai21labs_config": { + "name": "ai21labs_config", + "type": "any", + "description": "AI21Labs Config. 
Only required if the provider is 'ai21labs'.", + "required": false + }, + "amazon_bedrock_config": { + "name": "amazon_bedrock_config", + "type": "any", + "description": "Amazon Bedrock Config. Only required if the provider is 'amazon-bedrock'.", + "required": false + }, + "anthropic_config": { + "name": "anthropic_config", + "type": "any", + "description": "Anthropic Config. Only required if the provider is 'anthropic'.", + "required": false + }, + "cohere_config": { + "name": "cohere_config", + "type": "any", + "description": "Cohere Config. Only required if the provider is 'cohere'.", + "required": false + }, + "custom_provider_config": { + "name": "custom_provider_config", + "type": "any", + "description": "Custom Provider Config. Only required if the provider is 'custom'.", + "required": false + }, + "databricks_model_serving_config": { + "name": "databricks_model_serving_config", + "type": "any", + "description": "Databricks Model Serving Config. Only required if the provider is 'databricks-model-serving'.", + "required": false + }, + "google_cloud_vertex_ai_config": { + "name": "google_cloud_vertex_ai_config", + "type": "any", + "description": "Google Cloud Vertex AI Config. Only required if the provider is 'google-cloud-vertex-ai'.", + "required": false + }, + "name": { + "name": "name", + "type": "any", + "description": "The name of the external model.", + "required": false + }, + "openai_config": { + "name": "openai_config", + "type": "any", + "description": "OpenAI Config. Only required if the provider is 'openai'.", + "required": false + }, + "palm_config": { + "name": "palm_config", + "type": "any", + "description": "PaLM Config. Only required if the provider is 'palm'.", + "required": false + }, + "provider": { + "name": "provider", + "type": "any", + "description": "The name of the provider for the external model. Currently, the supported providers are 'ai21labs', 'anthropic', 'amazon-bedrock', 'cohere', 'databricks-model-serving', 'google-cloud-vertex-ai', 'openai', 'palm', and 'custom'.", + "required": false + }, + "task": { + "name": "task", + "type": "any", + "description": "The task type of the external model.", + "required": false + } + } + }, + "serving.ExternalModelProvider": { + "name": "ExternalModelProvider", + "package": "serving", + "description": "external model provider configuration.", + "fields": {} + }, + "serving.FallbackConfig": { + "name": "FallbackConfig", + "package": "serving", + "description": "fallback config configuration.", + "fields": { + "enabled": { + "name": "enabled", + "type": "bool", + "description": "Whether to enable traffic fallback. When a served entity in the serving endpoint returns specific error\ncodes (e.g. 500), the request will automatically be round-robin attempted with other served entities in the same\nendpoint, following the order of served entity list, until a successful response is returned.\nIf all attempts fail, return the last response with the error code.", + "required": false + } + } + }, + "serving.GoogleCloudVertexAiConfig": { + "name": "GoogleCloudVertexAiConfig", + "package": "serving", + "description": "google cloud vertex ai config configuration.", + "fields": { + "private_key": { + "name": "private_key", + "type": "any", + "description": "The Databricks secret key reference for a private key for the service\naccount which has access to the Google Cloud Vertex AI Service. See [Best\npractices for managing service account keys]. If you prefer to paste your\nAPI key directly, see `private_key_plaintext`. 
You must provide an API\nkey using one of the following fields: `private_key` or\n`private_key_plaintext`\n\n[Best practices for managing service account keys]: https://cloud.google.com/iam/docs/best-practices-for-managing-service-account-keys", + "required": false + }, + "private_key_plaintext": { + "name": "private_key_plaintext", + "type": "any", + "description": "The private key for the service account which has access to the Google\nCloud Vertex AI Service provided as a plaintext secret. See [Best\npractices for managing service account keys]. If you prefer to reference\nyour key using Databricks Secrets, see `private_key`. You must provide an\nAPI key using one of the following fields: `private_key` or\n`private_key_plaintext`.\n\n[Best practices for managing service account keys]: https://cloud.google.com/iam/docs/best-practices-for-managing-service-account-keys", + "required": false + }, + "project_id": { + "name": "project_id", + "type": "string", + "description": "This is the Google Cloud project id that the service account is\nassociated with.", + "required": false + }, + "region": { + "name": "region", + "type": "any", + "description": "This is the region for the Google Cloud Vertex AI Service. See [supported\nregions] for more details. Some models are only available in specific\nregions.\n\n[supported regions]: https://cloud.google.com/vertex-ai/docs/general/locations", + "required": false + } + } + }, + "serving.OpenAiConfig": { + "name": "OpenAiConfig", + "package": "serving", + "description": "Configs needed to create an OpenAI model route.", + "fields": { + "microsoft_entra_client_id": { + "name": "microsoft_entra_client_id", + "type": "string", + "description": "This field is only required for Azure AD OpenAI and is the Microsoft\nEntra Client ID.", + "required": false + }, + "microsoft_entra_client_secret": { + "name": "microsoft_entra_client_secret", + "type": "any", + "description": "The Databricks secret key reference for a client secret used for\nMicrosoft Entra ID authentication. If you prefer to paste your client\nsecret directly, see `microsoft_entra_client_secret_plaintext`. You must\nprovide an API key using one of the following fields:\n`microsoft_entra_client_secret` or\n`microsoft_entra_client_secret_plaintext`.", + "required": false + }, + "microsoft_entra_client_secret_plaintext": { + "name": "microsoft_entra_client_secret_plaintext", + "type": "any", + "description": "The client secret used for Microsoft Entra ID authentication provided as\na plaintext string. If you prefer to reference your key using Databricks\nSecrets, see `microsoft_entra_client_secret`. You must provide an API key\nusing one of the following fields: `microsoft_entra_client_secret` or\n`microsoft_entra_client_secret_plaintext`.", + "required": false + }, + "microsoft_entra_tenant_id": { + "name": "microsoft_entra_tenant_id", + "type": "string", + "description": "This field is only required for Azure AD OpenAI and is the Microsoft\nEntra Tenant ID.", + "required": false + }, + "openai_api_base": { + "name": "openai_api_base", + "type": "any", + "description": "This is a field to provide a customized base URl for the OpenAI API. For\nAzure OpenAI, this field is required, and is the base URL for the Azure\nOpenAI API service provided by Azure. 
For other OpenAI API types, this\nfield is optional, and if left unspecified, the standard OpenAI base URL\nis used.", + "required": false + }, + "openai_api_key": { + "name": "openai_api_key", + "type": "any", + "description": "The Databricks secret key reference for an OpenAI API key using the\nOpenAI or Azure service. If you prefer to paste your API key directly,\nsee `openai_api_key_plaintext`. You must provide an API key using one of\nthe following fields: `openai_api_key` or `openai_api_key_plaintext`.", + "required": false + }, + "openai_api_key_plaintext": { + "name": "openai_api_key_plaintext", + "type": "any", + "description": "The OpenAI API key using the OpenAI or Azure service provided as a\nplaintext string. If you prefer to reference your key using Databricks\nSecrets, see `openai_api_key`. You must provide an API key using one of\nthe following fields: `openai_api_key` or `openai_api_key_plaintext`.", + "required": false + }, + "openai_api_type": { + "name": "openai_api_type", + "type": "any", + "description": "This is an optional field to specify the type of OpenAI API to use. For\nAzure OpenAI, this field is required, and adjust this parameter to\nrepresent the preferred security access validation protocol. For access\ntoken validation, use azure. For authentication using Azure Active\nDirectory (Azure AD) use, azuread.", + "required": false + }, + "openai_api_version": { + "name": "openai_api_version", + "type": "any", + "description": "This is an optional field to specify the OpenAI API version. For Azure\nOpenAI, this field is required, and is the version of the Azure OpenAI\nservice to utilize, specified by a date.", + "required": false + }, + "openai_deployment_name": { + "name": "openai_deployment_name", + "type": "string", + "description": "This field is only required for Azure OpenAI and is the name of the\ndeployment resource for the Azure OpenAI service.", + "required": false + }, + "openai_organization": { + "name": "openai_organization", + "type": "any", + "description": "This is an optional field to specify the organization in OpenAI or Azure\nOpenAI.", + "required": false + } + } + }, + "serving.PaLmConfig": { + "name": "PaLmConfig", + "package": "serving", + "description": "pa lm config configuration.", + "fields": { + "palm_api_key": { + "name": "palm_api_key", + "type": "any", + "description": "The Databricks secret key reference for a PaLM API key. If you prefer to\npaste your API key directly, see `palm_api_key_plaintext`. You must\nprovide an API key using one of the following fields: `palm_api_key` or\n`palm_api_key_plaintext`.", + "required": false + }, + "palm_api_key_plaintext": { + "name": "palm_api_key_plaintext", + "type": "any", + "description": "The PaLM API key provided as a plaintext string. If you prefer to\nreference your key using Databricks Secrets, see `palm_api_key`. You must\nprovide an API key using one of the following fields: `palm_api_key` or\n`palm_api_key_plaintext`.", + "required": false + } + } + }, + "serving.RateLimit": { + "name": "RateLimit", + "package": "serving", + "description": "rate limit configuration.", + "fields": { + "calls": { + "name": "calls", + "type": "any", + "description": "Used to specify how many calls are allowed for a key within the renewal_period.", + "required": false + }, + "key": { + "name": "key", + "type": "any", + "description": "Key field for a serving endpoint rate limit. 
Currently, only 'user' and 'endpoint' are supported, with 'endpoint' being the default if not specified.", + "required": false + }, + "renewal_period": { + "name": "renewal_period", + "type": "any", + "description": "Renewal period field for a serving endpoint rate limit. Currently, only 'minute' is supported.", + "required": false + } + } + }, + "serving.RateLimitKey": { + "name": "RateLimitKey", + "package": "serving", + "description": "rate limit key configuration.", + "fields": {} + }, + "serving.RateLimitRenewalPeriod": { + "name": "RateLimitRenewalPeriod", + "package": "serving", + "description": "rate limit renewal period configuration.", + "fields": {} + }, + "serving.Route": { + "name": "Route", + "package": "serving", + "description": "route configuration.", + "fields": { + "served_entity_name": { + "name": "served_entity_name", + "type": "string", + "description": "", + "required": false + }, + "served_model_name": { + "name": "served_model_name", + "type": "string", + "description": "The name of the served model this route configures traffic for.", + "required": false + }, + "traffic_percentage": { + "name": "traffic_percentage", + "type": "any", + "description": "The percentage of endpoint traffic to send to this route. It must be an integer between 0 and 100 inclusive.", + "required": false + } + } + }, + "serving.ServedEntityInput": { + "name": "ServedEntityInput", + "package": "serving", + "description": "served entity input configuration.", + "fields": { + "entity_name": { + "name": "entity_name", + "type": "string", + "description": "The name of the entity to be served. The entity may be a model in the Databricks Model Registry, a model in the Unity Catalog (UC), or a function of type FEATURE_SPEC in the UC. If it is a UC object, the full name of the object should be given in the form of **catalog_name.schema_name.model_name**.", + "required": false + }, + "entity_version": { + "name": "entity_version", + "type": "any", + "description": "", + "required": false + }, + "environment_vars": { + "name": "environment_vars", + "type": "any", + "description": "An object containing a set of optional, user-specified environment variable key-value pairs used for serving this entity. Note: this is an experimental feature and subject to change. Example entity environment variables that refer to Databricks secrets: `{\"OPENAI_API_KEY\": \"{{secrets/my_scope/my_key}}\", \"DATABRICKS_TOKEN\": \"{{secrets/my_scope2/my_key2}}\"}`", + "required": false + }, + "external_model": { + "name": "external_model", + "type": "any", + "description": "The external model to be served. NOTE: Only one of external_model and (entity_name, entity_version, workload_size, workload_type, and scale_to_zero_enabled) can be specified with the latter set being used for custom model serving for a Databricks registered model. For an existing endpoint with external_model, it cannot be updated to an endpoint without external_model. If the endpoint is created without external_model, users cannot update it to add external_model later. The task type of all external models within an endpoint must be the same.", + "required": false + }, + "instance_profile_arn": { + "name": "instance_profile_arn", + "type": "any", + "description": "ARN of the instance profile that the served entity uses to access AWS resources.", + "required": false + }, + "max_provisioned_concurrency": { + "name": "max_provisioned_concurrency", + "type": "any", + "description": "The maximum provisioned concurrency that the endpoint can scale up to. 
Do not use if workload_size is specified.", + "required": false + }, + "max_provisioned_throughput": { + "name": "max_provisioned_throughput", + "type": "any", + "description": "The maximum tokens per second that the endpoint can scale up to.", + "required": false + }, + "min_provisioned_concurrency": { + "name": "min_provisioned_concurrency", + "type": "any", + "description": "The minimum provisioned concurrency that the endpoint can scale down to. Do not use if workload_size is specified.", + "required": false + }, + "min_provisioned_throughput": { + "name": "min_provisioned_throughput", + "type": "any", + "description": "The minimum tokens per second that the endpoint can scale down to.", + "required": false + }, + "name": { + "name": "name", + "type": "any", + "description": "The name of a served entity. It must be unique across an endpoint. A served entity name can consist of alphanumeric characters, dashes, and underscores. If not specified for an external model, this field defaults to external_model.name, with '.' and ':' replaced with '-', and if not specified for other entities, it defaults to entity_name-entity_version.", + "required": false + }, + "provisioned_model_units": { + "name": "provisioned_model_units", + "type": "any", + "description": "The number of model units provisioned.", + "required": false + }, + "scale_to_zero_enabled": { + "name": "scale_to_zero_enabled", + "type": "bool", + "description": "Whether the compute resources for the served entity should scale down to zero.", + "required": false + }, + "workload_size": { + "name": "workload_size", + "type": "int", + "description": "The workload size of the served entity. The workload size corresponds to a range of provisioned concurrency that the compute autoscales between. A single unit of provisioned concurrency can process one request at a time. Valid workload sizes are \"Small\" (4 - 4 provisioned concurrency), \"Medium\" (8 - 16 provisioned concurrency), and \"Large\" (16 - 64 provisioned concurrency). Additional custom workload sizes can also be used when available in the workspace. If scale-to-zero is enabled, the lower bound of the provisioned concurrency for each workload size is 0. Do not use if min_provisioned_concurrency and max_provisioned_concurrency are specified.", + "required": false + }, + "workload_type": { + "name": "workload_type", + "type": "any", + "description": "The workload type of the served entity. The workload type selects which type of compute to use in the endpoint. The default value for this parameter is \"CPU\". For deep learning workloads, GPU acceleration is available by selecting workload types like GPU_SMALL and others. See the available [GPU types](https://docs.databricks.com/en/machine-learning/model-serving/create-manage-serving-endpoints.html#gpu-workload-types).", + "required": false + } + } + }, + "serving.ServedModelInput": { + "name": "ServedModelInput", + "package": "serving", + "description": "served model input configuration.", + "fields": { + "environment_vars": { + "name": "environment_vars", + "type": "any", + "description": "An object containing a set of optional, user-specified environment variable key-value pairs used for serving this entity. Note: this is an experimental feature and subject to change. 
Example entity environment variables that refer to Databricks secrets: `{\"OPENAI_API_KEY\": \"{{secrets/my_scope/my_key}}\", \"DATABRICKS_TOKEN\": \"{{secrets/my_scope2/my_key2}}\"}`", + "required": false + }, + "instance_profile_arn": { + "name": "instance_profile_arn", + "type": "any", + "description": "ARN of the instance profile that the served entity uses to access AWS resources.", + "required": false + }, + "max_provisioned_concurrency": { + "name": "max_provisioned_concurrency", + "type": "any", + "description": "The maximum provisioned concurrency that the endpoint can scale up to. Do not use if workload_size is specified.", + "required": false + }, + "max_provisioned_throughput": { + "name": "max_provisioned_throughput", + "type": "any", + "description": "The maximum tokens per second that the endpoint can scale up to.", + "required": false + }, + "min_provisioned_concurrency": { + "name": "min_provisioned_concurrency", + "type": "any", + "description": "The minimum provisioned concurrency that the endpoint can scale down to. Do not use if workload_size is specified.", + "required": false + }, + "min_provisioned_throughput": { + "name": "min_provisioned_throughput", + "type": "any", + "description": "The minimum tokens per second that the endpoint can scale down to.", + "required": false + }, + "model_name": { + "name": "model_name", + "type": "string", + "description": "", + "required": false + }, + "model_version": { + "name": "model_version", + "type": "any", + "description": "", + "required": false + }, + "name": { + "name": "name", + "type": "any", + "description": "The name of a served entity. It must be unique across an endpoint. A served entity name can consist of alphanumeric characters, dashes, and underscores. If not specified for an external model, this field defaults to external_model.name, with '.' and ':' replaced with '-', and if not specified for other entities, it defaults to entity_name-entity_version.", + "required": false + }, + "provisioned_model_units": { + "name": "provisioned_model_units", + "type": "any", + "description": "The number of model units provisioned.", + "required": false + }, + "scale_to_zero_enabled": { + "name": "scale_to_zero_enabled", + "type": "bool", + "description": "Whether the compute resources for the served entity should scale down to zero.", + "required": false + }, + "workload_size": { + "name": "workload_size", + "type": "int", + "description": "The workload size of the served entity. The workload size corresponds to a range of provisioned concurrency that the compute autoscales between. A single unit of provisioned concurrency can process one request at a time. Valid workload sizes are \"Small\" (4 - 4 provisioned concurrency), \"Medium\" (8 - 16 provisioned concurrency), and \"Large\" (16 - 64 provisioned concurrency). Additional custom workload sizes can also be used when available in the workspace. If scale-to-zero is enabled, the lower bound of the provisioned concurrency for each workload size is 0. Do not use if min_provisioned_concurrency and max_provisioned_concurrency are specified.", + "required": false + }, + "workload_type": { + "name": "workload_type", + "type": "any", + "description": "The workload type of the served entity. The workload type selects which type of compute to use in the endpoint. The default value for this parameter is \"CPU\". For deep learning workloads, GPU acceleration is available by selecting workload types like GPU_SMALL and others. 
See the available [GPU types](https://docs.databricks.com/en/machine-learning/model-serving/create-manage-serving-endpoints.html#gpu-workload-types).", + "required": false + } + } + }, + "serving.ServedModelInputWorkloadType": { + "name": "ServedModelInputWorkloadType", + "package": "serving", + "description": "Please keep this in sync with with workload types in InferenceEndpointEntities.scala", + "fields": {} + }, + "serving.ServingModelWorkloadType": { + "name": "ServingModelWorkloadType", + "package": "serving", + "description": "Please keep this in sync with with workload types in InferenceEndpointEntities.scala", + "fields": {} + }, + "serving.TrafficConfig": { + "name": "TrafficConfig", + "package": "serving", + "description": "traffic config configuration.", + "fields": { + "routes": { + "name": "routes", + "type": "any", + "description": "The list of routes that define traffic to each served entity.", + "required": false + } + } + }, + "sql.Aggregation": { + "name": "Aggregation", + "package": "sql", + "description": "aggregation configuration.", + "fields": {} + }, + "sql.AlertEvaluationState": { + "name": "AlertEvaluationState", + "package": "sql", + "description": "UNSPECIFIED - default unspecify value for proto enum, do not use it in the code\nUNKNOWN - alert not yet evaluated\nTRIGGERED - alert is triggered\nOK - alert is not triggered\nERROR - alert evaluation failed", + "fields": {} + }, + "sql.AlertLifecycleState": { + "name": "AlertLifecycleState", + "package": "sql", + "description": "alert lifecycle state configuration.", + "fields": {} + }, + "sql.AlertV2Evaluation": { + "name": "AlertV2Evaluation", + "package": "sql", + "description": "alert v2 evaluation configuration.", + "fields": { + "comparison_operator": { + "name": "comparison_operator", + "type": "any", + "description": "Operator used for comparison in alert evaluation.", + "required": false + }, + "empty_result_state": { + "name": "empty_result_state", + "type": "any", + "description": "Alert state if result is empty. 
Please avoid setting this field to be `UNKNOWN` because `UNKNOWN` state is planned to be deprecated.", + "required": false + }, + "last_evaluated_at": { + "name": "last_evaluated_at", + "type": "string (timestamp)", + "description": "Timestamp of the last evaluation.", + "required": false, + "output_only": true + }, + "notification": { + "name": "notification", + "type": "any", + "description": "User or Notification Destination to notify when alert is triggered.", + "required": false + }, + "source": { + "name": "source", + "type": "any", + "description": "Source column from result to use to evaluate alert", + "required": false + }, + "state": { + "name": "state", + "type": "any", + "description": "Latest state of alert evaluation.", + "required": false, + "output_only": true + }, + "threshold": { + "name": "threshold", + "type": "any", + "description": "Threshold to user for alert evaluation, can be a column or a value.", + "required": false + } + } + }, + "sql.AlertV2Notification": { + "name": "AlertV2Notification", + "package": "sql", + "description": "alert v2 notification configuration.", + "fields": { + "notify_on_ok": { + "name": "notify_on_ok", + "type": "any", + "description": "Whether to notify alert subscribers when alert returns back to normal.", + "required": false + }, + "retrigger_seconds": { + "name": "retrigger_seconds", + "type": "int", + "description": "Number of seconds an alert waits after being triggered before it is allowed to send another notification.\nIf set to 0 or omitted, the alert will not send any further notifications after the first trigger\nSetting this value to 1 allows the alert to send a notification on every evaluation where the condition is met, effectively making it always retrigger for notification purposes.", + "required": false + }, + "subscriptions": { + "name": "subscriptions", + "type": "any", + "description": "", + "required": false + } + } + }, + "sql.AlertV2Operand": { + "name": "AlertV2Operand", + "package": "sql", + "description": "alert v2 operand configuration.", + "fields": { + "column": { + "name": "column", + "type": "any", + "description": "", + "required": false + }, + "value": { + "name": "value", + "type": "any", + "description": "", + "required": false + } + } + }, + "sql.AlertV2OperandColumn": { + "name": "AlertV2OperandColumn", + "package": "sql", + "description": "alert v2 operand column configuration.", + "fields": { + "aggregation": { + "name": "aggregation", + "type": "any", + "description": "If not set, the behavior is equivalent to using `First row` in the UI.", + "required": false + }, + "display": { + "name": "display", + "type": "any", + "description": "", + "required": false + }, + "name": { + "name": "name", + "type": "any", + "description": "", + "required": false + } + } + }, + "sql.AlertV2OperandValue": { + "name": "AlertV2OperandValue", + "package": "sql", + "description": "alert v2 operand value configuration.", + "fields": { + "bool_value": { + "name": "bool_value", + "type": "any", + "description": "", + "required": false + }, + "double_value": { + "name": "double_value", + "type": "any", + "description": "", + "required": false + }, + "string_value": { + "name": "string_value", + "type": "any", + "description": "", + "required": false + } + } + }, + "sql.AlertV2RunAs": { + "name": "AlertV2RunAs", + "package": "sql", + "description": "alert v2 run as configuration.", + "fields": { + "service_principal_name": { + "name": "service_principal_name", + "type": "string", + "description": "Application ID of an active 
service principal. Setting this field requires the `servicePrincipal/user` role.", + "required": false + }, + "user_name": { + "name": "user_name", + "type": "string", + "description": "The email of an active workspace user. Can only set this field to their own email.", + "required": false + } + } + }, + "sql.AlertV2Subscription": { + "name": "AlertV2Subscription", + "package": "sql", + "description": "alert v2 subscription configuration.", + "fields": { + "destination_id": { + "name": "destination_id", + "type": "string", + "description": "", + "required": false + }, + "user_email": { + "name": "user_email", + "type": "any", + "description": "", + "required": false + } + } + }, + "sql.Channel": { + "name": "Channel", + "package": "sql", + "description": "Configures the channel name and DBSQL version of the warehouse. CHANNEL_NAME_CUSTOM should be chosen only when `dbsql_version` is specified.", + "fields": { + "dbsql_version": { + "name": "dbsql_version", + "type": "any", + "description": "", + "required": false + }, + "name": { + "name": "name", + "type": "any", + "description": "", + "required": false + } + } + }, + "sql.ChannelName": { + "name": "ChannelName", + "package": "sql", + "description": "channel name configuration.", + "fields": {} + }, + "sql.ComparisonOperator": { + "name": "ComparisonOperator", + "package": "sql", + "description": "comparison operator configuration.", + "fields": {} + }, + "sql.CreateWarehouseRequestWarehouseType": { + "name": "CreateWarehouseRequestWarehouseType", + "package": "sql", + "description": "create warehouse request warehouse type configuration.", + "fields": {} + }, + "sql.CronSchedule": { + "name": "CronSchedule", + "package": "sql", + "description": "cron schedule configuration.", + "fields": { + "pause_status": { + "name": "pause_status", + "type": "any", + "description": "Indicate whether this schedule is paused or not.", + "required": false + }, + "quartz_cron_schedule": { + "name": "quartz_cron_schedule", + "type": "any", + "description": "A cron expression using quartz syntax that specifies the schedule for this pipeline.\nShould use the quartz format described here: http://www.quartz-scheduler.org/documentation/quartz-2.1.7/tutorials/tutorial-lesson-06.html", + "required": false + }, + "timezone_id": { + "name": "timezone_id", + "type": "string", + "description": "A Java timezone id. 
The schedule will be resolved using this timezone.\nThis will be combined with the quartz_cron_schedule to determine the schedule.\nSee https://docs.databricks.com/sql/language-manual/sql-ref-syntax-aux-conf-mgmt-set-timezone.html for details.", + "required": false + } + } + }, + "sql.EndpointTagPair": { + "name": "EndpointTagPair", + "package": "sql", + "description": "endpoint tag pair configuration.", + "fields": { + "key": { + "name": "key", + "type": "any", + "description": "", + "required": false + }, + "value": { + "name": "value", + "type": "any", + "description": "", + "required": false + } + } + }, + "sql.EndpointTags": { + "name": "EndpointTags", + "package": "sql", + "description": "endpoint tags configuration.", + "fields": { + "custom_tags": { + "name": "custom_tags", + "type": "map[string]string", + "description": "", + "required": false + } + } + }, + "sql.SchedulePauseStatus": { + "name": "SchedulePauseStatus", + "package": "sql", + "description": "schedule pause status configuration.", + "fields": {} + }, + "sql.SpotInstancePolicy": { + "name": "SpotInstancePolicy", + "package": "sql", + "description": "EndpointSpotInstancePolicy configures whether the endpoint should use spot\ninstances.\n\nThe breakdown of how the EndpointSpotInstancePolicy converts to per cloud\nconfigurations is:\n\n+-------+--------------------------------------+--------------------------------+\n| Cloud | COST_OPTIMIZED | RELIABILITY_OPTIMIZED |\n+-------+--------------------------------------+--------------------------------+\n| AWS | On Demand Driver with Spot Executors | On Demand Driver and\nExecutors | | AZURE | On Demand Driver and Executors | On Demand Driver\nand Executors |\n+-------+--------------------------------------+--------------------------------+\n\nWhile including \"spot\" in the enum name may limit the the future\nextensibility of this field because it limits this enum to denoting \"spot or\nnot\", this is the field that PM recommends after discussion with customers\nper SC-48783.", + "fields": {} + }, + "workspace.AzureKeyVaultSecretScopeMetadata": { + "name": "AzureKeyVaultSecretScopeMetadata", + "package": "workspace", + "description": "The metadata of the Azure KeyVault for a secret scope of type `AZURE_KEYVAULT`", + "fields": { + "dns_name": { + "name": "dns_name", + "type": "string", + "description": "The DNS of the KeyVault", + "required": false + }, + "resource_id": { + "name": "resource_id", + "type": "string", + "description": "The resource id of the azure KeyVault that user wants to associate the scope with.", + "required": false + } + } + }, + "workspace.ScopeBackendType": { + "name": "ScopeBackendType", + "package": "workspace", + "description": "The types of secret scope backends in the Secret Manager. 
Azure KeyVault backed secret scopes\nwill be supported in a later release.", + "fields": {} + } + }, + "enums": { + "compute.State": { + "name": "State", + "package": "compute", + "description": "The state of a cluster.", + "values": [ + "ERROR", + "PENDING", + "RESIZING", + "RESTARTING", + "RUNNING", + "TERMINATED", + "TERMINATING", + "UNKNOWN" + ] + }, + "jobs.RunLifeCycleState": { + "name": "RunLifeCycleState", + "package": "jobs", + "description": "The current state of the run lifecycle.", + "values": [ + "INTERNAL_ERROR", + "PENDING", + "RUNNING", + "SKIPPED", + "TERMINATED", + "TERMINATING" + ] + }, + "pipelines.PipelineState": { + "name": "PipelineState", + "package": "pipelines", + "description": "The state of a pipeline.", + "values": [ + "DELETED", + "FAILED", + "IDLE", + "RECOVERING", + "RESETTING", + "RUNNING", + "STARTING", + "STOPPING" + ] + } + } +} \ No newline at end of file diff --git a/experimental/aitools/lib/providers/sdkdocs/search.go b/experimental/aitools/lib/providers/sdkdocs/search.go new file mode 100644 index 0000000000..8428df29ac --- /dev/null +++ b/experimental/aitools/lib/providers/sdkdocs/search.go @@ -0,0 +1,265 @@ +package sdkdocs + +import ( + "sort" + "strings" +) + +// SearchResult represents a single search result. +type SearchResult struct { + Type string `json:"type"` // "service", "method", "type", "enum" + Name string `json:"name"` + Path string `json:"path"` + Service string `json:"service,omitempty"` + Description string `json:"description"` + Score float64 `json:"score"` +} + +// SearchOptions configures the search behavior. +type SearchOptions struct { + Query string + Category string // "services", "methods", "types", "enums", or empty for all + Service string // filter by specific service + Limit int +} + +// Search performs a search across the SDK documentation index. +func (idx *SDKDocsIndex) Search(opts SearchOptions) []SearchResult { + if opts.Limit <= 0 { + opts.Limit = 10 + } + if opts.Limit > 50 { + opts.Limit = 50 + } + + query := strings.ToLower(strings.TrimSpace(opts.Query)) + if query == "" { + return nil + } + + terms := tokenize(query) + var results []SearchResult + + // Search services + if opts.Category == "" || opts.Category == "services" { + for name, service := range idx.Services { + if opts.Service != "" && name != opts.Service { + continue + } + score := computeScore(terms, name, service.Name, service.Description) + if score > 0 { + results = append(results, SearchResult{ + Type: "service", + Name: service.Name, + Path: name, + Description: truncate(service.Description, 200), + Score: score, + }) + } + } + } + + // Search methods + if opts.Category == "" || opts.Category == "methods" { + for serviceName, service := range idx.Services { + if opts.Service != "" && serviceName != opts.Service { + continue + } + for methodName, method := range service.Methods { + score := computeScore(terms, methodName, method.Name, method.Description) + // Boost score if query contains the service name + if containsAny(query, serviceName, service.Name) { + score *= 1.5 + } + if score > 0 { + results = append(results, SearchResult{ + Type: "method", + Name: methodName, + Path: serviceName + "." 
+ methodName, + Service: serviceName, + Description: truncate(method.Description, 200), + Score: score, + }) + } + } + } + } + + // Search types + if opts.Category == "" || opts.Category == "types" { + for typePath, typeDoc := range idx.Types { + if opts.Service != "" && !strings.HasPrefix(typePath, opts.Service+".") { + continue + } + score := computeScore(terms, typeDoc.Name, typePath, typeDoc.Description) + if score > 0 { + results = append(results, SearchResult{ + Type: "type", + Name: typeDoc.Name, + Path: typePath, + Service: typeDoc.Package, + Description: truncate(typeDoc.Description, 200), + Score: score, + }) + } + } + } + + // Search enums + if opts.Category == "" || opts.Category == "enums" { + for enumPath, enumDoc := range idx.Enums { + if opts.Service != "" && !strings.HasPrefix(enumPath, opts.Service+".") { + continue + } + // Include enum values in search + valuesStr := strings.Join(enumDoc.Values, " ") + score := computeScore(terms, enumDoc.Name, enumPath, enumDoc.Description+" "+valuesStr) + if score > 0 { + results = append(results, SearchResult{ + Type: "enum", + Name: enumDoc.Name, + Path: enumPath, + Service: enumDoc.Package, + Description: truncate(enumDoc.Description, 200), + Score: score, + }) + } + } + } + + // Sort by score descending + sort.Slice(results, func(i, j int) bool { + if results[i].Score != results[j].Score { + return results[i].Score > results[j].Score + } + // Secondary sort by name for stability + return results[i].Name < results[j].Name + }) + + // Apply limit + if len(results) > opts.Limit { + results = results[:opts.Limit] + } + + return results +} + +// tokenize splits a query into searchable terms. +func tokenize(query string) []string { + // Split on common separators + query = strings.NewReplacer( + "_", " ", + "-", " ", + ".", " ", + ",", " ", + "?", " ", + "!", " ", + ).Replace(query) + + words := strings.Fields(query) + terms := make([]string, 0, len(words)) + + // Filter out common stop words + stopWords := map[string]bool{ + "a": true, "an": true, "the": true, "is": true, "are": true, + "to": true, "for": true, "in": true, "on": true, "of": true, + "how": true, "do": true, "i": true, "can": true, "what": true, + "get": true, "use": true, "with": true, "from": true, + } + + for _, word := range words { + word = strings.ToLower(word) + if len(word) >= 2 && !stopWords[word] { + terms = append(terms, word) + } + } + + return terms +} + +// computeScore calculates a relevance score for a document. 
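+// Each matching query term contributes 10 for a whole-word match or 5 for a
+// substring match, boosted 1.5x when the term also appears in the first name
+// (usually the identifier); the sum is then scaled by the fraction of query
+// terms that matched.
+//
+// For example, computeScore([]string{"create", "job"}, "Create", "Create a new job")
+// returns 25: "create" is a whole-word match boosted to 15 because it also
+// appears in the identifier, "job" is a whole-word match worth 10, and both
+// terms matched, so the match ratio is 1.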
+func computeScore(queryTerms []string, names ...string) float64 { + if len(queryTerms) == 0 { + return 0 + } + + // Combine all searchable text + combined := strings.ToLower(strings.Join(names, " ")) + + var totalScore float64 + matchedTerms := 0 + + for _, term := range queryTerms { + termScore := 0.0 + + // Exact word match (highest score) + if containsWord(combined, term) { + termScore = 10.0 + matchedTerms++ + } else if strings.Contains(combined, term) { + // Substring match (lower score) + termScore = 5.0 + matchedTerms++ + } else { + // Try prefix matching + words := strings.Fields(combined) + for _, word := range words { + if strings.HasPrefix(word, term) { + termScore = 3.0 + matchedTerms++ + break + } + } + } + + // Boost if term appears in first name (usually the identifier) + if len(names) > 0 && strings.Contains(strings.ToLower(names[0]), term) { + termScore *= 1.5 + } + + totalScore += termScore + } + + // Require at least one term to match + if matchedTerms == 0 { + return 0 + } + + // Normalize by number of query terms and boost by match ratio + matchRatio := float64(matchedTerms) / float64(len(queryTerms)) + return totalScore * matchRatio +} + +// containsWord checks if text contains word as a complete word. +func containsWord(text, word string) bool { + words := strings.Fields(text) + for _, w := range words { + if w == word { + return true + } + } + return false +} + +// containsAny checks if text contains any of the given substrings. +func containsAny(text string, substrs ...string) bool { + text = strings.ToLower(text) + for _, s := range substrs { + if strings.Contains(text, strings.ToLower(s)) { + return true + } + } + return false +} + +// truncate shortens a string to the specified length. +func truncate(s string, maxLen int) string { + if len(s) <= maxLen { + return s + } + // Find last space before maxLen to avoid cutting words + if idx := strings.LastIndex(s[:maxLen], " "); idx > maxLen/2 { + return s[:idx] + "..." + } + return s[:maxLen-3] + "..." 
+} diff --git a/experimental/aitools/lib/providers/sdkdocs/search_test.go b/experimental/aitools/lib/providers/sdkdocs/search_test.go new file mode 100644 index 0000000000..1a4d45feb0 --- /dev/null +++ b/experimental/aitools/lib/providers/sdkdocs/search_test.go @@ -0,0 +1,274 @@ +package sdkdocs + +import ( + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func TestTokenize(t *testing.T) { + tests := []struct { + name string + input string + expected []string + }{ + { + name: "simple query", + input: "create job", + expected: []string{"create", "job"}, + }, + { + name: "query with stop words", + input: "how do I create a job", + expected: []string{"create", "job"}, + }, + { + name: "query with underscores", + input: "cluster_name field", + expected: []string{"cluster", "name", "field"}, + }, + { + name: "empty query", + input: "", + expected: []string{}, + }, + { + name: "only stop words", + input: "how do I", + expected: []string{}, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + result := tokenize(tt.input) + assert.Equal(t, tt.expected, result) + }) + } +} + +func TestComputeScore(t *testing.T) { + tests := []struct { + name string + queryTerms []string + names []string + expectZero bool + }{ + { + name: "exact match", + queryTerms: []string{"create"}, + names: []string{"Create", "Create a new job"}, + expectZero: false, + }, + { + name: "no match", + queryTerms: []string{"delete"}, + names: []string{"Create", "Create a new job"}, + expectZero: true, + }, + { + name: "partial match", + queryTerms: []string{"job"}, + names: []string{"CreateJob", "Creates a job"}, + expectZero: false, + }, + { + name: "empty query", + queryTerms: []string{}, + names: []string{"Create", "Create a new job"}, + expectZero: true, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + score := computeScore(tt.queryTerms, tt.names...) 
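+ // computeScore returns 0 only when no query term matched any of the names
+ // (including the empty-query case), so each table case asserts either a
+ // zero score or a strictly positive one.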
+ if tt.expectZero { + assert.Equal(t, float64(0), score) + } else { + assert.Greater(t, score, float64(0)) + } + }) + } +} + +func TestTruncate(t *testing.T) { + tests := []struct { + name string + input string + maxLen int + expected string + }{ + { + name: "short string", + input: "hello", + maxLen: 10, + expected: "hello", + }, + { + name: "exact length", + input: "hello", + maxLen: 5, + expected: "hello", + }, + { + name: "long string truncated at word boundary", + input: "hello world this is a long string", + maxLen: 15, + expected: "hello world...", + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + result := truncate(tt.input, tt.maxLen) + assert.Equal(t, tt.expected, result) + }) + } +} + +func TestSearch(t *testing.T) { + // Create a test index + index := &SDKDocsIndex{ + Version: "1.0", + Services: map[string]*ServiceDoc{ + "jobs": { + Name: "Jobs", + Description: "The Jobs API allows you to create, edit, and delete jobs.", + Package: "github.com/databricks/databricks-sdk-go/service/jobs", + Methods: map[string]*MethodDoc{ + "Create": { + Name: "Create", + Description: "Create a new job.", + Signature: "Create(ctx context.Context, request CreateJob) (*CreateResponse, error)", + }, + "List": { + Name: "List", + Description: "List all jobs.", + Signature: "List(ctx context.Context, request ListJobsRequest) listing.Iterator[BaseJob]", + }, + "Delete": { + Name: "Delete", + Description: "Delete a job.", + Signature: "Delete(ctx context.Context, request DeleteJob) error", + }, + }, + }, + "compute": { + Name: "Clusters", + Description: "The Clusters API allows you to create and manage clusters.", + Package: "github.com/databricks/databricks-sdk-go/service/compute", + Methods: map[string]*MethodDoc{ + "Create": { + Name: "Create", + Description: "Create a new cluster.", + Signature: "Create(ctx context.Context, request CreateCluster) (*CreateClusterResponse, error)", + }, + }, + }, + }, + Types: map[string]*TypeDoc{ + "jobs.CreateJob": { + Name: "CreateJob", + Package: "jobs", + Description: "Job creation settings.", + Fields: map[string]*FieldDoc{ + "name": { + Name: "name", + Type: "string", + Description: "The job name.", + }, + }, + }, + }, + Enums: map[string]*EnumDoc{ + "jobs.RunLifeCycleState": { + Name: "RunLifeCycleState", + Package: "jobs", + Description: "The current state of the run lifecycle.", + Values: []string{"PENDING", "RUNNING", "TERMINATED"}, + }, + }, + } + + t.Run("search for create job", func(t *testing.T) { + results := index.Search(SearchOptions{ + Query: "create job", + Limit: 10, + }) + + require.NotEmpty(t, results) + // Should find the Jobs.Create method + found := false + for _, r := range results { + if r.Type == "method" && r.Name == "Create" && r.Service == "jobs" { + found = true + break + } + } + assert.True(t, found, "Should find Jobs.Create method") + }) + + t.Run("search with service filter", func(t *testing.T) { + results := index.Search(SearchOptions{ + Query: "create", + Service: "jobs", + Limit: 10, + }) + + for _, r := range results { + if r.Type == "method" { + assert.Equal(t, "jobs", r.Service, "All method results should be from jobs service") + } + } + }) + + t.Run("search with category filter", func(t *testing.T) { + results := index.Search(SearchOptions{ + Query: "job", + Category: "types", + Limit: 10, + }) + + for _, r := range results { + assert.Equal(t, "type", r.Type, "All results should be types") + } + }) + + t.Run("search for enum values", func(t *testing.T) { + results := index.Search(SearchOptions{ 
+ Query: "lifecycle state", + Category: "enums", + Limit: 10, + }) + + require.NotEmpty(t, results) + found := false + for _, r := range results { + if r.Name == "RunLifeCycleState" { + found = true + break + } + } + assert.True(t, found, "Should find RunLifeCycleState enum") + }) + + t.Run("empty query returns no results", func(t *testing.T) { + results := index.Search(SearchOptions{ + Query: "", + Limit: 10, + }) + + assert.Empty(t, results) + }) + + t.Run("limit is enforced", func(t *testing.T) { + results := index.Search(SearchOptions{ + Query: "create", + Limit: 1, + }) + + assert.LessOrEqual(t, len(results), 1) + }) +} diff --git a/experimental/aitools/lib/server/server.go b/experimental/aitools/lib/server/server.go index ff0016f4be..fd081dad31 100644 --- a/experimental/aitools/lib/server/server.go +++ b/experimental/aitools/lib/server/server.go @@ -8,6 +8,7 @@ import ( mcpsdk "github.com/databricks/cli/experimental/aitools/lib/mcp" "github.com/databricks/cli/experimental/aitools/lib/middlewares" "github.com/databricks/cli/experimental/aitools/lib/providers/clitools" + "github.com/databricks/cli/experimental/aitools/lib/providers/sdkdocs" "github.com/databricks/cli/experimental/aitools/lib/session" "github.com/databricks/cli/experimental/aitools/lib/trajectory" "github.com/databricks/cli/internal/build" @@ -77,6 +78,11 @@ func (s *Server) RegisterTools(ctx context.Context) error { return err } + // Register SDK docs provider + if err := s.registerSDKDocsProvider(ctx); err != nil { + return err + } + return nil } @@ -96,6 +102,22 @@ func (s *Server) registerCLIToolsProvider(ctx context.Context) error { return nil } +// registerSDKDocsProvider registers the SDK documentation provider +func (s *Server) registerSDKDocsProvider(ctx context.Context) error { + log.Info(ctx, "Registering SDK docs provider") + + provider, err := sdkdocs.NewProvider(ctx, s.config, s.session) + if err != nil { + return err + } + + if err := provider.RegisterTools(s.server); err != nil { + return err + } + + return nil +} + // Run starts the MCP server with STDIO transport and blocks until the context is cancelled. // The server communicates via standard input/output following the MCP protocol. func (s *Server) Run(ctx context.Context) error { diff --git a/tools/gen_sdk_docs_index.go b/tools/gen_sdk_docs_index.go new file mode 100644 index 0000000000..8bd7bb2ace --- /dev/null +++ b/tools/gen_sdk_docs_index.go @@ -0,0 +1,664 @@ +// Package main generates SDK documentation index for MCP tools. +// +// Usage: +// +// go run tools/gen_sdk_docs_index.go -output experimental/aitools/lib/providers/sdkdocs/ +// +// This tool parses the annotations_openapi.yml file and Go SDK interfaces to generate +// a comprehensive SDK documentation index that is embedded into the CLI binary. +package main + +import ( + "encoding/json" + "flag" + "fmt" + "os" + "path/filepath" + "regexp" + "sort" + "strings" + "time" + + "gopkg.in/yaml.v3" +) + +// SDKDocsIndex represents the complete SDK documentation index. +type SDKDocsIndex struct { + Version string `json:"version"` + GeneratedAt string `json:"generated_at"` + Services map[string]*ServiceDoc `json:"services"` + Types map[string]*TypeDoc `json:"types"` + Enums map[string]*EnumDoc `json:"enums"` +} + +// ServiceDoc represents documentation for an API service. 
+type ServiceDoc struct { + Name string `json:"name"` + Description string `json:"description"` + Package string `json:"package"` + Methods map[string]*MethodDoc `json:"methods"` +} + +// MethodDoc represents documentation for an API method. +type MethodDoc struct { + Name string `json:"name"` + Description string `json:"description"` + Signature string `json:"signature"` + Parameters []ParamDoc `json:"parameters"` + Returns *ReturnDoc `json:"returns,omitempty"` + Example string `json:"example,omitempty"` + HTTPMethod string `json:"http_method,omitempty"` + HTTPPath string `json:"http_path,omitempty"` +} + +// ParamDoc represents documentation for a method parameter. +type ParamDoc struct { + Name string `json:"name"` + Type string `json:"type"` + Description string `json:"description"` + Required bool `json:"required"` +} + +// ReturnDoc represents documentation for a method return type. +type ReturnDoc struct { + Type string `json:"type"` + Description string `json:"description"` +} + +// TypeDoc represents documentation for a data type. +type TypeDoc struct { + Name string `json:"name"` + Package string `json:"package"` + Description string `json:"description"` + Fields map[string]*FieldDoc `json:"fields"` +} + +// FieldDoc represents documentation for a struct field. +type FieldDoc struct { + Name string `json:"name"` + Type string `json:"type"` + Description string `json:"description"` + Required bool `json:"required"` + OutputOnly bool `json:"output_only,omitempty"` + Deprecated bool `json:"deprecated,omitempty"` +} + +// EnumDoc represents documentation for an enum type. +type EnumDoc struct { + Name string `json:"name"` + Package string `json:"package"` + Description string `json:"description"` + Values []string `json:"values"` +} + +// AnnotationsFile represents the structure of annotations_openapi.yml +type AnnotationsFile map[string]map[string]FieldAnnotation + +// FieldAnnotation represents annotations for a single field +type FieldAnnotation struct { + Description string `yaml:"description"` + OutputOnly string `yaml:"x-databricks-field-behaviors_output_only"` + DeprecationMessage string `yaml:"deprecation_message"` +} + +func main() { + outputDir := flag.String("output", "experimental/aitools/lib/providers/sdkdocs/", "Output directory for generated index") + annotationsPath := flag.String("annotations", "bundle/internal/schema/annotations_openapi.yml", "Path to annotations file") + flag.Parse() + + // Find project root + projectRoot, err := findProjectRoot() + if err != nil { + fmt.Fprintf(os.Stderr, "Error finding project root: %v\n", err) + os.Exit(1) + } + + // Load annotations + annotations, err := loadAnnotations(filepath.Join(projectRoot, *annotationsPath)) + if err != nil { + fmt.Fprintf(os.Stderr, "Error loading annotations: %v\n", err) + os.Exit(1) + } + + // Generate index + index := generateIndex(annotations) + + // Write output + outputPath := filepath.Join(projectRoot, *outputDir, "sdk_docs_index.json") + if err := writeIndex(index, outputPath); err != nil { + fmt.Fprintf(os.Stderr, "Error writing index: %v\n", err) + os.Exit(1) + } + + fmt.Printf("Generated SDK docs index: %s\n", outputPath) + fmt.Printf(" Services: %d\n", len(index.Services)) + fmt.Printf(" Types: %d\n", len(index.Types)) + fmt.Printf(" Enums: %d\n", len(index.Enums)) +} + +func findProjectRoot() (string, error) { + dir, err := os.Getwd() + if err != nil { + return "", err + } + + for { + if _, err := os.Stat(filepath.Join(dir, "go.mod")); err == nil { + return dir, nil + } + parent := 
filepath.Dir(dir) + if parent == dir { + return "", fmt.Errorf("could not find project root (go.mod)") + } + dir = parent + } +} + +func loadAnnotations(path string) (AnnotationsFile, error) { + data, err := os.ReadFile(path) + if err != nil { + return nil, fmt.Errorf("failed to read annotations file: %w", err) + } + + var annotations AnnotationsFile + if err := yaml.Unmarshal(data, &annotations); err != nil { + return nil, fmt.Errorf("failed to parse annotations: %w", err) + } + + return annotations, nil +} + +func generateIndex(annotations AnnotationsFile) *SDKDocsIndex { + index := &SDKDocsIndex{ + Version: "1.0", + GeneratedAt: time.Now().UTC().Format(time.RFC3339), + Services: make(map[string]*ServiceDoc), + Types: make(map[string]*TypeDoc), + Enums: make(map[string]*EnumDoc), + } + + // Extract types from annotations + for fullTypeName, fields := range annotations { + typeName := extractTypeName(fullTypeName) + packageName := extractPackageName(fullTypeName) + + if typeName == "" { + continue + } + + typeDoc := &TypeDoc{ + Name: typeName, + Package: packageName, + Description: inferTypeDescription(typeName), + Fields: make(map[string]*FieldDoc), + } + + for fieldName, annotation := range fields { + if fieldName == "_" { + // Type-level description + if annotation.Description != "" { + typeDoc.Description = annotation.Description + } + continue + } + + fieldDoc := &FieldDoc{ + Name: fieldName, + Type: inferFieldType(fieldName), + Description: annotation.Description, + OutputOnly: annotation.OutputOnly == "true", + Deprecated: annotation.DeprecationMessage != "", + } + typeDoc.Fields[fieldName] = fieldDoc + } + + // Determine the service this type belongs to + service := inferServiceFromPackage(packageName) + typePath := service + "." + typeName + index.Types[typePath] = typeDoc + } + + // Add well-known services with common methods + addCoreServices(index) + + return index +} + +func extractTypeName(fullPath string) string { + // Extract type name from paths like "github.com/databricks/cli/bundle/config/resources.Alert" + parts := strings.Split(fullPath, ".") + if len(parts) > 0 { + return parts[len(parts)-1] + } + return "" +} + +func extractPackageName(fullPath string) string { + // Extract package from paths like "github.com/databricks/cli/bundle/config/resources.Alert" + parts := strings.Split(fullPath, "/") + if len(parts) > 0 { + lastPart := parts[len(parts)-1] + if idx := strings.Index(lastPart, "."); idx > 0 { + return lastPart[:idx] + } + return lastPart + } + return "" +} + +func inferServiceFromPackage(packageName string) string { + // Map package names to service names + serviceMap := map[string]string{ + "resources": "bundle", + "jobs": "jobs", + "clusters": "compute", + "compute": "compute", + "pipelines": "pipelines", + "catalog": "catalog", + "sql": "sql", + "apps": "apps", + "serving": "serving", + "ml": "ml", + "workspace": "workspace", + "iam": "iam", + "settings": "settings", + "files": "files", + "sharing": "sharing", + } + + if service, ok := serviceMap[packageName]; ok { + return service + } + return packageName +} + +func inferTypeDescription(typeName string) string { + // Generate reasonable descriptions based on type name patterns + if strings.HasSuffix(typeName, "Request") { + base := strings.TrimSuffix(typeName, "Request") + return fmt.Sprintf("Request parameters for %s operation.", toSentenceCase(base)) + } + if strings.HasSuffix(typeName, "Response") { + base := strings.TrimSuffix(typeName, "Response") + return fmt.Sprintf("Response from %s operation.", 
toSentenceCase(base)) + } + if strings.HasSuffix(typeName, "Settings") { + base := strings.TrimSuffix(typeName, "Settings") + return fmt.Sprintf("Configuration settings for %s.", toSentenceCase(base)) + } + if strings.HasSuffix(typeName, "Spec") { + base := strings.TrimSuffix(typeName, "Spec") + return fmt.Sprintf("Specification for %s.", toSentenceCase(base)) + } + return fmt.Sprintf("%s configuration.", toSentenceCase(typeName)) +} + +func inferFieldType(fieldName string) string { + // Infer type from common field name patterns + patterns := map[*regexp.Regexp]string{ + regexp.MustCompile(`(?i)_id$`): "string", + regexp.MustCompile(`(?i)_ids$`): "[]string", + regexp.MustCompile(`(?i)_time$`): "string (timestamp)", + regexp.MustCompile(`(?i)_at$`): "string (timestamp)", + regexp.MustCompile(`(?i)^is_`): "bool", + regexp.MustCompile(`(?i)^has_`): "bool", + regexp.MustCompile(`(?i)^enable`): "bool", + regexp.MustCompile(`(?i)_enabled$`): "bool", + regexp.MustCompile(`(?i)_count$`): "int", + regexp.MustCompile(`(?i)_size$`): "int", + regexp.MustCompile(`(?i)_minutes$`): "int", + regexp.MustCompile(`(?i)_seconds$`): "int", + regexp.MustCompile(`(?i)_name$`): "string", + regexp.MustCompile(`(?i)_path$`): "string", + regexp.MustCompile(`(?i)_url$`): "string", + regexp.MustCompile(`(?i)description`): "string", + regexp.MustCompile(`(?i)tags$`): "map[string]string", + } + + for pattern, typeName := range patterns { + if pattern.MatchString(fieldName) { + return typeName + } + } + + return "any" +} + +func toSentenceCase(s string) string { + // Convert CamelCase to sentence case + var result strings.Builder + for i, r := range s { + if i > 0 && r >= 'A' && r <= 'Z' { + result.WriteRune(' ') + } + result.WriteRune(r) + } + return strings.ToLower(result.String()) +} + +func addCoreServices(index *SDKDocsIndex) { + // Jobs service + index.Services["jobs"] = &ServiceDoc{ + Name: "Jobs", + Description: "The Jobs API allows you to create, edit, and delete jobs. 
Jobs are the primary unit of scheduled execution in Databricks.", + Package: "github.com/databricks/databricks-sdk-go/service/jobs", + Methods: map[string]*MethodDoc{ + "Create": { + Name: "Create", + Description: "Create a new job.", + Signature: "Create(ctx context.Context, request CreateJob) (*CreateResponse, error)", + Parameters: []ParamDoc{ + {Name: "request", Type: "CreateJob", Description: "Job creation parameters including name, tasks, and schedule", Required: true}, + }, + Returns: &ReturnDoc{Type: "*CreateResponse", Description: "Contains the job_id of the created job"}, + Example: "resp, err := w.Jobs.Create(ctx, jobs.CreateJob{\n Name: \"my-job\",\n Tasks: []jobs.Task{{TaskKey: \"main\", ...}},\n})", + }, + "List": { + Name: "List", + Description: "Retrieves a list of jobs.", + Signature: "List(ctx context.Context, request ListJobsRequest) listing.Iterator[BaseJob]", + Parameters: []ParamDoc{ + {Name: "request", Type: "ListJobsRequest", Description: "Filter and pagination parameters", Required: false}, + }, + Returns: &ReturnDoc{Type: "listing.Iterator[BaseJob]", Description: "Iterator over jobs matching the filter"}, + }, + "Get": { + Name: "Get", + Description: "Retrieves the details for a single job.", + Signature: "Get(ctx context.Context, request GetJobRequest) (*Job, error)", + Parameters: []ParamDoc{ + {Name: "request", Type: "GetJobRequest", Description: "Contains job_id to retrieve", Required: true}, + }, + Returns: &ReturnDoc{Type: "*Job", Description: "Full job details including settings and run history"}, + }, + "Delete": { + Name: "Delete", + Description: "Deletes a job.", + Signature: "Delete(ctx context.Context, request DeleteJob) error", + Parameters: []ParamDoc{ + {Name: "request", Type: "DeleteJob", Description: "Contains job_id to delete", Required: true}, + }, + }, + "RunNow": { + Name: "RunNow", + Description: "Triggers an immediate run of a job.", + Signature: "RunNow(ctx context.Context, request RunNow) (*RunNowResponse, error)", + Parameters: []ParamDoc{ + {Name: "request", Type: "RunNow", Description: "Job ID and optional parameters for the run", Required: true}, + }, + Returns: &ReturnDoc{Type: "*RunNowResponse", Description: "Contains run_id of the triggered run"}, + }, + }, + } + + // Clusters/Compute service + index.Services["compute"] = &ServiceDoc{ + Name: "Clusters", + Description: "The Clusters API allows you to create, start, edit, and terminate clusters. 
Clusters are managed cloud resources for running Spark workloads.", + Package: "github.com/databricks/databricks-sdk-go/service/compute", + Methods: map[string]*MethodDoc{ + "Create": { + Name: "Create", + Description: "Create a new Spark cluster.", + Signature: "Create(ctx context.Context, request CreateCluster) (*CreateClusterResponse, error)", + Parameters: []ParamDoc{ + {Name: "request", Type: "CreateCluster", Description: "Cluster configuration including node types, autoscaling, and Spark version", Required: true}, + }, + Returns: &ReturnDoc{Type: "*CreateClusterResponse", Description: "Contains cluster_id of the created cluster"}, + }, + "List": { + Name: "List", + Description: "Returns information about all clusters.", + Signature: "List(ctx context.Context, request ListClustersRequest) listing.Iterator[ClusterDetails]", + Returns: &ReturnDoc{Type: "listing.Iterator[ClusterDetails]", Description: "Iterator over cluster details"}, + }, + "Get": { + Name: "Get", + Description: "Retrieves the information for a cluster given its identifier.", + Signature: "Get(ctx context.Context, request GetClusterRequest) (*ClusterDetails, error)", + Parameters: []ParamDoc{ + {Name: "request", Type: "GetClusterRequest", Description: "Contains cluster_id", Required: true}, + }, + Returns: &ReturnDoc{Type: "*ClusterDetails", Description: "Full cluster configuration and state"}, + }, + "Start": { + Name: "Start", + Description: "Starts a terminated cluster.", + Signature: "Start(ctx context.Context, request StartCluster) error", + Parameters: []ParamDoc{ + {Name: "request", Type: "StartCluster", Description: "Contains cluster_id to start", Required: true}, + }, + }, + "Delete": { + Name: "Delete", + Description: "Permanently deletes a Spark cluster.", + Signature: "Delete(ctx context.Context, request DeleteCluster) error", + Parameters: []ParamDoc{ + {Name: "request", Type: "DeleteCluster", Description: "Contains cluster_id to delete", Required: true}, + }, + }, + }, + } + + // Pipelines service + index.Services["pipelines"] = &ServiceDoc{ + Name: "Pipelines", + Description: "The Delta Live Tables API allows you to create, edit, and run pipelines for data transformation and ingestion.", + Package: "github.com/databricks/databricks-sdk-go/service/pipelines", + Methods: map[string]*MethodDoc{ + "Create": { + Name: "Create", + Description: "Creates a new data processing pipeline.", + Signature: "Create(ctx context.Context, request CreatePipeline) (*CreatePipelineResponse, error)", + Parameters: []ParamDoc{ + {Name: "request", Type: "CreatePipeline", Description: "Pipeline configuration including clusters, libraries, and target", Required: true}, + }, + Returns: &ReturnDoc{Type: "*CreatePipelineResponse", Description: "Contains pipeline_id of the created pipeline"}, + }, + "List": { + Name: "List", + Description: "Lists pipelines defined in the workspace.", + Signature: "List(ctx context.Context, request ListPipelinesRequest) listing.Iterator[PipelineStateInfo]", + Returns: &ReturnDoc{Type: "listing.Iterator[PipelineStateInfo]", Description: "Iterator over pipeline info"}, + }, + "StartUpdate": { + Name: "StartUpdate", + Description: "Starts a new update for the pipeline.", + Signature: "StartUpdate(ctx context.Context, request StartUpdate) (*StartUpdateResponse, error)", + Parameters: []ParamDoc{ + {Name: "request", Type: "StartUpdate", Description: "Pipeline ID and update options", Required: true}, + }, + Returns: &ReturnDoc{Type: "*StartUpdateResponse", Description: "Contains update_id of the started 
update"}, + }, + }, + } + + // Catalog service + index.Services["catalog"] = &ServiceDoc{ + Name: "Catalog", + Description: "Unity Catalog APIs for managing catalogs, schemas, tables, and other data assets.", + Package: "github.com/databricks/databricks-sdk-go/service/catalog", + Methods: map[string]*MethodDoc{ + "ListCatalogs": { + Name: "ListCatalogs", + Description: "Lists all catalogs in the metastore.", + Signature: "List(ctx context.Context, request ListCatalogsRequest) listing.Iterator[CatalogInfo]", + Returns: &ReturnDoc{Type: "listing.Iterator[CatalogInfo]", Description: "Iterator over catalog information"}, + }, + "ListSchemas": { + Name: "ListSchemas", + Description: "Lists all schemas in a catalog.", + Signature: "List(ctx context.Context, request ListSchemasRequest) listing.Iterator[SchemaInfo]", + Parameters: []ParamDoc{ + {Name: "request", Type: "ListSchemasRequest", Description: "Contains catalog_name to list schemas from", Required: true}, + }, + Returns: &ReturnDoc{Type: "listing.Iterator[SchemaInfo]", Description: "Iterator over schema information"}, + }, + "ListTables": { + Name: "ListTables", + Description: "Lists all tables in a schema.", + Signature: "List(ctx context.Context, request ListTablesRequest) listing.Iterator[TableInfo]", + Parameters: []ParamDoc{ + {Name: "request", Type: "ListTablesRequest", Description: "Contains catalog_name and schema_name", Required: true}, + }, + Returns: &ReturnDoc{Type: "listing.Iterator[TableInfo]", Description: "Iterator over table information"}, + }, + }, + } + + // Apps service + index.Services["apps"] = &ServiceDoc{ + Name: "Apps", + Description: "Databricks Apps API for deploying and managing web applications on Databricks.", + Package: "github.com/databricks/databricks-sdk-go/service/apps", + Methods: map[string]*MethodDoc{ + "Create": { + Name: "Create", + Description: "Creates a new app.", + Signature: "Create(ctx context.Context, request CreateAppRequest) (*App, error)", + Parameters: []ParamDoc{ + {Name: "request", Type: "CreateAppRequest", Description: "App configuration including name and description", Required: true}, + }, + Returns: &ReturnDoc{Type: "*App", Description: "The created app details"}, + }, + "Deploy": { + Name: "Deploy", + Description: "Deploys an app to Databricks Apps.", + Signature: "Deploy(ctx context.Context, request CreateAppDeploymentRequest) (*AppDeployment, error)", + Parameters: []ParamDoc{ + {Name: "request", Type: "CreateAppDeploymentRequest", Description: "Deployment configuration", Required: true}, + }, + Returns: &ReturnDoc{Type: "*AppDeployment", Description: "Deployment status and details"}, + }, + "List": { + Name: "List", + Description: "Lists all apps in the workspace.", + Signature: "List(ctx context.Context, request ListAppsRequest) listing.Iterator[App]", + Returns: &ReturnDoc{Type: "listing.Iterator[App]", Description: "Iterator over apps"}, + }, + }, + } + + // SQL service + index.Services["sql"] = &ServiceDoc{ + Name: "SQL", + Description: "Databricks SQL APIs for managing warehouses, queries, and dashboards.", + Package: "github.com/databricks/databricks-sdk-go/service/sql", + Methods: map[string]*MethodDoc{ + "ExecuteStatement": { + Name: "ExecuteStatement", + Description: "Execute a SQL statement and return results.", + Signature: "ExecuteStatement(ctx context.Context, request ExecuteStatementRequest) (*ExecuteStatementResponse, error)", + Parameters: []ParamDoc{ + {Name: "request", Type: "ExecuteStatementRequest", Description: "SQL statement, warehouse ID, and execution 
options", Required: true}, + }, + Returns: &ReturnDoc{Type: "*ExecuteStatementResponse", Description: "Query results or statement ID for async execution"}, + }, + "ListWarehouses": { + Name: "ListWarehouses", + Description: "Lists all SQL warehouses.", + Signature: "List(ctx context.Context, request ListWarehousesRequest) listing.Iterator[EndpointInfo]", + Returns: &ReturnDoc{Type: "listing.Iterator[EndpointInfo]", Description: "Iterator over warehouse information"}, + }, + }, + } + + // Workspace service + index.Services["workspace"] = &ServiceDoc{ + Name: "Workspace", + Description: "Workspace API for managing notebooks, folders, and other workspace objects.", + Package: "github.com/databricks/databricks-sdk-go/service/workspace", + Methods: map[string]*MethodDoc{ + "List": { + Name: "List", + Description: "Lists the contents of a directory.", + Signature: "List(ctx context.Context, request ListWorkspaceRequest) listing.Iterator[ObjectInfo]", + Parameters: []ParamDoc{ + {Name: "request", Type: "ListWorkspaceRequest", Description: "Contains path to list", Required: true}, + }, + Returns: &ReturnDoc{Type: "listing.Iterator[ObjectInfo]", Description: "Iterator over workspace objects"}, + }, + "GetStatus": { + Name: "GetStatus", + Description: "Gets the status of a workspace object.", + Signature: "GetStatus(ctx context.Context, request GetStatusRequest) (*ObjectInfo, error)", + Parameters: []ParamDoc{ + {Name: "request", Type: "GetStatusRequest", Description: "Contains path to get status for", Required: true}, + }, + Returns: &ReturnDoc{Type: "*ObjectInfo", Description: "Object information including type and path"}, + }, + "Import": { + Name: "Import", + Description: "Imports a notebook or file into the workspace.", + Signature: "Import(ctx context.Context, request Import) error", + Parameters: []ParamDoc{ + {Name: "request", Type: "Import", Description: "Path, content, and format of the object to import", Required: true}, + }, + }, + }, + } + + // Add some common enums + index.Enums["jobs.RunLifeCycleState"] = &EnumDoc{ + Name: "RunLifeCycleState", + Package: "jobs", + Description: "The current state of the run lifecycle.", + Values: []string{"PENDING", "RUNNING", "TERMINATING", "TERMINATED", "SKIPPED", "INTERNAL_ERROR"}, + } + + index.Enums["compute.State"] = &EnumDoc{ + Name: "State", + Package: "compute", + Description: "The state of a cluster.", + Values: []string{"PENDING", "RUNNING", "RESTARTING", "RESIZING", "TERMINATING", "TERMINATED", "ERROR", "UNKNOWN"}, + } + + index.Enums["pipelines.PipelineState"] = &EnumDoc{ + Name: "PipelineState", + Package: "pipelines", + Description: "The state of a pipeline.", + Values: []string{"IDLE", "RUNNING", "STARTING", "STOPPING", "DELETED", "RECOVERING", "FAILED", "RESETTING"}, + } +} + +func writeIndex(index *SDKDocsIndex, path string) error { + // Ensure directory exists + if err := os.MkdirAll(filepath.Dir(path), 0755); err != nil { + return fmt.Errorf("failed to create output directory: %w", err) + } + + // Sort maps for deterministic output + sortIndex(index) + + // Marshal with indentation for readability + data, err := json.MarshalIndent(index, "", " ") + if err != nil { + return fmt.Errorf("failed to marshal index: %w", err) + } + + if err := os.WriteFile(path, data, 0644); err != nil { + return fmt.Errorf("failed to write index file: %w", err) + } + + return nil +} + +func sortIndex(index *SDKDocsIndex) { + // Sort service methods + for _, service := range index.Services { + // Methods are already in a map, which will be sorted by JSON 
marshaling + _ = service + } + + // Sort type fields + for _, typeDoc := range index.Types { + // Sort fields by converting to sorted slice would require changing structure + // For now, rely on JSON marshaling order + _ = typeDoc + } + + // Sort enum values + for _, enumDoc := range index.Enums { + sort.Strings(enumDoc.Values) + } +} From 142b8c92b17fea698adbc4e008a426dcae0309b5 Mon Sep 17 00:00:00 2001 From: Evgenii Kniazev Date: Mon, 12 Jan 2026 15:56:00 +0000 Subject: [PATCH 2/4] Add Claude Code skill for SDK documentation queries Adds a skill that helps Claude Code users discover and use the databricks_query_sdk_docs MCP tool effectively when asking about SDK methods, types, and parameters. Co-Authored-By: Claude Opus 4.5 --- .claude/skills/sdk-docs/SKILL.md | 62 +++++++++++ .claude/skills/sdk-docs/query-sdk-docs.sh | 129 ++++++++++++++++++++++ 2 files changed, 191 insertions(+) create mode 100644 .claude/skills/sdk-docs/SKILL.md create mode 100755 .claude/skills/sdk-docs/query-sdk-docs.sh diff --git a/.claude/skills/sdk-docs/SKILL.md b/.claude/skills/sdk-docs/SKILL.md new file mode 100644 index 0000000000..287055816a --- /dev/null +++ b/.claude/skills/sdk-docs/SKILL.md @@ -0,0 +1,62 @@ +--- +name: databricks-sdk-docs +description: Use this skill when the user asks about Databricks SDK methods, API signatures, parameter types, return types, or how to use specific Databricks APIs programmatically. Triggers on questions like "how do I create a job", "what parameters does X take", "SDK method for Y", or "JobSettings fields". +allowed-tools: mcp__databricks-mcp__databricks_query_sdk_docs +--- + +# Databricks SDK Documentation Skill + +When users ask about Databricks SDK usage, API methods, or type definitions, use the `databricks_query_sdk_docs` MCP tool to find accurate documentation. + +## When to Use This Skill + +- User asks "how do I create a job/cluster/pipeline using the SDK?" +- User needs method signatures: "what's the signature for Jobs.Create?" +- User asks about type fields: "what fields does CreateJob have?" +- User needs enum values: "what are the possible run lifecycle states?" +- User is confused about SDK API parameters or return types + +## How to Query + +Use the `databricks_query_sdk_docs` tool with these parameters: + +```json +{ + "query": "search terms", + "category": "methods|types|enums|services", // optional filter + "service": "jobs|clusters|pipelines|...", // optional filter + "limit": 10 // default 10, max 50 +} +``` + +## Example Queries + +| User Question | Tool Query | +|---------------|------------| +| "How do I create a job?" | `{"query": "create job", "category": "methods"}` | +| "What fields does JobSettings have?" | `{"query": "JobSettings", "category": "types"}` | +| "What are the run states?" | `{"query": "run lifecycle state", "category": "enums"}` | +| "List all jobs API methods" | `{"query": "jobs", "service": "jobs", "category": "methods"}` | + +## Response Guidelines + +After querying, provide: +1. The method signature with parameter types +2. A brief description of what the method does +3. Key parameters the user likely needs +4. A simple code example if applicable + +Keep responses focused on what the user asked - don't dump all documentation. 
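+
+For example, for "How do I trigger a run of an existing job?", query
+`{"query": "run now", "service": "jobs", "category": "methods"}` and answer with the
+indexed signature plus a short snippet in the style of the indexed examples
+(illustrative fragment only; `w` is a workspace client, and the exact request and
+response fields should be verified against the query results):
+
+```go
+// Indexed signature: RunNow(ctx context.Context, request RunNow) (*RunNowResponse, error)
+resp, err := w.Jobs.RunNow(ctx, jobs.RunNow{JobId: jobID})
+if err != nil {
+    return err
+}
+log.Printf("triggered run %d", resp.RunId)
+```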
+
+## CLI Fallback
+
+If MCP is unavailable, use the helper script:
+
+```bash
+# From the CLI repo root
+.claude/skills/sdk-docs/query-sdk-docs.sh "create job"
+.claude/skills/sdk-docs/query-sdk-docs.sh "JobSettings" types
+.claude/skills/sdk-docs/query-sdk-docs.sh "list" methods jobs 20
+```
+
+The script searches the embedded SDK docs index directly using `jq`.
diff --git a/.claude/skills/sdk-docs/query-sdk-docs.sh b/.claude/skills/sdk-docs/query-sdk-docs.sh
new file mode 100755
index 0000000000..dc36bb7d9d
--- /dev/null
+++ b/.claude/skills/sdk-docs/query-sdk-docs.sh
@@ -0,0 +1,129 @@
+#!/usr/bin/env bash
+#
+# Query Databricks SDK documentation from the command line.
+# Usage: ./query-sdk-docs.sh <query> [category] [service] [limit]
+#
+# Examples:
+#   ./query-sdk-docs.sh "create job"
+#   ./query-sdk-docs.sh "JobSettings" types
+#   ./query-sdk-docs.sh "list" methods jobs
+#   ./query-sdk-docs.sh "cluster" methods compute 20
+#
+# Categories: methods, types, enums, services
+# Services: jobs, clusters, pipelines, workspace, etc.
+
+set -euo pipefail
+
+QUERY="${1:-}"
+CATEGORY="${2:-}"
+SERVICE="${3:-}"
+LIMIT="${4:-10}"
+
+if [[ -z "$QUERY" ]]; then
+  echo "Usage: $0 <query> [category] [service] [limit]"
+  echo ""
+  echo "Examples:"
+  echo "  $0 'create job'              # Search for 'create job'"
+  echo "  $0 'JobSettings' types       # Search types for 'JobSettings'"
+  echo "  $0 'list' methods jobs       # Search jobs service methods for 'list'"
+  echo ""
+  echo "Categories: methods, types, enums, services"
+  exit 1
+fi
+
+# Build the JSON input for the MCP tool
+build_json_input() {
+  local json="{\"query\": \"$QUERY\""
+
+  if [[ -n "$CATEGORY" ]]; then
+    json+=", \"category\": \"$CATEGORY\""
+  fi
+
+  if [[ -n "$SERVICE" ]]; then
+    json+=", \"service\": \"$SERVICE\""
+  fi
+
+  json+=", \"limit\": $LIMIT}"
+  echo "$json"
+}
+
+# Try to find the SDK docs index file for direct search
+SDK_DOCS_INDEX="${SDK_DOCS_INDEX:-}"
+if [[ -z "$SDK_DOCS_INDEX" ]]; then
+  # Look for the index in common locations
+  SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
+  CLI_ROOT="$(cd "$SCRIPT_DIR/../../.."
&& pwd)" + + POSSIBLE_PATHS=( + "$CLI_ROOT/experimental/aitools/lib/providers/sdkdocs/sdk_docs_index.json" + "./sdk_docs_index.json" + ) + + for path in "${POSSIBLE_PATHS[@]}"; do + if [[ -f "$path" ]]; then + SDK_DOCS_INDEX="$path" + break + fi + done +fi + +# If we have jq and the index file, do a direct search +if command -v jq &>/dev/null && [[ -n "$SDK_DOCS_INDEX" && -f "$SDK_DOCS_INDEX" ]]; then + echo "Searching SDK docs for: $QUERY" + echo "---" + + QUERY_LOWER=$(echo "$QUERY" | tr '[:upper:]' '[:lower:]') + + # Search methods + if [[ -z "$CATEGORY" || "$CATEGORY" == "methods" ]]; then + echo "" + echo "## Methods" + jq -r --arg q "$QUERY_LOWER" --arg svc "$SERVICE" ' + .services | to_entries[] | + select($svc == "" or .key == $svc) | + .key as $service | + .value.methods // {} | to_entries[] | + select( + (.key | ascii_downcase | contains($q)) or + (.value.description // "" | ascii_downcase | contains($q)) + ) | + "- \($service).\(.key): \(.value.description // "No description")[signature: \(.value.signature // "N/A")]" + ' "$SDK_DOCS_INDEX" 2>/dev/null | head -n "$LIMIT" || echo " (no matches)" + fi + + # Search types + if [[ -z "$CATEGORY" || "$CATEGORY" == "types" ]]; then + echo "" + echo "## Types" + jq -r --arg q "$QUERY_LOWER" ' + .types // {} | to_entries[] | + select( + (.key | ascii_downcase | contains($q)) or + (.value.description // "" | ascii_downcase | contains($q)) + ) | + "- \(.key): \(.value.description // "No description")" + ' "$SDK_DOCS_INDEX" 2>/dev/null | head -n "$LIMIT" || echo " (no matches)" + fi + + # Search enums + if [[ -z "$CATEGORY" || "$CATEGORY" == "enums" ]]; then + echo "" + echo "## Enums" + jq -r --arg q "$QUERY_LOWER" ' + .enums // {} | to_entries[] | + select( + (.key | ascii_downcase | contains($q)) or + (.value.description // "" | ascii_downcase | contains($q)) + ) | + "- \(.key): \(.value.values // [] | join(", "))" + ' "$SDK_DOCS_INDEX" 2>/dev/null | head -n "$LIMIT" || echo " (no matches)" + fi +else + # Fallback: show how to use the MCP tool + echo "SDK docs index not found locally. Use the MCP tool instead:" + echo "" + echo "databricks_query_sdk_docs with input:" + build_json_input + echo "" + echo "Or set SDK_DOCS_INDEX environment variable to point to sdk_docs_index.json" +fi From 3a98348f5cc85d761eded3fb460cee6b402543d4 Mon Sep 17 00:00:00 2001 From: Evgenii Kniazev Date: Tue, 20 Jan 2026 11:59:28 +0000 Subject: [PATCH 3/4] Auto-generate SDK docs index from Go SDK using go/ast This commit enhances the SDK documentation generator to parse the actual Go SDK source code instead of using hardcoded service definitions. 
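The core of the new approach is sketched below (illustrative only: the file
path and the *Service interface-name suffix are assumptions, and the real
generator additionally extracts structs, enums, and request/response
metadata):

    package main

    import (
        "fmt"
        "go/ast"
        "go/parser"
        "go/token"
        "strings"
    )

    func main() {
        fset := token.NewFileSet()
        // Assumed path; the generator walks every SDK service package.
        f, err := parser.ParseFile(fset, "service/jobs/interface.go", nil, parser.ParseComments)
        if err != nil {
            panic(err)
        }
        ast.Inspect(f, func(n ast.Node) bool {
            ts, ok := n.(*ast.TypeSpec)
            if !ok {
                return true
            }
            iface, ok := ts.Type.(*ast.InterfaceType)
            // Assumption: service interfaces are named <Name>Service.
            if !ok || !strings.HasSuffix(ts.Name.Name, "Service") {
                return true
            }
            for _, m := range iface.Methods.List {
                if len(m.Names) == 0 {
                    continue // skip embedded interfaces
                }
                // Doc.Text() returns "" when a method has no doc comment.
                fmt.Printf("%s.%s: %s\n", ts.Name.Name, m.Names[0].Name, strings.TrimSpace(m.Doc.Text()))
            }
            return true
        })
    }
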
Changes: - Rewrote tools/gen_sdk_docs_index.go to parse SDK using go/ast - Extracts service interfaces, method signatures, and descriptions - Parses struct types and enums automatically - Added tools/verify_sdk_docs_index.py for CI staleness check - Added Makefile targets: sdk-docs-index and verify-sdk-docs-index Results: - Previous: 7 services, 277 types, 3 enums (mostly hardcoded) - Now: 11 services, 1302 types, 263 enums (auto-generated) Usage: - make sdk-docs-index # Regenerate index - make verify-sdk-docs-index # Check if index is up to date Co-Authored-By: Claude Opus 4.5 --- Makefile | 8 +- .../lib/providers/sdkdocs/sdk_docs_index.json | 50727 ++++++++++++++-- tools/gen_sdk_docs_index.go | 901 +- tools/verify_sdk_docs_index.py | 105 + 4 files changed, 45519 insertions(+), 6222 deletions(-) create mode 100755 tools/verify_sdk_docs_index.py diff --git a/Makefile b/Makefile index 0a57cee7d0..12036f7902 100644 --- a/Makefile +++ b/Makefile @@ -133,6 +133,12 @@ snapshot-release: schema: go run ./bundle/internal/schema ./bundle/internal/schema ./bundle/schema/jsonschema.json +sdk-docs-index: + go run ./tools/gen_sdk_docs_index.go + +verify-sdk-docs-index: + python3 ./tools/verify_sdk_docs_index.py + docs: go run ./bundle/docsgen ./bundle/internal/schema ./bundle/docsgen @@ -171,7 +177,7 @@ generate: $(GENKIT_BINARY) update-sdk -.PHONY: lint lintfull tidy lintcheck fmt fmtfull test test-unit test-acc test-slow test-slow-unit test-slow-acc cover showcover build snapshot snapshot-release schema integration integration-short acc-cover acc-showcover docs ws wsfix links checks test-update test-update-templates generate-out-test-toml test-update-aws test-update-all generate-validation +.PHONY: lint lintfull tidy lintcheck fmt fmtfull test test-unit test-acc test-slow test-slow-unit test-slow-acc cover showcover build snapshot snapshot-release schema sdk-docs-index verify-sdk-docs-index integration integration-short acc-cover acc-showcover docs ws wsfix links checks test-update test-update-templates generate-out-test-toml test-update-aws test-update-all generate-validation test-exp-aitools: make test TEST_PACKAGES="./experimental/aitools/..." ACCEPTANCE_TEST_FILTER="TestAccept/idontexistyet/aitools" diff --git a/experimental/aitools/lib/providers/sdkdocs/sdk_docs_index.json b/experimental/aitools/lib/providers/sdkdocs/sdk_docs_index.json index 140559415c..27486ec34e 100644 --- a/experimental/aitools/lib/providers/sdkdocs/sdk_docs_index.json +++ b/experimental/aitools/lib/providers/sdkdocs/sdk_docs_index.json @@ -1,10 +1,11 @@ { "version": "1.0", - "generated_at": "2026-01-12T14:13:07Z", + "generated_at": "2026-01-20T09:44:28Z", + "sdk_version": "v0.96.0", "services": { "apps": { "name": "Apps", - "description": "Databricks Apps API for deploying and managing web applications on Databricks.", + "description": "Apps run directly on a customer’s Databricks instance, integrate with their\ndata, use and extend Databricks services, and enable users to interact\nthrough single sign-on.", "package": "github.com/databricks/databricks-sdk-go/service/apps", "methods": { "Create": { @@ -15,2997 +16,3763 @@ { "name": "request", "type": "CreateAppRequest", - "description": "App configuration including name and description", + "description": "", "required": true } ], "returns": { "type": "*App", - "description": "The created app details" + "description": "" + } + }, + "CreateUpdate": { + "name": "CreateUpdate", + "description": "Creates an app update and starts the update process. 
The update process\nis asynchronous and the status of the update can be checked with the\nGetAppUpdate method.", + "signature": "CreateUpdate(ctx context.Context, request AsyncUpdateAppRequest) (*AppUpdate, error)", + "parameters": [ + { + "name": "request", + "type": "AsyncUpdateAppRequest", + "description": "", + "required": true + } + ], + "returns": { + "type": "*AppUpdate", + "description": "" + } + }, + "Delete": { + "name": "Delete", + "description": "Deletes an app.", + "signature": "Delete(ctx context.Context, request DeleteAppRequest) (*App, error)", + "parameters": [ + { + "name": "request", + "type": "DeleteAppRequest", + "description": "", + "required": true + } + ], + "returns": { + "type": "*App", + "description": "" } }, "Deploy": { "name": "Deploy", - "description": "Deploys an app to Databricks Apps.", + "description": "Creates an app deployment for the app with the supplied name.", "signature": "Deploy(ctx context.Context, request CreateAppDeploymentRequest) (*AppDeployment, error)", "parameters": [ { "name": "request", "type": "CreateAppDeploymentRequest", - "description": "Deployment configuration", + "description": "", + "required": true + } + ], + "returns": { + "type": "*AppDeployment", + "description": "" + } + }, + "Get": { + "name": "Get", + "description": "Retrieves information for the app with the supplied name.", + "signature": "Get(ctx context.Context, request GetAppRequest) (*App, error)", + "parameters": [ + { + "name": "request", + "type": "GetAppRequest", + "description": "", + "required": true + } + ], + "returns": { + "type": "*App", + "description": "" + } + }, + "GetDeployment": { + "name": "GetDeployment", + "description": "Retrieves information for the app deployment with the supplied name and\ndeployment id.", + "signature": "GetDeployment(ctx context.Context, request GetAppDeploymentRequest) (*AppDeployment, error)", + "parameters": [ + { + "name": "request", + "type": "GetAppDeploymentRequest", + "description": "", "required": true } ], "returns": { "type": "*AppDeployment", - "description": "Deployment status and details" + "description": "" + } + }, + "GetPermissionLevels": { + "name": "GetPermissionLevels", + "description": "Gets the permission levels that a user can have on an object.", + "signature": "GetPermissionLevels(ctx context.Context, request GetAppPermissionLevelsRequest) (*GetAppPermissionLevelsResponse, error)", + "parameters": [ + { + "name": "request", + "type": "GetAppPermissionLevelsRequest", + "description": "", + "required": true + } + ], + "returns": { + "type": "*GetAppPermissionLevelsResponse", + "description": "" + } + }, + "GetPermissions": { + "name": "GetPermissions", + "description": "Gets the permissions of an app. 
Apps can inherit permissions from their\nroot object.", + "signature": "GetPermissions(ctx context.Context, request GetAppPermissionsRequest) (*AppPermissions, error)", + "parameters": [ + { + "name": "request", + "type": "GetAppPermissionsRequest", + "description": "", + "required": true + } + ], + "returns": { + "type": "*AppPermissions", + "description": "" + } + }, + "GetUpdate": { + "name": "GetUpdate", + "description": "Gets the status of an app update.", + "signature": "GetUpdate(ctx context.Context, request GetAppUpdateRequest) (*AppUpdate, error)", + "parameters": [ + { + "name": "request", + "type": "GetAppUpdateRequest", + "description": "", + "required": true + } + ], + "returns": { + "type": "*AppUpdate", + "description": "" } }, "List": { "name": "List", "description": "Lists all apps in the workspace.", - "signature": "List(ctx context.Context, request ListAppsRequest) listing.Iterator[App]", - "parameters": null, + "signature": "List(ctx context.Context, request ListAppsRequest) (*ListAppsResponse, error)", + "parameters": [ + { + "name": "request", + "type": "ListAppsRequest", + "description": "", + "required": true + } + ], "returns": { - "type": "listing.Iterator[App]", - "description": "Iterator over apps" + "type": "*ListAppsResponse", + "description": "" } - } - } - }, - "catalog": { - "name": "Catalog", - "description": "Unity Catalog APIs for managing catalogs, schemas, tables, and other data assets.", - "package": "github.com/databricks/databricks-sdk-go/service/catalog", - "methods": { - "ListCatalogs": { - "name": "ListCatalogs", - "description": "Lists all catalogs in the metastore.", - "signature": "List(ctx context.Context, request ListCatalogsRequest) listing.Iterator[CatalogInfo]", - "parameters": null, + }, + "ListDeployments": { + "name": "ListDeployments", + "description": "Lists all app deployments for the app with the supplied name.", + "signature": "ListDeployments(ctx context.Context, request ListAppDeploymentsRequest) (*ListAppDeploymentsResponse, error)", + "parameters": [ + { + "name": "request", + "type": "ListAppDeploymentsRequest", + "description": "", + "required": true + } + ], + "returns": { + "type": "*ListAppDeploymentsResponse", + "description": "" + } + }, + "SetPermissions": { + "name": "SetPermissions", + "description": "Sets permissions on an object, replacing existing permissions if they\nexist. Deletes all direct permissions if none are specified. 
Objects can\ninherit permissions from their root object.", + "signature": "SetPermissions(ctx context.Context, request AppPermissionsRequest) (*AppPermissions, error)", + "parameters": [ + { + "name": "request", + "type": "AppPermissionsRequest", + "description": "", + "required": true + } + ], + "returns": { + "type": "*AppPermissions", + "description": "" + } + }, + "Start": { + "name": "Start", + "description": "Start the last active deployment of the app in the workspace.", + "signature": "Start(ctx context.Context, request StartAppRequest) (*App, error)", + "parameters": [ + { + "name": "request", + "type": "StartAppRequest", + "description": "", + "required": true + } + ], + "returns": { + "type": "*App", + "description": "" + } + }, + "Stop": { + "name": "Stop", + "description": "Stops the active deployment of the app in the workspace.", + "signature": "Stop(ctx context.Context, request StopAppRequest) (*App, error)", + "parameters": [ + { + "name": "request", + "type": "StopAppRequest", + "description": "", + "required": true + } + ], "returns": { - "type": "listing.Iterator[CatalogInfo]", - "description": "Iterator over catalog information" + "type": "*App", + "description": "" } }, - "ListSchemas": { - "name": "ListSchemas", - "description": "Lists all schemas in a catalog.", - "signature": "List(ctx context.Context, request ListSchemasRequest) listing.Iterator[SchemaInfo]", + "Update": { + "name": "Update", + "description": "Updates the app with the supplied name.", + "signature": "Update(ctx context.Context, request UpdateAppRequest) (*App, error)", "parameters": [ { "name": "request", - "type": "ListSchemasRequest", - "description": "Contains catalog_name to list schemas from", + "type": "UpdateAppRequest", + "description": "", "required": true } ], "returns": { - "type": "listing.Iterator[SchemaInfo]", - "description": "Iterator over schema information" + "type": "*App", + "description": "" } }, - "ListTables": { - "name": "ListTables", - "description": "Lists all tables in a schema.", - "signature": "List(ctx context.Context, request ListTablesRequest) listing.Iterator[TableInfo]", + "UpdatePermissions": { + "name": "UpdatePermissions", + "description": "Updates the permissions on an app. Apps can inherit permissions from\ntheir root object.", + "signature": "UpdatePermissions(ctx context.Context, request AppPermissionsRequest) (*AppPermissions, error)", "parameters": [ { "name": "request", - "type": "ListTablesRequest", - "description": "Contains catalog_name and schema_name", + "type": "AppPermissionsRequest", + "description": "", "required": true } ], "returns": { - "type": "listing.Iterator[TableInfo]", - "description": "Iterator over table information" + "type": "*AppPermissions", + "description": "" } } } }, - "compute": { - "name": "Clusters", - "description": "The Clusters API allows you to create, start, edit, and terminate clusters. 
Clusters are managed cloud resources for running Spark workloads.", - "package": "github.com/databricks/databricks-sdk-go/service/compute", + "catalog": { + "name": "AccountMetastoreAssignments", + "description": "These APIs manage metastore assignments to a workspace.", + "package": "github.com/databricks/databricks-sdk-go/service/catalog", "methods": { "Create": { "name": "Create", - "description": "Create a new Spark cluster.", - "signature": "Create(ctx context.Context, request CreateCluster) (*CreateClusterResponse, error)", + "description": "Creates an assignment to a metastore for a workspace", + "signature": "Create(ctx context.Context, request AccountsCreateMetastoreAssignment) (*AccountsCreateMetastoreAssignmentResponse, error)", "parameters": [ { "name": "request", - "type": "CreateCluster", - "description": "Cluster configuration including node types, autoscaling, and Spark version", + "type": "AccountsCreateMetastoreAssignment", + "description": "", "required": true } ], "returns": { - "type": "*CreateClusterResponse", - "description": "Contains cluster_id of the created cluster" + "type": "*AccountsCreateMetastoreAssignmentResponse", + "description": "" } }, "Delete": { "name": "Delete", - "description": "Permanently deletes a Spark cluster.", - "signature": "Delete(ctx context.Context, request DeleteCluster) error", + "description": "Deletes a metastore assignment to a workspace, leaving the workspace with\nno metastore.", + "signature": "Delete(ctx context.Context, request DeleteAccountMetastoreAssignmentRequest) (*AccountsDeleteMetastoreAssignmentResponse, error)", "parameters": [ { "name": "request", - "type": "DeleteCluster", - "description": "Contains cluster_id to delete", + "type": "DeleteAccountMetastoreAssignmentRequest", + "description": "", "required": true } - ] + ], + "returns": { + "type": "*AccountsDeleteMetastoreAssignmentResponse", + "description": "" + } }, "Get": { "name": "Get", - "description": "Retrieves the information for a cluster given its identifier.", - "signature": "Get(ctx context.Context, request GetClusterRequest) (*ClusterDetails, error)", + "description": "Gets the metastore assignment, if any, for the workspace specified by ID.\nIf the workspace is assigned a metastore, the mapping will be returned.\nIf no metastore is assigned to the workspace, the assignment will not be\nfound and a 404 returned.", + "signature": "Get(ctx context.Context, request GetAccountMetastoreAssignmentRequest) (*AccountsMetastoreAssignment, error)", "parameters": [ { "name": "request", - "type": "GetClusterRequest", - "description": "Contains cluster_id", + "type": "GetAccountMetastoreAssignmentRequest", + "description": "", "required": true } ], "returns": { - "type": "*ClusterDetails", - "description": "Full cluster configuration and state" + "type": "*AccountsMetastoreAssignment", + "description": "" } }, "List": { "name": "List", - "description": "Returns information about all clusters.", - "signature": "List(ctx context.Context, request ListClustersRequest) listing.Iterator[ClusterDetails]", - "parameters": null, + "description": "Gets a list of all Databricks workspace IDs that have been assigned to\ngiven metastore.", + "signature": "List(ctx context.Context, request ListAccountMetastoreAssignmentsRequest) (*ListAccountMetastoreAssignmentsResponse, error)", + "parameters": [ + { + "name": "request", + "type": "ListAccountMetastoreAssignmentsRequest", + "description": "", + "required": true + } + ], "returns": { - "type": "listing.Iterator[ClusterDetails]", - 
"description": "Iterator over cluster details" + "type": "*ListAccountMetastoreAssignmentsResponse", + "description": "" } }, - "Start": { - "name": "Start", - "description": "Starts a terminated cluster.", - "signature": "Start(ctx context.Context, request StartCluster) error", + "Update": { + "name": "Update", + "description": "Updates an assignment to a metastore for a workspace. Currently, only the\ndefault catalog may be updated.", + "signature": "Update(ctx context.Context, request AccountsUpdateMetastoreAssignment) (*AccountsUpdateMetastoreAssignmentResponse, error)", "parameters": [ { "name": "request", - "type": "StartCluster", - "description": "Contains cluster_id to start", + "type": "AccountsUpdateMetastoreAssignment", + "description": "", "required": true } - ] + ], + "returns": { + "type": "*AccountsUpdateMetastoreAssignmentResponse", + "description": "" + } } } }, - "jobs": { - "name": "Jobs", - "description": "The Jobs API allows you to create, edit, and delete jobs. Jobs are the primary unit of scheduled execution in Databricks.", - "package": "github.com/databricks/databricks-sdk-go/service/jobs", + "compute": { + "name": "ClusterPolicies", + "description": "You can use cluster policies to control users' ability to configure clusters\nbased on a set of rules. These rules specify which attributes or attribute\nvalues can be used during cluster creation. Cluster policies have ACLs that\nlimit their use to specific users and groups.\n\nWith cluster policies, you can: - Auto-install cluster libraries on the next\nrestart by listing them in the policy's \"libraries\" field (Public Preview). -\nLimit users to creating clusters with the prescribed settings. - Si...", + "package": "github.com/databricks/databricks-sdk-go/service/compute", "methods": { "Create": { "name": "Create", - "description": "Create a new job.", - "signature": "Create(ctx context.Context, request CreateJob) (*CreateResponse, error)", + "description": "Creates a new policy with prescribed settings.", + "signature": "Create(ctx context.Context, request CreatePolicy) (*CreatePolicyResponse, error)", "parameters": [ { "name": "request", - "type": "CreateJob", - "description": "Job creation parameters including name, tasks, and schedule", + "type": "CreatePolicy", + "description": "", "required": true } ], "returns": { - "type": "*CreateResponse", - "description": "Contains the job_id of the created job" - }, - "example": "resp, err := w.Jobs.Create(ctx, jobs.CreateJob{\n Name: \"my-job\",\n Tasks: []jobs.Task{{TaskKey: \"main\", ...}},\n})" + "type": "*CreatePolicyResponse", + "description": "" + } }, "Delete": { "name": "Delete", - "description": "Deletes a job.", - "signature": "Delete(ctx context.Context, request DeleteJob) error", + "description": "Delete a policy for a cluster. Clusters governed by this policy can still\nrun, but cannot be edited.", + "signature": "Delete(ctx context.Context, request DeletePolicy) error", "parameters": [ { "name": "request", - "type": "DeleteJob", - "description": "Contains job_id to delete", + "type": "DeletePolicy", + "description": "", + "required": true + } + ] + }, + "Edit": { + "name": "Edit", + "description": "Update an existing policy for cluster. 
This operation may make some\nclusters governed by the previous policy invalid.", + "signature": "Edit(ctx context.Context, request EditPolicy) error", + "parameters": [ + { + "name": "request", + "type": "EditPolicy", + "description": "", "required": true } ] }, "Get": { "name": "Get", - "description": "Retrieves the details for a single job.", - "signature": "Get(ctx context.Context, request GetJobRequest) (*Job, error)", + "description": "Get a cluster policy entity. Creation and editing is available to admins\nonly.", + "signature": "Get(ctx context.Context, request GetClusterPolicyRequest) (*Policy, error)", "parameters": [ { "name": "request", - "type": "GetJobRequest", - "description": "Contains job_id to retrieve", + "type": "GetClusterPolicyRequest", + "description": "", "required": true } ], "returns": { - "type": "*Job", - "description": "Full job details including settings and run history" + "type": "*Policy", + "description": "" } }, - "List": { - "name": "List", - "description": "Retrieves a list of jobs.", - "signature": "List(ctx context.Context, request ListJobsRequest) listing.Iterator[BaseJob]", + "GetPermissionLevels": { + "name": "GetPermissionLevels", + "description": "Gets the permission levels that a user can have on an object.", + "signature": "GetPermissionLevels(ctx context.Context, request GetClusterPolicyPermissionLevelsRequest) (*GetClusterPolicyPermissionLevelsResponse, error)", "parameters": [ { "name": "request", - "type": "ListJobsRequest", - "description": "Filter and pagination parameters", - "required": false + "type": "GetClusterPolicyPermissionLevelsRequest", + "description": "", + "required": true } ], "returns": { - "type": "listing.Iterator[BaseJob]", - "description": "Iterator over jobs matching the filter" + "type": "*GetClusterPolicyPermissionLevelsResponse", + "description": "" } }, - "RunNow": { - "name": "RunNow", - "description": "Triggers an immediate run of a job.", - "signature": "RunNow(ctx context.Context, request RunNow) (*RunNowResponse, error)", + "GetPermissions": { + "name": "GetPermissions", + "description": "Gets the permissions of a cluster policy. 
Cluster policies can inherit\npermissions from their root object.", + "signature": "GetPermissions(ctx context.Context, request GetClusterPolicyPermissionsRequest) (*ClusterPolicyPermissions, error)", "parameters": [ { "name": "request", - "type": "RunNow", - "description": "Job ID and optional parameters for the run", + "type": "GetClusterPolicyPermissionsRequest", + "description": "", "required": true } ], "returns": { - "type": "*RunNowResponse", - "description": "Contains run_id of the triggered run" + "type": "*ClusterPolicyPermissions", + "description": "" } - } - } - }, - "pipelines": { - "name": "Pipelines", - "description": "The Delta Live Tables API allows you to create, edit, and run pipelines for data transformation and ingestion.", - "package": "github.com/databricks/databricks-sdk-go/service/pipelines", - "methods": { - "Create": { - "name": "Create", - "description": "Creates a new data processing pipeline.", - "signature": "Create(ctx context.Context, request CreatePipeline) (*CreatePipelineResponse, error)", + }, + "List": { + "name": "List", + "description": "Returns a list of policies accessible by the requesting user.", + "signature": "List(ctx context.Context, request ListClusterPoliciesRequest) (*ListPoliciesResponse, error)", "parameters": [ { "name": "request", - "type": "CreatePipeline", - "description": "Pipeline configuration including clusters, libraries, and target", + "type": "ListClusterPoliciesRequest", + "description": "", "required": true } ], "returns": { - "type": "*CreatePipelineResponse", - "description": "Contains pipeline_id of the created pipeline" + "type": "*ListPoliciesResponse", + "description": "" } }, - "List": { - "name": "List", - "description": "Lists pipelines defined in the workspace.", - "signature": "List(ctx context.Context, request ListPipelinesRequest) listing.Iterator[PipelineStateInfo]", - "parameters": null, + "SetPermissions": { + "name": "SetPermissions", + "description": "Sets permissions on an object, replacing existing permissions if they\nexist. Deletes all direct permissions if none are specified. Objects can\ninherit permissions from their root object.", + "signature": "SetPermissions(ctx context.Context, request ClusterPolicyPermissionsRequest) (*ClusterPolicyPermissions, error)", + "parameters": [ + { + "name": "request", + "type": "ClusterPolicyPermissionsRequest", + "description": "", + "required": true + } + ], "returns": { - "type": "listing.Iterator[PipelineStateInfo]", - "description": "Iterator over pipeline info" + "type": "*ClusterPolicyPermissions", + "description": "" } }, - "StartUpdate": { - "name": "StartUpdate", - "description": "Starts a new update for the pipeline.", - "signature": "StartUpdate(ctx context.Context, request StartUpdate) (*StartUpdateResponse, error)", + "UpdatePermissions": { + "name": "UpdatePermissions", + "description": "Updates the permissions on a cluster policy. 
Cluster policies can inherit\npermissions from their root object.", + "signature": "UpdatePermissions(ctx context.Context, request ClusterPolicyPermissionsRequest) (*ClusterPolicyPermissions, error)", "parameters": [ { "name": "request", - "type": "StartUpdate", - "description": "Pipeline ID and update options", + "type": "ClusterPolicyPermissionsRequest", + "description": "", "required": true } ], "returns": { - "type": "*StartUpdateResponse", - "description": "Contains update_id of the started update" + "type": "*ClusterPolicyPermissions", + "description": "" } } } }, - "sql": { - "name": "SQL", - "description": "Databricks SQL APIs for managing warehouses, queries, and dashboards.", - "package": "github.com/databricks/databricks-sdk-go/service/sql", + "files": { + "name": "Dbfs", + "description": "DBFS API makes it simple to interact with various data sources without having\nto include a users credentials every time to read a file.", + "package": "github.com/databricks/databricks-sdk-go/service/files", "methods": { - "ExecuteStatement": { - "name": "ExecuteStatement", - "description": "Execute a SQL statement and return results.", - "signature": "ExecuteStatement(ctx context.Context, request ExecuteStatementRequest) (*ExecuteStatementResponse, error)", + "AddBlock": { + "name": "AddBlock", + "description": "Appends a block of data to the stream specified by the input handle. If\nthe handle does not exist, this call will throw an exception with\n``RESOURCE_DOES_NOT_EXIST``.\n\nIf the block of data exceeds 1 MB, this call will throw an exception with\n``MAX_BLOCK_SIZE_EXCEEDED``.", + "signature": "AddBlock(ctx context.Context, request AddBlock) error", "parameters": [ { "name": "request", - "type": "ExecuteStatementRequest", - "description": "SQL statement, warehouse ID, and execution options", + "type": "AddBlock", + "description": "", "required": true } - ], - "returns": { - "type": "*ExecuteStatementResponse", - "description": "Query results or statement ID for async execution" - } + ] }, - "ListWarehouses": { - "name": "ListWarehouses", - "description": "Lists all SQL warehouses.", - "signature": "List(ctx context.Context, request ListWarehousesRequest) listing.Iterator[EndpointInfo]", - "parameters": null, - "returns": { - "type": "listing.Iterator[EndpointInfo]", - "description": "Iterator over warehouse information" - } - } - } - }, - "workspace": { - "name": "Workspace", - "description": "Workspace API for managing notebooks, folders, and other workspace objects.", - "package": "github.com/databricks/databricks-sdk-go/service/workspace", - "methods": { - "GetStatus": { - "name": "GetStatus", - "description": "Gets the status of a workspace object.", - "signature": "GetStatus(ctx context.Context, request GetStatusRequest) (*ObjectInfo, error)", + "Close": { + "name": "Close", + "description": "Closes the stream specified by the input handle. If the handle does not\nexist, this call throws an exception with ``RESOURCE_DOES_NOT_EXIST``.", + "signature": "Close(ctx context.Context, request Close) error", "parameters": [ { "name": "request", - "type": "GetStatusRequest", - "description": "Contains path to get status for", + "type": "Close", + "description": "", + "required": true + } + ] + }, + "Create": { + "name": "Create", + "description": "Opens a stream to write to a file and returns a handle to this stream.\nThere is a 10 minute idle timeout on this handle. 
If a file or directory\nalready exists on the given path and __overwrite__ is set to false, this\ncall will throw an exception with ``RESOURCE_ALREADY_EXISTS``.\n\nA typical workflow for file upload would be:\n\n1. Issue a ``create`` call and get a handle. 2. Issue one or more\n``add-block`` calls with the handle you have. 3. Issue a ``close`` call\nwith the handle you have.", + "signature": "Create(ctx context.Context, request Create) (*CreateResponse, error)", + "parameters": [ + { + "name": "request", + "type": "Create", + "description": "", "required": true } ], "returns": { - "type": "*ObjectInfo", - "description": "Object information including type and path" + "type": "*CreateResponse", + "description": "" } }, - "Import": { - "name": "Import", - "description": "Imports a notebook or file into the workspace.", - "signature": "Import(ctx context.Context, request Import) error", + "Delete": { + "name": "Delete", + "description": "Delete the file or directory (optionally recursively delete all files in\nthe directory). This call throws an exception with `IO_ERROR` if the path\nis a non-empty directory and `recursive` is set to `false` or on other\nsimilar errors.\n\nWhen you delete a large number of files, the delete operation is done in\nincrements. The call returns a response after approximately 45 seconds\nwith an error message (503 Service Unavailable) asking you to re-invoke\nthe delete operation until the directory struc...", + "signature": "Delete(ctx context.Context, request Delete) error", "parameters": [ { "name": "request", - "type": "Import", - "description": "Path, content, and format of the object to import", + "type": "Delete", + "description": "", "required": true } ] }, + "GetStatus": { + "name": "GetStatus", + "description": "Gets the file information for a file or directory. If the file or\ndirectory does not exist, this call throws an exception with\n`RESOURCE_DOES_NOT_EXIST`.", + "signature": "GetStatus(ctx context.Context, request GetStatusRequest) (*FileInfo, error)", + "parameters": [ + { + "name": "request", + "type": "GetStatusRequest", + "description": "", + "required": true + } + ], + "returns": { + "type": "*FileInfo", + "description": "" + } + }, "List": { "name": "List", - "description": "Lists the contents of a directory.", - "signature": "List(ctx context.Context, request ListWorkspaceRequest) listing.Iterator[ObjectInfo]", + "description": "List the contents of a directory, or details of the file. If the file or\ndirectory does not exist, this call throws an exception with\n`RESOURCE_DOES_NOT_EXIST`.\n\nWhen calling list on a large directory, the list operation will time out\nafter approximately 60 seconds. We strongly recommend using list only on\ndirectories containing less than 10K files and discourage using the DBFS\nREST API for operations that list more than 10K files. 
Instead, we\nrecommend that you perform such operations in the...", + "signature": "List(ctx context.Context, request ListDbfsRequest) (*ListStatusResponse, error)", "parameters": [ { "name": "request", - "type": "ListWorkspaceRequest", - "description": "Contains path to list", + "type": "ListDbfsRequest", + "description": "", "required": true } ], "returns": { - "type": "listing.Iterator[ObjectInfo]", - "description": "Iterator over workspace objects" + "type": "*ListStatusResponse", + "description": "" } - } - } - } - }, - "types": { - "apps.AppDeployment": { - "name": "AppDeployment", - "package": "apps", - "description": "app deployment configuration.", - "fields": { - "command": { - "name": "command", - "type": "any", - "description": "The command with which to run the app. This will override the command specified in the app.yaml file.", - "required": false }, - "create_time": { - "name": "create_time", - "type": "string (timestamp)", - "description": "The creation time of the deployment. Formatted timestamp in ISO 6801.", - "required": false, - "output_only": true - }, - "creator": { - "name": "creator", - "type": "any", - "description": "The email of the user creates the deployment.", - "required": false, - "output_only": true - }, - "deployment_artifacts": { - "name": "deployment_artifacts", - "type": "any", - "description": "The deployment artifacts for an app.", - "required": false, - "output_only": true - }, - "deployment_id": { - "name": "deployment_id", - "type": "string", - "description": "The unique id of the deployment.", - "required": false - }, - "env_vars": { - "name": "env_vars", - "type": "any", - "description": "The environment variables to set in the app runtime environment. This will override the environment variables specified in the app.yaml file.", - "required": false - }, - "git_source": { - "name": "git_source", - "type": "any", - "description": "Git repository to use as the source for the app deployment.", - "required": false - }, - "mode": { - "name": "mode", - "type": "any", - "description": "The mode of which the deployment will manage the source code.", - "required": false + "Mkdirs": { + "name": "Mkdirs", + "description": "Creates the given directory and necessary parent directories if they do\nnot exist. If a file (not a directory) exists at any prefix of the input\npath, this call throws an exception with `RESOURCE_ALREADY_EXISTS`.\n**Note**: If this operation fails, it might have succeeded in creating\nsome of the necessary parent directories.", + "signature": "Mkdirs(ctx context.Context, request MkDirs) error", + "parameters": [ + { + "name": "request", + "type": "MkDirs", + "description": "", + "required": true + } + ] }, - "source_code_path": { - "name": "source_code_path", - "type": "string", - "description": "The workspace file system path of the source code used to create the app deployment. This is different from\n`deployment_artifacts.source_code_path`, which is the path used by the deployed app. The former refers\nto the original source code location of the app in the workspace during deployment creation, whereas\nthe latter provides a system generated stable snapshotted source code path used by the deployment.", - "required": false + "Move": { + "name": "Move", + "description": "Moves a file from one location to another location within DBFS. If the\nsource file does not exist, this call throws an exception with\n`RESOURCE_DOES_NOT_EXIST`. 
If a file already exists in the destination\npath, this call throws an exception with `RESOURCE_ALREADY_EXISTS`. If\nthe given source path is a directory, this call always recursively moves\nall files.", + "signature": "Move(ctx context.Context, request Move) error", + "parameters": [ + { + "name": "request", + "type": "Move", + "description": "", + "required": true + } + ] }, - "status": { - "name": "status", - "type": "any", - "description": "Status and status message of the deployment", - "required": false, - "output_only": true + "Put": { + "name": "Put", + "description": "Uploads a file through the use of multipart form post. It is mainly used\nfor streaming uploads, but can also be used as a convenient single call\nfor data upload.\n\nAlternatively you can pass contents as base64 string.\n\nThe amount of data that can be passed (when not streaming) using the\n__contents__ parameter is limited to 1 MB. `MAX_BLOCK_SIZE_EXCEEDED` will\nbe thrown if this limit is exceeded.\n\nIf you want to upload large files, use the streaming upload. For details,\nsee :method:dbfs/create,...", + "signature": "Put(ctx context.Context, request Put) error", + "parameters": [ + { + "name": "request", + "type": "Put", + "description": "", + "required": true + } + ] }, - "update_time": { - "name": "update_time", - "type": "string (timestamp)", - "description": "The update time of the deployment. Formatted timestamp in ISO 6801.", - "required": false, - "output_only": true + "Read": { + "name": "Read", + "description": "Returns the contents of a file. If the file does not exist, this call\nthrows an exception with `RESOURCE_DOES_NOT_EXIST`. If the path is a\ndirectory, the read length is negative, or if the offset is negative,\nthis call throws an exception with `INVALID_PARAMETER_VALUE`. 
If the read\nlength exceeds 1 MB, this call throws an exception with\n`MAX_READ_SIZE_EXCEEDED`.\n\nIf `offset + length` exceeds the number of bytes in a file, it reads the\ncontents until the end of file.", + "signature": "Read(ctx context.Context, request ReadDbfsRequest) (*ReadResponse, error)", + "parameters": [ + { + "name": "request", + "type": "ReadDbfsRequest", + "description": "", + "required": true + } + ], + "returns": { + "type": "*ReadResponse", + "description": "" + } } } }, - "apps.AppDeploymentArtifacts": { - "name": "AppDeploymentArtifacts", - "package": "apps", - "description": "app deployment artifacts configuration.", - "fields": { - "source_code_path": { - "name": "source_code_path", - "type": "string", - "description": "The snapshotted workspace file system path of the source code loaded by the deployed app.", - "required": false + "iam": { + "name": "AccessControl", + "description": "Rule based Access Control for Databricks Resources.", + "package": "github.com/databricks/databricks-sdk-go/service/iam", + "methods": { + "CheckPolicy": { + "name": "CheckPolicy", + "description": "Check access policy to a resource.", + "signature": "CheckPolicy(ctx context.Context, request CheckPolicyRequest) (*CheckPolicyResponse, error)", + "parameters": [ + { + "name": "request", + "type": "CheckPolicyRequest", + "description": "", + "required": true + } + ], + "returns": { + "type": "*CheckPolicyResponse", + "description": "" + } } } }, - "apps.AppDeploymentMode": { - "name": "AppDeploymentMode", - "package": "apps", - "description": "app deployment mode configuration.", - "fields": {} - }, - "apps.AppDeploymentState": { - "name": "AppDeploymentState", - "package": "apps", - "description": "app deployment state configuration.", - "fields": {} - }, - "apps.AppDeploymentStatus": { - "name": "AppDeploymentStatus", - "package": "apps", - "description": "app deployment status configuration.", - "fields": { - "message": { - "name": "message", - "type": "any", - "description": "Message corresponding with the deployment state.", - "required": false, - "output_only": true + "jobs": { + "name": "Jobs", + "description": "The Jobs API allows you to create, edit, and delete jobs.\n\nYou can use a Databricks job to run a data processing or data analysis task\nin a Databricks cluster with scalable resources. Your job can consist of a\nsingle task or can be a large, multi-task workflow with complex dependencies.\nDatabricks manages the task orchestration, cluster management, monitoring,\nand error reporting for all of your jobs. You can run your jobs immediately\nor periodically through an easy-to-use scheduling system. ...", + "package": "github.com/databricks/databricks-sdk-go/service/jobs", + "methods": { + "CancelAllRuns": { + "name": "CancelAllRuns", + "description": "Cancels all active runs of a job. 
The runs are canceled asynchronously,\nso it doesn't prevent new runs from being started.", + "signature": "CancelAllRuns(ctx context.Context, request CancelAllRuns) error", + "parameters": [ + { + "name": "request", + "type": "CancelAllRuns", + "description": "", + "required": true + } + ] }, - "state": { - "name": "state", - "type": "any", - "description": "State of the deployment.", - "required": false, - "output_only": true - } - } - }, - "apps.AppResource": { - "name": "AppResource", - "package": "apps", - "description": "app resource configuration.", - "fields": { - "database": { - "name": "database", - "type": "any", - "description": "", - "required": false + "CancelRun": { + "name": "CancelRun", + "description": "Cancels a job run or a task run. The run is canceled asynchronously, so\nit may still be running when this request completes.", + "signature": "CancelRun(ctx context.Context, request CancelRun) error", + "parameters": [ + { + "name": "request", + "type": "CancelRun", + "description": "", + "required": true + } + ] }, - "description": { - "name": "description", - "type": "string", - "description": "Description of the App Resource.", - "required": false + "Create": { + "name": "Create", + "description": "Create a new job.", + "signature": "Create(ctx context.Context, request CreateJob) (*CreateResponse, error)", + "parameters": [ + { + "name": "request", + "type": "CreateJob", + "description": "", + "required": true + } + ], + "returns": { + "type": "*CreateResponse", + "description": "" + } }, - "experiment": { - "name": "experiment", - "type": "any", - "description": "", - "required": false + "Delete": { + "name": "Delete", + "description": "Deletes a job.", + "signature": "Delete(ctx context.Context, request DeleteJob) error", + "parameters": [ + { + "name": "request", + "type": "DeleteJob", + "description": "", + "required": true + } + ] }, - "genie_space": { - "name": "genie_space", - "type": "any", - "description": "", - "required": false + "DeleteRun": { + "name": "DeleteRun", + "description": "Deletes a non-active run. Returns an error if the run is active.", + "signature": "DeleteRun(ctx context.Context, request DeleteRun) error", + "parameters": [ + { + "name": "request", + "type": "DeleteRun", + "description": "", + "required": true + } + ] }, - "job": { - "name": "job", - "type": "any", - "description": "", - "required": false + "ExportRun": { + "name": "ExportRun", + "description": "Export and retrieve the job run task.", + "signature": "ExportRun(ctx context.Context, request ExportRunRequest) (*ExportRunOutput, error)", + "parameters": [ + { + "name": "request", + "type": "ExportRunRequest", + "description": "", + "required": true + } + ], + "returns": { + "type": "*ExportRunOutput", + "description": "" + } }, - "name": { - "name": "name", - "type": "any", - "description": "Name of the App Resource.", - "required": false + "Get": { + "name": "Get", + "description": "Retrieves the details for a single job.\n\nLarge arrays in the results will be paginated when they exceed 100\nelements. A request for a single job will return all properties for that\njob, and the first 100 elements of array properties (`tasks`,\n`job_clusters`, `environments` and `parameters`). Use the\n`next_page_token` field to check for more results and pass its value as\nthe `page_token` in subsequent requests. 
If any array properties have\nmore than 100 elements, additional results will be ret...", + "signature": "Get(ctx context.Context, request GetJobRequest) (*Job, error)", + "parameters": [ + { + "name": "request", + "type": "GetJobRequest", + "description": "", + "required": true + } + ], + "returns": { + "type": "*Job", + "description": "" + } }, - "secret": { - "name": "secret", - "type": "any", - "description": "", - "required": false + "GetPermissionLevels": { + "name": "GetPermissionLevels", + "description": "Gets the permission levels that a user can have on an object.", + "signature": "GetPermissionLevels(ctx context.Context, request GetJobPermissionLevelsRequest) (*GetJobPermissionLevelsResponse, error)", + "parameters": [ + { + "name": "request", + "type": "GetJobPermissionLevelsRequest", + "description": "", + "required": true + } + ], + "returns": { + "type": "*GetJobPermissionLevelsResponse", + "description": "" + } }, - "serving_endpoint": { - "name": "serving_endpoint", - "type": "any", - "description": "", - "required": false + "GetPermissions": { + "name": "GetPermissions", + "description": "Gets the permissions of a job. Jobs can inherit permissions from their\nroot object.", + "signature": "GetPermissions(ctx context.Context, request GetJobPermissionsRequest) (*JobPermissions, error)", + "parameters": [ + { + "name": "request", + "type": "GetJobPermissionsRequest", + "description": "", + "required": true + } + ], + "returns": { + "type": "*JobPermissions", + "description": "" + } }, - "sql_warehouse": { - "name": "sql_warehouse", - "type": "any", - "description": "", - "required": false + "GetRun": { + "name": "GetRun", + "description": "Retrieves the metadata of a run.\n\nLarge arrays in the results will be paginated when they exceed 100\nelements. A request for a single run will return all properties for that\nrun, and the first 100 elements of array properties (`tasks`,\n`job_clusters`, `job_parameters` and `repair_history`). Use the\nnext_page_token field to check for more results and pass its value as the\npage_token in subsequent requests. If any array properties have more than\n100 elements, additional results will be returned...", + "signature": "GetRun(ctx context.Context, request GetRunRequest) (*Run, error)", + "parameters": [ + { + "name": "request", + "type": "GetRunRequest", + "description": "", + "required": true + } + ], + "returns": { + "type": "*Run", + "description": "" + } }, - "uc_securable": { - "name": "uc_securable", - "type": "any", - "description": "", - "required": false - } - } - }, - "apps.AppResourceDatabase": { - "name": "AppResourceDatabase", - "package": "apps", - "description": "app resource database configuration.", - "fields": { - "database_name": { - "name": "database_name", - "type": "string", - "description": "", - "required": false + "GetRunOutput": { + "name": "GetRunOutput", + "description": "Retrieve the output and metadata of a single task run. When a notebook\ntask returns a value through the `dbutils.notebook.exit()` call, you can\nuse this endpoint to retrieve that value. Databricks restricts this API\nto returning the first 5 MB of the output. 
To return a larger result, you\ncan store job results in a cloud storage service.\n\nThis endpoint validates that the __run_id__ parameter is valid and\nreturns an HTTP status code 400 if the __run_id__ parameter is invalid.\nRuns are automati...", + "signature": "GetRunOutput(ctx context.Context, request GetRunOutputRequest) (*RunOutput, error)", + "parameters": [ + { + "name": "request", + "type": "GetRunOutputRequest", + "description": "", + "required": true + } + ], + "returns": { + "type": "*RunOutput", + "description": "" + } }, - "instance_name": { - "name": "instance_name", - "type": "string", - "description": "", - "required": false + "List": { + "name": "List", + "description": "Retrieves a list of jobs.", + "signature": "List(ctx context.Context, request ListJobsRequest) (*ListJobsResponse, error)", + "parameters": [ + { + "name": "request", + "type": "ListJobsRequest", + "description": "", + "required": true + } + ], + "returns": { + "type": "*ListJobsResponse", + "description": "" + } }, - "permission": { - "name": "permission", - "type": "any", - "description": "", - "required": false - } - } - }, - "apps.AppResourceDatabaseDatabasePermission": { - "name": "AppResourceDatabaseDatabasePermission", - "package": "apps", - "description": "app resource database database permission configuration.", - "fields": {} - }, - "apps.AppResourceExperiment": { - "name": "AppResourceExperiment", - "package": "apps", - "description": "app resource experiment configuration.", - "fields": { - "experiment_id": { - "name": "experiment_id", - "type": "string", - "description": "", - "required": false + "ListRuns": { + "name": "ListRuns", + "description": "List runs in descending order by start time.", + "signature": "ListRuns(ctx context.Context, request ListRunsRequest) (*ListRunsResponse, error)", + "parameters": [ + { + "name": "request", + "type": "ListRunsRequest", + "description": "", + "required": true + } + ], + "returns": { + "type": "*ListRunsResponse", + "description": "" + } }, - "permission": { - "name": "permission", - "type": "any", - "description": "", - "required": false - } - } - }, - "apps.AppResourceExperimentExperimentPermission": { - "name": "AppResourceExperimentExperimentPermission", - "package": "apps", - "description": "app resource experiment experiment permission configuration.", - "fields": {} - }, - "apps.AppResourceGenieSpace": { - "name": "AppResourceGenieSpace", - "package": "apps", - "description": "app resource genie space configuration.", - "fields": { - "name": { - "name": "name", - "type": "any", - "description": "", - "required": false + "RepairRun": { + "name": "RepairRun", + "description": "Re-run one or more tasks. Tasks are re-run as part of the original job\nrun. They use the current job and task settings, and can be viewed in the\nhistory for the original job run.", + "signature": "RepairRun(ctx context.Context, request RepairRun) (*RepairRunResponse, error)", + "parameters": [ + { + "name": "request", + "type": "RepairRun", + "description": "", + "required": true + } + ], + "returns": { + "type": "*RepairRunResponse", + "description": "" + } }, - "permission": { - "name": "permission", - "type": "any", - "description": "", - "required": false + "Reset": { + "name": "Reset", + "description": "Overwrite all settings for the given job. 
Use the [_Update_\nendpoint](:method:jobs/update) to update job settings partially.", + "signature": "Reset(ctx context.Context, request ResetJob) error", + "parameters": [ + { + "name": "request", + "type": "ResetJob", + "description": "", + "required": true + } + ] }, - "space_id": { - "name": "space_id", - "type": "string", - "description": "", - "required": false - } - } - }, - "apps.AppResourceGenieSpaceGenieSpacePermission": { - "name": "AppResourceGenieSpaceGenieSpacePermission", - "package": "apps", - "description": "app resource genie space genie space permission configuration.", - "fields": {} - }, - "apps.AppResourceJob": { - "name": "AppResourceJob", - "package": "apps", - "description": "app resource job configuration.", - "fields": { - "id": { - "name": "id", - "type": "any", - "description": "Id of the job to grant permission on.", - "required": false + "RunNow": { + "name": "RunNow", + "description": "Run a job and return the `run_id` of the triggered run.", + "signature": "RunNow(ctx context.Context, request RunNow) (*RunNowResponse, error)", + "parameters": [ + { + "name": "request", + "type": "RunNow", + "description": "", + "required": true + } + ], + "returns": { + "type": "*RunNowResponse", + "description": "" + } }, - "permission": { - "name": "permission", - "type": "any", - "description": "Permissions to grant on the Job. Supported permissions are: \"CAN_MANAGE\", \"IS_OWNER\", \"CAN_MANAGE_RUN\", \"CAN_VIEW\".", - "required": false - } - } - }, - "apps.AppResourceJobJobPermission": { - "name": "AppResourceJobJobPermission", - "package": "apps", - "description": "app resource job job permission configuration.", - "fields": {} - }, - "apps.AppResourceSecret": { - "name": "AppResourceSecret", - "package": "apps", - "description": "app resource secret configuration.", - "fields": { - "key": { - "name": "key", - "type": "any", - "description": "Key of the secret to grant permission on.", - "required": false + "SetPermissions": { + "name": "SetPermissions", + "description": "Sets permissions on an object, replacing existing permissions if they\nexist. Deletes all direct permissions if none are specified. Objects can\ninherit permissions from their root object.", + "signature": "SetPermissions(ctx context.Context, request JobPermissionsRequest) (*JobPermissions, error)", + "parameters": [ + { + "name": "request", + "type": "JobPermissionsRequest", + "description": "", + "required": true + } + ], + "returns": { + "type": "*JobPermissions", + "description": "" + } }, - "permission": { - "name": "permission", - "type": "any", - "description": "Permission to grant on the secret scope. For secrets, only one permission is allowed. Permission must be one of: \"READ\", \"WRITE\", \"MANAGE\".", - "required": false + "Submit": { + "name": "Submit", + "description": "Submit a one-time run. This endpoint allows you to submit a workload\ndirectly without creating a job. Runs submitted using this endpoint\ndon’t display in the UI. 
Use the `jobs/runs/get` API to check the run\nstate after the job is submitted.", + "signature": "Submit(ctx context.Context, request SubmitRun) (*SubmitRunResponse, error)", + "parameters": [ + { + "name": "request", + "type": "SubmitRun", + "description": "", + "required": true + } + ], + "returns": { + "type": "*SubmitRunResponse", + "description": "" + } }, - "scope": { - "name": "scope", - "type": "any", - "description": "Scope of the secret to grant permission on.", - "required": false - } - } - }, - "apps.AppResourceSecretSecretPermission": { - "name": "AppResourceSecretSecretPermission", - "package": "apps", - "description": "Permission to grant on the secret scope. Supported permissions are: \"READ\", \"WRITE\", \"MANAGE\".", - "fields": {} - }, - "apps.AppResourceServingEndpoint": { - "name": "AppResourceServingEndpoint", - "package": "apps", - "description": "app resource serving endpoint configuration.", - "fields": { - "name": { - "name": "name", - "type": "any", - "description": "Name of the serving endpoint to grant permission on.", - "required": false + "Update": { + "name": "Update", + "description": "Add, update, or remove specific settings of an existing job. Use the\n[_Reset_ endpoint](:method:jobs/reset) to overwrite all job settings.", + "signature": "Update(ctx context.Context, request UpdateJob) error", + "parameters": [ + { + "name": "request", + "type": "UpdateJob", + "description": "", + "required": true + } + ] }, - "permission": { - "name": "permission", - "type": "any", - "description": "Permission to grant on the serving endpoint. Supported permissions are: \"CAN_MANAGE\", \"CAN_QUERY\", \"CAN_VIEW\".", - "required": false + "UpdatePermissions": { + "name": "UpdatePermissions", + "description": "Updates the permissions on a job. Jobs can inherit permissions from their\nroot object.", + "signature": "UpdatePermissions(ctx context.Context, request JobPermissionsRequest) (*JobPermissions, error)", + "parameters": [ + { + "name": "request", + "type": "JobPermissionsRequest", + "description": "", + "required": true + } + ], + "returns": { + "type": "*JobPermissions", + "description": "" + } } } }, - "apps.AppResourceServingEndpointServingEndpointPermission": { - "name": "AppResourceServingEndpointServingEndpointPermission", - "package": "apps", - "description": "app resource serving endpoint serving endpoint permission configuration.", - "fields": {} - }, - "apps.AppResourceSqlWarehouse": { - "name": "AppResourceSqlWarehouse", - "package": "apps", - "description": "app resource sql warehouse configuration.", - "fields": { - "id": { - "name": "id", - "type": "any", - "description": "Id of the SQL warehouse to grant permission on.", - "required": false + "ml": { + "name": "Experiments", + "description": "Experiments are the primary unit of organization in MLflow; all MLflow runs\nbelong to an experiment. Each experiment lets you visualize, search, and\ncompare runs, as well as download run artifacts or metadata for analysis in\nother tools. Experiments are maintained in a Databricks hosted MLflow\ntracking server.\n\nExperiments are located in the workspace file tree. You manage experiments\nusing the same tools you use to manage other workspace objects such as\nfolders, notebooks, and libraries.", + "package": "github.com/databricks/databricks-sdk-go/service/ml", + "methods": { + "CreateExperiment": { + "name": "CreateExperiment", + "description": "Creates an experiment with a name. Returns the ID of the newly created\nexperiment. 
Validates that another experiment with the same name does not\nalready exist and fails if another experiment with the same name already\nexists.\n\nThrows `RESOURCE_ALREADY_EXISTS` if an experiment with the given name\nexists.", + "signature": "CreateExperiment(ctx context.Context, request CreateExperiment) (*CreateExperimentResponse, error)", + "parameters": [ + { + "name": "request", + "type": "CreateExperiment", + "description": "", + "required": true + } + ], + "returns": { + "type": "*CreateExperimentResponse", + "description": "" + } }, - "permission": { - "name": "permission", - "type": "any", - "description": "Permission to grant on the SQL warehouse. Supported permissions are: \"CAN_MANAGE\", \"CAN_USE\", \"IS_OWNER\".", - "required": false - } - } - }, - "apps.AppResourceSqlWarehouseSqlWarehousePermission": { - "name": "AppResourceSqlWarehouseSqlWarehousePermission", - "package": "apps", - "description": "app resource sql warehouse sql warehouse permission configuration.", - "fields": {} - }, - "apps.AppResourceUcSecurable": { - "name": "AppResourceUcSecurable", - "package": "apps", - "description": "app resource uc securable configuration.", - "fields": { - "permission": { - "name": "permission", - "type": "any", - "description": "", - "required": false - }, - "securable_full_name": { - "name": "securable_full_name", - "type": "string", - "description": "", - "required": false + "CreateLoggedModel": { + "name": "CreateLoggedModel", + "description": "Create a logged model.", + "signature": "CreateLoggedModel(ctx context.Context, request CreateLoggedModelRequest) (*CreateLoggedModelResponse, error)", + "parameters": [ + { + "name": "request", + "type": "CreateLoggedModelRequest", + "description": "", + "required": true + } + ], + "returns": { + "type": "*CreateLoggedModelResponse", + "description": "" + } }, - "securable_type": { - "name": "securable_type", - "type": "any", - "description": "", - "required": false - } - } - }, - "apps.AppResourceUcSecurableUcSecurablePermission": { - "name": "AppResourceUcSecurableUcSecurablePermission", - "package": "apps", - "description": "app resource uc securable uc securable permission configuration.", - "fields": {} - }, - "apps.AppResourceUcSecurableUcSecurableType": { - "name": "AppResourceUcSecurableUcSecurableType", - "package": "apps", - "description": "app resource uc securable uc securable type configuration.", - "fields": {} - }, - "apps.ApplicationState": { - "name": "ApplicationState", - "package": "apps", - "description": "application state configuration.", - "fields": {} - }, - "apps.ApplicationStatus": { - "name": "ApplicationStatus", - "package": "apps", - "description": "application status configuration.", - "fields": { - "message": { - "name": "message", - "type": "any", - "description": "Application status message", - "required": false, - "output_only": true + "CreateRun": { + "name": "CreateRun", + "description": "Creates a new run within an experiment. A run is usually a single\nexecution of a machine learning or data ETL pipeline. 
MLflow uses runs to\ntrack the `mlflowParam`, `mlflowMetric`, and `mlflowRunTag` associated\nwith a single execution.", + "signature": "CreateRun(ctx context.Context, request CreateRun) (*CreateRunResponse, error)", + "parameters": [ + { + "name": "request", + "type": "CreateRun", + "description": "", + "required": true + } + ], + "returns": { + "type": "*CreateRunResponse", + "description": "" + } }, - "state": { - "name": "state", - "type": "any", - "description": "State of the application.", - "required": false, - "output_only": true - } - } - }, - "apps.ComputeSize": { - "name": "ComputeSize", - "package": "apps", - "description": "compute size configuration.", - "fields": {} - }, - "apps.ComputeState": { - "name": "ComputeState", - "package": "apps", - "description": "compute state configuration.", - "fields": {} - }, - "apps.ComputeStatus": { - "name": "ComputeStatus", - "package": "apps", - "description": "compute status configuration.", - "fields": { - "message": { - "name": "message", - "type": "any", - "description": "Compute status message", - "required": false, - "output_only": true + "DeleteExperiment": { + "name": "DeleteExperiment", + "description": "Marks an experiment and associated metadata, runs, metrics, params, and\ntags for deletion. If the experiment uses FileStore, artifacts associated\nwith the experiment are also deleted.", + "signature": "DeleteExperiment(ctx context.Context, request DeleteExperiment) error", + "parameters": [ + { + "name": "request", + "type": "DeleteExperiment", + "description": "", + "required": true + } + ] }, - "state": { - "name": "state", - "type": "any", - "description": "State of the app compute.", - "required": false, - "output_only": true - } - } - }, - "apps.EnvVar": { - "name": "EnvVar", - "package": "apps", - "description": "env var configuration.", - "fields": { - "name": { - "name": "name", - "type": "any", - "description": "The name of the environment variable.", - "required": false + "DeleteLoggedModel": { + "name": "DeleteLoggedModel", + "description": "Delete a logged model.", + "signature": "DeleteLoggedModel(ctx context.Context, request DeleteLoggedModelRequest) error", + "parameters": [ + { + "name": "request", + "type": "DeleteLoggedModelRequest", + "description": "", + "required": true + } + ] }, - "value": { - "name": "value", - "type": "any", - "description": "The value for the environment variable.", - "required": false + "DeleteLoggedModelTag": { + "name": "DeleteLoggedModelTag", + "description": "Delete a tag on a logged model.", + "signature": "DeleteLoggedModelTag(ctx context.Context, request DeleteLoggedModelTagRequest) error", + "parameters": [ + { + "name": "request", + "type": "DeleteLoggedModelTagRequest", + "description": "", + "required": true + } + ] }, - "value_from": { - "name": "value_from", - "type": "any", - "description": "The name of an external Databricks resource that contains the value, such as a secret or a database table.", - "required": false - } - } - }, - "apps.GitRepository": { - "name": "GitRepository", - "package": "apps", - "description": "Git repository configuration specifying the location of the repository.", - "fields": { - "provider": { - "name": "provider", - "type": "any", - "description": "Git provider. Case insensitive. 
Supported values: gitHub, gitHubEnterprise, bitbucketCloud,\nbitbucketServer, azureDevOpsServices, gitLab, gitLabEnterpriseEdition, awsCodeCommit.", - "required": false + "DeleteRun": { + "name": "DeleteRun", + "description": "Marks a run for deletion.", + "signature": "DeleteRun(ctx context.Context, request DeleteRun) error", + "parameters": [ + { + "name": "request", + "type": "DeleteRun", + "description": "", + "required": true + } + ] }, - "url": { - "name": "url", - "type": "any", - "description": "URL of the Git repository.", - "required": false - } - } - }, - "apps.GitSource": { - "name": "GitSource", - "package": "apps", - "description": "Complete git source specification including repository location and reference.", - "fields": { - "branch": { - "name": "branch", - "type": "any", - "description": "Git branch to checkout.", - "required": false + "DeleteRuns": { + "name": "DeleteRuns", + "description": "Bulk delete runs in an experiment that were created prior to or at the\nspecified timestamp. Deletes at most max_runs per request. To call this\nAPI from a Databricks Notebook in Python, you can use the client code\nsnippet on", + "signature": "DeleteRuns(ctx context.Context, request DeleteRuns) (*DeleteRunsResponse, error)", + "parameters": [ + { + "name": "request", + "type": "DeleteRuns", + "description": "", + "required": true + } + ], + "returns": { + "type": "*DeleteRunsResponse", + "description": "" + } }, - "commit": { - "name": "commit", - "type": "any", - "description": "Git commit SHA to checkout.", - "required": false + "DeleteTag": { + "name": "DeleteTag", + "description": "Deletes a tag on a run. Tags are run metadata that can be updated during\na run and after a run completes.", + "signature": "DeleteTag(ctx context.Context, request DeleteTag) error", + "parameters": [ + { + "name": "request", + "type": "DeleteTag", + "description": "", + "required": true + } + ] }, - "git_repository": { - "name": "git_repository", - "type": "any", - "description": "Git repository configuration. Populated from the app's git_repository configuration.", - "required": false, - "output_only": true + "FinalizeLoggedModel": { + "name": "FinalizeLoggedModel", + "description": "Finalize a logged model.", + "signature": "FinalizeLoggedModel(ctx context.Context, request FinalizeLoggedModelRequest) (*FinalizeLoggedModelResponse, error)", + "parameters": [ + { + "name": "request", + "type": "FinalizeLoggedModelRequest", + "description": "", + "required": true + } + ], + "returns": { + "type": "*FinalizeLoggedModelResponse", + "description": "" + } }, - "resolved_commit": { - "name": "resolved_commit", - "type": "any", - "description": "The resolved commit SHA that was actually used for the deployment. This is populated by the\nsystem after resolving the reference (branch, tag, or commit). If commit is specified\ndirectly, this will match commit. If a branch or tag is specified, this contains the\ncommit SHA that the branch or tag pointed to at deployment time.", - "required": false, - "output_only": true + "GetByName": { + "name": "GetByName", + "description": "Gets metadata for an experiment.\n\nThis endpoint will return deleted experiments, but prefers the active\nexperiment if an active and deleted experiment share the same name. 
If\nmultiple deleted experiments share the same name, the API will return one\nof them.\n\nThrows `RESOURCE_DOES_NOT_EXIST` if no experiment with the specified name\nexists.", + "signature": "GetByName(ctx context.Context, request GetByNameRequest) (*GetExperimentByNameResponse, error)", + "parameters": [ + { + "name": "request", + "type": "GetByNameRequest", + "description": "", + "required": true + } + ], + "returns": { + "type": "*GetExperimentByNameResponse", + "description": "" + } }, - "source_code_path": { - "name": "source_code_path", - "type": "string", - "description": "Relative path to the app source code within the Git repository. If not specified, the root\nof the repository is used.", - "required": false + "GetExperiment": { + "name": "GetExperiment", + "description": "Gets metadata for an experiment. This method works on deleted\nexperiments.", + "signature": "GetExperiment(ctx context.Context, request GetExperimentRequest) (*GetExperimentResponse, error)", + "parameters": [ + { + "name": "request", + "type": "GetExperimentRequest", + "description": "", + "required": true + } + ], + "returns": { + "type": "*GetExperimentResponse", + "description": "" + } }, - "tag": { - "name": "tag", - "type": "any", - "description": "Git tag to checkout.", - "required": false - } - } - }, - "bundle.Alert": { - "name": "Alert", - "package": "resources", - "description": "alert configuration.", - "fields": { - "create_time": { - "name": "create_time", - "type": "string (timestamp)", - "description": "The timestamp indicating when the alert was created.", - "required": false, - "output_only": true + "GetHistory": { + "name": "GetHistory", + "description": "Gets a list of all values for the specified metric for a given run.", + "signature": "GetHistory(ctx context.Context, request GetHistoryRequest) (*GetMetricHistoryResponse, error)", + "parameters": [ + { + "name": "request", + "type": "GetHistoryRequest", + "description": "", + "required": true + } + ], + "returns": { + "type": "*GetMetricHistoryResponse", + "description": "" + } }, - "custom_description": { - "name": "custom_description", - "type": "string", - "description": "Custom description for the alert. support mustache template.", - "required": false - }, - "custom_summary": { - "name": "custom_summary", - "type": "any", - "description": "Custom summary for the alert. 
support mustache template.", - "required": false - }, - "display_name": { - "name": "display_name", - "type": "string", - "description": "The display name of the alert.", - "required": false + "GetLoggedModel": { + "name": "GetLoggedModel", + "description": "Get a logged model.", + "signature": "GetLoggedModel(ctx context.Context, request GetLoggedModelRequest) (*GetLoggedModelResponse, error)", + "parameters": [ + { + "name": "request", + "type": "GetLoggedModelRequest", + "description": "", + "required": true + } + ], + "returns": { + "type": "*GetLoggedModelResponse", + "description": "" + } }, - "effective_run_as": { - "name": "effective_run_as", - "type": "any", - "description": "The actual identity that will be used to execute the alert.\nThis is an output-only field that shows the resolved run-as identity after applying\npermissions and defaults.", - "required": false, - "output_only": true + "GetPermissionLevels": { + "name": "GetPermissionLevels", + "description": "Gets the permission levels that a user can have on an object.", + "signature": "GetPermissionLevels(ctx context.Context, request GetExperimentPermissionLevelsRequest) (*GetExperimentPermissionLevelsResponse, error)", + "parameters": [ + { + "name": "request", + "type": "GetExperimentPermissionLevelsRequest", + "description": "", + "required": true + } + ], + "returns": { + "type": "*GetExperimentPermissionLevelsResponse", + "description": "" + } }, - "evaluation": { - "name": "evaluation", - "type": "any", - "description": "", - "required": false + "GetPermissions": { + "name": "GetPermissions", + "description": "Gets the permissions of an experiment. Experiments can inherit\npermissions from their root object.", + "signature": "GetPermissions(ctx context.Context, request GetExperimentPermissionsRequest) (*ExperimentPermissions, error)", + "parameters": [ + { + "name": "request", + "type": "GetExperimentPermissionsRequest", + "description": "", + "required": true + } + ], + "returns": { + "type": "*ExperimentPermissions", + "description": "" + } }, - "id": { - "name": "id", - "type": "any", - "description": "UUID identifying the alert.", - "required": false, - "output_only": true + "GetRun": { + "name": "GetRun", + "description": "Gets the metadata, metrics, params, and tags for a run. In the case where\nmultiple metrics with the same key are logged for a run, return only the\nvalue with the latest timestamp.\n\nIf there are multiple values with the latest timestamp, return the\nmaximum of these values.", + "signature": "GetRun(ctx context.Context, request GetRunRequest) (*GetRunResponse, error)", + "parameters": [ + { + "name": "request", + "type": "GetRunRequest", + "description": "", + "required": true + } + ], + "returns": { + "type": "*GetRunResponse", + "description": "" + } }, - "lifecycle_state": { - "name": "lifecycle_state", - "type": "any", - "description": "Indicates whether the query is trashed.", - "required": false, - "output_only": true + "ListArtifacts": { + "name": "ListArtifacts", + "description": "List artifacts for a run. Takes an optional `artifact_path` prefix which\nif specified, the response contains only artifacts with the specified\nprefix. A maximum of 1000 artifacts will be retrieved for UC Volumes.\nPlease call `/api/2.0/fs/directories{directory_path}` for listing\nartifacts in UC Volumes, which supports pagination. 
See [List directory\ncontents | Files API](/api/workspace/files/listdirectorycontents).", + "signature": "ListArtifacts(ctx context.Context, request ListArtifactsRequest) (*ListArtifactsResponse, error)", + "parameters": [ + { + "name": "request", + "type": "ListArtifactsRequest", + "description": "", + "required": true + } + ], + "returns": { + "type": "*ListArtifactsResponse", + "description": "" + } }, - "owner_user_name": { - "name": "owner_user_name", - "type": "string", - "description": "The owner's username. This field is set to \"Unavailable\" if the user has been deleted.", - "required": false, - "output_only": true + "ListExperiments": { + "name": "ListExperiments", + "description": "Gets a list of all experiments.", + "signature": "ListExperiments(ctx context.Context, request ListExperimentsRequest) (*ListExperimentsResponse, error)", + "parameters": [ + { + "name": "request", + "type": "ListExperimentsRequest", + "description": "", + "required": true + } + ], + "returns": { + "type": "*ListExperimentsResponse", + "description": "" + } }, - "parent_path": { - "name": "parent_path", - "type": "string", - "description": "The workspace path of the folder containing the alert. Can only be set on create, and cannot be updated.", - "required": false + "LogBatch": { + "name": "LogBatch", + "description": "Logs a batch of metrics, params, and tags for a run. If any data failed\nto be persisted, the server will respond with an error (non-200 status\ncode).\n\nIn case of error (due to internal server error or an invalid request),\npartial data may be written.\n\nYou can write metrics, params, and tags in interleaving fashion, but\nwithin a given entity type are guaranteed to follow the order specified\nin the request body.\n\nThe overwrite behavior for metrics, params, and tags is as follows:\n\n* Metrics: me...", + "signature": "LogBatch(ctx context.Context, request LogBatch) error", + "parameters": [ + { + "name": "request", + "type": "LogBatch", + "description": "", + "required": true + } + ] }, - "query_text": { - "name": "query_text", - "type": "any", - "description": "Text of the query to be run.", - "required": false + "LogInputs": { + "name": "LogInputs", + "description": "Logs inputs, such as datasets and models, to an MLflow Run.", + "signature": "LogInputs(ctx context.Context, request LogInputs) error", + "parameters": [ + { + "name": "request", + "type": "LogInputs", + "description": "", + "required": true + } + ] }, - "run_as": { - "name": "run_as", - "type": "any", - "description": "Specifies the identity that will be used to run the alert.\nThis field allows you to configure alerts to run as a specific user or service principal.\n- For user identity: Set `user_name` to the email of an active workspace user. Users can only set this to their own email.\n- For service principal: Set `service_principal_name` to the application ID. Requires the `servicePrincipal/user` role.\nIf not specified, the alert will run as the request user.", - "required": false + "LogLoggedModelParams": { + "name": "LogLoggedModelParams", + "description": "Logs params for a logged model. A param is a key-value pair (string key,\nstring value). Examples include hyperparameters used for ML model\ntraining. 
A param can be logged only once for a logged model, and\nattempting to overwrite an existing param with a different value will\nresult in an error", + "signature": "LogLoggedModelParams(ctx context.Context, request LogLoggedModelParamsRequest) error", + "parameters": [ + { + "name": "request", + "type": "LogLoggedModelParamsRequest", + "description": "", + "required": true + } + ] }, - "run_as_user_name": { - "name": "run_as_user_name", - "type": "string", - "description": "The run as username or application ID of service principal.\nOn Create and Update, this field can be set to application ID of an active service principal. Setting this field requires the servicePrincipal/user role.\nDeprecated: Use `run_as` field instead. This field will be removed in a future release.", - "required": false, - "deprecated": true + "LogMetric": { + "name": "LogMetric", + "description": "Log a metric for a run. A metric is a key-value pair (string key, float\nvalue) with an associated timestamp. Examples include the various metrics\nthat represent ML model accuracy. A metric can be logged multiple times.", + "signature": "LogMetric(ctx context.Context, request LogMetric) error", + "parameters": [ + { + "name": "request", + "type": "LogMetric", + "description": "", + "required": true + } + ] }, - "schedule": { - "name": "schedule", - "type": "any", - "description": "", - "required": false + "LogModel": { + "name": "LogModel", + "description": "**Note:** the [Create a logged\nmodel](/api/workspace/experiments/createloggedmodel) API replaces this\nendpoint.\n\nLog a model to an MLflow Run.", + "signature": "LogModel(ctx context.Context, request LogModel) error", + "parameters": [ + { + "name": "request", + "type": "LogModel", + "description": "", + "required": true + } + ] }, - "update_time": { - "name": "update_time", - "type": "string (timestamp)", - "description": "The timestamp indicating when the alert was updated.", - "required": false, - "output_only": true + "LogOutputs": { + "name": "LogOutputs", + "description": "Logs outputs, such as models, from an MLflow Run.", + "signature": "LogOutputs(ctx context.Context, request LogOutputsRequest) error", + "parameters": [ + { + "name": "request", + "type": "LogOutputsRequest", + "description": "", + "required": true + } + ] }, - "warehouse_id": { - "name": "warehouse_id", - "type": "string", - "description": "ID of the SQL warehouse attached to the alert.", - "required": false - } - } - }, - "bundle.App": { - "name": "App", - "package": "resources", - "description": "app configuration.", - "fields": { - "active_deployment": { - "name": "active_deployment", - "type": "any", - "description": "The active deployment of the app. A deployment is considered active when it has been deployed\nto the app compute.", - "required": false, - "output_only": true + "LogParam": { + "name": "LogParam", + "description": "Logs a param used for a run. A param is a key-value pair (string key,\nstring value). Examples include hyperparameters used for ML model\ntraining and constant dates and values used in an ETL pipeline. 
A param\ncan be logged only once for a run.", + "signature": "LogParam(ctx context.Context, request LogParam) error", + "parameters": [ + { + "name": "request", + "type": "LogParam", + "description": "", + "required": true + } + ] }, - "app_status": { - "name": "app_status", - "type": "any", - "description": "", - "required": false, - "output_only": true + "RestoreExperiment": { + "name": "RestoreExperiment", + "description": "Restore an experiment marked for deletion. This also restores associated\nmetadata, runs, metrics, params, and tags. If experiment uses FileStore,\nunderlying artifacts associated with experiment are also restored.\n\nThrows `RESOURCE_DOES_NOT_EXIST` if experiment was never created or was\npermanently deleted.", + "signature": "RestoreExperiment(ctx context.Context, request RestoreExperiment) error", + "parameters": [ + { + "name": "request", + "type": "RestoreExperiment", + "description": "", + "required": true + } + ] }, - "budget_policy_id": { - "name": "budget_policy_id", - "type": "string", - "description": "", - "required": false + "RestoreRun": { + "name": "RestoreRun", + "description": "Restores a deleted run. This also restores associated metadata, runs,\nmetrics, params, and tags.\n\nThrows `RESOURCE_DOES_NOT_EXIST` if the run was never created or was\npermanently deleted.", + "signature": "RestoreRun(ctx context.Context, request RestoreRun) error", + "parameters": [ + { + "name": "request", + "type": "RestoreRun", + "description": "", + "required": true + } + ] }, - "compute_size": { - "name": "compute_size", - "type": "int", - "description": "", - "required": false + "RestoreRuns": { + "name": "RestoreRuns", + "description": "Bulk restore runs in an experiment that were deleted no earlier than the\nspecified timestamp. Restores at most max_runs per request. To call this\nAPI from a Databricks Notebook in Python, you can use the client code\nsnippet on", + "signature": "RestoreRuns(ctx context.Context, request RestoreRuns) (*RestoreRunsResponse, error)", + "parameters": [ + { + "name": "request", + "type": "RestoreRuns", + "description": "", + "required": true + } + ], + "returns": { + "type": "*RestoreRunsResponse", + "description": "" + } }, - "compute_status": { - "name": "compute_status", - "type": "any", - "description": "", - "required": false, - "output_only": true + "SearchExperiments": { + "name": "SearchExperiments", + "description": "Searches for experiments that satisfy specified search criteria.", + "signature": "SearchExperiments(ctx context.Context, request SearchExperiments) (*SearchExperimentsResponse, error)", + "parameters": [ + { + "name": "request", + "type": "SearchExperiments", + "description": "", + "required": true + } + ], + "returns": { + "type": "*SearchExperimentsResponse", + "description": "" + } }, - "create_time": { - "name": "create_time", - "type": "string (timestamp)", - "description": "The creation time of the app. 
Formatted timestamp in ISO 6801.", - "required": false, - "output_only": true + "SearchLoggedModels": { + "name": "SearchLoggedModels", + "description": "Search for Logged Models that satisfy specified search criteria.", + "signature": "SearchLoggedModels(ctx context.Context, request SearchLoggedModelsRequest) (*SearchLoggedModelsResponse, error)", + "parameters": [ + { + "name": "request", + "type": "SearchLoggedModelsRequest", + "description": "", + "required": true + } + ], + "returns": { + "type": "*SearchLoggedModelsResponse", + "description": "" + } }, - "creator": { - "name": "creator", - "type": "any", - "description": "The email of the user that created the app.", - "required": false, - "output_only": true + "SearchRuns": { + "name": "SearchRuns", + "description": "Searches for runs that satisfy expressions.\n\nSearch expressions can use `mlflowMetric` and `mlflowParam` keys.", + "signature": "SearchRuns(ctx context.Context, request SearchRuns) (*SearchRunsResponse, error)", + "parameters": [ + { + "name": "request", + "type": "SearchRuns", + "description": "", + "required": true + } + ], + "returns": { + "type": "*SearchRunsResponse", + "description": "" + } }, - "default_source_code_path": { - "name": "default_source_code_path", - "type": "string", - "description": "The default workspace file system path of the source code from which app deployment are\ncreated. This field tracks the workspace source code path of the last active deployment.", - "required": false, - "output_only": true + "SetExperimentTag": { + "name": "SetExperimentTag", + "description": "Sets a tag on an experiment. Experiment tags are metadata that can be\nupdated.", + "signature": "SetExperimentTag(ctx context.Context, request SetExperimentTag) error", + "parameters": [ + { + "name": "request", + "type": "SetExperimentTag", + "description": "", + "required": true + } + ] }, - "description": { - "name": "description", - "type": "string", - "description": "The description of the app.", - "required": false + "SetLoggedModelTags": { + "name": "SetLoggedModelTags", + "description": "Set tags for a logged model.", + "signature": "SetLoggedModelTags(ctx context.Context, request SetLoggedModelTagsRequest) error", + "parameters": [ + { + "name": "request", + "type": "SetLoggedModelTagsRequest", + "description": "", + "required": true + } + ] }, - "effective_budget_policy_id": { - "name": "effective_budget_policy_id", - "type": "string", - "description": "", - "required": false, - "output_only": true + "SetPermissions": { + "name": "SetPermissions", + "description": "Sets permissions on an object, replacing existing permissions if they\nexist. Deletes all direct permissions if none are specified. Objects can\ninherit permissions from their root object.", + "signature": "SetPermissions(ctx context.Context, request ExperimentPermissionsRequest) (*ExperimentPermissions, error)", + "parameters": [ + { + "name": "request", + "type": "ExperimentPermissionsRequest", + "description": "", + "required": true + } + ], + "returns": { + "type": "*ExperimentPermissions", + "description": "" + } }, - "effective_usage_policy_id": { - "name": "effective_usage_policy_id", - "type": "string", - "description": "", - "required": false, - "output_only": true + "SetTag": { + "name": "SetTag", + "description": "Sets a tag on a run. 
Tags are run metadata that can be updated during a\nrun and after a run completes.", + "signature": "SetTag(ctx context.Context, request SetTag) error", + "parameters": [ + { + "name": "request", + "type": "SetTag", + "description": "", + "required": true + } + ] }, - "effective_user_api_scopes": { - "name": "effective_user_api_scopes", - "type": "any", - "description": "The effective api scopes granted to the user access token.", - "required": false, - "output_only": true + "UpdateExperiment": { + "name": "UpdateExperiment", + "description": "Updates experiment metadata.", + "signature": "UpdateExperiment(ctx context.Context, request UpdateExperiment) error", + "parameters": [ + { + "name": "request", + "type": "UpdateExperiment", + "description": "", + "required": true + } + ] }, - "git_repository": { - "name": "git_repository", - "type": "any", - "description": "Git repository configuration for app deployments. When specified, deployments can\nreference code from this repository by providing only the git reference (branch, tag, or commit).", - "required": false + "UpdatePermissions": { + "name": "UpdatePermissions", + "description": "Updates the permissions on an experiment. Experiments can inherit\npermissions from their root object.", + "signature": "UpdatePermissions(ctx context.Context, request ExperimentPermissionsRequest) (*ExperimentPermissions, error)", + "parameters": [ + { + "name": "request", + "type": "ExperimentPermissionsRequest", + "description": "", + "required": true + } + ], + "returns": { + "type": "*ExperimentPermissions", + "description": "" + } }, - "id": { - "name": "id", - "type": "any", - "description": "The unique identifier of the app.", - "required": false, - "output_only": true + "UpdateRun": { + "name": "UpdateRun", + "description": "Updates run metadata.", + "signature": "UpdateRun(ctx context.Context, request UpdateRun) (*UpdateRunResponse, error)", + "parameters": [ + { + "name": "request", + "type": "UpdateRun", + "description": "", + "required": true + } + ], + "returns": { + "type": "*UpdateRunResponse", + "description": "" + } + } + } + }, + "pipelines": { + "name": "Pipelines", + "description": "The Lakeflow Spark Declarative Pipelines API allows you to create, edit,\ndelete, start, and view details about pipelines.\n\nSpark Declarative Pipelines is a framework for building reliable,\nmaintainable, and testable data processing pipelines. You define the\ntransformations to perform on your data, and Spark Declarative Pipelines\nmanages task orchestration, cluster management, monitoring, data quality, and\nerror handling.\n\nInstead of defining your data pipelines using a series of separate Apac...", + "package": "github.com/databricks/databricks-sdk-go/service/pipelines", + "methods": { + "Clone": { + "name": "Clone", + "description": "Creates a new pipeline using Unity Catalog from a pipeline using Hive\nMetastore. This method returns the ID of the newly created clone.\nAdditionally, this method starts an update for the newly created\npipeline.", + "signature": "Clone(ctx context.Context, request ClonePipelineRequest) (*ClonePipelineResponse, error)", + "parameters": [ + { + "name": "request", + "type": "ClonePipelineRequest", + "description": "", + "required": true + } + ], + "returns": { + "type": "*ClonePipelineResponse", + "description": "" + } }, - "name": { - "name": "name", - "type": "any", - "description": "The name of the app. 
The name must contain only lowercase alphanumeric characters and hyphens.\nIt must be unique within the workspace.", - "required": false + "Create": { + "name": "Create", + "description": "Creates a new data processing pipeline based on the requested\nconfiguration. If successful, this method returns the ID of the new\npipeline.", + "signature": "Create(ctx context.Context, request CreatePipeline) (*CreatePipelineResponse, error)", + "parameters": [ + { + "name": "request", + "type": "CreatePipeline", + "description": "", + "required": true + } + ], + "returns": { + "type": "*CreatePipelineResponse", + "description": "" + } }, - "oauth2_app_client_id": { - "name": "oauth2_app_client_id", - "type": "string", - "description": "", - "required": false, - "output_only": true + "Delete": { + "name": "Delete", + "description": "Deletes a pipeline. If the pipeline publishes to Unity Catalog, pipeline\ndeletion will cascade to all pipeline tables. Please reach out to\nDatabricks support for assistance to undo this action.", + "signature": "Delete(ctx context.Context, request DeletePipelineRequest) error", + "parameters": [ + { + "name": "request", + "type": "DeletePipelineRequest", + "description": "", + "required": true + } + ] }, - "oauth2_app_integration_id": { - "name": "oauth2_app_integration_id", - "type": "string", - "description": "", - "required": false, - "output_only": true + "Get": { + "name": "Get", + "description": "Get a pipeline.", + "signature": "Get(ctx context.Context, request GetPipelineRequest) (*GetPipelineResponse, error)", + "parameters": [ + { + "name": "request", + "type": "GetPipelineRequest", + "description": "", + "required": true + } + ], + "returns": { + "type": "*GetPipelineResponse", + "description": "" + } }, - "pending_deployment": { - "name": "pending_deployment", - "type": "any", - "description": "The pending deployment of the app. A deployment is considered pending when it is being prepared\nfor deployment to the app compute.", - "required": false, - "output_only": true + "GetPermissionLevels": { + "name": "GetPermissionLevels", + "description": "Gets the permission levels that a user can have on an object.", + "signature": "GetPermissionLevels(ctx context.Context, request GetPipelinePermissionLevelsRequest) (*GetPipelinePermissionLevelsResponse, error)", + "parameters": [ + { + "name": "request", + "type": "GetPipelinePermissionLevelsRequest", + "description": "", + "required": true + } + ], + "returns": { + "type": "*GetPipelinePermissionLevelsResponse", + "description": "" + } }, - "resources": { - "name": "resources", - "type": "any", - "description": "Resources for the app.", - "required": false + "GetPermissions": { + "name": "GetPermissions", + "description": "Gets the permissions of a pipeline. 
Pipelines can inherit permissions\nfrom their root object.", + "signature": "GetPermissions(ctx context.Context, request GetPipelinePermissionsRequest) (*PipelinePermissions, error)", + "parameters": [ + { + "name": "request", + "type": "GetPipelinePermissionsRequest", + "description": "", + "required": true + } + ], + "returns": { + "type": "*PipelinePermissions", + "description": "" + } }, - "service_principal_client_id": { - "name": "service_principal_client_id", - "type": "string", - "description": "", - "required": false, - "output_only": true + "GetUpdate": { + "name": "GetUpdate", + "description": "Gets an update from an active pipeline.", + "signature": "GetUpdate(ctx context.Context, request GetUpdateRequest) (*GetUpdateResponse, error)", + "parameters": [ + { + "name": "request", + "type": "GetUpdateRequest", + "description": "", + "required": true + } + ], + "returns": { + "type": "*GetUpdateResponse", + "description": "" + } }, - "service_principal_id": { - "name": "service_principal_id", - "type": "string", - "description": "", - "required": false, - "output_only": true + "ListPipelineEvents": { + "name": "ListPipelineEvents", + "description": "Retrieves events for a pipeline.", + "signature": "ListPipelineEvents(ctx context.Context, request ListPipelineEventsRequest) (*ListPipelineEventsResponse, error)", + "parameters": [ + { + "name": "request", + "type": "ListPipelineEventsRequest", + "description": "", + "required": true + } + ], + "returns": { + "type": "*ListPipelineEventsResponse", + "description": "" + } }, - "service_principal_name": { - "name": "service_principal_name", - "type": "string", - "description": "", - "required": false, - "output_only": true + "ListPipelines": { + "name": "ListPipelines", + "description": "Lists pipelines defined in the Spark Declarative Pipelines system.", + "signature": "ListPipelines(ctx context.Context, request ListPipelinesRequest) (*ListPipelinesResponse, error)", + "parameters": [ + { + "name": "request", + "type": "ListPipelinesRequest", + "description": "", + "required": true + } + ], + "returns": { + "type": "*ListPipelinesResponse", + "description": "" + } }, - "update_time": { - "name": "update_time", - "type": "string (timestamp)", - "description": "The update time of the app. Formatted timestamp in ISO 6801.", - "required": false, - "output_only": true + "ListUpdates": { + "name": "ListUpdates", + "description": "List updates for an active pipeline.", + "signature": "ListUpdates(ctx context.Context, request ListUpdatesRequest) (*ListUpdatesResponse, error)", + "parameters": [ + { + "name": "request", + "type": "ListUpdatesRequest", + "description": "", + "required": true + } + ], + "returns": { + "type": "*ListUpdatesResponse", + "description": "" + } }, - "updater": { - "name": "updater", - "type": "any", - "description": "The email of the user that last updated the app.", - "required": false, - "output_only": true + "SetPermissions": { + "name": "SetPermissions", + "description": "Sets permissions on an object, replacing existing permissions if they\nexist. Deletes all direct permissions if none are specified. 
Objects can\ninherit permissions from their root object.", + "signature": "SetPermissions(ctx context.Context, request PipelinePermissionsRequest) (*PipelinePermissions, error)", + "parameters": [ + { + "name": "request", + "type": "PipelinePermissionsRequest", + "description": "", + "required": true + } + ], + "returns": { + "type": "*PipelinePermissions", + "description": "" + } }, - "url": { - "name": "url", - "type": "any", - "description": "The URL of the app once it is deployed.", - "required": false, - "output_only": true + "StartUpdate": { + "name": "StartUpdate", + "description": "Starts a new update for the pipeline. If there is already an active\nupdate for the pipeline, the request will fail and the active update will\nremain running.", + "signature": "StartUpdate(ctx context.Context, request StartUpdate) (*StartUpdateResponse, error)", + "parameters": [ + { + "name": "request", + "type": "StartUpdate", + "description": "", + "required": true + } + ], + "returns": { + "type": "*StartUpdateResponse", + "description": "" + } }, - "usage_policy_id": { - "name": "usage_policy_id", - "type": "string", - "description": "", - "required": false + "Stop": { + "name": "Stop", + "description": "Stops the pipeline by canceling the active update. If there is no active\nupdate for the pipeline, this request is a no-op.", + "signature": "Stop(ctx context.Context, request StopRequest) error", + "parameters": [ + { + "name": "request", + "type": "StopRequest", + "description": "", + "required": true + } + ] }, - "user_api_scopes": { - "name": "user_api_scopes", - "type": "any", - "description": "", - "required": false + "Update": { + "name": "Update", + "description": "Updates a pipeline with the supplied configuration.", + "signature": "Update(ctx context.Context, request EditPipeline) error", + "parameters": [ + { + "name": "request", + "type": "EditPipeline", + "description": "", + "required": true + } + ] + }, + "UpdatePermissions": { + "name": "UpdatePermissions", + "description": "Updates the permissions on a pipeline. Pipelines can inherit permissions\nfrom their root object.", + "signature": "UpdatePermissions(ctx context.Context, request PipelinePermissionsRequest) (*PipelinePermissions, error)", + "parameters": [ + { + "name": "request", + "type": "PipelinePermissionsRequest", + "description": "", + "required": true + } + ], + "returns": { + "type": "*PipelinePermissions", + "description": "" + } } } }, - "bundle.Cluster": { - "name": "Cluster", - "package": "resources", - "description": "Contains a snapshot of the latest user specified settings that were used to create/edit the cluster.", - "fields": { - "apply_policy_default_values": { - "name": "apply_policy_default_values", - "type": "any", - "description": "When set to true, fixed and default values from the policy will be used for fields that are omitted. 
When set to false, only fixed values from the policy will be applied.", - "required": false + "serving": { + "name": "DataPlane", + "description": "DataPlaneService is an interface for services that access DataPlane.", + "package": "github.com/databricks/databricks-sdk-go/service/serving", + "methods": { + "GetDataPlaneDetails": { + "name": "GetDataPlaneDetails", + "description": "", + "signature": "GetDataPlaneDetails(method string, params []string, refresh func(*DataPlaneInfo) (*goauth.Token, error), infoGetter func() (*DataPlaneInfo, error)) (string, *goauth.Token, error)", + "parameters": [ + { + "name": "method", + "type": "string", + "description": "", + "required": true + }, + { + "name": "params", + "type": "[]string", + "description": "", + "required": true + }, + { + "name": "refresh", + "type": "func(*DataPlaneInfo) (*goauth.Token, error)", + "description": "", + "required": true + }, + { + "name": "infoGetter", + "type": "func() (*DataPlaneInfo, error)", + "description": "", + "required": true + } + ], + "returns": { + "type": "string", + "description": "" + } + } + } + }, + "sql": { + "name": "AlertsLegacy", + "description": "The alerts API can be used to perform CRUD operations on alerts. An alert is\na Databricks SQL object that periodically runs a query, evaluates a condition\nof its result, and notifies one or more users and/or notification\ndestinations if the condition was met. Alerts can be scheduled using the\n`sql_task` type of the Jobs API, e.g. :method:jobs/create.\n\n**Warning**: This API is deprecated. Please see the latest version of the\nDatabricks SQL API. [Learn more]", + "package": "github.com/databricks/databricks-sdk-go/service/sql", + "methods": { + "Create": { + "name": "Create", + "description": "Creates an alert. An alert is a Databricks SQL object that periodically\nruns a query, evaluates a condition of its result, and notifies users or\nnotification destinations if the condition was met.\n\n**Warning**: This API is deprecated. Please use :method:alerts/create\ninstead. [Learn more]\n\n[Learn more]: https://docs.databricks.com/en/sql/dbsql-api-latest.html", + "signature": "Create(ctx context.Context, request CreateAlert) (*LegacyAlert, error)", + "parameters": [ + { + "name": "request", + "type": "CreateAlert", + "description": "", + "required": true + } + ], + "returns": { + "type": "*LegacyAlert", + "description": "" + } }, - "autoscale": { - "name": "autoscale", - "type": "any", - "description": "Parameters needed in order to automatically scale clusters up and down based on load.\nNote: autoscaling works best with DB runtime versions 3.0 or later.", - "required": false + "Delete": { + "name": "Delete", + "description": "Deletes an alert. Deleted alerts are no longer accessible and cannot be\nrestored. **Note**: Unlike queries and dashboards, alerts cannot be moved\nto the trash.\n\n**Warning**: This API is deprecated. Please use :method:alerts/delete\ninstead. [Learn more]\n\n[Learn more]: https://docs.databricks.com/en/sql/dbsql-api-latest.html", + "signature": "Delete(ctx context.Context, request DeleteAlertsLegacyRequest) error", + "parameters": [ + { + "name": "request", + "type": "DeleteAlertsLegacyRequest", + "description": "", + "required": true + } + ] }, - "autotermination_minutes": { - "name": "autotermination_minutes", - "type": "int", - "description": "Automatically terminates the cluster after it is inactive for this time in minutes. If not set,\nthis cluster will not be automatically terminated. 
If specified, the threshold must be between\n10 and 10000 minutes.\nUsers can also set this value to 0 to explicitly disable automatic termination.", - "required": false + "Get": { + "name": "Get", + "description": "Gets an alert.\n\n**Warning**: This API is deprecated. Please use :method:alerts/get\ninstead. [Learn more]\n\n[Learn more]: https://docs.databricks.com/en/sql/dbsql-api-latest.html", + "signature": "Get(ctx context.Context, request GetAlertsLegacyRequest) (*LegacyAlert, error)", + "parameters": [ + { + "name": "request", + "type": "GetAlertsLegacyRequest", + "description": "", + "required": true + } + ], + "returns": { + "type": "*LegacyAlert", + "description": "" + } }, - "aws_attributes": { - "name": "aws_attributes", - "type": "any", - "description": "Attributes related to clusters running on Amazon Web Services.\nIf not specified at cluster creation, a set of default values will be used.", - "required": false + "List": { + "name": "List", + "description": "Gets a list of alerts.\n\n**Warning**: This API is deprecated. Please use :method:alerts/list\ninstead. [Learn more]\n\n[Learn more]: https://docs.databricks.com/en/sql/dbsql-api-latest.html", + "signature": "List(ctx context.Context) ([]LegacyAlert, error)", + "parameters": [], + "returns": { + "type": "[]LegacyAlert", + "description": "" + } }, - "azure_attributes": { - "name": "azure_attributes", - "type": "any", - "description": "Attributes related to clusters running on Microsoft Azure.\nIf not specified at cluster creation, a set of default values will be used.", + "Update": { + "name": "Update", + "description": "Updates an alert.\n\n**Warning**: This API is deprecated. Please use :method:alerts/update\ninstead. [Learn more]\n\n[Learn more]: https://docs.databricks.com/en/sql/dbsql-api-latest.html", + "signature": "Update(ctx context.Context, request EditAlert) error", + "parameters": [ + { + "name": "request", + "type": "EditAlert", + "description": "", + "required": true + } + ] + } + } + }, + "workspace": { + "name": "GitCredentials", + "description": "Registers personal access token for Databricks to do operations on behalf of\nthe user.\n\nSee [more info].", + "package": "github.com/databricks/databricks-sdk-go/service/workspace", + "methods": { + "Create": { + "name": "Create", + "description": "Creates a Git credential entry for the user. Only one Git credential per\nuser is supported, so any attempts to create credentials if an entry\nalready exists will fail. 
Use the PATCH endpoint to update existing\ncredentials, or the DELETE endpoint to delete existing credentials.", + "signature": "Create(ctx context.Context, request CreateCredentialsRequest) (*CreateCredentialsResponse, error)", + "parameters": [ + { + "name": "request", + "type": "CreateCredentialsRequest", + "description": "", + "required": true + } + ], + "returns": { + "type": "*CreateCredentialsResponse", + "description": "" + } + }, + "Delete": { + "name": "Delete", + "description": "Deletes the specified Git credential.", + "signature": "Delete(ctx context.Context, request DeleteCredentialsRequest) error", + "parameters": [ + { + "name": "request", + "type": "DeleteCredentialsRequest", + "description": "", + "required": true + } + ] + }, + "Get": { + "name": "Get", + "description": "Gets the Git credential with the specified credential ID.", + "signature": "Get(ctx context.Context, request GetCredentialsRequest) (*GetCredentialsResponse, error)", + "parameters": [ + { + "name": "request", + "type": "GetCredentialsRequest", + "description": "", + "required": true + } + ], + "returns": { + "type": "*GetCredentialsResponse", + "description": "" + } + }, + "List": { + "name": "List", + "description": "Lists the calling user's Git credentials. One credential per user is\nsupported.", + "signature": "List(ctx context.Context) (*ListCredentialsResponse, error)", + "parameters": [], + "returns": { + "type": "*ListCredentialsResponse", + "description": "" + } + }, + "Update": { + "name": "Update", + "description": "Updates the specified Git credential.", + "signature": "Update(ctx context.Context, request UpdateCredentialsRequest) error", + "parameters": [ + { + "name": "request", + "type": "UpdateCredentialsRequest", + "description": "", + "required": true + } + ] + } + } + } + }, + "types": { + "apps.App": { + "name": "App", + "package": "apps", + "description": "", + "fields": { + "active_deployment": { + "name": "active_deployment", + "type": "*AppDeployment", + "description": "The active deployment of the app. A deployment is considered active when\nit has been deployed to the app compute.", "required": false }, - "cluster_log_conf": { - "name": "cluster_log_conf", - "type": "any", - "description": "The configuration for delivering spark logs to a long-term storage destination.\nThree kinds of destinations (DBFS, S3 and Unity Catalog volumes) are supported. Only one destination can be specified\nfor one cluster. If the conf is given, the logs will be delivered to the destination every\n`5 mins`. The destination of driver logs is `$destination/$clusterId/driver`, while\nthe destination of executor logs is `$destination/$clusterId/executor`.", + "app_status": { + "name": "app_status", + "type": "*ApplicationStatus", + "description": "", "required": false }, - "cluster_name": { - "name": "cluster_name", + "budget_policy_id": { + "name": "budget_policy_id", "type": "string", - "description": "Cluster name requested by the user. This doesn't have to be unique.\nIf not specified at creation, the cluster name will be an empty string.\nFor job clusters, the cluster name is automatically set based on the job and job run IDs.", + "description": "", "required": false }, - "custom_tags": { - "name": "custom_tags", - "type": "map[string]string", - "description": "Additional tags for cluster resources. Databricks will tag all cluster resources (e.g., AWS\ninstances and EBS volumes) with these tags in addition to `default_tags`. 
Notes:\n\n- Currently, Databricks allows at most 45 custom tags\n\n- Clusters can only reuse cloud resources if the resources' tags are a subset of the cluster tags", + "compute_size": { + "name": "compute_size", + "type": "ComputeSize", + "description": "", "required": false }, - "data_security_mode": { - "name": "data_security_mode", - "type": "any", - "description": "Data security mode decides what data governance model to use when accessing data\nfrom a cluster.\n\nThe following modes can only be used when `kind = CLASSIC_PREVIEW`.\n* `DATA_SECURITY_MODE_AUTO`: Databricks will choose the most appropriate access mode depending on your compute configuration.\n* `DATA_SECURITY_MODE_STANDARD`: Alias for `USER_ISOLATION`.\n* `DATA_SECURITY_MODE_DEDICATED`: Alias for `SINGLE_USER`.\n\nThe following modes can be used regardless of `kind`.\n* `NONE`: No security isolation for multiple users sharing the cluster. Data governance features are not available in this mode.\n* `SINGLE_USER`: A secure cluster that can only be exclusively used by a single user specified in `single_user_name`. Most programming languages, cluster features and data governance features are available in this mode.\n* `USER_ISOLATION`: A secure cluster that can be shared by multiple users. Cluster users are fully isolated so that they cannot see each other's data and credentials. Most data governance features are supported in this mode. But programming languages and cluster features might be limited.\n\nThe following modes are deprecated starting with Databricks Runtime 15.0 and\nwill be removed for future Databricks Runtime versions:\n\n* `LEGACY_TABLE_ACL`: This mode is for users migrating from legacy Table ACL clusters.\n* `LEGACY_PASSTHROUGH`: This mode is for users migrating from legacy Passthrough on high concurrency clusters.\n* `LEGACY_SINGLE_USER`: This mode is for users migrating from legacy Passthrough on standard clusters.\n* `LEGACY_SINGLE_USER_STANDARD`: This mode provides a way that doesn’t have UC nor passthrough enabled.", + "compute_status": { + "name": "compute_status", + "type": "*ComputeStatus", + "description": "", "required": false }, - "docker_image": { - "name": "docker_image", - "type": "any", - "description": "Custom docker image BYOC", + "create_time": { + "name": "create_time", + "type": "string", + "description": "The creation time of the app. Formatted timestamp in ISO 6801.", "required": false }, - "driver_instance_pool_id": { - "name": "driver_instance_pool_id", + "creator": { + "name": "creator", "type": "string", - "description": "The optional ID of the instance pool for the driver of the cluster belongs.\nThe pool cluster uses the instance pool with id (instance_pool_id) if the driver pool is not\nassigned.", + "description": "The email of the user that created the app.", "required": false }, - "driver_node_type_id": { - "name": "driver_node_type_id", + "default_source_code_path": { + "name": "default_source_code_path", "type": "string", - "description": "The node type of the Spark driver.\nNote that this field is optional; if unset, the driver node type will be set as the same value\nas `node_type_id` defined above.\n\nThis field, along with node_type_id, should not be set if virtual_cluster_size is set.\nIf both driver_node_type_id, node_type_id, and virtual_cluster_size are specified, driver_node_type_id and node_type_id take precedence.", + "description": "The default workspace file system path of the source code from which app\ndeployment are created. 
This field tracks the workspace source code path\nof the last active deployment.", "required": false }, - "enable_elastic_disk": { - "name": "enable_elastic_disk", - "type": "bool", - "description": "Autoscaling Local Storage: when enabled, this cluster will dynamically acquire additional disk\nspace when its Spark workers are running low on disk space.", + "description": { + "name": "description", + "type": "string", + "description": "The description of the app.", "required": false }, - "enable_local_disk_encryption": { - "name": "enable_local_disk_encryption", - "type": "bool", - "description": "Whether to enable LUKS on cluster VMs' local disks", + "effective_budget_policy_id": { + "name": "effective_budget_policy_id", + "type": "string", + "description": "", "required": false }, - "gcp_attributes": { - "name": "gcp_attributes", - "type": "any", - "description": "Attributes related to clusters running on Google Cloud Platform.\nIf not specified at cluster creation, a set of default values will be used.", + "effective_usage_policy_id": { + "name": "effective_usage_policy_id", + "type": "string", + "description": "", "required": false }, - "init_scripts": { - "name": "init_scripts", - "type": "any", - "description": "The configuration for storing init scripts. Any number of destinations can be specified.\nThe scripts are executed sequentially in the order provided.\nIf `cluster_log_conf` is specified, init script logs are sent to `\u003cdestination\u003e/\u003ccluster-ID\u003e/init_scripts`.", + "effective_user_api_scopes": { + "name": "effective_user_api_scopes", + "type": "[]string", + "description": "The effective api scopes granted to the user access token.", "required": false }, - "instance_pool_id": { - "name": "instance_pool_id", - "type": "string", - "description": "The optional ID of the instance pool to which the cluster belongs.", + "git_repository": { + "name": "git_repository", + "type": "*GitRepository", + "description": "Git repository configuration for app deployments. When specified,\ndeployments can reference code from this repository by providing only the\ngit reference (branch, tag, or commit).", "required": false }, - "is_single_node": { - "name": "is_single_node", - "type": "bool", - "description": "This field can only be used when `kind = CLASSIC_PREVIEW`.\n\nWhen set to true, Databricks will automatically set single node related `custom_tags`, `spark_conf`, and `num_workers`", + "id": { + "name": "id", + "type": "string", + "description": "The unique identifier of the app.", "required": false }, - "kind": { - "name": "kind", - "type": "any", - "description": "The kind of compute described by this compute specification.\n\nDepending on `kind`, different validations and default values will be applied.\n\nClusters with `kind = CLASSIC_PREVIEW` support the following fields, whereas clusters with no specified `kind` do not.\n* [is_single_node](/api/workspace/clusters/create#is_single_node)\n* [use_ml_runtime](/api/workspace/clusters/create#use_ml_runtime)\n* [data_security_mode](/api/workspace/clusters/create#data_security_mode) set to `DATA_SECURITY_MODE_AUTO`, `DATA_SECURITY_MODE_DEDICATED`, or `DATA_SECURITY_MODE_STANDARD`\n\nBy using the [simple form](https://docs.databricks.com/compute/simple-form.html), your clusters are automatically using `kind = CLASSIC_PREVIEW`.", + "name": { + "name": "name", + "type": "string", + "description": "The name of the app. The name must contain only lowercase alphanumeric\ncharacters and hyphens. 
It must be unique within the workspace.", "required": false }, - "node_type_id": { - "name": "node_type_id", + "oauth2_app_client_id": { + "name": "oauth2_app_client_id", "type": "string", - "description": "This field encodes, through a single value, the resources available to each of\nthe Spark nodes in this cluster. For example, the Spark nodes can be provisioned\nand optimized for memory or compute intensive workloads. A list of available node\ntypes can be retrieved by using the :method:clusters/listNodeTypes API call.", + "description": "", "required": false }, - "num_workers": { - "name": "num_workers", - "type": "any", - "description": "Number of worker nodes that this cluster should have. A cluster has one Spark Driver\nand `num_workers` Executors for a total of `num_workers` + 1 Spark nodes.\n\nNote: When reading the properties of a cluster, this field reflects the desired number\nof workers rather than the actual current number of workers. For instance, if a cluster\nis resized from 5 to 10 workers, this field will immediately be updated to reflect\nthe target size of 10 workers, whereas the workers listed in `spark_info` will gradually\nincrease from 5 to 10 as the new nodes are provisioned.", + "oauth2_app_integration_id": { + "name": "oauth2_app_integration_id", + "type": "string", + "description": "", "required": false }, - "policy_id": { - "name": "policy_id", - "type": "string", - "description": "The ID of the cluster policy used to create the cluster if applicable.", + "pending_deployment": { + "name": "pending_deployment", + "type": "*AppDeployment", + "description": "The pending deployment of the app. A deployment is considered pending\nwhen it is being prepared for deployment to the app compute.", "required": false }, - "remote_disk_throughput": { - "name": "remote_disk_throughput", - "type": "any", - "description": "If set, what the configurable throughput (in Mb/s) for the remote disk is. 
Currently only supported for GCP HYPERDISK_BALANCED disks.", + "resources": { + "name": "resources", + "type": "[]AppResource", + "description": "Resources for the app.", "required": false }, - "runtime_engine": { - "name": "runtime_engine", - "type": "any", - "description": "Determines the cluster's runtime engine, either standard or Photon.\n\nThis field is not compatible with legacy `spark_version` values that contain `-photon-`.\nRemove `-photon-` from the `spark_version` and set `runtime_engine` to `PHOTON`.\n\nIf left unspecified, the runtime engine defaults to standard unless the spark_version\ncontains -photon-, in which case Photon will be used.", + "service_principal_client_id": { + "name": "service_principal_client_id", + "type": "string", + "description": "", "required": false }, - "single_user_name": { - "name": "single_user_name", + "service_principal_id": { + "name": "service_principal_id", + "type": "int64", + "description": "", + "required": false + }, + "service_principal_name": { + "name": "service_principal_name", "type": "string", - "description": "Single user name if data_security_mode is `SINGLE_USER`", + "description": "", "required": false }, - "spark_conf": { - "name": "spark_conf", - "type": "any", - "description": "An object containing a set of optional, user-specified Spark configuration key-value pairs.\nUsers can also pass in a string of extra JVM options to the driver and the executors via\n`spark.driver.extraJavaOptions` and `spark.executor.extraJavaOptions` respectively.", + "update_time": { + "name": "update_time", + "type": "string", + "description": "The update time of the app. Formatted timestamp in ISO 6801.", "required": false }, - "spark_env_vars": { - "name": "spark_env_vars", - "type": "any", - "description": "An object containing a set of optional, user-specified environment variable key-value pairs.\nPlease note that key-value pair of the form (X,Y) will be exported as is (i.e.,\n`export X='Y'`) while launching the driver and workers.\n\nIn order to specify an additional set of `SPARK_DAEMON_JAVA_OPTS`, we recommend appending\nthem to `$SPARK_DAEMON_JAVA_OPTS` as shown in the example below. This ensures that all\ndefault databricks managed environmental variables are included as well.\n\nExample Spark environment variables:\n`{\"SPARK_WORKER_MEMORY\": \"28000m\", \"SPARK_LOCAL_DIRS\": \"/local_disk0\"}` or\n`{\"SPARK_DAEMON_JAVA_OPTS\": \"$SPARK_DAEMON_JAVA_OPTS -Dspark.shuffle.service.enabled=true\"}`", + "updater": { + "name": "updater", + "type": "string", + "description": "The email of the user that last updated the app.", "required": false }, - "spark_version": { - "name": "spark_version", - "type": "any", - "description": "The Spark version of the cluster, e.g. `3.3.x-scala2.11`.\nA list of available Spark versions can be retrieved by using\nthe :method:clusters/sparkVersions API call.", + "url": { + "name": "url", + "type": "string", + "description": "The URL of the app once it is deployed.", "required": false }, - "ssh_public_keys": { - "name": "ssh_public_keys", - "type": "any", - "description": "SSH public key contents that will be added to each Spark node in this cluster. 
The\ncorresponding private keys can be used to login with the user name `ubuntu` on port `2200`.\nUp to 10 keys can be specified.", + "usage_policy_id": { + "name": "usage_policy_id", + "type": "string", + "description": "", "required": false }, - "total_initial_remote_disk_size": { - "name": "total_initial_remote_disk_size", - "type": "int", - "description": "If set, what the total initial volume size (in GB) of the remote disks should be. Currently only supported for GCP HYPERDISK_BALANCED disks.", + "user_api_scopes": { + "name": "user_api_scopes", + "type": "[]string", + "description": "", + "required": false + } + } + }, + "apps.AppAccessControlRequest": { + "name": "AppAccessControlRequest", + "package": "apps", + "description": "", + "fields": { + "group_name": { + "name": "group_name", + "type": "string", + "description": "name of the group", "required": false }, - "use_ml_runtime": { - "name": "use_ml_runtime", - "type": "any", - "description": "This field can only be used when `kind = CLASSIC_PREVIEW`.\n\n`effective_spark_version` is determined by `spark_version` (DBR release), this field `use_ml_runtime`, and whether `node_type_id` is gpu node or not.", + "permission_level": { + "name": "permission_level", + "type": "AppPermissionLevel", + "description": "", "required": false }, - "workload_type": { - "name": "workload_type", - "type": "any", - "description": "Cluster Attributes showing for clusters workload types.", + "service_principal_name": { + "name": "service_principal_name", + "type": "string", + "description": "application ID of a service principal", + "required": false + }, + "user_name": { + "name": "user_name", + "type": "string", + "description": "name of the user", "required": false } } }, - "bundle.DatabaseCatalog": { - "name": "DatabaseCatalog", - "package": "resources", - "description": "database catalog configuration.", + "apps.AppAccessControlResponse": { + "name": "AppAccessControlResponse", + "package": "apps", + "description": "", "fields": { - "create_database_if_not_exists": { - "name": "create_database_if_not_exists", - "type": "any", - "description": "", + "all_permissions": { + "name": "all_permissions", + "type": "[]AppPermission", + "description": "All permissions.", "required": false }, - "database_instance_name": { - "name": "database_instance_name", + "display_name": { + "name": "display_name", "type": "string", - "description": "The name of the DatabaseInstance housing the database.", + "description": "Display name of the user or service principal.", "required": false }, - "database_name": { - "name": "database_name", + "group_name": { + "name": "group_name", "type": "string", - "description": "The name of the database (in a instance) associated with the catalog.", + "description": "name of the group", "required": false }, - "name": { - "name": "name", - "type": "any", - "description": "The name of the catalog in UC.", + "service_principal_name": { + "name": "service_principal_name", + "type": "string", + "description": "Name of the service principal.", "required": false }, - "uid": { - "name": "uid", - "type": "any", - "description": "", - "required": false, - "output_only": true + "user_name": { + "name": "user_name", + "type": "string", + "description": "name of the user", + "required": false } } }, - "bundle.DatabaseInstance": { - "name": "DatabaseInstance", - "package": "resources", - "description": "A DatabaseInstance represents a logical Postgres instance, comprised of both compute and storage.", + "apps.AppDeployment": { + "name": 
"AppDeployment", + "package": "apps", + "description": "", "fields": { - "capacity": { - "name": "capacity", - "type": "any", - "description": "The sku of the instance. Valid values are \"CU_1\", \"CU_2\", \"CU_4\", \"CU_8\".", + "command": { + "name": "command", + "type": "[]string", + "description": "The command with which to run the app. This will override the command\nspecified in the app.yaml file.", "required": false }, - "child_instance_refs": { - "name": "child_instance_refs", - "type": "any", - "description": "The refs of the child instances. This is only available if the instance is\nparent instance.", - "required": false, - "output_only": true - }, - "creation_time": { - "name": "creation_time", - "type": "string (timestamp)", - "description": "The timestamp when the instance was created.", - "required": false, - "output_only": true + "create_time": { + "name": "create_time", + "type": "string", + "description": "The creation time of the deployment. Formatted timestamp in ISO 6801.", + "required": false }, "creator": { "name": "creator", - "type": "any", - "description": "The email of the creator of the instance.", - "required": false, - "output_only": true - }, - "custom_tags": { - "name": "custom_tags", - "type": "map[string]string", - "description": "Custom tags associated with the instance. This field is only included on create and update responses.", + "type": "string", + "description": "The email of the user creates the deployment.", "required": false }, - "effective_capacity": { - "name": "effective_capacity", - "type": "any", - "description": "Deprecated. The sku of the instance; this field will always match the value of capacity.", - "required": false, - "output_only": true, - "deprecated": true - }, - "effective_custom_tags": { - "name": "effective_custom_tags", - "type": "map[string]string", - "description": "The recorded custom tags associated with the instance.", - "required": false, - "output_only": true - }, - "effective_enable_pg_native_login": { - "name": "effective_enable_pg_native_login", - "type": "any", - "description": "Whether the instance has PG native password login enabled.", - "required": false, - "output_only": true - }, - "effective_enable_readable_secondaries": { - "name": "effective_enable_readable_secondaries", - "type": "any", - "description": "Whether secondaries serving read-only traffic are enabled. Defaults to false.", - "required": false, - "output_only": true - }, - "effective_node_count": { - "name": "effective_node_count", - "type": "int", - "description": "The number of nodes in the instance, composed of 1 primary and 0 or more secondaries. Defaults to\n1 primary and 0 secondaries.", - "required": false, - "output_only": true - }, - "effective_retention_window_in_days": { - "name": "effective_retention_window_in_days", - "type": "any", - "description": "The retention window for the instance. 
This is the time window in days\nfor which the historical data is retained.", - "required": false, - "output_only": true - }, - "effective_stopped": { - "name": "effective_stopped", - "type": "any", - "description": "Whether the instance is stopped.", - "required": false, - "output_only": true + "deployment_artifacts": { + "name": "deployment_artifacts", + "type": "*AppDeploymentArtifacts", + "description": "The deployment artifacts for an app.", + "required": false }, - "effective_usage_policy_id": { - "name": "effective_usage_policy_id", + "deployment_id": { + "name": "deployment_id", "type": "string", - "description": "The policy that is applied to the instance.", - "required": false, - "output_only": true - }, - "enable_pg_native_login": { - "name": "enable_pg_native_login", - "type": "bool", - "description": "Whether to enable PG native password login on the instance. Defaults to false.", + "description": "The unique id of the deployment.", "required": false }, - "enable_readable_secondaries": { - "name": "enable_readable_secondaries", - "type": "bool", - "description": "Whether to enable secondaries to serve read-only traffic. Defaults to false.", + "env_vars": { + "name": "env_vars", + "type": "[]EnvVar", + "description": "The environment variables to set in the app runtime environment. This\nwill override the environment variables specified in the app.yaml file.", "required": false }, - "name": { - "name": "name", - "type": "any", - "description": "The name of the instance. This is the unique identifier for the instance.", + "git_source": { + "name": "git_source", + "type": "*GitSource", + "description": "Git repository to use as the source for the app deployment.", "required": false }, - "node_count": { - "name": "node_count", - "type": "int", - "description": "The number of nodes in the instance, composed of 1 primary and 0 or more secondaries. Defaults to\n1 primary and 0 secondaries. This field is input only, see effective_node_count for the output.", + "mode": { + "name": "mode", + "type": "AppDeploymentMode", + "description": "The mode of which the deployment will manage the source code.", "required": false }, - "parent_instance_ref": { - "name": "parent_instance_ref", - "type": "any", - "description": "The ref of the parent instance. This is only available if the instance is\nchild instance.\nInput: For specifying the parent instance to create a child instance. Optional.\nOutput: Only populated if provided as input to create a child instance.", + "source_code_path": { + "name": "source_code_path", + "type": "string", + "description": "The workspace file system path of the source code used to create the app\ndeployment. This is different from\n`deployment_artifacts.source_code_path`, which is the path used by the\ndeployed app. The former refers to the original source code location of\nthe app in the workspace during deployment creation, whereas the latter\nprovides a system generated stable snapshotted source code path used by\nthe deployment.", "required": false }, - "pg_version": { - "name": "pg_version", - "type": "any", - "description": "The version of Postgres running on the instance.", - "required": false, - "output_only": true - }, - "read_only_dns": { - "name": "read_only_dns", - "type": "any", - "description": "The DNS endpoint to connect to the instance for read only access. 
This is only available if\nenable_readable_secondaries is true.", - "required": false, - "output_only": true - }, - "read_write_dns": { - "name": "read_write_dns", - "type": "any", - "description": "The DNS endpoint to connect to the instance for read+write access.", - "required": false, - "output_only": true + "status": { + "name": "status", + "type": "*AppDeploymentStatus", + "description": "Status and status message of the deployment", + "required": false }, - "retention_window_in_days": { - "name": "retention_window_in_days", - "type": "any", - "description": "The retention window for the instance. This is the time window in days\nfor which the historical data is retained. The default value is 7 days.\nValid values are 2 to 35 days.", - "required": false - }, - "state": { - "name": "state", - "type": "any", - "description": "The current state of the instance.", - "required": false, - "output_only": true - }, - "stopped": { - "name": "stopped", - "type": "any", - "description": "Whether to stop the instance. An input only param, see effective_stopped for the output.", - "required": false - }, - "uid": { - "name": "uid", - "type": "any", - "description": "An immutable UUID identifier for the instance.", - "required": false, - "output_only": true - }, - "usage_policy_id": { - "name": "usage_policy_id", + "update_time": { + "name": "update_time", "type": "string", - "description": "The desired usage policy to associate with the instance.", + "description": "The update time of the deployment. Formatted timestamp in ISO 6801.", "required": false } } }, - "bundle.Job": { - "name": "Job", - "package": "resources", - "description": "job configuration.", + "apps.AppDeploymentArtifacts": { + "name": "AppDeploymentArtifacts", + "package": "apps", + "description": "", "fields": { - "budget_policy_id": { - "name": "budget_policy_id", + "source_code_path": { + "name": "source_code_path", "type": "string", - "description": "The id of the user specified budget policy to use for this job.\nIf not specified, a default budget policy may be applied when creating or modifying the job.\nSee `effective_budget_policy_id` for the budget policy used by this workload.", + "description": "The snapshotted workspace file system path of the source code loaded by\nthe deployed app.", "required": false - }, - "continuous": { - "name": "continuous", - "type": "any", - "description": "An optional continuous property for this job. The continuous property will ensure that there is always one run executing. Only one of `schedule` and `continuous` can be used.", + } + } + }, + "apps.AppDeploymentStatus": { + "name": "AppDeploymentStatus", + "package": "apps", + "description": "", + "fields": { + "message": { + "name": "message", + "type": "string", + "description": "Message corresponding with the deployment state.", "required": false }, - "deployment": { - "name": "deployment", - "type": "any", - "description": "Deployment information for jobs managed by external sources.", + "state": { + "name": "state", + "type": "AppDeploymentState", + "description": "State of the deployment.", "required": false - }, + } + } + }, + "apps.AppManifest": { + "name": "AppManifest", + "package": "apps", + "description": "App manifest definition", + "fields": { "description": { "name": "description", "type": "string", - "description": "An optional description for the job. 
The maximum length is 27700 characters in UTF-8 encoding.", + "description": "Description of the app defined by manifest author / publisher", "required": false }, - "edit_mode": { - "name": "edit_mode", - "type": "any", - "description": "Edit mode of the job.\n\n* `UI_LOCKED`: The job is in a locked UI state and cannot be modified.\n* `EDITABLE`: The job is in an editable state and can be modified.", + "name": { + "name": "name", + "type": "string", + "description": "Name of the app defined by manifest author / publisher", "required": false }, - "email_notifications": { - "name": "email_notifications", - "type": "any", - "description": "An optional set of email addresses that is notified when runs of this job begin or complete as well as when this job is deleted.", + "resource_specs": { + "name": "resource_specs", + "type": "[]AppManifestAppResourceSpec", + "description": "", "required": false }, - "environments": { - "name": "environments", - "type": "any", - "description": "A list of task execution environment specifications that can be referenced by serverless tasks of this job.\nFor serverless notebook tasks, if the environment_key is not specified, the notebook environment will be used if present. If a jobs environment is specified, it will override the notebook environment.\nFor other serverless tasks, the task environment is required to be specified using environment_key in the task settings.", + "version": { + "name": "version", + "type": "int", + "description": "The manifest schema version, for now only 1 is allowed", "required": false - }, - "format": { - "name": "format", - "type": "any", - "description": "Used to tell what is the format of the job. This field is ignored in Create/Update/Reset calls. When using the Jobs API 2.1 this value is always set to `\"MULTI_TASK\"`.", - "required": false, - "deprecated": true - }, - "git_source": { - "name": "git_source", - "type": "any", - "description": "An optional specification for a remote Git repository containing the source code used by tasks. Version-controlled source code is supported by notebook, dbt, Python script, and SQL File tasks.\n\nIf `git_source` is set, these tasks retrieve the file from the remote repository by default. However, this behavior can be overridden by setting `source` to `WORKSPACE` on the task.\n\nNote: dbt and SQL File tasks support only version-controlled sources. If dbt or SQL File tasks are used, `git_source` must be defined on the job.", + } + } + }, + "apps.AppManifestAppResourceExperimentSpec": { + "name": "AppManifestAppResourceExperimentSpec", + "package": "apps", + "description": "", + "fields": { + "permission": { + "name": "permission", + "type": "AppManifestAppResourceExperimentSpecExperimentPermission", + "description": "", "required": false - }, - "health": { - "name": "health", - "type": "any", - "description": "An optional set of health rules that can be defined for this job.", + } + } + }, + "apps.AppManifestAppResourceJobSpec": { + "name": "AppManifestAppResourceJobSpec", + "package": "apps", + "description": "", + "fields": { + "permission": { + "name": "permission", + "type": "AppManifestAppResourceJobSpecJobPermission", + "description": "Permissions to grant on the Job. 
Supported permissions are: \"CAN_MANAGE\",\n\"IS_OWNER\", \"CAN_MANAGE_RUN\", \"CAN_VIEW\".", + "required": false + } + } + }, + "apps.AppManifestAppResourceSecretSpec": { + "name": "AppManifestAppResourceSecretSpec", + "package": "apps", + "description": "", + "fields": { + "permission": { + "name": "permission", + "type": "AppManifestAppResourceSecretSpecSecretPermission", + "description": "Permission to grant on the secret scope. For secrets, only one permission\nis allowed. Permission must be one of: \"READ\", \"WRITE\", \"MANAGE\".", + "required": false + } + } + }, + "apps.AppManifestAppResourceServingEndpointSpec": { + "name": "AppManifestAppResourceServingEndpointSpec", + "package": "apps", + "description": "", + "fields": { + "permission": { + "name": "permission", + "type": "AppManifestAppResourceServingEndpointSpecServingEndpointPermission", + "description": "Permission to grant on the serving endpoint. Supported permissions are:\n\"CAN_MANAGE\", \"CAN_QUERY\", \"CAN_VIEW\".", + "required": false + } + } + }, + "apps.AppManifestAppResourceSpec": { + "name": "AppManifestAppResourceSpec", + "package": "apps", + "description": "AppResource related fields are copied from app.proto but excludes resource\nidentifiers (e.g. name, id, key, scope, etc.)", + "fields": { + "description": { + "name": "description", + "type": "string", + "description": "Description of the App Resource.", "required": false }, - "job_clusters": { - "name": "job_clusters", - "type": "any", - "description": "A list of job cluster specifications that can be shared and reused by tasks of this job. Libraries cannot be declared in a shared job cluster. You must declare dependent libraries in task settings.", + "experiment_spec": { + "name": "experiment_spec", + "type": "*AppManifestAppResourceExperimentSpec", + "description": "", "required": false }, - "max_concurrent_runs": { - "name": "max_concurrent_runs", - "type": "any", - "description": "An optional maximum allowed number of concurrent runs of the job.\nSet this value if you want to be able to execute multiple runs of the same job concurrently.\nThis is useful for example if you trigger your job on a frequent schedule and want to allow consecutive runs to overlap with each other, or if you want to trigger multiple runs which differ by their input parameters.\nThis setting affects only new runs. For example, suppose the job’s concurrency is 4 and there are 4 concurrent active runs. Then setting the concurrency to 3 won’t kill any of the active runs.\nHowever, from then on, new runs are skipped unless there are fewer than 3 active runs.\nThis value cannot exceed 1000. Setting this value to `0` causes all new runs to be skipped.", + "job_spec": { + "name": "job_spec", + "type": "*AppManifestAppResourceJobSpec", + "description": "", "required": false }, "name": { "name": "name", - "type": "any", - "description": "An optional name for the job. 
The maximum length is 4096 bytes in UTF-8 encoding.", + "type": "string", + "description": "Name of the App Resource.", "required": false }, - "notification_settings": { - "name": "notification_settings", - "type": "any", - "description": "Optional notification settings that are used when sending notifications to each of the `email_notifications` and `webhook_notifications` for this job.", + "secret_spec": { + "name": "secret_spec", + "type": "*AppManifestAppResourceSecretSpec", + "description": "", "required": false }, - "parameters": { - "name": "parameters", - "type": "any", - "description": "Job-level parameter definitions", + "serving_endpoint_spec": { + "name": "serving_endpoint_spec", + "type": "*AppManifestAppResourceServingEndpointSpec", + "description": "", "required": false }, - "performance_target": { - "name": "performance_target", - "type": "any", - "description": "The performance mode on a serverless job. This field determines the level of compute performance or cost-efficiency for the run.\nThe performance target does not apply to tasks that run on Serverless GPU compute.\n\n* `STANDARD`: Enables cost-efficient execution of serverless workloads.\n* `PERFORMANCE_OPTIMIZED`: Prioritizes fast startup and execution times through rapid scaling and optimized cluster performance.", + "sql_warehouse_spec": { + "name": "sql_warehouse_spec", + "type": "*AppManifestAppResourceSqlWarehouseSpec", + "description": "", "required": false }, - "queue": { - "name": "queue", - "type": "any", - "description": "The queue settings of the job.", + "uc_securable_spec": { + "name": "uc_securable_spec", + "type": "*AppManifestAppResourceUcSecurableSpec", + "description": "", "required": false - }, - "run_as": { - "name": "run_as", - "type": "any", - "description": "The user or service principal that the job runs as, if specified in the request.\nThis field indicates the explicit configuration of `run_as` for the job.\nTo find the value in all cases, explicit or implicit, use `run_as_user_name`.", + } + } + }, + "apps.AppManifestAppResourceSqlWarehouseSpec": { + "name": "AppManifestAppResourceSqlWarehouseSpec", + "package": "apps", + "description": "", + "fields": { + "permission": { + "name": "permission", + "type": "AppManifestAppResourceSqlWarehouseSpecSqlWarehousePermission", + "description": "Permission to grant on the SQL warehouse. Supported permissions are:\n\"CAN_MANAGE\", \"CAN_USE\", \"IS_OWNER\".", "required": false - }, - "schedule": { - "name": "schedule", - "type": "any", - "description": "An optional periodic schedule for this job. The default behavior is that the job only runs when triggered by clicking “Run Now” in the Jobs UI or sending an API request to `runNow`.", + } + } + }, + "apps.AppManifestAppResourceUcSecurableSpec": { + "name": "AppManifestAppResourceUcSecurableSpec", + "package": "apps", + "description": "", + "fields": { + "permission": { + "name": "permission", + "type": "AppManifestAppResourceUcSecurableSpecUcSecurablePermission", + "description": "", "required": false }, - "tags": { - "name": "tags", - "type": "map[string]string", - "description": "A map of tags associated with the job. These are forwarded to the cluster as cluster tags for jobs clusters, and are subject to the same limitations as cluster tags. 
A maximum of 25 tags can be added to the job.", + "securable_type": { + "name": "securable_type", + "type": "AppManifestAppResourceUcSecurableSpecUcSecurableType", + "description": "", "required": false - }, - "tasks": { - "name": "tasks", - "type": "any", - "description": "A list of task specifications to be executed by this job.\nIt supports up to 1000 elements in write endpoints (:method:jobs/create, :method:jobs/reset, :method:jobs/update, :method:jobs/submit).\nRead endpoints return only 100 tasks. If more than 100 tasks are available, you can paginate through them using :method:jobs/get. Use the `next_page_token` field at the object root to determine if more results are available.", + } + } + }, + "apps.AppPermission": { + "name": "AppPermission", + "package": "apps", + "description": "", + "fields": { + "inherited": { + "name": "inherited", + "type": "bool", + "description": "", "required": false }, - "timeout_seconds": { - "name": "timeout_seconds", - "type": "int", - "description": "An optional timeout applied to each run of this job. A value of `0` means no timeout.", + "inherited_from_object": { + "name": "inherited_from_object", + "type": "[]string", + "description": "", "required": false }, - "trigger": { - "name": "trigger", - "type": "any", - "description": "A configuration to trigger a run when certain conditions are met. The default behavior is that the job runs only when triggered by clicking “Run Now” in the Jobs UI or sending an API request to `runNow`.", + "permission_level": { + "name": "permission_level", + "type": "AppPermissionLevel", + "description": "", + "required": false + } + } + }, + "apps.AppPermissions": { + "name": "AppPermissions", + "package": "apps", + "description": "", + "fields": { + "access_control_list": { + "name": "access_control_list", + "type": "[]AppAccessControlResponse", + "description": "", "required": false }, - "usage_policy_id": { - "name": "usage_policy_id", + "object_id": { + "name": "object_id", "type": "string", - "description": "The id of the user specified usage policy to use for this job.\nIf not specified, a default usage policy may be applied when creating or modifying the job.\nSee `effective_usage_policy_id` for the usage policy used by this workload.", + "description": "", "required": false }, - "webhook_notifications": { - "name": "webhook_notifications", - "type": "any", - "description": "A collection of system notification IDs to notify when runs of this job begin or complete.", + "object_type": { + "name": "object_type", + "type": "string", + "description": "", "required": false } } }, - "bundle.MlflowExperiment": { - "name": "MlflowExperiment", - "package": "resources", - "description": "mlflow experiment configuration.", + "apps.AppPermissionsDescription": { + "name": "AppPermissionsDescription", + "package": "apps", + "description": "", "fields": { - "artifact_location": { - "name": "artifact_location", - "type": "any", - "description": "Location where all artifacts for the experiment are stored.\nIf not provided, the remote server will select an appropriate default.", + "description": { + "name": "description", + "type": "string", + "description": "", "required": false }, - "name": { - "name": "name", - "type": "any", - "description": "Experiment name.", - "required": false - }, - "tags": { - "name": "tags", - "type": "map[string]string", - "description": "A collection of tags to set on the experiment. Maximum tag size and number of tags per request\ndepends on the storage backend. 
All storage backends are guaranteed to support tag keys up\nto 250 bytes in size and tag values up to 5000 bytes in size. All storage backends are also\nguaranteed to support up to 20 tags per request.", + "permission_level": { + "name": "permission_level", + "type": "AppPermissionLevel", + "description": "", "required": false } } }, - "bundle.MlflowModel": { - "name": "MlflowModel", - "package": "resources", - "description": "mlflow model configuration.", + "apps.AppPermissionsRequest": { + "name": "AppPermissionsRequest", + "package": "apps", + "description": "", "fields": { - "description": { - "name": "description", - "type": "string", - "description": "Optional description for registered model.", - "required": false - }, - "name": { - "name": "name", - "type": "any", - "description": "Register models under this name", - "required": false - }, - "tags": { - "name": "tags", - "type": "map[string]string", - "description": "Additional metadata for registered model.", + "access_control_list": { + "name": "access_control_list", + "type": "[]AppAccessControlRequest", + "description": "", "required": false } } }, - "bundle.ModelServingEndpoint": { - "name": "ModelServingEndpoint", - "package": "resources", - "description": "model serving endpoint configuration.", + "apps.AppResource": { + "name": "AppResource", + "package": "apps", + "description": "", "fields": { - "ai_gateway": { - "name": "ai_gateway", - "type": "any", - "description": "The AI Gateway configuration for the serving endpoint. NOTE: External model, provisioned throughput, and pay-per-token endpoints are fully supported; agent endpoints currently only support inference tables.", + "database": { + "name": "database", + "type": "*AppResourceDatabase", + "description": "", "required": false }, - "budget_policy_id": { - "name": "budget_policy_id", + "description": { + "name": "description", "type": "string", - "description": "The budget policy to be applied to the serving endpoint.", + "description": "Description of the App Resource.", "required": false }, - "config": { - "name": "config", - "type": "any", - "description": "The core config of the serving endpoint.", + "experiment": { + "name": "experiment", + "type": "*AppResourceExperiment", + "description": "", "required": false }, - "description": { - "name": "description", - "type": "string", + "genie_space": { + "name": "genie_space", + "type": "*AppResourceGenieSpace", "description": "", "required": false }, - "email_notifications": { - "name": "email_notifications", - "type": "any", - "description": "Email notification settings.", + "job": { + "name": "job", + "type": "*AppResourceJob", + "description": "", "required": false }, "name": { "name": "name", - "type": "any", - "description": "The name of the serving endpoint. This field is required and must be unique across a Databricks workspace.\nAn endpoint name can consist of alphanumeric characters, dashes, and underscores.", + "type": "string", + "description": "Name of the App Resource.", "required": false }, - "rate_limits": { - "name": "rate_limits", - "type": "any", - "description": "Rate limits to be applied to the serving endpoint. 
NOTE: this field is deprecated, please use AI Gateway to manage rate limits.", - "required": false, - "deprecated": true + "secret": { + "name": "secret", + "type": "*AppResourceSecret", + "description": "", + "required": false }, - "route_optimized": { - "name": "route_optimized", - "type": "any", - "description": "Enable route optimization for the serving endpoint.", + "serving_endpoint": { + "name": "serving_endpoint", + "type": "*AppResourceServingEndpoint", + "description": "", "required": false }, - "tags": { - "name": "tags", - "type": "map[string]string", - "description": "Tags to be attached to the serving endpoint and automatically propagated to billing logs.", + "sql_warehouse": { + "name": "sql_warehouse", + "type": "*AppResourceSqlWarehouse", + "description": "", + "required": false + }, + "uc_securable": { + "name": "uc_securable", + "type": "*AppResourceUcSecurable", + "description": "", "required": false } } }, - "bundle.Pipeline": { - "name": "Pipeline", - "package": "resources", - "description": "pipeline configuration.", + "apps.AppResourceDatabase": { + "name": "AppResourceDatabase", + "package": "apps", + "description": "", "fields": { - "allow_duplicate_names": { - "name": "allow_duplicate_names", - "type": "any", - "description": "If false, deployment will fail if name conflicts with that of another pipeline.", + "database_name": { + "name": "database_name", + "type": "string", + "description": "", "required": false }, - "budget_policy_id": { - "name": "budget_policy_id", + "instance_name": { + "name": "instance_name", "type": "string", - "description": "Budget policy of this pipeline.", + "description": "", "required": false }, - "catalog": { - "name": "catalog", - "type": "any", - "description": "A catalog in Unity Catalog to publish data from this pipeline to. If `target` is specified, tables in this pipeline are published to a `target` schema inside `catalog` (for example, `catalog`.`target`.`table`). If `target` is not specified, no data is published to Unity Catalog.", + "permission": { + "name": "permission", + "type": "AppResourceDatabaseDatabasePermission", + "description": "", "required": false - }, - "channel": { - "name": "channel", - "type": "any", - "description": "DLT Release Channel that specifies which version to use.", + } + } + }, + "apps.AppResourceExperiment": { + "name": "AppResourceExperiment", + "package": "apps", + "description": "", + "fields": { + "experiment_id": { + "name": "experiment_id", + "type": "string", + "description": "", "required": false }, - "clusters": { - "name": "clusters", - "type": "any", - "description": "Cluster settings for this pipeline deployment.", + "permission": { + "name": "permission", + "type": "AppResourceExperimentExperimentPermission", + "description": "", "required": false - }, - "configuration": { - "name": "configuration", - "type": "any", - "description": "String-String configuration for this pipeline execution.", + } + } + }, + "apps.AppResourceGenieSpace": { + "name": "AppResourceGenieSpace", + "package": "apps", + "description": "", + "fields": { + "name": { + "name": "name", + "type": "string", + "description": "", "required": false }, - "continuous": { - "name": "continuous", - "type": "any", - "description": "Whether the pipeline is continuous or triggered. 
This replaces `trigger`.", + "permission": { + "name": "permission", + "type": "AppResourceGenieSpaceGenieSpacePermission", + "description": "", "required": false }, - "deployment": { - "name": "deployment", - "type": "any", - "description": "Deployment type of this pipeline.", + "space_id": { + "name": "space_id", + "type": "string", + "description": "", "required": false - }, - "development": { - "name": "development", - "type": "any", - "description": "Whether the pipeline is in Development mode. Defaults to false.", + } + } + }, + "apps.AppResourceJob": { + "name": "AppResourceJob", + "package": "apps", + "description": "", + "fields": { + "id": { + "name": "id", + "type": "string", + "description": "Id of the job to grant permission on.", "required": false }, - "dry_run": { - "name": "dry_run", - "type": "any", - "description": "", + "permission": { + "name": "permission", + "type": "AppResourceJobJobPermission", + "description": "Permissions to grant on the Job. Supported permissions are: \"CAN_MANAGE\",\n\"IS_OWNER\", \"CAN_MANAGE_RUN\", \"CAN_VIEW\".", "required": false - }, - "edition": { - "name": "edition", - "type": "any", - "description": "Pipeline product edition.", + } + } + }, + "apps.AppResourceSecret": { + "name": "AppResourceSecret", + "package": "apps", + "description": "", + "fields": { + "key": { + "name": "key", + "type": "string", + "description": "Key of the secret to grant permission on.", "required": false }, - "environment": { - "name": "environment", - "type": "any", - "description": "Environment specification for this pipeline used to install dependencies.", + "permission": { + "name": "permission", + "type": "AppResourceSecretSecretPermission", + "description": "Permission to grant on the secret scope. For secrets, only one permission\nis allowed. Permission must be one of: \"READ\", \"WRITE\", \"MANAGE\".", "required": false }, - "event_log": { - "name": "event_log", - "type": "any", - "description": "Event log configuration for this pipeline", + "scope": { + "name": "scope", + "type": "string", + "description": "Scope of the secret to grant permission on.", "required": false - }, - "filters": { - "name": "filters", - "type": "any", - "description": "Filters on which Pipeline packages to include in the deployed graph.", + } + } + }, + "apps.AppResourceSecretSecretPermission": { + "name": "AppResourceSecretSecretPermission", + "package": "apps", + "description": "Permission to grant on the secret scope. Supported permissions are: \"READ\", \"WRITE\", \"MANAGE\".", + "fields": {} + }, + "apps.AppResourceServingEndpoint": { + "name": "AppResourceServingEndpoint", + "package": "apps", + "description": "", + "fields": { + "name": { + "name": "name", + "type": "string", + "description": "Name of the serving endpoint to grant permission on.", "required": false }, - "gateway_definition": { - "name": "gateway_definition", - "type": "any", - "description": "The definition of a gateway pipeline to support change data capture.", + "permission": { + "name": "permission", + "type": "AppResourceServingEndpointServingEndpointPermission", + "description": "Permission to grant on the serving endpoint. 
Supported permissions are:\n\"CAN_MANAGE\", \"CAN_QUERY\", \"CAN_VIEW\".", "required": false - }, + } + } + }, + "apps.AppResourceSqlWarehouse": { + "name": "AppResourceSqlWarehouse", + "package": "apps", + "description": "", + "fields": { "id": { "name": "id", - "type": "any", - "description": "Unique identifier for this pipeline.", - "required": false - }, - "ingestion_definition": { - "name": "ingestion_definition", - "type": "any", - "description": "The configuration for a managed ingestion pipeline. These settings cannot be used with the 'libraries', 'schema', 'target', or 'catalog' settings.", - "required": false - }, - "libraries": { - "name": "libraries", - "type": "any", - "description": "Libraries or code needed by this deployment.", + "type": "string", + "description": "Id of the SQL warehouse to grant permission on.", "required": false }, - "name": { - "name": "name", - "type": "any", - "description": "Friendly identifier for this pipeline.", + "permission": { + "name": "permission", + "type": "AppResourceSqlWarehouseSqlWarehousePermission", + "description": "Permission to grant on the SQL warehouse. Supported permissions are:\n\"CAN_MANAGE\", \"CAN_USE\", \"IS_OWNER\".", "required": false - }, - "notifications": { - "name": "notifications", - "type": "any", - "description": "List of notification settings for this pipeline.", + } + } + }, + "apps.AppResourceUcSecurable": { + "name": "AppResourceUcSecurable", + "package": "apps", + "description": "", + "fields": { + "permission": { + "name": "permission", + "type": "AppResourceUcSecurableUcSecurablePermission", + "description": "", "required": false }, - "photon": { - "name": "photon", - "type": "any", - "description": "Whether Photon is enabled for this pipeline.", + "securable_full_name": { + "name": "securable_full_name", + "type": "string", + "description": "", "required": false }, - "restart_window": { - "name": "restart_window", - "type": "any", - "description": "Restart window of this pipeline.", + "securable_type": { + "name": "securable_type", + "type": "AppResourceUcSecurableUcSecurableType", + "description": "", "required": false - }, - "root_path": { - "name": "root_path", + } + } + }, + "apps.AppUpdate": { + "name": "AppUpdate", + "package": "apps", + "description": "", + "fields": { + "budget_policy_id": { + "name": "budget_policy_id", "type": "string", - "description": "Root path for this pipeline.\nThis is used as the root directory when editing the pipeline in the Databricks user interface and it is\nadded to sys.path when executing Python sources during pipeline execution.", + "description": "", "required": false }, - "run_as": { - "name": "run_as", - "type": "any", - "description": "Write-only setting, available only in Create/Update calls. Specifies the user or service principal that the pipeline runs as. If not specified, the pipeline runs as the user who created the pipeline.\n\nOnly `user_name` or `service_principal_name` can be specified. 
If both are specified, an error is thrown.", + "compute_size": { + "name": "compute_size", + "type": "ComputeSize", + "description": "", "required": false }, - "schema": { - "name": "schema", - "type": "any", - "description": "The default schema (database) where tables are read from or published to.", + "description": { + "name": "description", + "type": "string", + "description": "", "required": false }, - "serverless": { - "name": "serverless", - "type": "any", - "description": "Whether serverless compute is enabled for this pipeline.", + "git_repository": { + "name": "git_repository", + "type": "*GitRepository", + "description": "", "required": false }, - "storage": { - "name": "storage", - "type": "any", - "description": "DBFS root directory for storing checkpoints and tables.", + "resources": { + "name": "resources", + "type": "[]AppResource", + "description": "", "required": false }, - "tags": { - "name": "tags", - "type": "map[string]string", - "description": "A map of tags associated with the pipeline.\nThese are forwarded to the cluster as cluster tags, and are therefore subject to the same limitations.\nA maximum of 25 tags can be added to the pipeline.", + "status": { + "name": "status", + "type": "*AppUpdateUpdateStatus", + "description": "", "required": false }, - "target": { - "name": "target", - "type": "any", - "description": "Target schema (database) to add tables in this pipeline to. Exactly one of `schema` or `target` must be specified. To publish to Unity Catalog, also specify `catalog`. This legacy field is deprecated for pipeline creation in favor of the `schema` field.", - "required": false, - "deprecated": true - }, - "trigger": { - "name": "trigger", - "type": "any", - "description": "Which pipeline trigger to use. Deprecated: Use `continuous` instead.", - "required": false, - "deprecated": true - }, "usage_policy_id": { "name": "usage_policy_id", "type": "string", - "description": "Usage policy of this pipeline.", + "description": "", + "required": false + }, + "user_api_scopes": { + "name": "user_api_scopes", + "type": "[]string", + "description": "", "required": false } } }, - "bundle.QualityMonitor": { - "name": "QualityMonitor", - "package": "resources", - "description": "quality monitor configuration.", + "apps.AppUpdateUpdateStatus": { + "name": "AppUpdateUpdateStatus", + "package": "apps", + "description": "", "fields": { - "assets_dir": { - "name": "assets_dir", - "type": "any", - "description": "[Create:REQ Update:IGN] Field for specifying the absolute path to a custom directory to store data-monitoring\nassets. 
Normally prepopulated to a default user location via UI and Python APIs.", + "message": { + "name": "message", + "type": "string", + "description": "", "required": false }, - "baseline_table_name": { - "name": "baseline_table_name", + "state": { + "name": "state", + "type": "AppUpdateUpdateStatusUpdateState", + "description": "", + "required": false + } + } + }, + "apps.ApplicationStatus": { + "name": "ApplicationStatus", + "package": "apps", + "description": "", + "fields": { + "message": { + "name": "message", "type": "string", - "description": "[Create:OPT Update:OPT] Baseline table name.\nBaseline data is used to compute drift from the data in the monitored `table_name`.\nThe baseline table and the monitored table shall have the same schema.", + "description": "Application status message", "required": false }, - "custom_metrics": { - "name": "custom_metrics", - "type": "any", - "description": "[Create:OPT Update:OPT] Custom metrics.", + "state": { + "name": "state", + "type": "ApplicationState", + "description": "State of the application.", + "required": false + } + } + }, + "apps.AsyncUpdateAppRequest": { + "name": "AsyncUpdateAppRequest", + "package": "apps", + "description": "", + "fields": { + "app": { + "name": "app", + "type": "*App", + "description": "", "required": false }, - "data_classification_config": { - "name": "data_classification_config", - "type": "any", - "description": "[Create:OPT Update:OPT] Data classification related config.", + "update_mask": { + "name": "update_mask", + "type": "string", + "description": "The field mask must be a single string, with multiple fields separated by\ncommas (no spaces). The field path is relative to the resource object,\nusing a dot (`.`) to navigate sub-fields (e.g., `author.given_name`).\nSpecification of elements in sequence or map fields is not allowed, as\nonly the entire collection field can be specified. Field names must\nexactly match the resource field names.\n\nA field mask of `*` indicates full replacement. 
It’s recommended to\nalways explicitly list the field...", + "required": false + } + } + }, + "apps.ComputeStatus": { + "name": "ComputeStatus", + "package": "apps", + "description": "", + "fields": { + "message": { + "name": "message", + "type": "string", + "description": "Compute status message", "required": false }, - "inference_log": { - "name": "inference_log", - "type": "any", + "state": { + "name": "state", + "type": "ComputeState", + "description": "State of the app compute.", + "required": false + } + } + }, + "apps.CreateAppDeploymentRequest": { + "name": "CreateAppDeploymentRequest", + "package": "apps", + "description": "", + "fields": { + "app_deployment": { + "name": "app_deployment", + "type": "AppDeployment", + "description": "The app deployment configuration.", + "required": false + } + } + }, + "apps.CreateAppRequest": { + "name": "CreateAppRequest", + "package": "apps", + "description": "", + "fields": { + "app": { + "name": "app", + "type": "App", "description": "", "required": false - }, - "latest_monitor_failure_msg": { - "name": "latest_monitor_failure_msg", - "type": "any", - "description": "[Create:ERR Update:IGN] The latest error message for a monitor failure.", + } + } + }, + "apps.CreateCustomTemplateRequest": { + "name": "CreateCustomTemplateRequest", + "package": "apps", + "description": "", + "fields": { + "template": { + "name": "template", + "type": "CustomTemplate", + "description": "", "required": false - }, - "notifications": { - "name": "notifications", - "type": "any", - "description": "[Create:OPT Update:OPT] Field for specifying notification settings.", + } + } + }, + "apps.CustomTemplate": { + "name": "CustomTemplate", + "package": "apps", + "description": "", + "fields": { + "creator": { + "name": "creator", + "type": "string", + "description": "", "required": false }, - "output_schema_name": { - "name": "output_schema_name", + "description": { + "name": "description", "type": "string", - "description": "[Create:REQ Update:REQ] Schema where output tables are created. Needs to be in 2-level format {catalog}.{schema}", + "description": "The description of the template.", "required": false }, - "schedule": { - "name": "schedule", - "type": "any", - "description": "[Create:OPT Update:OPT] The monitor schedule.", + "git_provider": { + "name": "git_provider", + "type": "string", + "description": "The Git provider of the template.", "required": false }, - "skip_builtin_dashboard": { - "name": "skip_builtin_dashboard", - "type": "any", - "description": "Whether to skip creating a default dashboard summarizing data quality metrics.", + "git_repo": { + "name": "git_repo", + "type": "string", + "description": "The Git repository URL that the template resides in.", "required": false }, - "slicing_exprs": { - "name": "slicing_exprs", - "type": "any", - "description": "[Create:OPT Update:OPT] List of column expressions to slice data with for targeted analysis. The data is grouped by\neach expression independently, resulting in a separate slice for each predicate and its\ncomplements. For example `slicing_exprs=[“col_1”, “col_2 \u003e 10”]` will generate the following\nslices: two slices for `col_2 \u003e 10` (True and False), and one slice per unique value in\n`col1`. For high-cardinality columns, only the top 100 unique values by frequency will\ngenerate slices.", + "manifest": { + "name": "manifest", + "type": "AppManifest", + "description": "The manifest of the template. 
It defines fields and default values when\ninstalling the template.", "required": false }, - "snapshot": { - "name": "snapshot", - "type": "any", - "description": "Configuration for monitoring snapshot tables.", - "required": false - }, - "time_series": { - "name": "time_series", - "type": "any", - "description": "Configuration for monitoring time series tables.", + "name": { + "name": "name", + "type": "string", + "description": "The name of the template. It must contain only alphanumeric characters,\nhyphens, underscores, and whitespaces. It must be unique within the\nworkspace.", "required": false }, - "warehouse_id": { - "name": "warehouse_id", + "path": { + "name": "path", "type": "string", - "description": "Optional argument to specify the warehouse for dashboard creation. If not specified, the first running\nwarehouse will be used.", + "description": "The path to the template within the Git repository.", "required": false } } }, - "bundle.RegisteredModel": { - "name": "RegisteredModel", - "package": "resources", - "description": "registered model configuration.", + "apps.EnvVar": { + "name": "EnvVar", + "package": "apps", + "description": "", "fields": { - "aliases": { - "name": "aliases", - "type": "any", - "description": "List of aliases associated with the registered model", + "name": { + "name": "name", + "type": "string", + "description": "The name of the environment variable.", "required": false }, - "browse_only": { - "name": "browse_only", - "type": "any", - "description": "Indicates whether the principal is limited to retrieving metadata for the associated object through the BROWSE privilege when include_browse is enabled in the request.", + "value": { + "name": "value", + "type": "string", + "description": "The value for the environment variable.", "required": false }, - "catalog_name": { - "name": "catalog_name", + "value_from": { + "name": "value_from", "type": "string", - "description": "The name of the catalog where the schema and the registered model reside", + "description": "The name of an external Databricks resource that contains the value, such\nas a secret or a database table.", "required": false - }, - "comment": { - "name": "comment", - "type": "any", - "description": "The comment attached to the registered model", + } + } + }, + "apps.GetAppPermissionLevelsResponse": { + "name": "GetAppPermissionLevelsResponse", + "package": "apps", + "description": "", + "fields": { + "permission_levels": { + "name": "permission_levels", + "type": "[]AppPermissionsDescription", + "description": "Specific permission levels", "required": false - }, - "created_at": { - "name": "created_at", - "type": "string (timestamp)", - "description": "Creation timestamp of the registered model in milliseconds since the Unix epoch", + } + } + }, + "apps.GitRepository": { + "name": "GitRepository", + "package": "apps", + "description": "Git repository configuration specifying the location of the repository.", + "fields": { + "provider": { + "name": "provider", + "type": "string", + "description": "Git provider. Case insensitive. 
Supported values: gitHub,\ngitHubEnterprise, bitbucketCloud, bitbucketServer, azureDevOpsServices,\ngitLab, gitLabEnterpriseEdition, awsCodeCommit.", "required": false }, - "created_by": { - "name": "created_by", - "type": "any", - "description": "The identifier of the user who created the registered model", + "url": { + "name": "url", + "type": "string", + "description": "URL of the Git repository.", "required": false - }, - "full_name": { - "name": "full_name", + } + } + }, + "apps.GitSource": { + "name": "GitSource", + "package": "apps", + "description": "Complete git source specification including repository location and reference.", + "fields": { + "branch": { + "name": "branch", "type": "string", - "description": "The three-level (fully qualified) name of the registered model", + "description": "Git branch to checkout.", "required": false }, - "metastore_id": { - "name": "metastore_id", + "commit": { + "name": "commit", "type": "string", - "description": "The unique identifier of the metastore", + "description": "Git commit SHA to checkout.", "required": false }, - "name": { - "name": "name", - "type": "any", - "description": "The name of the registered model", + "git_repository": { + "name": "git_repository", + "type": "*GitRepository", + "description": "Git repository configuration. Populated from the app's git_repository\nconfiguration.", "required": false }, - "owner": { - "name": "owner", - "type": "any", - "description": "The identifier of the user who owns the registered model", + "resolved_commit": { + "name": "resolved_commit", + "type": "string", + "description": "The resolved commit SHA that was actually used for the deployment. This\nis populated by the system after resolving the reference (branch, tag, or\ncommit). If commit is specified directly, this will match commit. If a\nbranch or tag is specified, this contains the commit SHA that the branch\nor tag pointed to at deployment time.", "required": false }, - "schema_name": { - "name": "schema_name", + "source_code_path": { + "name": "source_code_path", "type": "string", - "description": "The name of the schema where the registered model resides", + "description": "Relative path to the app source code within the Git repository. 
If not\nspecified, the root of the repository is used.", "required": false }, - "storage_location": { - "name": "storage_location", - "type": "any", - "description": "The storage location on the cloud under which model version data files are stored", + "tag": { + "name": "tag", + "type": "string", + "description": "Git tag to checkout.", "required": false - }, - "updated_at": { - "name": "updated_at", - "type": "string (timestamp)", - "description": "Last-update timestamp of the registered model in milliseconds since the Unix epoch", + } + } + }, + "apps.ListAppDeploymentsResponse": { + "name": "ListAppDeploymentsResponse", + "package": "apps", + "description": "", + "fields": { + "app_deployments": { + "name": "app_deployments", + "type": "[]AppDeployment", + "description": "Deployment history of the app.", "required": false }, - "updated_by": { - "name": "updated_by", - "type": "any", - "description": "The identifier of the user who updated the registered model last time", + "next_page_token": { + "name": "next_page_token", + "type": "string", + "description": "Pagination token to request the next page of apps.", "required": false } } }, - "bundle.Schema": { - "name": "Schema", - "package": "resources", - "description": "schema configuration.", + "apps.ListAppsResponse": { + "name": "ListAppsResponse", + "package": "apps", + "description": "", "fields": { - "catalog_name": { - "name": "catalog_name", - "type": "string", - "description": "Name of parent catalog.", + "apps": { + "name": "apps", + "type": "[]App", + "description": "", "required": false }, - "comment": { - "name": "comment", - "type": "any", - "description": "User-provided free-form text description.", + "next_page_token": { + "name": "next_page_token", + "type": "string", + "description": "Pagination token to request the next page of apps.", "required": false - }, - "name": { - "name": "name", - "type": "any", - "description": "Name of schema, relative to parent catalog.", + } + } + }, + "apps.ListCustomTemplatesResponse": { + "name": "ListCustomTemplatesResponse", + "package": "apps", + "description": "", + "fields": { + "next_page_token": { + "name": "next_page_token", + "type": "string", + "description": "Pagination token to request the next page of custom templates.", "required": false }, - "properties": { - "name": "properties", - "type": "any", - "description": "A map of key-value properties attached to the securable.", + "templates": { + "name": "templates", + "type": "[]CustomTemplate", + "description": "", "required": false - }, - "storage_root": { - "name": "storage_root", - "type": "any", - "description": "Storage root URL for managed tables within schema.", + } + } + }, + "apps.UpdateAppRequest": { + "name": "UpdateAppRequest", + "package": "apps", + "description": "", + "fields": { + "app": { + "name": "app", + "type": "App", + "description": "", "required": false } } }, - "bundle.SqlWarehouse": { - "name": "SqlWarehouse", - "package": "resources", - "description": "Creates a new SQL warehouse.", + "apps.UpdateCustomTemplateRequest": { + "name": "UpdateCustomTemplateRequest", + "package": "apps", + "description": "", "fields": { - "auto_stop_mins": { - "name": "auto_stop_mins", - "type": "any", - "description": "The amount of time in minutes that a SQL warehouse must be idle (i.e., no\nRUNNING queries) before it is automatically stopped.\n\nSupported values:\n- Must be == 0 or \u003e= 10 mins\n- 0 indicates no autostop.\n\nDefaults to 120 mins", + "template": { + "name": "template", + "type": "CustomTemplate", 
+ "description": "", + "required": false + } + } + }, + "apps.WaitGetAppActive": { + "name": "WaitGetAppActive", + "package": "apps", + "description": "WaitGetAppActive is a wrapper that calls [AppsAPI.WaitGetAppActive] and waits to reach ACTIVE state.", + "fields": { + "Poll": { + "name": "Poll", + "type": "func(time.Duration, func(*App)) (*App, error)", + "description": "", "required": false }, - "channel": { - "name": "channel", - "type": "any", - "description": "Channel Details", + "Response": { + "name": "Response", + "type": "*R", + "description": "", "required": false }, - "cluster_size": { - "name": "cluster_size", - "type": "int", - "description": "Size of the clusters allocated for this warehouse.\nIncreasing the size of a spark cluster allows you to run larger queries on\nit. If you want to increase the number of concurrent queries, please tune\nmax_num_clusters.\n\nSupported values:\n- 2X-Small\n- X-Small\n- Small\n- Medium\n- Large\n- X-Large\n- 2X-Large\n- 3X-Large\n- 4X-Large", + "callback": { + "name": "callback", + "type": "func(*App)", + "description": "", "required": false }, - "creator_name": { - "name": "creator_name", + "name": { + "name": "name", "type": "string", - "description": "warehouse creator name", + "description": "", "required": false }, - "enable_photon": { - "name": "enable_photon", - "type": "bool", - "description": "Configures whether the warehouse should use Photon optimized clusters.\n\nDefaults to false.", + "timeout": { + "name": "timeout", + "type": "time.Duration", + "description": "", "required": false - }, - "enable_serverless_compute": { - "name": "enable_serverless_compute", - "type": "bool", - "description": "Configures whether the warehouse should use serverless compute", + } + } + }, + "apps.WaitGetAppStopped": { + "name": "WaitGetAppStopped", + "package": "apps", + "description": "WaitGetAppStopped is a wrapper that calls [AppsAPI.WaitGetAppStopped] and waits to reach STOPPED state.", + "fields": { + "Poll": { + "name": "Poll", + "type": "func(time.Duration, func(*App)) (*App, error)", + "description": "", "required": false }, - "instance_profile_arn": { - "name": "instance_profile_arn", - "type": "any", - "description": "Deprecated. Instance profile used to pass IAM role to the cluster", - "required": false, - "deprecated": true - }, - "max_num_clusters": { - "name": "max_num_clusters", - "type": "any", - "description": "Maximum number of clusters that the autoscaler will create to handle\nconcurrent queries.\n\nSupported values:\n- Must be \u003e= min_num_clusters\n- Must be \u003c= 40.\n\nDefaults to min_clusters if unset.", + "Response": { + "name": "Response", + "type": "*R", + "description": "", "required": false }, - "min_num_clusters": { - "name": "min_num_clusters", - "type": "any", - "description": "Minimum number of available clusters that will be maintained for this SQL\nwarehouse. Increasing this will ensure that a larger number of clusters are\nalways running and therefore may reduce the cold start time for new\nqueries. This is similar to reserved vs. 
revocable cores in a resource\nmanager.\n\nSupported values:\n- Must be \u003e 0\n- Must be \u003c= min(max_num_clusters, 30)\n\nDefaults to 1", + "callback": { + "name": "callback", + "type": "func(*App)", + "description": "", "required": false }, "name": { "name": "name", - "type": "any", - "description": "Logical name for the cluster.\n\nSupported values:\n- Must be unique within an org.\n- Must be less than 100 characters.", + "type": "string", + "description": "", "required": false }, - "spot_instance_policy": { - "name": "spot_instance_policy", - "type": "any", - "description": "Configurations whether the endpoint should use spot instances.", - "required": false - }, - "tags": { - "name": "tags", - "type": "map[string]string", - "description": "A set of key-value pairs that will be tagged on all resources (e.g., AWS instances and EBS volumes) associated\nwith this SQL warehouse.\n\nSupported values:\n- Number of tags \u003c 45.", - "required": false - }, - "warehouse_type": { - "name": "warehouse_type", - "type": "any", - "description": "Warehouse type: `PRO` or `CLASSIC`. If you want to use serverless compute,\nyou must set to `PRO` and also set the field `enable_serverless_compute` to `true`.", + "timeout": { + "name": "timeout", + "type": "time.Duration", + "description": "", "required": false } } }, - "bundle.SyncedDatabaseTable": { - "name": "SyncedDatabaseTable", - "package": "resources", - "description": "Next field marker: 18", + "apps.WaitGetDeploymentAppSucceeded": { + "name": "WaitGetDeploymentAppSucceeded", + "package": "apps", + "description": "WaitGetDeploymentAppSucceeded is a wrapper that calls [AppsAPI.WaitGetDeploymentAppSucceeded] and waits to reach SUCCEEDED state.", "fields": { - "data_synchronization_status": { - "name": "data_synchronization_status", - "type": "any", - "description": "Synced Table data synchronization status", - "required": false, - "output_only": true - }, - "database_instance_name": { - "name": "database_instance_name", - "type": "string", - "description": "Name of the target database instance. This is required when creating synced database tables in standard catalogs.\nThis is optional when creating synced database tables in registered catalogs. If this field is specified\nwhen creating synced database tables in registered catalogs, the database instance name MUST\nmatch that of the registered catalog (or the request will be rejected).", + "Poll": { + "name": "Poll", + "type": "func(time.Duration, func(*AppDeployment)) (*AppDeployment, error)", + "description": "", "required": false }, - "effective_database_instance_name": { - "name": "effective_database_instance_name", - "type": "string", - "description": "The name of the database instance that this table is registered to. 
This field is always returned, and for\ntables inside database catalogs is inferred database instance associated with the catalog.", - "required": false, - "output_only": true - }, - "effective_logical_database_name": { - "name": "effective_logical_database_name", - "type": "string", - "description": "The name of the logical database that this table is registered to.", - "required": false, - "output_only": true + "Response": { + "name": "Response", + "type": "*R", + "description": "", + "required": false }, - "logical_database_name": { - "name": "logical_database_name", + "app_name": { + "name": "app_name", "type": "string", - "description": "Target Postgres database object (logical database) name for this table.\n\nWhen creating a synced table in a registered Postgres catalog, the\ntarget Postgres database name is inferred to be that of the registered catalog.\nIf this field is specified in this scenario, the Postgres database name MUST\nmatch that of the registered catalog (or the request will be rejected).\n\nWhen creating a synced table in a standard catalog, this field is required.\nIn this scenario, specifying this field will allow targeting an arbitrary postgres database.\nNote that this has implications for the `create_database_objects_is_missing` field in `spec`.", + "description": "", "required": false }, - "name": { - "name": "name", - "type": "any", - "description": "Full three-part (catalog, schema, table) name of the table.", + "callback": { + "name": "callback", + "type": "func(*AppDeployment)", + "description": "", "required": false }, - "spec": { - "name": "spec", - "type": "any", - "description": "Specification of a synced database table.", + "deployment_id": { + "name": "deployment_id", + "type": "string", + "description": "", "required": false }, - "unity_catalog_provisioning_state": { - "name": "unity_catalog_provisioning_state", - "type": "any", - "description": "The provisioning state of the synced table entity in Unity Catalog. This is distinct from the\nstate of the data synchronization pipeline (i.e. 
the table may be in \"ACTIVE\" but the pipeline\nmay be in \"PROVISIONING\" as it runs asynchronously).", - "required": false, - "output_only": true + "timeout": { + "name": "timeout", + "type": "time.Duration", + "description": "", + "required": false } } }, - "bundle.Volume": { - "name": "Volume", - "package": "resources", - "description": "volume configuration.", + "apps.WaitGetUpdateAppSucceeded": { + "name": "WaitGetUpdateAppSucceeded", + "package": "apps", + "description": "WaitGetUpdateAppSucceeded is a wrapper that calls [AppsAPI.WaitGetUpdateAppSucceeded] and waits to reach SUCCEEDED state.", "fields": { - "catalog_name": { - "name": "catalog_name", - "type": "string", - "description": "The name of the catalog where the schema and the volume are", - "required": false - }, - "comment": { - "name": "comment", - "type": "any", - "description": "The comment attached to the volume", + "Poll": { + "name": "Poll", + "type": "func(time.Duration, func(*AppUpdate)) (*AppUpdate, error)", + "description": "", "required": false }, - "name": { - "name": "name", - "type": "any", - "description": "The name of the volume", + "Response": { + "name": "Response", + "type": "*R", + "description": "", "required": false }, - "schema_name": { - "name": "schema_name", + "app_name": { + "name": "app_name", "type": "string", - "description": "The name of the schema where the volume is", + "description": "", "required": false }, - "storage_location": { - "name": "storage_location", - "type": "any", - "description": "The storage location on the cloud", + "callback": { + "name": "callback", + "type": "func(*AppUpdate)", + "description": "", "required": false }, - "volume_type": { - "name": "volume_type", - "type": "any", - "description": "The type of the volume. An external volume is located in the specified external location.\nA managed volume is located in the default location which is specified by the parent schema, or the parent catalog, or the Metastore.\n[Learn more](https://docs.databricks.com/aws/en/volumes/managed-vs-external)", + "timeout": { + "name": "timeout", + "type": "time.Duration", + "description": "", "required": false } } }, - "catalog.MonitorCronSchedule": { - "name": "MonitorCronSchedule", - "package": "catalog", - "description": "monitor cron schedule configuration.", + "apps.appsImpl": { + "name": "appsImpl", + "package": "apps", + "description": "unexported type that holds implementations of just Apps API methods", "fields": { - "pause_status": { - "name": "pause_status", - "type": "any", - "description": "Read only field that indicates whether a schedule is paused or not.", - "required": false - }, - "quartz_cron_expression": { - "name": "quartz_cron_expression", - "type": "any", - "description": "The expression that determines when to run the monitor. 
See [examples](https://www.quartz-scheduler.org/documentation/quartz-2.3.0/tutorials/crontrigger.html).", - "required": false - }, - "timezone_id": { - "name": "timezone_id", - "type": "string", - "description": "The timezone id (e.g., ``PST``) in which to evaluate the quartz expression.", + "client": { + "name": "client", + "type": "*client.DatabricksClient", + "description": "", "required": false } } }, - "catalog.MonitorCronSchedulePauseStatus": { - "name": "MonitorCronSchedulePauseStatus", - "package": "catalog", - "description": "Source link: https://src.dev.databricks.com/databricks/universe/-/blob/elastic-spark-common/api/messages/schedule.proto\nMonitoring workflow schedule pause status.", - "fields": {} - }, - "catalog.MonitorDataClassificationConfig": { - "name": "MonitorDataClassificationConfig", - "package": "catalog", - "description": "Data classification related configuration.", + "apps.appsSettingsImpl": { + "name": "appsSettingsImpl", + "package": "apps", + "description": "unexported type that holds implementations of just AppsSettings API methods", "fields": { - "enabled": { - "name": "enabled", - "type": "bool", - "description": "Whether to enable data classification.", + "client": { + "name": "client", + "type": "*client.DatabricksClient", + "description": "", "required": false } } }, - "catalog.MonitorDestination": { - "name": "MonitorDestination", - "package": "catalog", - "description": "monitor destination configuration.", + "bundle.Alert": { + "name": "Alert", + "package": "resources", + "description": "", "fields": { - "email_addresses": { - "name": "email_addresses", + "create_time": { + "name": "create_time", "type": "any", - "description": "The list of email addresses to send the notification to. A maximum of 5 email addresses is supported.", + "description": "The timestamp indicating when the alert was created.", "required": false - } - } - }, - "catalog.MonitorInferenceLog": { - "name": "MonitorInferenceLog", - "package": "catalog", - "description": "monitor inference log configuration.", - "fields": { - "granularities": { - "name": "granularities", + }, + "custom_description": { + "name": "custom_description", "type": "any", - "description": "List of granularities to use when aggregating data into time windows based on their timestamp.", + "description": "Custom description for the alert. support mustache template.", "required": false }, - "label_col": { - "name": "label_col", + "custom_summary": { + "name": "custom_summary", "type": "any", - "description": "Column for the label.", + "description": "Custom summary for the alert. 
support mustache template.", "required": false }, - "model_id_col": { - "name": "model_id_col", + "display_name": { + "name": "display_name", "type": "any", - "description": "Column for the model identifier.", + "description": "The display name of the alert.", "required": false }, - "prediction_col": { - "name": "prediction_col", + "effective_run_as": { + "name": "effective_run_as", "type": "any", - "description": "Column for the prediction.", + "description": "The actual identity that will be used to execute the alert.\nThis is an output-only field that shows the resolved run-as identity after applying\npermissions and defaults.", "required": false }, - "prediction_proba_col": { - "name": "prediction_proba_col", + "evaluation": { + "name": "evaluation", "type": "any", - "description": "Column for prediction probabilities", + "description": "", "required": false }, - "problem_type": { - "name": "problem_type", + "id": { + "name": "id", "type": "any", - "description": "Problem type the model aims to solve.", + "description": "UUID identifying the alert.", "required": false }, - "timestamp_col": { - "name": "timestamp_col", + "lifecycle_state": { + "name": "lifecycle_state", "type": "any", - "description": "Column for the timestamp.", + "description": "Indicates whether the query is trashed.", "required": false - } - } - }, - "catalog.MonitorInferenceLogProblemType": { - "name": "MonitorInferenceLogProblemType", - "package": "catalog", - "description": "monitor inference log problem type configuration.", - "fields": {} - }, - "catalog.MonitorMetric": { - "name": "MonitorMetric", - "package": "catalog", - "description": "Custom metric definition.", - "fields": { - "definition": { - "name": "definition", + }, + "owner_user_name": { + "name": "owner_user_name", "type": "any", - "description": "Jinja template for a SQL expression that specifies how to compute the metric. See [create metric definition](https://docs.databricks.com/en/lakehouse-monitoring/custom-metrics.html#create-definition).", + "description": "The owner's username. This field is set to \"Unavailable\" if the user has been deleted.", "required": false }, - "input_columns": { - "name": "input_columns", + "parent_path": { + "name": "parent_path", "type": "any", - "description": "A list of column names in the input table the metric should be computed for.\nCan use ``\":table\"`` to indicate that the metric needs information from multiple columns.", + "description": "The workspace path of the folder containing the alert. Can only be set on create, and cannot be updated.", "required": false }, - "name": { - "name": "name", + "query_text": { + "name": "query_text", "type": "any", - "description": "Name of the metric in the output tables.", + "description": "Text of the query to be run.", "required": false }, - "output_data_type": { - "name": "output_data_type", + "run_as": { + "name": "run_as", "type": "any", - "description": "The output type of the custom metric.", + "description": "Specifies the identity that will be used to run the alert.\nThis field allows you to configure alerts to run as a specific user or service principal.\n- For user identity: Set `user_name` to the email of an active workspace user. Users can only set this to their own email.\n- For service principal: Set `service_principal_name` to the application ID. 
Requires the `servicePrincipal/user` role.\nIf not specified, the alert will run as the request user.", "required": false }, - "type": { - "name": "type", + "run_as_user_name": { + "name": "run_as_user_name", "type": "any", - "description": "Can only be one of ``\"CUSTOM_METRIC_TYPE_AGGREGATE\"``, ``\"CUSTOM_METRIC_TYPE_DERIVED\"``, or ``\"CUSTOM_METRIC_TYPE_DRIFT\"``.\nThe ``\"CUSTOM_METRIC_TYPE_AGGREGATE\"`` and ``\"CUSTOM_METRIC_TYPE_DERIVED\"`` metrics\nare computed on a single table, whereas the ``\"CUSTOM_METRIC_TYPE_DRIFT\"`` compare metrics across\nbaseline and input table, or across the two consecutive time windows.\n- CUSTOM_METRIC_TYPE_AGGREGATE: only depend on the existing columns in your table\n- CUSTOM_METRIC_TYPE_DERIVED: depend on previously computed aggregate metrics\n- CUSTOM_METRIC_TYPE_DRIFT: depend on previously computed aggregate or derived metrics", + "description": "The run as username or application ID of service principal.\nOn Create and Update, this field can be set to application ID of an active service principal. Setting this field requires the servicePrincipal/user role.\nDeprecated: Use `run_as` field instead. This field will be removed in a future release.", "required": false - } - } - }, - "catalog.MonitorMetricType": { - "name": "MonitorMetricType", - "package": "catalog", - "description": "Can only be one of ``\\\"CUSTOM_METRIC_TYPE_AGGREGATE\\\"``, ``\\\"CUSTOM_METRIC_TYPE_DERIVED\\\"``, or ``\\\"CUSTOM_METRIC_TYPE_DRIFT\\\"``.\nThe ``\\\"CUSTOM_METRIC_TYPE_AGGREGATE\\\"`` and ``\\\"CUSTOM_METRIC_TYPE_DERIVED\\\"`` metrics\nare computed on a single table, whereas the ``\\\"CUSTOM_METRIC_TYPE_DRIFT\\\"`` compare metrics across\nbaseline and input table, or across the two consecutive time windows.\n- CUSTOM_METRIC_TYPE_AGGREGATE: only depend on the existing columns in your table\n- CUSTOM_METRIC_TYPE_DERIVED: depend on previously computed aggregate metrics\n- CUSTOM_METRIC_TYPE_DRIFT: depend on previously computed aggregate or derived metrics", - "fields": {} - }, - "catalog.MonitorNotifications": { - "name": "MonitorNotifications", - "package": "catalog", - "description": "monitor notifications configuration.", - "fields": { - "on_failure": { - "name": "on_failure", + }, + "schedule": { + "name": "schedule", "type": "any", - "description": "Destinations to send notifications on failure/timeout.", + "description": "", "required": false }, - "on_new_classification_tag_detected": { - "name": "on_new_classification_tag_detected", + "update_time": { + "name": "update_time", "type": "any", - "description": "Destinations to send notifications on new classification tag detected.", + "description": "The timestamp indicating when the alert was updated.", + "required": false + }, + "warehouse_id": { + "name": "warehouse_id", + "type": "any", + "description": "ID of the SQL warehouse attached to the alert.", "required": false } } }, - "catalog.MonitorSnapshot": { - "name": "MonitorSnapshot", - "package": "catalog", - "description": "Snapshot analysis configuration", - "fields": {} - }, - "catalog.MonitorTimeSeries": { - "name": "MonitorTimeSeries", - "package": "catalog", - "description": "Time series analysis configuration.", + "bundle.App": { + "name": "App", + "package": "resources", + "description": "", "fields": { - "granularities": { - "name": "granularities", + "active_deployment": { + "name": "active_deployment", "type": "any", - "description": "Granularities for aggregating data into time windows based on their timestamp. 
Currently the following static\ngranularities are supported:\n{``\\\"5 minutes\\\"``, ``\\\"30 minutes\\\"``, ``\\\"1 hour\\\"``, ``\\\"1 day\\\"``, ``\\\"\\u003cn\\u003e week(s)\\\"``, ``\\\"1 month\\\"``, ``\\\"1 year\\\"``}.", + "description": "The active deployment of the app. A deployment is considered active when it has been deployed\nto the app compute.", "required": false }, - "timestamp_col": { - "name": "timestamp_col", + "app_status": { + "name": "app_status", "type": "any", - "description": "Column for the timestamp.", + "description": "", "required": false - } - } - }, - "catalog.RegisteredModelAlias": { - "name": "RegisteredModelAlias", - "package": "catalog", - "description": "registered model alias configuration.", - "fields": { - "alias_name": { - "name": "alias_name", - "type": "string", - "description": "Name of the alias, e.g. 'champion' or 'latest_stable'", + }, + "budget_policy_id": { + "name": "budget_policy_id", + "type": "any", + "description": "", "required": false }, - "catalog_name": { - "name": "catalog_name", - "type": "string", - "description": "The name of the catalog containing the model version", + "compute_size": { + "name": "compute_size", + "type": "any", + "description": "", "required": false }, - "id": { - "name": "id", + "compute_status": { + "name": "compute_status", "type": "any", - "description": "The unique identifier of the alias", + "description": "", "required": false }, - "model_name": { - "name": "model_name", - "type": "string", - "description": "The name of the parent registered model of the model version, relative to parent schema", + "create_time": { + "name": "create_time", + "type": "any", + "description": "The creation time of the app. Formatted timestamp in ISO 6801.", "required": false }, - "schema_name": { - "name": "schema_name", - "type": "string", - "description": "The name of the schema containing the model version, relative to parent catalog", + "creator": { + "name": "creator", + "type": "any", + "description": "The email of the user that created the app.", "required": false }, - "version_num": { - "name": "version_num", + "default_source_code_path": { + "name": "default_source_code_path", "type": "any", - "description": "Integer version number of the model version to which this alias points.", + "description": "The default workspace file system path of the source code from which app deployment are\ncreated. This field tracks the workspace source code path of the last active deployment.", "required": false - } - } - }, - "catalog.VolumeType": { - "name": "VolumeType", - "package": "catalog", - "description": "volume type configuration.", - "fields": {} - }, - "compute.Adlsgen2Info": { - "name": "Adlsgen2Info", - "package": "compute", - "description": "A storage location in Adls Gen2", - "fields": { - "destination": { - "name": "destination", + }, + "description": { + "name": "description", "type": "any", - "description": "abfss destination, e.g. 
`abfss://\u003ccontainer-name\u003e@\u003cstorage-account-name\u003e.dfs.core.windows.net/\u003cdirectory-name\u003e`.", + "description": "The description of the app.", "required": false - } - } - }, - "compute.AutoScale": { - "name": "AutoScale", - "package": "compute", - "description": "auto scale configuration.", - "fields": { - "max_workers": { - "name": "max_workers", + }, + "effective_budget_policy_id": { + "name": "effective_budget_policy_id", "type": "any", - "description": "The maximum number of workers to which the cluster can scale up when overloaded.\nNote that `max_workers` must be strictly greater than `min_workers`.", + "description": "", "required": false }, - "min_workers": { - "name": "min_workers", + "effective_usage_policy_id": { + "name": "effective_usage_policy_id", "type": "any", - "description": "The minimum number of workers to which the cluster can scale down when underutilized.\nIt is also the initial number of workers the cluster will have after creation.", + "description": "", "required": false - } - } - }, - "compute.AwsAttributes": { - "name": "AwsAttributes", - "package": "compute", - "description": "Attributes set during cluster creation which are related to Amazon Web Services.", - "fields": { - "availability": { - "name": "availability", + }, + "effective_user_api_scopes": { + "name": "effective_user_api_scopes", "type": "any", - "description": "Availability type used for all subsequent nodes past the `first_on_demand` ones.\n\nNote: If `first_on_demand` is zero, this availability type will be used for the entire cluster.", + "description": "The effective api scopes granted to the user access token.", "required": false }, - "ebs_volume_count": { - "name": "ebs_volume_count", - "type": "int", - "description": "The number of volumes launched for each instance. Users can choose up to 10 volumes.\nThis feature is only enabled for supported node types. Legacy node types cannot specify\ncustom EBS volumes.\nFor node types with no instance store, at least one EBS volume needs to be specified;\notherwise, cluster creation will fail.\n\nThese EBS volumes will be mounted at `/ebs0`, `/ebs1`, and etc.\nInstance store volumes will be mounted at `/local_disk0`, `/local_disk1`, and etc.\n\nIf EBS volumes are attached, Databricks will configure Spark to use only the EBS volumes for\nscratch storage because heterogenously sized scratch devices can lead to inefficient disk\nutilization. If no EBS volumes are attached, Databricks will configure Spark to use instance\nstore volumes.\n\nPlease note that if EBS volumes are specified, then the Spark configuration `spark.local.dir`\nwill be overridden.", + "git_repository": { + "name": "git_repository", + "type": "any", + "description": "Git repository configuration for app deployments. When specified, deployments can\nreference code from this repository by providing only the git reference (branch, tag, or commit).", "required": false }, - "ebs_volume_iops": { - "name": "ebs_volume_iops", + "id": { + "name": "id", "type": "any", - "description": "If using gp3 volumes, what IOPS to use for the disk. If this is not set, the maximum performance of a gp2 volume with the same volume size will be used.", + "description": "The unique identifier of the app.", "required": false }, - "ebs_volume_size": { - "name": "ebs_volume_size", - "type": "int", - "description": "The size of each EBS volume (in GiB) launched for each instance. For general purpose\nSSD, this value must be within the range 100 - 4096. 
For throughput optimized HDD,\nthis value must be within the range 500 - 4096.", + "name": { + "name": "name", + "type": "any", + "description": "The name of the app. The name must contain only lowercase alphanumeric characters and hyphens.\nIt must be unique within the workspace.", "required": false }, - "ebs_volume_throughput": { - "name": "ebs_volume_throughput", + "oauth2_app_client_id": { + "name": "oauth2_app_client_id", "type": "any", - "description": "If using gp3 volumes, what throughput to use for the disk. If this is not set, the maximum performance of a gp2 volume with the same volume size will be used.", + "description": "", "required": false }, - "ebs_volume_type": { - "name": "ebs_volume_type", + "oauth2_app_integration_id": { + "name": "oauth2_app_integration_id", "type": "any", - "description": "The type of EBS volumes that will be launched with this cluster.", + "description": "", "required": false }, - "first_on_demand": { - "name": "first_on_demand", + "pending_deployment": { + "name": "pending_deployment", "type": "any", - "description": "The first `first_on_demand` nodes of the cluster will be placed on on-demand instances.\nIf this value is greater than 0, the cluster driver node in particular will be placed on an\non-demand instance. If this value is greater than or equal to the current cluster size, all\nnodes will be placed on on-demand instances. If this value is less than the current cluster\nsize, `first_on_demand` nodes will be placed on on-demand instances and the remainder will\nbe placed on `availability` instances. Note that this value does not affect\ncluster size and cannot currently be mutated over the lifetime of a cluster.", + "description": "The pending deployment of the app. A deployment is considered pending when it is being prepared\nfor deployment to the app compute.", "required": false }, - "instance_profile_arn": { - "name": "instance_profile_arn", + "resources": { + "name": "resources", "type": "any", - "description": "Nodes for this cluster will only be placed on AWS instances with this instance profile. If\nommitted, nodes will be placed on instances without an IAM instance profile. The instance\nprofile must have previously been added to the Databricks environment by an account\nadministrator.\n\nThis feature may only be available to certain customer plans.", + "description": "Resources for the app.", "required": false }, - "spot_bid_price_percent": { - "name": "spot_bid_price_percent", + "service_principal_client_id": { + "name": "service_principal_client_id", "type": "any", - "description": "The bid price for AWS spot instances, as a percentage of the corresponding instance type's\non-demand price.\nFor example, if this field is set to 50, and the cluster needs a new `r3.xlarge` spot\ninstance, then the bid price is half of the price of\non-demand `r3.xlarge` instances. Similarly, if this field is set to 200, the bid price is twice\nthe price of on-demand `r3.xlarge` instances. If not specified, the default value is 100.\nWhen spot instances are requested for this cluster, only spot instances whose bid price\npercentage matches this field will be considered.\nNote that, for safety, we enforce this field to be no more than 10000.", + "description": "", "required": false }, - "zone_id": { - "name": "zone_id", - "type": "string", - "description": "Identifier for the availability zone/datacenter in which the cluster resides.\nThis string will be of a form like \"us-west-2a\". 
The provided availability\nzone must be in the same region as the Databricks deployment. For example, \"us-west-2a\"\nis not a valid zone id if the Databricks deployment resides in the \"us-east-1\" region.\nThis is an optional field at cluster creation, and if not specified, the zone \"auto\" will be used.\nIf the zone specified is \"auto\", will try to place cluster in a zone with high availability,\nand will retry placement in a different AZ if there is not enough capacity.\n\nThe list of available zones as well as the default value can be found by using the\n`List Zones` method.", + "service_principal_id": { + "name": "service_principal_id", + "type": "any", + "description": "", "required": false - } - } - }, - "compute.AwsAvailability": { - "name": "AwsAvailability", - "package": "compute", - "description": "Availability type used for all subsequent nodes past the `first_on_demand` ones.\n\nNote: If `first_on_demand` is zero, this availability type will be used for the entire cluster.", - "fields": {} - }, - "compute.AzureAttributes": { - "name": "AzureAttributes", - "package": "compute", - "description": "Attributes set during cluster creation which are related to Microsoft Azure.", - "fields": { - "availability": { - "name": "availability", + }, + "service_principal_name": { + "name": "service_principal_name", "type": "any", - "description": "Availability type used for all subsequent nodes past the `first_on_demand` ones.\nNote: If `first_on_demand` is zero, this availability\ntype will be used for the entire cluster.", + "description": "", "required": false }, - "first_on_demand": { - "name": "first_on_demand", + "update_time": { + "name": "update_time", "type": "any", - "description": "The first `first_on_demand` nodes of the cluster will be placed on on-demand instances.\nThis value should be greater than 0, to make sure the cluster driver node is placed on an\non-demand instance. If this value is greater than or equal to the current cluster size, all\nnodes will be placed on on-demand instances. If this value is less than the current cluster\nsize, `first_on_demand` nodes will be placed on on-demand instances and the remainder will\nbe placed on `availability` instances. Note that this value does not affect\ncluster size and cannot currently be mutated over the lifetime of a cluster.", + "description": "The update time of the app. Formatted timestamp in ISO 6801.", "required": false }, - "log_analytics_info": { - "name": "log_analytics_info", + "updater": { + "name": "updater", "type": "any", - "description": "Defines values necessary to configure and run Azure Log Analytics agent", + "description": "The email of the user that last updated the app.", "required": false }, - "spot_bid_max_price": { - "name": "spot_bid_max_price", + "url": { + "name": "url", "type": "any", - "description": "The max bid price to be used for Azure spot instances.\nThe Max price for the bid cannot be higher than the on-demand price of the instance.\nIf not specified, the default value is -1, which specifies that the instance cannot be evicted\non the basis of price, and only on the basis of availability. 
Further, the value should \u003e 0 or -1.", + "description": "The URL of the app once it is deployed.", "required": false - } - } - }, - "compute.AzureAvailability": { - "name": "AzureAvailability", - "package": "compute", - "description": "Availability type used for all subsequent nodes past the `first_on_demand` ones.\nNote: If `first_on_demand` is zero, this availability type will be used for the entire cluster.", - "fields": {} - }, - "compute.ClientsTypes": { - "name": "ClientsTypes", - "package": "compute", - "description": "clients types configuration.", - "fields": { - "jobs": { - "name": "jobs", + }, + "usage_policy_id": { + "name": "usage_policy_id", "type": "any", - "description": "With jobs set, the cluster can be used for jobs", + "description": "", "required": false }, - "notebooks": { - "name": "notebooks", + "user_api_scopes": { + "name": "user_api_scopes", "type": "any", - "description": "With notebooks set, this cluster can be used for notebooks", + "description": "", "required": false } } }, - "compute.ClusterLogConf": { - "name": "ClusterLogConf", - "package": "compute", - "description": "Cluster log delivery config", - "fields": { - "dbfs": { - "name": "dbfs", - "type": "any", - "description": "destination needs to be provided. e.g.\n`{ \"dbfs\" : { \"destination\" : \"dbfs:/home/cluster_log\" } }`", - "required": false - }, - "s3": { - "name": "s3", - "type": "any", - "description": "destination and either the region or endpoint need to be provided. e.g.\n`{ \"s3\": { \"destination\" : \"s3://cluster_log_bucket/prefix\", \"region\" : \"us-west-2\" } }`\nCluster iam role is used to access s3, please make sure the cluster iam role in\n`instance_profile_arn` has permission to write data to the s3 destination.", - "required": false - }, - "volumes": { - "name": "volumes", - "type": "any", - "description": "destination needs to be provided, e.g.\n`{ \"volumes\": { \"destination\": \"/Volumes/catalog/schema/volume/cluster_log\" } }`", - "required": false - } - } - }, - "compute.ClusterSpec": { - "name": "ClusterSpec", - "package": "compute", - "description": "Contains a snapshot of the latest user specified settings that were used to create/edit the cluster.", + "bundle.Cluster": { + "name": "Cluster", + "package": "resources", + "description": "Contains a snapshot of the latest user specified settings that were used to create/edit the cluster.", "fields": { "apply_policy_default_values": { "name": "apply_policy_default_values", @@ -3021,7 +3788,7 @@ }, "autotermination_minutes": { "name": "autotermination_minutes", - "type": "int", + "type": "any", "description": "Automatically terminates the cluster after it is inactive for this time in minutes. If not set,\nthis cluster will not be automatically terminated. If specified, the threshold must be between\n10 and 10000 minutes.\nUsers can also set this value to 0 to explicitly disable automatic termination.", "required": false }, @@ -3045,13 +3812,13 @@ }, "cluster_name": { "name": "cluster_name", - "type": "string", + "type": "any", "description": "Cluster name requested by the user. This doesn't have to be unique.\nIf not specified at creation, the cluster name will be an empty string.\nFor job clusters, the cluster name is automatically set based on the job and job run IDs.", "required": false }, "custom_tags": { "name": "custom_tags", - "type": "map[string]string", + "type": "any", "description": "Additional tags for cluster resources. 
Databricks will tag all cluster resources (e.g., AWS\ninstances and EBS volumes) with these tags in addition to `default_tags`. Notes:\n\n- Currently, Databricks allows at most 45 custom tags\n\n- Clusters can only reuse cloud resources if the resources' tags are a subset of the cluster tags", "required": false }, @@ -3069,25 +3836,25 @@ }, "driver_instance_pool_id": { "name": "driver_instance_pool_id", - "type": "string", + "type": "any", "description": "The optional ID of the instance pool for the driver of the cluster belongs.\nThe pool cluster uses the instance pool with id (instance_pool_id) if the driver pool is not\nassigned.", "required": false }, "driver_node_type_id": { "name": "driver_node_type_id", - "type": "string", + "type": "any", "description": "The node type of the Spark driver.\nNote that this field is optional; if unset, the driver node type will be set as the same value\nas `node_type_id` defined above.\n\nThis field, along with node_type_id, should not be set if virtual_cluster_size is set.\nIf both driver_node_type_id, node_type_id, and virtual_cluster_size are specified, driver_node_type_id and node_type_id take precedence.", "required": false }, "enable_elastic_disk": { "name": "enable_elastic_disk", - "type": "bool", + "type": "any", "description": "Autoscaling Local Storage: when enabled, this cluster will dynamically acquire additional disk\nspace when its Spark workers are running low on disk space.", "required": false }, "enable_local_disk_encryption": { "name": "enable_local_disk_encryption", - "type": "bool", + "type": "any", "description": "Whether to enable LUKS on cluster VMs' local disks", "required": false }, @@ -3105,13 +3872,13 @@ }, "instance_pool_id": { "name": "instance_pool_id", - "type": "string", + "type": "any", "description": "The optional ID of the instance pool to which the cluster belongs.", "required": false }, "is_single_node": { "name": "is_single_node", - "type": "bool", + "type": "any", "description": "This field can only be used when `kind = CLASSIC_PREVIEW`.\n\nWhen set to true, Databricks will automatically set single node related `custom_tags`, `spark_conf`, and `num_workers`", "required": false }, @@ -3123,7 +3890,7 @@ }, "node_type_id": { "name": "node_type_id", - "type": "string", + "type": "any", "description": "This field encodes, through a single value, the resources available to each of\nthe Spark nodes in this cluster. For example, the Spark nodes can be provisioned\nand optimized for memory or compute intensive workloads. A list of available node\ntypes can be retrieved by using the :method:clusters/listNodeTypes API call.", "required": false }, @@ -3135,7 +3902,7 @@ }, "policy_id": { "name": "policy_id", - "type": "string", + "type": "any", "description": "The ID of the cluster policy used to create the cluster if applicable.", "required": false }, @@ -3153,7 +3920,7 @@ }, "single_user_name": { "name": "single_user_name", - "type": "string", + "type": "any", "description": "Single user name if data_security_mode is `SINGLE_USER`", "required": false }, @@ -3183,7 +3950,7 @@ }, "total_initial_remote_disk_size": { "name": "total_initial_remote_disk_size", - "type": "int", + "type": "any", "description": "If set, what the total initial volume size (in GB) of the remote disks should be. 
Currently only supported for GCP HYPERDISK_BALANCED disks.", "required": false }, @@ -3201,4962 +3968,43312 @@ } } }, - "compute.DataSecurityMode": { - "name": "DataSecurityMode", - "package": "compute", - "description": "Data security mode decides what data governance model to use when accessing data\nfrom a cluster.\n\nThe following modes can only be used when `kind = CLASSIC_PREVIEW`.\n* `DATA_SECURITY_MODE_AUTO`: Databricks will choose the most appropriate access mode depending on your compute configuration.\n* `DATA_SECURITY_MODE_STANDARD`: Alias for `USER_ISOLATION`.\n* `DATA_SECURITY_MODE_DEDICATED`: Alias for `SINGLE_USER`.\n\nThe following modes can be used regardless of `kind`.\n* `NONE`: No security isolation for multiple users sharing the cluster. Data governance features are not available in this mode.\n* `SINGLE_USER`: A secure cluster that can only be exclusively used by a single user specified in `single_user_name`. Most programming languages, cluster features and data governance features are available in this mode.\n* `USER_ISOLATION`: A secure cluster that can be shared by multiple users. Cluster users are fully isolated so that they cannot see each other's data and credentials. Most data governance features are supported in this mode. But programming languages and cluster features might be limited.\n\nThe following modes are deprecated starting with Databricks Runtime 15.0 and\nwill be removed for future Databricks Runtime versions:\n\n* `LEGACY_TABLE_ACL`: This mode is for users migrating from legacy Table ACL clusters.\n* `LEGACY_PASSTHROUGH`: This mode is for users migrating from legacy Passthrough on high concurrency clusters.\n* `LEGACY_SINGLE_USER`: This mode is for users migrating from legacy Passthrough on standard clusters.\n* `LEGACY_SINGLE_USER_STANDARD`: This mode provides a way that doesn’t have UC nor passthrough enabled.", - "fields": {} - }, - "compute.DbfsStorageInfo": { - "name": "DbfsStorageInfo", - "package": "compute", - "description": "A storage location in DBFS", + "bundle.DatabaseCatalog": { + "name": "DatabaseCatalog", + "package": "resources", + "description": "", "fields": { - "destination": { - "name": "destination", + "create_database_if_not_exists": { + "name": "create_database_if_not_exists", "type": "any", - "description": "dbfs destination, e.g. 
`dbfs:/my/path`", + "description": "", "required": false - } - } - }, - "compute.DockerBasicAuth": { - "name": "DockerBasicAuth", - "package": "compute", - "description": "docker basic auth configuration.", - "fields": { - "password": { - "name": "password", + }, + "database_instance_name": { + "name": "database_instance_name", "type": "any", - "description": "Password of the user", + "description": "The name of the DatabaseInstance housing the database.", "required": false }, - "username": { - "name": "username", + "database_name": { + "name": "database_name", "type": "any", - "description": "Name of the user", + "description": "The name of the database (in a instance) associated with the catalog.", "required": false - } - } - }, - "compute.DockerImage": { - "name": "DockerImage", - "package": "compute", - "description": "docker image configuration.", - "fields": { - "basic_auth": { - "name": "basic_auth", + }, + "name": { + "name": "name", "type": "any", - "description": "Basic auth with username and password", + "description": "The name of the catalog in UC.", "required": false }, - "url": { - "name": "url", + "uid": { + "name": "uid", "type": "any", - "description": "URL of the docker image.", + "description": "", "required": false } } }, - "compute.EbsVolumeType": { - "name": "EbsVolumeType", - "package": "compute", - "description": "All EBS volume types that Databricks supports.\nSee https://aws.amazon.com/ebs/details/ for details.", - "fields": {} - }, - "compute.Environment": { - "name": "Environment", - "package": "compute", - "description": "The environment entity used to preserve serverless environment side panel, jobs' environment for non-notebook task, and DLT's environment for classic and serverless pipelines.\nIn this minimal environment spec, only pip dependencies are supported.", + "bundle.DatabaseInstance": { + "name": "DatabaseInstance", + "package": "resources", + "description": "A DatabaseInstance represents a logical Postgres instance, comprised of both compute and storage.", "fields": { - "client": { - "name": "client", + "capacity": { + "name": "capacity", "type": "any", - "description": "Use `environment_version` instead.", - "required": false, - "deprecated": true + "description": "The sku of the instance. Valid values are \"CU_1\", \"CU_2\", \"CU_4\", \"CU_8\".", + "required": false }, - "dependencies": { - "name": "dependencies", + "child_instance_refs": { + "name": "child_instance_refs", "type": "any", - "description": "List of pip dependencies, as supported by the version of pip in this environment.\nEach dependency is a valid pip requirements file line per https://pip.pypa.io/en/stable/reference/requirements-file-format/.\nAllowed dependencies include a requirement specifier, an archive URL, a local project path (such as WSFS or UC Volumes in Databricks), or a VCS project URL.", + "description": "The refs of the child instances. This is only available if the instance is\nparent instance.", "required": false }, - "environment_version": { - "name": "environment_version", + "creation_time": { + "name": "creation_time", "type": "any", - "description": "Required. 
Environment version used by the environment.\nEach version comes with a specific Python version and a set of Python packages.\nThe version is a string, consisting of an integer.", + "description": "The timestamp when the instance was created.", "required": false }, - "java_dependencies": { - "name": "java_dependencies", + "creator": { + "name": "creator", "type": "any", - "description": "List of java dependencies. Each dependency is a string representing a java library path. For example: `/Volumes/path/to/test.jar`.", + "description": "The email of the creator of the instance.", "required": false - } - } - }, - "compute.GcpAttributes": { - "name": "GcpAttributes", - "package": "compute", - "description": "Attributes set during cluster creation which are related to GCP.", - "fields": { - "availability": { - "name": "availability", + }, + "custom_tags": { + "name": "custom_tags", "type": "any", - "description": "This field determines whether the spark executors will be scheduled to run on preemptible\nVMs, on-demand VMs, or preemptible VMs with a fallback to on-demand VMs if the former is unavailable.", + "description": "Custom tags associated with the instance. This field is only included on create and update responses.", "required": false }, - "boot_disk_size": { - "name": "boot_disk_size", - "type": "int", - "description": "Boot disk size in GB", + "effective_capacity": { + "name": "effective_capacity", + "type": "any", + "description": "Deprecated. The sku of the instance; this field will always match the value of capacity.", "required": false }, - "first_on_demand": { - "name": "first_on_demand", + "effective_custom_tags": { + "name": "effective_custom_tags", "type": "any", - "description": "The first `first_on_demand` nodes of the cluster will be placed on on-demand instances.\nThis value should be greater than 0, to make sure the cluster driver node is placed on an\non-demand instance. If this value is greater than or equal to the current cluster size, all\nnodes will be placed on on-demand instances. If this value is less than the current cluster\nsize, `first_on_demand` nodes will be placed on on-demand instances and the remainder will\nbe placed on `availability` instances. Note that this value does not affect\ncluster size and cannot currently be mutated over the lifetime of a cluster.", + "description": "The recorded custom tags associated with the instance.", "required": false }, - "google_service_account": { - "name": "google_service_account", + "effective_enable_pg_native_login": { + "name": "effective_enable_pg_native_login", "type": "any", - "description": "If provided, the cluster will impersonate the google service account when accessing\ngcloud services (like GCS). The google service account\nmust have previously been added to the Databricks environment by an account\nadministrator.", + "description": "Whether the instance has PG native password login enabled.", "required": false }, - "local_ssd_count": { - "name": "local_ssd_count", - "type": "int", - "description": "If provided, each node (workers and driver) in the cluster will have this number of local SSDs attached.\nEach local SSD is 375GB in size.\nRefer to [GCP documentation](https://cloud.google.com/compute/docs/disks/local-ssd#choose_number_local_ssds)\nfor the supported number of local SSDs for each instance type.", + "effective_enable_readable_secondaries": { + "name": "effective_enable_readable_secondaries", + "type": "any", + "description": "Whether secondaries serving read-only traffic are enabled. 
Defaults to false.", "required": false }, - "use_preemptible_executors": { - "name": "use_preemptible_executors", + "effective_node_count": { + "name": "effective_node_count", "type": "any", - "description": "This field determines whether the spark executors will be scheduled to run on preemptible\nVMs (when set to true) versus standard compute engine VMs (when set to false; default).\nNote: Soon to be deprecated, use the 'availability' field instead.", - "required": false, - "deprecated": true - }, - "zone_id": { - "name": "zone_id", - "type": "string", - "description": "Identifier for the availability zone in which the cluster resides.\nThis can be one of the following:\n- \"HA\" =\u003e High availability, spread nodes across availability zones for a Databricks deployment region [default].\n- \"AUTO\" =\u003e Databricks picks an availability zone to schedule the cluster on.\n- A GCP availability zone =\u003e Pick One of the available zones for (machine type + region) from\nhttps://cloud.google.com/compute/docs/regions-zones.", + "description": "The number of nodes in the instance, composed of 1 primary and 0 or more secondaries. Defaults to\n1 primary and 0 secondaries.", "required": false - } - } - }, - "compute.GcpAvailability": { - "name": "GcpAvailability", - "package": "compute", - "description": "This field determines whether the instance pool will contain preemptible\nVMs, on-demand VMs, or preemptible VMs with a fallback to on-demand VMs if the former is unavailable.", - "fields": {} - }, - "compute.GcsStorageInfo": { - "name": "GcsStorageInfo", - "package": "compute", - "description": "A storage location in Google Cloud Platform's GCS", - "fields": { - "destination": { - "name": "destination", + }, + "effective_retention_window_in_days": { + "name": "effective_retention_window_in_days", "type": "any", - "description": "GCS destination/URI, e.g. `gs://my-bucket/some-prefix`", + "description": "The retention window for the instance. This is the time window in days\nfor which the historical data is retained.", "required": false - } - } - }, - "compute.InitScriptInfo": { - "name": "InitScriptInfo", - "package": "compute", - "description": "Config for an individual init script\nNext ID: 11", - "fields": { - "abfss": { - "name": "abfss", + }, + "effective_stopped": { + "name": "effective_stopped", "type": "any", - "description": "destination needs to be provided, e.g.\n`abfss://\u003ccontainer-name\u003e@\u003cstorage-account-name\u003e.dfs.core.windows.net/\u003cdirectory-name\u003e`", + "description": "Whether the instance is stopped.", "required": false }, - "dbfs": { - "name": "dbfs", + "effective_usage_policy_id": { + "name": "effective_usage_policy_id", "type": "any", - "description": "destination needs to be provided. e.g.\n`{ \"dbfs\": { \"destination\" : \"dbfs:/home/cluster_log\" } }`", - "required": false, - "deprecated": true + "description": "The policy that is applied to the instance.", + "required": false }, - "file": { - "name": "file", + "enable_pg_native_login": { + "name": "enable_pg_native_login", "type": "any", - "description": "destination needs to be provided, e.g.\n`{ \"file\": { \"destination\": \"file:/my/local/file.sh\" } }`", + "description": "Whether to enable PG native password login on the instance. 
Defaults to false.", "required": false }, - "gcs": { - "name": "gcs", + "enable_readable_secondaries": { + "name": "enable_readable_secondaries", "type": "any", - "description": "destination needs to be provided, e.g.\n`{ \"gcs\": { \"destination\": \"gs://my-bucket/file.sh\" } }`", + "description": "Whether to enable secondaries to serve read-only traffic. Defaults to false.", "required": false }, - "s3": { - "name": "s3", + "name": { + "name": "name", "type": "any", - "description": "destination and either the region or endpoint need to be provided. e.g.\n`{ \\\"s3\\\": { \\\"destination\\\": \\\"s3://cluster_log_bucket/prefix\\\", \\\"region\\\": \\\"us-west-2\\\" } }`\nCluster iam role is used to access s3, please make sure the cluster iam role in\n`instance_profile_arn` has permission to write data to the s3 destination.", + "description": "The name of the instance. This is the unique identifier for the instance.", "required": false }, - "volumes": { - "name": "volumes", + "node_count": { + "name": "node_count", "type": "any", - "description": "destination needs to be provided. e.g.\n`{ \\\"volumes\\\" : { \\\"destination\\\" : \\\"/Volumes/my-init.sh\\\" } }`", + "description": "The number of nodes in the instance, composed of 1 primary and 0 or more secondaries. Defaults to\n1 primary and 0 secondaries. This field is input only, see effective_node_count for the output.", "required": false }, - "workspace": { - "name": "workspace", + "parent_instance_ref": { + "name": "parent_instance_ref", "type": "any", - "description": "destination needs to be provided, e.g.\n`{ \"workspace\": { \"destination\": \"/cluster-init-scripts/setup-datadog.sh\" } }`", + "description": "The ref of the parent instance. This is only available if the instance is\nchild instance.\nInput: For specifying the parent instance to create a child instance. Optional.\nOutput: Only populated if provided as input to create a child instance.", "required": false - } - } - }, - "compute.Library": { - "name": "Library", - "package": "compute", - "description": "library configuration.", - "fields": { - "cran": { - "name": "cran", + }, + "pg_version": { + "name": "pg_version", "type": "any", - "description": "Specification of a CRAN library to be installed as part of the library", + "description": "The version of Postgres running on the instance.", "required": false }, - "egg": { - "name": "egg", + "read_only_dns": { + "name": "read_only_dns", "type": "any", - "description": "Deprecated. URI of the egg library to install. Installing Python egg files is deprecated and is not supported in Databricks Runtime 14.0 and above.", - "required": false, - "deprecated": true + "description": "The DNS endpoint to connect to the instance for read only access. This is only available if\nenable_readable_secondaries is true.", + "required": false }, - "jar": { - "name": "jar", + "read_write_dns": { + "name": "read_write_dns", "type": "any", - "description": "URI of the JAR library to install. Supported URIs include Workspace paths, Unity Catalog Volumes paths, and S3 URIs.\nFor example: `{ \"jar\": \"/Workspace/path/to/library.jar\" }`, `{ \"jar\" : \"/Volumes/path/to/library.jar\" }` or\n`{ \"jar\": \"s3://my-bucket/library.jar\" }`.\nIf S3 is used, please make sure the cluster has read access on the library. 
You may need to\nlaunch the cluster with an IAM role to access the S3 URI.", + "description": "The DNS endpoint to connect to the instance for read+write access.", "required": false }, - "maven": { - "name": "maven", + "retention_window_in_days": { + "name": "retention_window_in_days", "type": "any", - "description": "Specification of a maven library to be installed. For example:\n`{ \"coordinates\": \"org.jsoup:jsoup:1.7.2\" }`", + "description": "The retention window for the instance. This is the time window in days\nfor which the historical data is retained. The default value is 7 days.\nValid values are 2 to 35 days.", "required": false }, - "pypi": { - "name": "pypi", + "state": { + "name": "state", "type": "any", - "description": "Specification of a PyPi library to be installed. For example:\n`{ \"package\": \"simplejson\" }`", + "description": "The current state of the instance.", "required": false }, - "requirements": { - "name": "requirements", + "stopped": { + "name": "stopped", "type": "any", - "description": "URI of the requirements.txt file to install. Only Workspace paths and Unity Catalog Volumes paths are supported.\nFor example: `{ \"requirements\": \"/Workspace/path/to/requirements.txt\" }` or `{ \"requirements\" : \"/Volumes/path/to/requirements.txt\" }`", + "description": "Whether to stop the instance. An input only param, see effective_stopped for the output.", "required": false }, - "whl": { - "name": "whl", + "uid": { + "name": "uid", "type": "any", - "description": "URI of the wheel library to install. Supported URIs include Workspace paths, Unity Catalog Volumes paths, and S3 URIs.\nFor example: `{ \"whl\": \"/Workspace/path/to/library.whl\" }`, `{ \"whl\" : \"/Volumes/path/to/library.whl\" }` or\n`{ \"whl\": \"s3://my-bucket/library.whl\" }`.\nIf S3 is used, please make sure the cluster has read access on the library. You may need to\nlaunch the cluster with an IAM role to access the S3 URI.", + "description": "An immutable UUID identifier for the instance.", "required": false - } - } - }, - "compute.LocalFileInfo": { - "name": "LocalFileInfo", - "package": "compute", - "description": "local file info configuration.", - "fields": { - "destination": { - "name": "destination", + }, + "usage_policy_id": { + "name": "usage_policy_id", "type": "any", - "description": "local file destination, e.g. 
`file:/my/local/file.sh`", + "description": "The desired usage policy to associate with the instance.", "required": false } } }, - "compute.LogAnalyticsInfo": { - "name": "LogAnalyticsInfo", - "package": "compute", - "description": "log analytics info configuration.", + "bundle.Job": { + "name": "Job", + "package": "resources", + "description": "", "fields": { - "log_analytics_primary_key": { - "name": "log_analytics_primary_key", + "budget_policy_id": { + "name": "budget_policy_id", "type": "any", - "description": "", + "description": "The id of the user specified budget policy to use for this job.\nIf not specified, a default budget policy may be applied when creating or modifying the job.\nSee `effective_budget_policy_id` for the budget policy used by this workload.", "required": false }, - "log_analytics_workspace_id": { - "name": "log_analytics_workspace_id", - "type": "string", - "description": "", - "required": false - } - } - }, - "compute.MavenLibrary": { - "name": "MavenLibrary", - "package": "compute", - "description": "maven library configuration.", - "fields": { - "coordinates": { - "name": "coordinates", + "continuous": { + "name": "continuous", "type": "any", - "description": "Gradle-style maven coordinates. For example: \"org.jsoup:jsoup:1.7.2\".", + "description": "An optional continuous property for this job. The continuous property will ensure that there is always one run executing. Only one of `schedule` and `continuous` can be used.", "required": false }, - "exclusions": { - "name": "exclusions", + "deployment": { + "name": "deployment", "type": "any", - "description": "List of dependences to exclude. For example: `[\"slf4j:slf4j\", \"*:hadoop-client\"]`.\n\nMaven dependency exclusions:\nhttps://maven.apache.org/guides/introduction/introduction-to-optional-and-excludes-dependencies.html.", + "description": "Deployment information for jobs managed by external sources.", "required": false }, - "repo": { - "name": "repo", + "description": { + "name": "description", "type": "any", - "description": "Maven repo to install the Maven package from. If omitted, both Maven Central Repository\nand Spark Packages are searched.", + "description": "An optional description for the job. The maximum length is 27700 characters in UTF-8 encoding.", "required": false - } - } - }, - "compute.PythonPyPiLibrary": { - "name": "PythonPyPiLibrary", - "package": "compute", - "description": "python py pi library configuration.", - "fields": { - "package": { - "name": "package", + }, + "edit_mode": { + "name": "edit_mode", "type": "any", - "description": "The name of the pypi package to install. An optional exact version specification is also\nsupported. Examples: \"simplejson\" and \"simplejson==3.8.0\".", + "description": "Edit mode of the job.\n\n* `UI_LOCKED`: The job is in a locked UI state and cannot be modified.\n* `EDITABLE`: The job is in an editable state and can be modified.", "required": false }, - "repo": { - "name": "repo", + "email_notifications": { + "name": "email_notifications", "type": "any", - "description": "The repository where the package can be found. 
If not specified, the default pip index is\nused.", + "description": "An optional set of email addresses that is notified when runs of this job begin or complete as well as when this job is deleted.", "required": false - } - } - }, - "compute.RCranLibrary": { - "name": "RCranLibrary", - "package": "compute", - "description": "r cran library configuration.", - "fields": { - "package": { - "name": "package", + }, + "environments": { + "name": "environments", "type": "any", - "description": "The name of the CRAN package to install.", + "description": "A list of task execution environment specifications that can be referenced by serverless tasks of this job.\nFor serverless notebook tasks, if the environment_key is not specified, the notebook environment will be used if present. If a jobs environment is specified, it will override the notebook environment.\nFor other serverless tasks, the task environment is required to be specified using environment_key in the task settings.", "required": false }, - "repo": { - "name": "repo", + "format": { + "name": "format", "type": "any", - "description": "The repository where the package can be found. If not specified, the default CRAN repo is used.", + "description": "Used to tell what is the format of the job. This field is ignored in Create/Update/Reset calls. When using the Jobs API 2.1 this value is always set to `\"MULTI_TASK\"`.", "required": false - } - } - }, - "compute.RuntimeEngine": { - "name": "RuntimeEngine", - "package": "compute", - "description": "runtime engine configuration.", - "fields": {} - }, - "compute.S3StorageInfo": { - "name": "S3StorageInfo", - "package": "compute", - "description": "A storage location in Amazon S3", - "fields": { - "canned_acl": { - "name": "canned_acl", + }, + "git_source": { + "name": "git_source", "type": "any", - "description": "(Optional) Set canned access control list for the logs, e.g. `bucket-owner-full-control`.\nIf `canned_cal` is set, please make sure the cluster iam role has `s3:PutObjectAcl` permission on\nthe destination bucket and prefix. The full list of possible canned acl can be found at\nhttp://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html#canned-acl.\nPlease also note that by default only the object owner gets full controls. If you are using cross account\nrole for writing data, you may want to set `bucket-owner-full-control` to make bucket owner able to\nread the logs.", + "description": "An optional specification for a remote Git repository containing the source code used by tasks. Version-controlled source code is supported by notebook, dbt, Python script, and SQL File tasks.\n\nIf `git_source` is set, these tasks retrieve the file from the remote repository by default. However, this behavior can be overridden by setting `source` to `WORKSPACE` on the task.\n\nNote: dbt and SQL File tasks support only version-controlled sources. If dbt or SQL File tasks are used, `git_source` must be defined on the job.", "required": false }, - "destination": { - "name": "destination", + "health": { + "name": "health", "type": "any", - "description": "S3 destination, e.g. `s3://my-bucket/some-prefix` Note that logs will be delivered using\ncluster iam role, please make sure you set cluster iam role and the role has write access to the\ndestination. 
Please also note that you cannot use AWS keys to deliver logs.", + "description": "An optional set of health rules that can be defined for this job.", "required": false }, - "enable_encryption": { - "name": "enable_encryption", - "type": "bool", - "description": "(Optional) Flag to enable server side encryption, `false` by default.", + "job_clusters": { + "name": "job_clusters", + "type": "any", + "description": "A list of job cluster specifications that can be shared and reused by tasks of this job. Libraries cannot be declared in a shared job cluster. You must declare dependent libraries in task settings.", "required": false }, - "encryption_type": { - "name": "encryption_type", + "max_concurrent_runs": { + "name": "max_concurrent_runs", "type": "any", - "description": "(Optional) The encryption type, it could be `sse-s3` or `sse-kms`. It will be used only when\nencryption is enabled and the default type is `sse-s3`.", + "description": "An optional maximum allowed number of concurrent runs of the job.\nSet this value if you want to be able to execute multiple runs of the same job concurrently.\nThis is useful for example if you trigger your job on a frequent schedule and want to allow consecutive runs to overlap with each other, or if you want to trigger multiple runs which differ by their input parameters.\nThis setting affects only new runs. For example, suppose the job’s concurrency is 4 and there are 4 concurrent active runs. Then setting the concurrency to 3 won’t kill any of the active runs.\nHowever, from then on, new runs are skipped unless there are fewer than 3 active runs.\nThis value cannot exceed 1000. Setting this value to `0` causes all new runs to be skipped.", "required": false }, - "endpoint": { - "name": "endpoint", + "name": { + "name": "name", "type": "any", - "description": "S3 endpoint, e.g. `https://s3-us-west-2.amazonaws.com`. Either region or endpoint needs to be set.\nIf both are set, endpoint will be used.", + "description": "An optional name for the job. The maximum length is 4096 bytes in UTF-8 encoding.", "required": false }, - "kms_key": { - "name": "kms_key", + "notification_settings": { + "name": "notification_settings", "type": "any", - "description": "(Optional) Kms key which will be used if encryption is enabled and encryption type is set to `sse-kms`.", + "description": "Optional notification settings that are used when sending notifications to each of the `email_notifications` and `webhook_notifications` for this job.", "required": false }, - "region": { - "name": "region", + "parameters": { + "name": "parameters", "type": "any", - "description": "S3 region, e.g. `us-west-2`. Either region or endpoint needs to be set. If both are set,\nendpoint will be used.", + "description": "Job-level parameter definitions", "required": false - } - } - }, - "compute.VolumesStorageInfo": { - "name": "VolumesStorageInfo", - "package": "compute", - "description": "A storage location back by UC Volumes.", - "fields": { - "destination": { - "name": "destination", + }, + "performance_target": { + "name": "performance_target", "type": "any", - "description": "UC Volumes destination, e.g. `/Volumes/catalog/schema/vol1/init-scripts/setup-datadog.sh`\nor `dbfs:/Volumes/catalog/schema/vol1/init-scripts/setup-datadog.sh`", + "description": "The performance mode on a serverless job. 
This field determines the level of compute performance or cost-efficiency for the run.\nThe performance target does not apply to tasks that run on Serverless GPU compute.\n\n* `STANDARD`: Enables cost-efficient execution of serverless workloads.\n* `PERFORMANCE_OPTIMIZED`: Prioritizes fast startup and execution times through rapid scaling and optimized cluster performance.", "required": false - } - } - }, - "compute.WorkloadType": { - "name": "WorkloadType", - "package": "compute", - "description": "Cluster Attributes showing for clusters workload types.", - "fields": { - "clients": { - "name": "clients", + }, + "queue": { + "name": "queue", "type": "any", - "description": "defined what type of clients can use the cluster. E.g. Notebooks, Jobs", + "description": "The queue settings of the job.", "required": false - } - } - }, - "compute.WorkspaceStorageInfo": { - "name": "WorkspaceStorageInfo", - "package": "compute", - "description": "A storage location in Workspace Filesystem (WSFS)", - "fields": { - "destination": { - "name": "destination", + }, + "run_as": { + "name": "run_as", "type": "any", - "description": "wsfs destination, e.g. `workspace:/cluster-init-scripts/setup-datadog.sh`", + "description": "The user or service principal that the job runs as, if specified in the request.\nThis field indicates the explicit configuration of `run_as` for the job.\nTo find the value in all cases, explicit or implicit, use `run_as_user_name`.", "required": false - } - } - }, - "dashboards.LifecycleState": { - "name": "LifecycleState", - "package": "dashboards", - "description": "lifecycle state configuration.", - "fields": {} - }, - "database.CustomTag": { - "name": "CustomTag", - "package": "database", - "description": "custom tag configuration.", - "fields": { - "key": { - "name": "key", + }, + "schedule": { + "name": "schedule", "type": "any", - "description": "The key of the custom tag.", + "description": "An optional periodic schedule for this job. The default behavior is that the job only runs when triggered by clicking “Run Now” in the Jobs UI or sending an API request to `runNow`.", "required": false }, - "value": { - "name": "value", + "tags": { + "name": "tags", "type": "any", - "description": "The value of the custom tag.", - "required": false - } - } - }, - "database.DatabaseInstanceRef": { - "name": "DatabaseInstanceRef", - "package": "database", - "description": "DatabaseInstanceRef is a reference to a database instance. It is used in the\nDatabaseInstance object to refer to the parent instance of an instance and\nto refer the child instances of an instance.\nTo specify as a parent instance during creation of an instance,\nthe lsn and branch_time fields are optional. If not specified, the child\ninstance will be created from the latest lsn of the parent.\nIf both lsn and branch_time are specified, the lsn will be used to create\nthe child instance.", - "fields": { - "branch_time": { - "name": "branch_time", - "type": "string (timestamp)", - "description": "Branch time of the ref database instance.\nFor a parent ref instance, this is the point in time on the parent instance from which the\ninstance was created.\nFor a child ref instance, this is the point in time on the instance from which the child\ninstance was created.\nInput: For specifying the point in time to create a child instance. Optional.\nOutput: Only populated if provided as input to create a child instance.", + "description": "A map of tags associated with the job. 
These are forwarded to the cluster as cluster tags for jobs clusters, and are subject to the same limitations as cluster tags. A maximum of 25 tags can be added to the job.", "required": false }, - "effective_lsn": { - "name": "effective_lsn", + "tasks": { + "name": "tasks", "type": "any", - "description": "For a parent ref instance, this is the LSN on the parent instance from which the\ninstance was created.\nFor a child ref instance, this is the LSN on the instance from which the child instance\nwas created.", - "required": false, - "output_only": true + "description": "A list of task specifications to be executed by this job.\nIt supports up to 1000 elements in write endpoints (:method:jobs/create, :method:jobs/reset, :method:jobs/update, :method:jobs/submit).\nRead endpoints return only 100 tasks. If more than 100 tasks are available, you can paginate through them using :method:jobs/get. Use the `next_page_token` field at the object root to determine if more results are available.", + "required": false }, - "lsn": { - "name": "lsn", + "timeout_seconds": { + "name": "timeout_seconds", "type": "any", - "description": "User-specified WAL LSN of the ref database instance.\n\nInput: For specifying the WAL LSN to create a child instance. Optional.\nOutput: Only populated if provided as input to create a child instance.", + "description": "An optional timeout applied to each run of this job. A value of `0` means no timeout.", "required": false }, - "name": { - "name": "name", + "trigger": { + "name": "trigger", "type": "any", - "description": "Name of the ref database instance.", + "description": "A configuration to trigger a run when certain conditions are met. The default behavior is that the job runs only when triggered by clicking “Run Now” in the Jobs UI or sending an API request to `runNow`.", "required": false }, - "uid": { - "name": "uid", - "type": "any", - "description": "Id of the ref database instance.", - "required": false, - "output_only": true - } - } - }, - "database.DatabaseInstanceState": { - "name": "DatabaseInstanceState", - "package": "database", - "description": "database instance state configuration.", - "fields": {} - }, - "database.DeltaTableSyncInfo": { - "name": "DeltaTableSyncInfo", - "package": "database", - "description": "delta table sync info configuration.", - "fields": { - "delta_commit_timestamp": { - "name": "delta_commit_timestamp", + "usage_policy_id": { + "name": "usage_policy_id", "type": "any", - "description": "The timestamp when the above Delta version was committed in the source Delta table.\nNote: This is the Delta commit time, not the time the data was written to the synced table.", - "required": false, - "output_only": true + "description": "The id of the user specified usage policy to use for this job.\nIf not specified, a default usage policy may be applied when creating or modifying the job.\nSee `effective_usage_policy_id` for the usage policy used by this workload.", + "required": false }, - "delta_commit_version": { - "name": "delta_commit_version", + "webhook_notifications": { + "name": "webhook_notifications", "type": "any", - "description": "The Delta Lake commit version that was last successfully synced.", - "required": false, - "output_only": true + "description": "A collection of system notification IDs to notify when runs of this job begin or complete.", + "required": false } } }, - "database.NewPipelineSpec": { - "name": "NewPipelineSpec", - "package": "database", - "description": "Custom fields that user can set for pipeline while 
creating SyncedDatabaseTable.\nNote that other fields of pipeline are still inferred by table def internally", + "bundle.MlflowExperiment": { + "name": "MlflowExperiment", + "package": "resources", + "description": "", "fields": { - "budget_policy_id": { - "name": "budget_policy_id", - "type": "string", - "description": "Budget policy to set on the newly created pipeline.", + "artifact_location": { + "name": "artifact_location", + "type": "any", + "description": "Location where all artifacts for the experiment are stored.\nIf not provided, the remote server will select an appropriate default.", "required": false }, - "storage_catalog": { - "name": "storage_catalog", + "name": { + "name": "name", "type": "any", - "description": "This field needs to be specified if the destination catalog is a managed postgres catalog.\n\nUC catalog for the pipeline to store intermediate files (checkpoints, event logs etc).\nThis needs to be a standard catalog where the user has permissions to create Delta tables.", + "description": "Experiment name.", "required": false }, - "storage_schema": { - "name": "storage_schema", + "tags": { + "name": "tags", "type": "any", - "description": "This field needs to be specified if the destination catalog is a managed postgres catalog.\n\nUC schema for the pipeline to store intermediate files (checkpoints, event logs etc).\nThis needs to be in the standard catalog where the user has permissions to create Delta tables.", + "description": "A collection of tags to set on the experiment. Maximum tag size and number of tags per request\ndepends on the storage backend. All storage backends are guaranteed to support tag keys up\nto 250 bytes in size and tag values up to 5000 bytes in size. All storage backends are also\nguaranteed to support up to 20 tags per request.", "required": false } } }, - "database.ProvisioningInfoState": { - "name": "ProvisioningInfoState", - "package": "database", - "description": "provisioning info state configuration.", - "fields": {} - }, - "database.ProvisioningPhase": { - "name": "ProvisioningPhase", - "package": "database", - "description": "provisioning phase configuration.", - "fields": {} - }, - "database.SyncedTableContinuousUpdateStatus": { - "name": "SyncedTableContinuousUpdateStatus", - "package": "database", - "description": "Detailed status of a synced table. Shown if the synced table is in the SYNCED_CONTINUOUS_UPDATE\nor the SYNCED_UPDATING_PIPELINE_RESOURCES state.", + "bundle.MlflowModel": { + "name": "MlflowModel", + "package": "resources", + "description": "", "fields": { - "initial_pipeline_sync_progress": { - "name": "initial_pipeline_sync_progress", + "description": { + "name": "description", "type": "any", - "description": "Progress of the initial data synchronization.", - "required": false, - "output_only": true + "description": "Optional description for registered model.", + "required": false }, - "last_processed_commit_version": { - "name": "last_processed_commit_version", + "name": { + "name": "name", "type": "any", - "description": "The last source table Delta version that was successfully synced to the synced table.", - "required": false, - "output_only": true + "description": "Register models under this name", + "required": false }, - "timestamp": { - "name": "timestamp", + "tags": { + "name": "tags", "type": "any", - "description": "The end timestamp of the last time any data was synchronized from the source table to the synced\ntable. 
This is when the data is available in the synced table.", - "required": false, - "output_only": true + "description": "Additional metadata for registered model.", + "required": false } } }, - "database.SyncedTableFailedStatus": { - "name": "SyncedTableFailedStatus", - "package": "database", - "description": "Detailed status of a synced table. Shown if the synced table is in the OFFLINE_FAILED or the\nSYNCED_PIPELINE_FAILED state.", + "bundle.ModelServingEndpoint": { + "name": "ModelServingEndpoint", + "package": "resources", + "description": "", "fields": { - "last_processed_commit_version": { - "name": "last_processed_commit_version", + "ai_gateway": { + "name": "ai_gateway", "type": "any", - "description": "The last source table Delta version that was successfully synced to the synced table.\nThe last source table Delta version that was synced to the synced table.\nOnly populated if the table is still\nsynced and available for serving.", - "required": false, - "output_only": true + "description": "The AI Gateway configuration for the serving endpoint. NOTE: External model, provisioned throughput, and pay-per-token endpoints are fully supported; agent endpoints currently only support inference tables.", + "required": false }, - "timestamp": { - "name": "timestamp", + "budget_policy_id": { + "name": "budget_policy_id", "type": "any", - "description": "The end timestamp of the last time any data was synchronized from the source table to the synced\ntable. Only populated if the table is still synced and available for serving.", - "required": false, - "output_only": true - } - } - }, - "database.SyncedTablePipelineProgress": { - "name": "SyncedTablePipelineProgress", - "package": "database", - "description": "Progress information of the Synced Table data synchronization pipeline.", - "fields": { - "estimated_completion_time_seconds": { - "name": "estimated_completion_time_seconds", - "type": "int", - "description": "The estimated time remaining to complete this update in seconds.", - "required": false, - "output_only": true + "description": "The budget policy to be applied to the serving endpoint.", + "required": false }, - "latest_version_currently_processing": { - "name": "latest_version_currently_processing", + "config": { + "name": "config", "type": "any", - "description": "The source table Delta version that was last processed by the pipeline. The pipeline may not\nhave completely processed this version yet.", - "required": false, - "output_only": true + "description": "The core config of the serving endpoint.", + "required": false }, - "provisioning_phase": { - "name": "provisioning_phase", + "description": { + "name": "description", "type": "any", - "description": "The current phase of the data synchronization pipeline.", - "required": false, - "output_only": true + "description": "", + "required": false }, - "sync_progress_completion": { - "name": "sync_progress_completion", + "email_notifications": { + "name": "email_notifications", "type": "any", - "description": "The completion ratio of this update. 
This is a number between 0 and 1.", - "required": false, - "output_only": true - }, - "synced_row_count": { - "name": "synced_row_count", - "type": "int", - "description": "The number of rows that have been synced in this update.", - "required": false, - "output_only": true + "description": "Email notification settings.", + "required": false }, - "total_row_count": { - "name": "total_row_count", - "type": "int", - "description": "The total number of rows that need to be synced in this update. This number may be an estimate.", - "required": false, - "output_only": true - } - } - }, - "database.SyncedTablePosition": { - "name": "SyncedTablePosition", - "package": "database", - "description": "synced table position configuration.", - "fields": { - "delta_table_sync_info": { - "name": "delta_table_sync_info", + "name": { + "name": "name", "type": "any", - "description": "", - "required": false, - "output_only": true + "description": "The name of the serving endpoint. This field is required and must be unique across a Databricks workspace.\nAn endpoint name can consist of alphanumeric characters, dashes, and underscores.", + "required": false }, - "sync_end_timestamp": { - "name": "sync_end_timestamp", + "rate_limits": { + "name": "rate_limits", "type": "any", - "description": "The end timestamp of the most recent successful synchronization.\nThis is the time when the data is available in the synced table.", - "required": false, - "output_only": true + "description": "Rate limits to be applied to the serving endpoint. NOTE: this field is deprecated, please use AI Gateway to manage rate limits.", + "required": false }, - "sync_start_timestamp": { - "name": "sync_start_timestamp", + "route_optimized": { + "name": "route_optimized", "type": "any", - "description": "The starting timestamp of the most recent successful synchronization from the source table\nto the destination (synced) table.\nNote this is the starting timestamp of the sync operation, not the end time.\nE.g., for a batch, this is the time when the sync operation started.", - "required": false, - "output_only": true - } - } - }, - "database.SyncedTableProvisioningStatus": { - "name": "SyncedTableProvisioningStatus", - "package": "database", - "description": "Detailed status of a synced table. Shown if the synced table is in the\nPROVISIONING_PIPELINE_RESOURCES or the PROVISIONING_INITIAL_SNAPSHOT state.", - "fields": { - "initial_pipeline_sync_progress": { - "name": "initial_pipeline_sync_progress", + "description": "Enable route optimization for the serving endpoint.", + "required": false + }, + "tags": { + "name": "tags", "type": "any", - "description": "Details about initial data synchronization. 
Only populated when in the\nPROVISIONING_INITIAL_SNAPSHOT state.", - "required": false, - "output_only": true + "description": "Tags to be attached to the serving endpoint and automatically propagated to billing logs.", + "required": false } } }, - "database.SyncedTableSchedulingPolicy": { - "name": "SyncedTableSchedulingPolicy", - "package": "database", - "description": "synced table scheduling policy configuration.", - "fields": {} - }, - "database.SyncedTableSpec": { - "name": "SyncedTableSpec", - "package": "database", - "description": "Specification of a synced database table.", + "bundle.Pipeline": { + "name": "Pipeline", + "package": "resources", + "description": "", "fields": { - "create_database_objects_if_missing": { - "name": "create_database_objects_if_missing", + "allow_duplicate_names": { + "name": "allow_duplicate_names", "type": "any", - "description": "If true, the synced table's logical database and schema resources in PG\nwill be created if they do not already exist.", - "required": false - }, - "existing_pipeline_id": { - "name": "existing_pipeline_id", - "type": "string", - "description": "At most one of existing_pipeline_id and new_pipeline_spec should be defined.\n\nIf existing_pipeline_id is defined, the synced table will be bin packed into the existing pipeline\nreferenced. This avoids creating a new pipeline and allows sharing existing compute.\nIn this case, the scheduling_policy of this synced table must match the scheduling policy of the existing pipeline.", + "description": "If false, deployment will fail if name conflicts with that of another pipeline.", "required": false }, - "new_pipeline_spec": { - "name": "new_pipeline_spec", + "budget_policy_id": { + "name": "budget_policy_id", "type": "any", - "description": "At most one of existing_pipeline_id and new_pipeline_spec should be defined.\n\nIf new_pipeline_spec is defined, a new pipeline is created for this synced table. The location pointed to is used\nto store intermediate files (checkpoints, event logs etc). The caller must have write permissions to create Delta\ntables in the specified catalog and schema. Again, note this requires write permissions, whereas the source table\nonly requires read permissions.", + "description": "Budget policy of this pipeline.", "required": false }, - "primary_key_columns": { - "name": "primary_key_columns", + "catalog": { + "name": "catalog", "type": "any", - "description": "Primary Key columns to be used for data insert/update in the destination.", + "description": "A catalog in Unity Catalog to publish data from this pipeline to. If `target` is specified, tables in this pipeline are published to a `target` schema inside `catalog` (for example, `catalog`.`target`.`table`). 
If `target` is not specified, no data is published to Unity Catalog.", "required": false }, - "scheduling_policy": { - "name": "scheduling_policy", + "channel": { + "name": "channel", "type": "any", - "description": "Scheduling policy of the underlying pipeline.", + "description": "DLT Release Channel that specifies which version to use.", "required": false }, - "source_table_full_name": { - "name": "source_table_full_name", - "type": "string", - "description": "Three-part (catalog, schema, table) name of the source Delta table.", + "clusters": { + "name": "clusters", + "type": "any", + "description": "Cluster settings for this pipeline deployment.", "required": false }, - "timeseries_key": { - "name": "timeseries_key", + "configuration": { + "name": "configuration", "type": "any", - "description": "Time series key to deduplicate (tie-break) rows with the same primary key.", + "description": "String-String configuration for this pipeline execution.", "required": false - } - } - }, - "database.SyncedTableState": { - "name": "SyncedTableState", - "package": "database", - "description": "The state of a synced table.", - "fields": {} - }, - "database.SyncedTableStatus": { - "name": "SyncedTableStatus", - "package": "database", - "description": "Status of a synced table.", - "fields": { - "continuous_update_status": { - "name": "continuous_update_status", + }, + "continuous": { + "name": "continuous", "type": "any", - "description": "Detailed status of a synced table. Shown if the synced table is in the SYNCED_CONTINUOUS_UPDATE\nor the SYNCED_UPDATING_PIPELINE_RESOURCES state.", + "description": "Whether the pipeline is continuous or triggered. This replaces `trigger`.", "required": false }, - "detailed_state": { - "name": "detailed_state", + "deployment": { + "name": "deployment", "type": "any", - "description": "The state of the synced table.", - "required": false, - "output_only": true + "description": "Deployment type of this pipeline.", + "required": false }, - "failed_status": { - "name": "failed_status", + "development": { + "name": "development", "type": "any", - "description": "Detailed status of a synced table. Shown if the synced table is in the OFFLINE_FAILED or the\nSYNCED_PIPELINE_FAILED state.", + "description": "Whether the pipeline is in Development mode. Defaults to false.", "required": false }, - "last_sync": { - "name": "last_sync", + "dry_run": { + "name": "dry_run", "type": "any", - "description": "Summary of the last successful synchronization from source to destination.\n\nWill always be present if there has been a successful sync. Even if the most recent syncs have failed.\n\nLimitation:\nThe only exception is if the synced table is doing a FULL REFRESH, then the last sync information\nwill not be available until the full refresh is complete. This limitation will be addressed in a future version.\n\nThis top-level field is a convenience for consumers who want easy access to last sync information\nwithout having to traverse detailed_status.", - "required": false, - "output_only": true + "description": "", + "required": false }, - "message": { - "name": "message", + "edition": { + "name": "edition", "type": "any", - "description": "A text description of the current state of the synced table.", - "required": false, - "output_only": true - }, - "pipeline_id": { - "name": "pipeline_id", - "type": "string", - "description": "ID of the associated pipeline. 
The pipeline ID may have been provided by the client\n(in the case of bin packing), or generated by the server (when creating a new pipeline).", - "required": false, - "output_only": true + "description": "Pipeline product edition.", + "required": false }, - "provisioning_status": { - "name": "provisioning_status", + "environment": { + "name": "environment", "type": "any", - "description": "Detailed status of a synced table. Shown if the synced table is in the\nPROVISIONING_PIPELINE_RESOURCES or the PROVISIONING_INITIAL_SNAPSHOT state.", + "description": "Environment specification for this pipeline used to install dependencies.", "required": false }, - "triggered_update_status": { - "name": "triggered_update_status", + "event_log": { + "name": "event_log", "type": "any", - "description": "Detailed status of a synced table. Shown if the synced table is in the SYNCED_TRIGGERED_UPDATE\nor the SYNCED_NO_PENDING_UPDATE state.", + "description": "Event log configuration for this pipeline", "required": false - } - } - }, - "database.SyncedTableTriggeredUpdateStatus": { - "name": "SyncedTableTriggeredUpdateStatus", - "package": "database", - "description": "Detailed status of a synced table. Shown if the synced table is in the SYNCED_TRIGGERED_UPDATE\nor the SYNCED_NO_PENDING_UPDATE state.", - "fields": { - "last_processed_commit_version": { - "name": "last_processed_commit_version", + }, + "filters": { + "name": "filters", "type": "any", - "description": "The last source table Delta version that was successfully synced to the synced table.", - "required": false, - "output_only": true + "description": "Filters on which Pipeline packages to include in the deployed graph.", + "required": false }, - "timestamp": { - "name": "timestamp", + "gateway_definition": { + "name": "gateway_definition", "type": "any", - "description": "The end timestamp of the last time any data was synchronized from the source table to the synced\ntable. This is when the data is available in the synced table.", - "required": false, - "output_only": true + "description": "The definition of a gateway pipeline to support change data capture.", + "required": false }, - "triggered_update_progress": { - "name": "triggered_update_progress", + "id": { + "name": "id", "type": "any", - "description": "Progress of the active data synchronization pipeline.", - "required": false, - "output_only": true - } - } - }, - "jobs.AuthenticationMethod": { - "name": "AuthenticationMethod", - "package": "jobs", - "description": "authentication method configuration.", - "fields": {} - }, - "jobs.CleanRoomsNotebookTask": { - "name": "CleanRoomsNotebookTask", - "package": "jobs", - "description": "Clean Rooms notebook task for V1 Clean Room service (GA).\nReplaces the deprecated CleanRoomNotebookTask (defined above) which was for V0 service.", - "fields": { - "clean_room_name": { - "name": "clean_room_name", - "type": "string", - "description": "The clean room that the notebook belongs to.", + "description": "Unique identifier for this pipeline.", "required": false }, - "etag": { - "name": "etag", + "ingestion_definition": { + "name": "ingestion_definition", "type": "any", - "description": "Checksum to validate the freshness of the notebook resource (i.e. the notebook being run is the latest version).\nIt can be fetched by calling the :method:cleanroomassets/get API.", + "description": "The configuration for a managed ingestion pipeline. 
These settings cannot be used with the 'libraries', 'schema', 'target', or 'catalog' settings.", "required": false }, - "notebook_base_parameters": { - "name": "notebook_base_parameters", + "libraries": { + "name": "libraries", "type": "any", - "description": "Base parameters to be used for the clean room notebook job.", + "description": "Libraries or code needed by this deployment.", "required": false }, - "notebook_name": { - "name": "notebook_name", - "type": "string", - "description": "Name of the notebook being run.", + "name": { + "name": "name", + "type": "any", + "description": "Friendly identifier for this pipeline.", "required": false - } - } - }, - "jobs.ComputeConfig": { - "name": "ComputeConfig", - "package": "jobs", - "description": "compute config configuration.", - "fields": { - "gpu_node_pool_id": { - "name": "gpu_node_pool_id", - "type": "string", - "description": "IDof the GPU pool to use.", + }, + "notifications": { + "name": "notifications", + "type": "any", + "description": "List of notification settings for this pipeline.", "required": false }, - "gpu_type": { - "name": "gpu_type", + "photon": { + "name": "photon", "type": "any", - "description": "GPU type.", + "description": "Whether Photon is enabled for this pipeline.", "required": false }, - "num_gpus": { - "name": "num_gpus", + "restart_window": { + "name": "restart_window", "type": "any", - "description": "Number of GPUs.", + "description": "Restart window of this pipeline.", "required": false - } - } - }, - "jobs.Condition": { - "name": "Condition", - "package": "jobs", - "description": "condition configuration.", - "fields": {} - }, - "jobs.ConditionTask": { - "name": "ConditionTask", - "package": "jobs", - "description": "condition task configuration.", - "fields": { - "left": { - "name": "left", + }, + "root_path": { + "name": "root_path", "type": "any", - "description": "The left operand of the condition task. Can be either a string value or a job state or parameter reference.", + "description": "Root path for this pipeline.\nThis is used as the root directory when editing the pipeline in the Databricks user interface and it is\nadded to sys.path when executing Python sources during pipeline execution.", "required": false }, - "op": { - "name": "op", + "run_as": { + "name": "run_as", "type": "any", - "description": "* `EQUAL_TO`, `NOT_EQUAL` operators perform string comparison of their operands. This means that `“12.0” == “12”` will evaluate to `false`.\n* `GREATER_THAN`, `GREATER_THAN_OR_EQUAL`, `LESS_THAN`, `LESS_THAN_OR_EQUAL` operators perform numeric comparison of their operands. `“12.0” \u003e= “12”` will evaluate to `true`, `“10.0” \u003e= “12”` will evaluate to `false`.\n\nThe boolean comparison to task values can be implemented with operators `EQUAL_TO`, `NOT_EQUAL`. If a task value was set to a boolean value, it will be serialized to `“true”` or `“false”` for the comparison.", + "description": "Write-only setting, available only in Create/Update calls. Specifies the user or service principal that the pipeline runs as. If not specified, the pipeline runs as the user who created the pipeline.\n\nOnly `user_name` or `service_principal_name` can be specified. If both are specified, an error is thrown.", "required": false }, - "right": { - "name": "right", + "schema": { + "name": "schema", "type": "any", - "description": "The right operand of the condition task. 
Can be either a string value or a job state or parameter reference.", + "description": "The default schema (database) where tables are read from or published to.", "required": false - } - } - }, - "jobs.ConditionTaskOp": { - "name": "ConditionTaskOp", - "package": "jobs", - "description": "* `EQUAL_TO`, `NOT_EQUAL` operators perform string comparison of their operands. This means that `“12.0” == “12”` will evaluate to `false`.\n* `GREATER_THAN`, `GREATER_THAN_OR_EQUAL`, `LESS_THAN`, `LESS_THAN_OR_EQUAL` operators perform numeric comparison of their operands. `“12.0” \u003e= “12”` will evaluate to `true`, `“10.0” \u003e= “12”` will evaluate to `false`.\n\nThe boolean comparison to task values can be implemented with operators `EQUAL_TO`, `NOT_EQUAL`. If a task value was set to a boolean value, it will be serialized to `“true”` or `“false”` for the comparison.", - "fields": {} - }, - "jobs.Continuous": { - "name": "Continuous", - "package": "jobs", - "description": "continuous configuration.", - "fields": { - "pause_status": { - "name": "pause_status", + }, + "serverless": { + "name": "serverless", "type": "any", - "description": "Indicate whether the continuous execution of the job is paused or not. Defaults to UNPAUSED.", + "description": "Whether serverless compute is enabled for this pipeline.", "required": false }, - "task_retry_mode": { - "name": "task_retry_mode", + "storage": { + "name": "storage", "type": "any", - "description": "Indicate whether the continuous job is applying task level retries or not. Defaults to NEVER.", + "description": "DBFS root directory for storing checkpoints and tables.", "required": false - } - } - }, - "jobs.CronSchedule": { - "name": "CronSchedule", - "package": "jobs", - "description": "cron schedule configuration.", - "fields": { - "pause_status": { - "name": "pause_status", + }, + "tags": { + "name": "tags", "type": "any", - "description": "Indicate whether this schedule is paused or not.", + "description": "A map of tags associated with the pipeline.\nThese are forwarded to the cluster as cluster tags, and are therefore subject to the same limitations.\nA maximum of 25 tags can be added to the pipeline.", "required": false }, - "quartz_cron_expression": { - "name": "quartz_cron_expression", + "target": { + "name": "target", "type": "any", - "description": "A Cron expression using Quartz syntax that describes the schedule for a job. See [Cron Trigger](http://www.quartz-scheduler.org/documentation/quartz-2.3.0/tutorials/crontrigger.html) for details. This field is required.", + "description": "Target schema (database) to add tables in this pipeline to. Exactly one of `schema` or `target` must be specified. To publish to Unity Catalog, also specify `catalog`. This legacy field is deprecated for pipeline creation in favor of the `schema` field.", "required": false }, - "timezone_id": { - "name": "timezone_id", - "type": "string", - "description": "A Java timezone ID. The schedule for a job is resolved with respect to this timezone. See [Java TimeZone](https://docs.oracle.com/javase/7/docs/api/java/util/TimeZone.html) for details. This field is required.", + "trigger": { + "name": "trigger", + "type": "any", + "description": "Which pipeline trigger to use. 
Deprecated: Use `continuous` instead.", + "required": false + }, + "usage_policy_id": { + "name": "usage_policy_id", + "type": "any", + "description": "Usage policy of this pipeline.", "required": false } } }, - "jobs.DashboardTask": { - "name": "DashboardTask", - "package": "jobs", - "description": "Configures the Lakeview Dashboard job task type.", + "bundle.QualityMonitor": { + "name": "QualityMonitor", + "package": "resources", + "description": "", "fields": { - "dashboard_id": { - "name": "dashboard_id", - "type": "string", - "description": "The identifier of the dashboard to refresh.", + "assets_dir": { + "name": "assets_dir", + "type": "any", + "description": "[Create:REQ Update:IGN] Field for specifying the absolute path to a custom directory to store data-monitoring\nassets. Normally prepopulated to a default user location via UI and Python APIs.", "required": false }, - "subscription": { - "name": "subscription", + "baseline_table_name": { + "name": "baseline_table_name", "type": "any", - "description": "Optional: subscription configuration for sending the dashboard snapshot.", + "description": "[Create:OPT Update:OPT] Baseline table name.\nBaseline data is used to compute drift from the data in the monitored `table_name`.\nThe baseline table and the monitored table shall have the same schema.", "required": false }, - "warehouse_id": { - "name": "warehouse_id", - "type": "string", - "description": "Optional: The warehouse id to execute the dashboard with for the schedule.\nIf not specified, the default warehouse of the dashboard will be used.", + "custom_metrics": { + "name": "custom_metrics", + "type": "any", + "description": "[Create:OPT Update:OPT] Custom metrics.", "required": false - } - } - }, - "jobs.DbtCloudTask": { - "name": "DbtCloudTask", - "package": "jobs", - "description": "Deprecated in favor of DbtPlatformTask", - "fields": { - "connection_resource_name": { - "name": "connection_resource_name", - "type": "string", - "description": "The resource name of the UC connection that authenticates the dbt Cloud for this task", + }, + "data_classification_config": { + "name": "data_classification_config", + "type": "any", + "description": "[Create:OPT Update:OPT] Data classification related config.", "required": false }, - "dbt_cloud_job_id": { - "name": "dbt_cloud_job_id", - "type": "string", - "description": "Id of the dbt Cloud job to be triggered", + "inference_log": { + "name": "inference_log", + "type": "any", + "description": "", "required": false - } - } - }, - "jobs.DbtPlatformTask": { - "name": "DbtPlatformTask", - "package": "jobs", - "description": "dbt platform task configuration.", - "fields": { - "connection_resource_name": { - "name": "connection_resource_name", - "type": "string", - "description": "The resource name of the UC connection that authenticates the dbt platform for this task", + }, + "latest_monitor_failure_msg": { + "name": "latest_monitor_failure_msg", + "type": "any", + "description": "[Create:ERR Update:IGN] The latest error message for a monitor failure.", "required": false }, - "dbt_platform_job_id": { - "name": "dbt_platform_job_id", - "type": "string", - "description": "Id of the dbt platform job to be triggered. 
Specified as a string for maximum compatibility with clients.", + "notifications": { + "name": "notifications", + "type": "any", + "description": "[Create:OPT Update:OPT] Field for specifying notification settings.", "required": false - } - } - }, - "jobs.DbtTask": { - "name": "DbtTask", - "package": "jobs", - "description": "dbt task configuration.", - "fields": { - "catalog": { - "name": "catalog", + }, + "output_schema_name": { + "name": "output_schema_name", "type": "any", - "description": "Optional name of the catalog to use. The value is the top level in the 3-level namespace of Unity Catalog (catalog / schema / relation). The catalog value can only be specified if a warehouse_id is specified. Requires dbt-databricks \u003e= 1.1.1.", + "description": "[Create:REQ Update:REQ] Schema where output tables are created. Needs to be in 2-level format {catalog}.{schema}", "required": false }, - "commands": { - "name": "commands", + "schedule": { + "name": "schedule", "type": "any", - "description": "A list of dbt commands to execute. All commands must start with `dbt`. This parameter must not be empty. A maximum of up to 10 commands can be provided.", + "description": "[Create:OPT Update:OPT] The monitor schedule.", "required": false }, - "profiles_directory": { - "name": "profiles_directory", + "skip_builtin_dashboard": { + "name": "skip_builtin_dashboard", "type": "any", - "description": "Optional (relative) path to the profiles directory. Can only be specified if no warehouse_id is specified. If no warehouse_id is specified and this folder is unset, the root directory is used.", + "description": "Whether to skip creating a default dashboard summarizing data quality metrics.", "required": false }, - "project_directory": { - "name": "project_directory", + "slicing_exprs": { + "name": "slicing_exprs", "type": "any", - "description": "Path to the project directory. Optional for Git sourced tasks, in which\ncase if no value is provided, the root of the Git repository is used.", + "description": "[Create:OPT Update:OPT] List of column expressions to slice data with for targeted analysis. The data is grouped by\neach expression independently, resulting in a separate slice for each predicate and its\ncomplements. For example `slicing_exprs=[“col_1”, “col_2 \u003e 10”]` will generate the following\nslices: two slices for `col_2 \u003e 10` (True and False), and one slice per unique value in\n`col1`. For high-cardinality columns, only the top 100 unique values by frequency will\ngenerate slices.", "required": false }, - "schema": { - "name": "schema", + "snapshot": { + "name": "snapshot", "type": "any", - "description": "Optional schema to write to. This parameter is only used when a warehouse_id is also provided. If not provided, the `default` schema is used.", + "description": "Configuration for monitoring snapshot tables.", "required": false }, - "source": { - "name": "source", + "time_series": { + "name": "time_series", "type": "any", - "description": "Optional location type of the project directory. When set to `WORKSPACE`, the project will be retrieved\nfrom the local Databricks workspace. When set to `GIT`, the project will be retrieved from a Git repository\ndefined in `git_source`. 
If the value is empty, the task will use `GIT` if `git_source` is defined and `WORKSPACE` otherwise.\n\n* `WORKSPACE`: Project is located in Databricks workspace.\n* `GIT`: Project is located in cloud Git provider.", + "description": "Configuration for monitoring time series tables.", "required": false }, "warehouse_id": { "name": "warehouse_id", - "type": "string", - "description": "ID of the SQL warehouse to connect to. If provided, we automatically generate and provide the profile and connection details to dbt. It can be overridden on a per-command basis by using the `--profiles-dir` command line argument.", + "type": "any", + "description": "Optional argument to specify the warehouse for dashboard creation. If not specified, the first running\nwarehouse will be used.", "required": false } } }, - "jobs.FileArrivalTriggerConfiguration": { - "name": "FileArrivalTriggerConfiguration", - "package": "jobs", - "description": "file arrival trigger configuration configuration.", + "bundle.RegisteredModel": { + "name": "RegisteredModel", + "package": "resources", + "description": "", "fields": { - "min_time_between_triggers_seconds": { - "name": "min_time_between_triggers_seconds", - "type": "int", - "description": "If set, the trigger starts a run only after the specified amount of time passed since\nthe last time the trigger fired. The minimum allowed value is 60 seconds", + "aliases": { + "name": "aliases", + "type": "any", + "description": "List of aliases associated with the registered model", "required": false }, - "url": { - "name": "url", + "browse_only": { + "name": "browse_only", "type": "any", - "description": "URL to be monitored for file arrivals. The path must point to the root or a subpath of the external location.", + "description": "Indicates whether the principal is limited to retrieving metadata for the associated object through the BROWSE privilege when include_browse is enabled in the request.", "required": false }, - "wait_after_last_change_seconds": { - "name": "wait_after_last_change_seconds", - "type": "int", - "description": "If set, the trigger starts a run only after no file activity has occurred for the specified amount of time.\nThis makes it possible to wait for a batch of incoming files to arrive before triggering a run. The\nminimum allowed value is 60 seconds.", + "catalog_name": { + "name": "catalog_name", + "type": "any", + "description": "The name of the catalog where the schema and the registered model reside", "required": false - } - } - }, - "jobs.ForEachTask": { - "name": "ForEachTask", - "package": "jobs", - "description": "for each task configuration.", - "fields": { - "concurrency": { - "name": "concurrency", + }, + "comment": { + "name": "comment", "type": "any", - "description": "An optional maximum allowed number of concurrent runs of the task.\nSet this value if you want to be able to execute multiple runs of the task concurrently.", + "description": "The comment attached to the registered model", "required": false }, - "inputs": { - "name": "inputs", + "created_at": { + "name": "created_at", "type": "any", - "description": "Array for task to iterate on. 
This can be a JSON string or a reference to\nan array parameter.", + "description": "Creation timestamp of the registered model in milliseconds since the Unix epoch", "required": false }, - "task": { - "name": "task", + "created_by": { + "name": "created_by", "type": "any", - "description": "Configuration for the task that will be run for each element in the array", + "description": "The identifier of the user who created the registered model", "required": false - } - } - }, - "jobs.Format": { - "name": "Format", - "package": "jobs", - "description": "format configuration.", - "fields": {} - }, - "jobs.GenAiComputeTask": { - "name": "GenAiComputeTask", - "package": "jobs", - "description": "gen ai compute task configuration.", - "fields": { - "command": { - "name": "command", + }, + "full_name": { + "name": "full_name", "type": "any", - "description": "Command launcher to run the actual script, e.g. bash, python etc.", + "description": "The three-level (fully qualified) name of the registered model", "required": false }, - "compute": { - "name": "compute", + "metastore_id": { + "name": "metastore_id", "type": "any", - "description": "", + "description": "The unique identifier of the metastore", "required": false }, - "dl_runtime_image": { - "name": "dl_runtime_image", + "name": { + "name": "name", "type": "any", - "description": "Runtime image", + "description": "The name of the registered model", "required": false }, - "mlflow_experiment_name": { - "name": "mlflow_experiment_name", - "type": "string", - "description": "Optional string containing the name of the MLflow experiment to log the run to. If name is not\nfound, backend will create the mlflow experiment using the name.", + "owner": { + "name": "owner", + "type": "any", + "description": "The identifier of the user who owns the registered model", "required": false }, - "source": { - "name": "source", + "schema_name": { + "name": "schema_name", "type": "any", - "description": "Optional location type of the training script. When set to `WORKSPACE`, the script will be retrieved from the local Databricks workspace. When set to `GIT`, the script will be retrieved from a Git repository\ndefined in `git_source`. If the value is empty, the task will use `GIT` if `git_source` is defined and `WORKSPACE` otherwise.\n* `WORKSPACE`: Script is located in Databricks workspace.\n* `GIT`: Script is located in cloud Git provider.", + "description": "The name of the schema where the registered model resides", "required": false }, - "training_script_path": { - "name": "training_script_path", - "type": "string", - "description": "The training script file path to be executed. Cloud file URIs (such as dbfs:/, s3:/, adls:/, gcs:/) and workspace paths are supported. For python files stored in the Databricks workspace, the path must be absolute and begin with `/`. For files stored in a remote repository, the path must be relative. 
This field is required.", + "storage_location": { + "name": "storage_location", + "type": "any", + "description": "The storage location on the cloud under which model version data files are stored", "required": false }, - "yaml_parameters": { - "name": "yaml_parameters", + "updated_at": { + "name": "updated_at", "type": "any", - "description": "Optional string containing model parameters passed to the training script in yaml format.\nIf present, then the content in yaml_parameters_file_path will be ignored.", + "description": "Last-update timestamp of the registered model in milliseconds since the Unix epoch", "required": false }, - "yaml_parameters_file_path": { - "name": "yaml_parameters_file_path", - "type": "string", - "description": "Optional path to a YAML file containing model parameters passed to the training script.", + "updated_by": { + "name": "updated_by", + "type": "any", + "description": "The identifier of the user who updated the registered model last time", "required": false } } }, - "jobs.GitProvider": { - "name": "GitProvider", - "package": "jobs", - "description": "git provider configuration.", - "fields": {} - }, - "jobs.GitSnapshot": { - "name": "GitSnapshot", - "package": "jobs", - "description": "Read-only state of the remote repository at the time the job was run. This field is only included on job runs.", + "bundle.Schema": { + "name": "Schema", + "package": "resources", + "description": "", "fields": { - "used_commit": { - "name": "used_commit", + "catalog_name": { + "name": "catalog_name", "type": "any", - "description": "Commit that was used to execute the run. If git_branch was specified, this points to the HEAD of the branch at the time of the run; if git_tag was specified, this points to the commit the tag points to.", + "description": "Name of parent catalog.", "required": false - } - } - }, - "jobs.GitSource": { - "name": "GitSource", - "package": "jobs", - "description": "An optional specification for a remote Git repository containing the source code used by tasks. Version-controlled source code is supported by notebook, dbt, Python script, and SQL File tasks.\n\nIf `git_source` is set, these tasks retrieve the file from the remote repository by default. However, this behavior can be overridden by setting `source` to `WORKSPACE` on the task.\n\nNote: dbt and SQL File tasks support only version-controlled sources. If dbt or SQL File tasks are used, `git_source` must be defined on the job.", - "fields": { - "git_branch": { - "name": "git_branch", + }, + "comment": { + "name": "comment", "type": "any", - "description": "Name of the branch to be checked out and used by this job. This field cannot be specified in conjunction with git_tag or git_commit.", + "description": "User-provided free-form text description.", "required": false }, - "git_commit": { - "name": "git_commit", + "name": { + "name": "name", "type": "any", - "description": "Commit to be checked out and used by this job. This field cannot be specified in conjunction with git_branch or git_tag.", + "description": "Name of schema, relative to parent catalog.", "required": false }, - "git_provider": { - "name": "git_provider", + "properties": { + "name": "properties", "type": "any", - "description": "Unique identifier of the service used to host the Git repository. 
The value is case insensitive.", + "description": "A map of key-value properties attached to the securable.", "required": false }, - "git_snapshot": { - "name": "git_snapshot", + "storage_root": { + "name": "storage_root", "type": "any", - "description": "Read-only state of the remote repository at the time the job was run. This field is only included on job runs.", + "description": "Storage root URL for managed tables within schema.", "required": false - }, - "git_tag": { - "name": "git_tag", + } + } + }, + "bundle.SqlWarehouse": { + "name": "SqlWarehouse", + "package": "resources", + "description": "Creates a new SQL warehouse.", + "fields": { + "auto_stop_mins": { + "name": "auto_stop_mins", "type": "any", - "description": "Name of the tag to be checked out and used by this job. This field cannot be specified in conjunction with git_branch or git_commit.", + "description": "The amount of time in minutes that a SQL warehouse must be idle (i.e., no\nRUNNING queries) before it is automatically stopped.\n\nSupported values:\n- Must be == 0 or \u003e= 10 mins\n- 0 indicates no autostop.\n\nDefaults to 120 mins", "required": false }, - "git_url": { - "name": "git_url", - "type": "string", - "description": "URL of the repository to be cloned by this job.", + "channel": { + "name": "channel", + "type": "any", + "description": "Channel Details", "required": false }, - "job_source": { - "name": "job_source", + "cluster_size": { + "name": "cluster_size", "type": "any", - "description": "The source of the job specification in the remote repository when the job is source controlled.", - "required": false, - "deprecated": true - } - } - }, - "jobs.JobCluster": { - "name": "JobCluster", - "package": "jobs", - "description": "job cluster configuration.", - "fields": { - "job_cluster_key": { - "name": "job_cluster_key", + "description": "Size of the clusters allocated for this warehouse.\nIncreasing the size of a spark cluster allows you to run larger queries on\nit. If you want to increase the number of concurrent queries, please tune\nmax_num_clusters.\n\nSupported values:\n- 2X-Small\n- X-Small\n- Small\n- Medium\n- Large\n- X-Large\n- 2X-Large\n- 3X-Large\n- 4X-Large", + "required": false + }, + "creator_name": { + "name": "creator_name", "type": "any", - "description": "A unique name for the job cluster. 
This field is required and must be unique within the job.\n`JobTaskSettings` may refer to this field to determine which cluster to launch for the task execution.", + "description": "warehouse creator name", "required": false }, - "new_cluster": { - "name": "new_cluster", + "enable_photon": { + "name": "enable_photon", "type": "any", - "description": "If new_cluster, a description of a cluster that is created for each task.", + "description": "Configures whether the warehouse should use Photon optimized clusters.\n\nDefaults to false.", "required": false - } - } - }, - "jobs.JobDeployment": { - "name": "JobDeployment", - "package": "jobs", - "description": "job deployment configuration.", - "fields": { - "kind": { - "name": "kind", + }, + "enable_serverless_compute": { + "name": "enable_serverless_compute", "type": "any", - "description": "The kind of deployment that manages the job.\n\n* `BUNDLE`: The job is managed by Databricks Asset Bundle.", + "description": "Configures whether the warehouse should use serverless compute", "required": false }, - "metadata_file_path": { - "name": "metadata_file_path", - "type": "string", - "description": "Path of the file that contains deployment metadata.", + "instance_profile_arn": { + "name": "instance_profile_arn", + "type": "any", + "description": "Deprecated. Instance profile used to pass IAM role to the cluster", "required": false - } - } - }, - "jobs.JobDeploymentKind": { - "name": "JobDeploymentKind", - "package": "jobs", - "description": "* `BUNDLE`: The job is managed by Databricks Asset Bundle.", - "fields": {} - }, - "jobs.JobEditMode": { - "name": "JobEditMode", - "package": "jobs", - "description": "Edit mode of the job.\n\n* `UI_LOCKED`: The job is in a locked UI state and cannot be modified.\n* `EDITABLE`: The job is in an editable state and can be modified.", - "fields": {} - }, - "jobs.JobEmailNotifications": { - "name": "JobEmailNotifications", - "package": "jobs", - "description": "job email notifications configuration.", - "fields": { - "no_alert_for_skipped_runs": { - "name": "no_alert_for_skipped_runs", + }, + "max_num_clusters": { + "name": "max_num_clusters", "type": "any", - "description": "If true, do not send email to recipients specified in `on_failure` if the run is skipped.\nThis field is `deprecated`. Please use the `notification_settings.no_alert_for_skipped_runs` field.", - "required": false, - "deprecated": true + "description": "Maximum number of clusters that the autoscaler will create to handle\nconcurrent queries.\n\nSupported values:\n- Must be \u003e= min_num_clusters\n- Must be \u003c= 40.\n\nDefaults to min_clusters if unset.", + "required": false }, - "on_duration_warning_threshold_exceeded": { - "name": "on_duration_warning_threshold_exceeded", + "min_num_clusters": { + "name": "min_num_clusters", "type": "any", - "description": "A list of email addresses to be notified when the duration of a run exceeds the threshold specified for the `RUN_DURATION_SECONDS` metric in the `health` field. If no rule for the `RUN_DURATION_SECONDS` metric is specified in the `health` field for the job, notifications are not sent.", + "description": "Minimum number of available clusters that will be maintained for this SQL\nwarehouse. Increasing this will ensure that a larger number of clusters are\nalways running and therefore may reduce the cold start time for new\nqueries. This is similar to reserved vs. 
revocable cores in a resource\nmanager.\n\nSupported values:\n- Must be \u003e 0\n- Must be \u003c= min(max_num_clusters, 30)\n\nDefaults to 1", "required": false }, - "on_failure": { - "name": "on_failure", + "name": { + "name": "name", "type": "any", - "description": "A list of email addresses to be notified when a run unsuccessfully completes. A run is considered to have completed unsuccessfully if it ends with an `INTERNAL_ERROR` `life_cycle_state` or a `FAILED`, or `TIMED_OUT` result_state. If this is not specified on job creation, reset, or update the list is empty, and notifications are not sent.", + "description": "Logical name for the cluster.\n\nSupported values:\n- Must be unique within an org.\n- Must be less than 100 characters.", "required": false }, - "on_start": { - "name": "on_start", + "spot_instance_policy": { + "name": "spot_instance_policy", "type": "any", - "description": "A list of email addresses to be notified when a run begins. If not specified on job creation, reset, or update, the list is empty, and notifications are not sent.", + "description": "Configurations whether the endpoint should use spot instances.", "required": false }, - "on_streaming_backlog_exceeded": { - "name": "on_streaming_backlog_exceeded", + "tags": { + "name": "tags", "type": "any", - "description": "A list of email addresses to notify when any streaming backlog thresholds are exceeded for any stream.\nStreaming backlog thresholds can be set in the `health` field using the following metrics: `STREAMING_BACKLOG_BYTES`, `STREAMING_BACKLOG_RECORDS`, `STREAMING_BACKLOG_SECONDS`, or `STREAMING_BACKLOG_FILES`.\nAlerting is based on the 10-minute average of these metrics. If the issue persists, notifications are resent every 30 minutes.", + "description": "A set of key-value pairs that will be tagged on all resources (e.g., AWS instances and EBS volumes) associated\nwith this SQL warehouse.\n\nSupported values:\n- Number of tags \u003c 45.", "required": false }, - "on_success": { - "name": "on_success", + "warehouse_type": { + "name": "warehouse_type", "type": "any", - "description": "A list of email addresses to be notified when a run successfully completes. A run is considered to have completed successfully if it ends with a `TERMINATED` `life_cycle_state` and a `SUCCESS` result_state. If not specified on job creation, reset, or update, the list is empty, and notifications are not sent.", + "description": "Warehouse type: `PRO` or `CLASSIC`. If you want to use serverless compute,\nyou must set to `PRO` and also set the field `enable_serverless_compute` to `true`.", "required": false } } }, - "jobs.JobEnvironment": { - "name": "JobEnvironment", - "package": "jobs", - "description": "job environment configuration.", + "bundle.SyncedDatabaseTable": { + "name": "SyncedDatabaseTable", + "package": "resources", + "description": "Next field marker: 18", "fields": { - "environment_key": { - "name": "environment_key", + "data_synchronization_status": { + "name": "data_synchronization_status", "type": "any", - "description": "The key of an environment. 
It has to be unique within a job.", + "description": "Synced Table data synchronization status", "required": false }, - "spec": { - "name": "spec", + "database_instance_name": { + "name": "database_instance_name", "type": "any", - "description": "The environment entity used to preserve serverless environment side panel, jobs' environment for non-notebook task, and DLT's environment for classic and serverless pipelines.\nIn this minimal environment spec, only pip dependencies are supported.", + "description": "Name of the target database instance. This is required when creating synced database tables in standard catalogs.\nThis is optional when creating synced database tables in registered catalogs. If this field is specified\nwhen creating synced database tables in registered catalogs, the database instance name MUST\nmatch that of the registered catalog (or the request will be rejected).", "required": false - } - } - }, - "jobs.JobNotificationSettings": { - "name": "JobNotificationSettings", - "package": "jobs", - "description": "Configuration settings for job notification.", - "fields": { - "no_alert_for_canceled_runs": { - "name": "no_alert_for_canceled_runs", + }, + "effective_database_instance_name": { + "name": "effective_database_instance_name", "type": "any", - "description": "If true, do not send notifications to recipients specified in `on_failure` if the run is canceled.", + "description": "The name of the database instance that this table is registered to. This field is always returned, and for\ntables inside database catalogs is inferred database instance associated with the catalog.", "required": false }, - "no_alert_for_skipped_runs": { - "name": "no_alert_for_skipped_runs", + "effective_logical_database_name": { + "name": "effective_logical_database_name", "type": "any", - "description": "If true, do not send notifications to recipients specified in `on_failure` if the run is skipped.", + "description": "The name of the logical database that this table is registered to.", "required": false - } - } - }, - "jobs.JobParameterDefinition": { - "name": "JobParameterDefinition", - "package": "jobs", - "description": "job parameter definition configuration.", - "fields": { - "default": { - "name": "default", + }, + "logical_database_name": { + "name": "logical_database_name", "type": "any", - "description": "Default value of the parameter.", + "description": "Target Postgres database object (logical database) name for this table.\n\nWhen creating a synced table in a registered Postgres catalog, the\ntarget Postgres database name is inferred to be that of the registered catalog.\nIf this field is specified in this scenario, the Postgres database name MUST\nmatch that of the registered catalog (or the request will be rejected).\n\nWhen creating a synced table in a standard catalog, this field is required.\nIn this scenario, specifying this field will allow targeting an arbitrary postgres database.\nNote that this has implications for the `create_database_objects_is_missing` field in `spec`.", "required": false }, "name": { "name": "name", "type": "any", - "description": "The name of the defined parameter. May only contain alphanumeric characters, `_`, `-`, and `.`", - "required": false - } - } - }, - "jobs.JobRunAs": { - "name": "JobRunAs", - "package": "jobs", - "description": "Write-only setting. Specifies the user or service principal that the job runs as. 
If not specified, the job runs as the user who created the job.\n\nEither `user_name` or `service_principal_name` should be specified. If not, an error is thrown.", - "fields": { - "group_name": { - "name": "group_name", - "type": "string", - "description": "Group name of an account group assigned to the workspace. Setting this field requires being a member of the group.", + "description": "Full three-part (catalog, schema, table) name of the table.", "required": false }, - "service_principal_name": { - "name": "service_principal_name", - "type": "string", - "description": "Application ID of an active service principal. Setting this field requires the `servicePrincipal/user` role.", + "spec": { + "name": "spec", + "type": "any", + "description": "Specification of a synced database table.", "required": false }, - "user_name": { - "name": "user_name", - "type": "string", - "description": "The email of an active workspace user. Non-admin users can only set this field to their own email.", + "unity_catalog_provisioning_state": { + "name": "unity_catalog_provisioning_state", + "type": "any", + "description": "The provisioning state of the synced table entity in Unity Catalog. This is distinct from the\nstate of the data synchronization pipeline (i.e. the table may be in \"ACTIVE\" but the pipeline\nmay be in \"PROVISIONING\" as it runs asynchronously).", "required": false } } }, - "jobs.JobSource": { - "name": "JobSource", - "package": "jobs", - "description": "The source of the job specification in the remote repository when the job is source controlled.", + "bundle.Volume": { + "name": "Volume", + "package": "resources", + "description": "", "fields": { - "dirty_state": { - "name": "dirty_state", + "catalog_name": { + "name": "catalog_name", "type": "any", - "description": "Dirty state indicates the job is not fully synced with the job specification in the remote repository.\n\nPossible values are:\n* `NOT_SYNCED`: The job is not yet synced with the remote job specification. Import the remote job specification from UI to make the job fully synced.\n* `DISCONNECTED`: The job is temporary disconnected from the remote job specification and is allowed for live edit. Import the remote job specification again from UI to make the job fully synced.", + "description": "The name of the catalog where the schema and the volume are", "required": false }, - "import_from_git_branch": { - "name": "import_from_git_branch", + "comment": { + "name": "comment", "type": "any", - "description": "Name of the branch which the job is imported from.", + "description": "The comment attached to the volume", "required": false }, - "job_config_path": { - "name": "job_config_path", - "type": "string", - "description": "Path of the job YAML file that contains the job specification.", - "required": false - } - } - }, - "jobs.JobSourceDirtyState": { - "name": "JobSourceDirtyState", - "package": "jobs", - "description": "Dirty state indicates the job is not fully synced with the job specification\nin the remote repository.\n\nPossible values are:\n* `NOT_SYNCED`: The job is not yet synced with the remote job specification. Import the remote job specification from UI to make the job fully synced.\n* `DISCONNECTED`: The job is temporary disconnected from the remote job specification and is allowed for live edit. 
Import the remote job specification again from UI to make the job fully synced.", - "fields": {} - }, - "jobs.JobsHealthMetric": { - "name": "JobsHealthMetric", - "package": "jobs", - "description": "Specifies the health metric that is being evaluated for a particular health rule.\n\n* `RUN_DURATION_SECONDS`: Expected total time for a run in seconds.\n* `STREAMING_BACKLOG_BYTES`: An estimate of the maximum bytes of data waiting to be consumed across all streams. This metric is in Public Preview.\n* `STREAMING_BACKLOG_RECORDS`: An estimate of the maximum offset lag across all streams. This metric is in Public Preview.\n* `STREAMING_BACKLOG_SECONDS`: An estimate of the maximum consumer delay across all streams. This metric is in Public Preview.\n* `STREAMING_BACKLOG_FILES`: An estimate of the maximum number of outstanding files across all streams. This metric is in Public Preview.", - "fields": {} - }, - "jobs.JobsHealthOperator": { - "name": "JobsHealthOperator", - "package": "jobs", - "description": "Specifies the operator used to compare the health metric value with the specified threshold.", - "fields": {} - }, - "jobs.JobsHealthRule": { - "name": "JobsHealthRule", - "package": "jobs", - "description": "jobs health rule configuration.", - "fields": { - "metric": { - "name": "metric", - "type": "any", - "description": "Specifies the health metric that is being evaluated for a particular health rule.\n\n* `RUN_DURATION_SECONDS`: Expected total time for a run in seconds.\n* `STREAMING_BACKLOG_BYTES`: An estimate of the maximum bytes of data waiting to be consumed across all streams. This metric is in Public Preview.\n* `STREAMING_BACKLOG_RECORDS`: An estimate of the maximum offset lag across all streams. This metric is in Public Preview.\n* `STREAMING_BACKLOG_SECONDS`: An estimate of the maximum consumer delay across all streams. This metric is in Public Preview.\n* `STREAMING_BACKLOG_FILES`: An estimate of the maximum number of outstanding files across all streams. This metric is in Public Preview.", + "name": { + "name": "name", + "type": "any", + "description": "The name of the volume", "required": false }, - "op": { - "name": "op", + "schema_name": { + "name": "schema_name", "type": "any", - "description": "Specifies the operator used to compare the health metric value with the specified threshold.", + "description": "The name of the schema where the volume is", "required": false }, - "value": { - "name": "value", + "storage_location": { + "name": "storage_location", "type": "any", - "description": "Specifies the threshold value that the health metric should obey to satisfy the health rule.", + "description": "The storage location on the cloud", "required": false - } - } - }, - "jobs.JobsHealthRules": { - "name": "JobsHealthRules", - "package": "jobs", - "description": "An optional set of health rules that can be defined for this job.", - "fields": { - "rules": { - "name": "rules", + }, + "volume_type": { + "name": "volume_type", "type": "any", - "description": "", + "description": "The type of the volume. 
An external volume is located in the specified external location.\nA managed volume is located in the default location which is specified by the parent schema, or the parent catalog, or the Metastore.\n[Learn more](https://docs.databricks.com/aws/en/volumes/managed-vs-external)", "required": false } } }, - "jobs.ModelTriggerConfiguration": { - "name": "ModelTriggerConfiguration", - "package": "jobs", - "description": "model trigger configuration configuration.", + "catalog.AccessRequestDestinations": { + "name": "AccessRequestDestinations", + "package": "catalog", + "description": "", "fields": { - "aliases": { - "name": "aliases", - "type": "any", - "description": "Aliases of the model versions to monitor. Can only be used in conjunction with condition MODEL_ALIAS_SET.", + "are_any_destinations_hidden": { + "name": "are_any_destinations_hidden", + "type": "bool", + "description": "Indicates whether any destinations are hidden from the caller due to a\nlack of permissions. This value is true if the caller does not have\npermission to see all destinations.", "required": false }, - "condition": { - "name": "condition", - "type": "any", - "description": "The condition based on which to trigger a job run.", + "destination_source_securable": { + "name": "destination_source_securable", + "type": "*Securable", + "description": "The source securable from which the destinations are inherited. Either\nthe same value as securable (if destination is set directly on the\nsecurable) or the nearest parent securable with destinations set.", "required": false }, - "min_time_between_triggers_seconds": { - "name": "min_time_between_triggers_seconds", - "type": "int", - "description": "If set, the trigger starts a run only after the specified amount of time has passed since\nthe last time the trigger fired. The minimum allowed value is 60 seconds.", + "destinations": { + "name": "destinations", + "type": "[]NotificationDestination", + "description": "The access request destinations for the securable.", "required": false }, - "securable_name": { - "name": "securable_name", + "full_name": { + "name": "full_name", "type": "string", - "description": "Name of the securable to monitor (\"mycatalog.myschema.mymodel\" in the case of model-level triggers,\n\"mycatalog.myschema\" in the case of schema-level triggers) or empty in the case of metastore-level triggers.", + "description": "The full name of the securable. Redundant with the name in the securable\nobject, but necessary for Terraform integration", "required": false }, - "wait_after_last_change_seconds": { - "name": "wait_after_last_change_seconds", - "type": "int", - "description": "If set, the trigger starts a run only after no model updates have occurred for the specified time\nand can be used to wait for a series of model updates before triggering a run. The\nminimum allowed value is 60 seconds.", + "securable": { + "name": "securable", + "type": "Securable", + "description": "The securable for which the access request destinations are being\nmodified or read.", + "required": false + }, + "securable_type": { + "name": "securable_type", + "type": "string", + "description": "The type of the securable. 
Redundant with the type in the securable\nobject, but necessary for Terraform integration", "required": false } } }, - "jobs.ModelTriggerConfigurationCondition": { - "name": "ModelTriggerConfigurationCondition", - "package": "jobs", - "description": "model trigger configuration condition configuration.", - "fields": {} + "catalog.AccountsCreateMetastore": { + "name": "AccountsCreateMetastore", + "package": "catalog", + "description": "Properties of the new metastore.", + "fields": { + "metastore_info": { + "name": "metastore_info", + "type": "*CreateAccountsMetastore", + "description": "", + "required": false + } + } }, - "jobs.NotebookTask": { - "name": "NotebookTask", - "package": "jobs", - "description": "notebook task configuration.", + "catalog.AccountsCreateMetastoreAssignment": { + "name": "AccountsCreateMetastoreAssignment", + "package": "catalog", + "description": "The mapping from workspace to metastore.", "fields": { - "base_parameters": { - "name": "base_parameters", - "type": "any", - "description": "Base parameters to be used for each run of this job. If the run is initiated by a call to :method:jobs/run\nNow with parameters specified, the two parameters maps are merged. If the same key is specified in\n`base_parameters` and in `run-now`, the value from `run-now` is used.\nUse [Task parameter variables](https://docs.databricks.com/jobs.html#parameter-variables) to set parameters containing information about job runs.\n\nIf the notebook takes a parameter that is not specified in the job’s `base_parameters` or the `run-now` override parameters,\nthe default value from the notebook is used.\n\nRetrieve these parameters in a notebook using [dbutils.widgets.get](https://docs.databricks.com/dev-tools/databricks-utils.html#dbutils-widgets).\n\nThe JSON representation of this field cannot exceed 1MB.", + "metastore_assignment": { + "name": "metastore_assignment", + "type": "*CreateMetastoreAssignment", + "description": "", "required": false - }, - "notebook_path": { - "name": "notebook_path", - "type": "string", - "description": "The path of the notebook to be run in the Databricks workspace or remote repository.\nFor notebooks stored in the Databricks workspace, the path must be absolute and begin with a slash.\nFor notebooks stored in a remote repository, the path must be relative. This field is required.", + } + } + }, + "catalog.AccountsCreateMetastoreResponse": { + "name": "AccountsCreateMetastoreResponse", + "package": "catalog", + "description": "", + "fields": { + "metastore_info": { + "name": "metastore_info", + "type": "*MetastoreInfo", + "description": "", "required": false - }, - "source": { - "name": "source", - "type": "any", - "description": "Optional location type of the notebook. When set to `WORKSPACE`, the notebook will be retrieved from the local Databricks workspace. When set to `GIT`, the notebook will be retrieved from a Git repository\ndefined in `git_source`. 
If the value is empty, the task will use `GIT` if `git_source` is defined and `WORKSPACE` otherwise.\n* `WORKSPACE`: Notebook is located in Databricks workspace.\n* `GIT`: Notebook is located in cloud Git provider.", + } + } + }, + "catalog.AccountsCreateStorageCredential": { + "name": "AccountsCreateStorageCredential", + "package": "catalog", + "description": "", + "fields": { + "credential_info": { + "name": "credential_info", + "type": "*CreateAccountsStorageCredential", + "description": "", "required": false }, - "warehouse_id": { - "name": "warehouse_id", - "type": "string", - "description": "Optional `warehouse_id` to run the notebook on a SQL warehouse. Classic SQL warehouses are NOT supported, please use serverless or pro SQL warehouses.\n\nNote that SQL warehouses only support SQL cells; if the notebook contains non-SQL cells, the run will fail.", + "skip_validation": { + "name": "skip_validation", + "type": "bool", + "description": "Optional, default false. Supplying true to this argument skips validation\nof the created set of credentials.", "required": false } } }, - "jobs.PauseStatus": { - "name": "PauseStatus", - "package": "jobs", - "description": "pause status configuration.", - "fields": {} + "catalog.AccountsCreateStorageCredentialInfo": { + "name": "AccountsCreateStorageCredentialInfo", + "package": "catalog", + "description": "", + "fields": { + "credential_info": { + "name": "credential_info", + "type": "*StorageCredentialInfo", + "description": "", + "required": false + } + } }, - "jobs.PerformanceTarget": { - "name": "PerformanceTarget", - "package": "jobs", - "description": "PerformanceTarget defines how performant (lower latency) or cost efficient the execution of run on serverless compute should be.\nThe performance mode on the job or pipeline should map to a performance setting that is passed to Cluster Manager\n(see cluster-common PerformanceTarget).", - "fields": {} + "catalog.AccountsGetMetastoreResponse": { + "name": "AccountsGetMetastoreResponse", + "package": "catalog", + "description": "The metastore was successfully returned.", + "fields": { + "metastore_info": { + "name": "metastore_info", + "type": "*MetastoreInfo", + "description": "", + "required": false + } + } }, - "jobs.PeriodicTriggerConfiguration": { - "name": "PeriodicTriggerConfiguration", - "package": "jobs", - "description": "periodic trigger configuration configuration.", + "catalog.AccountsListMetastoresResponse": { + "name": "AccountsListMetastoresResponse", + "package": "catalog", + "description": "Metastores were returned successfully.", "fields": { - "interval": { - "name": "interval", - "type": "any", - "description": "The interval at which the trigger should run.", + "metastores": { + "name": "metastores", + "type": "[]MetastoreInfo", + "description": "An array of metastore information objects.", "required": false - }, - "unit": { - "name": "unit", - "type": "any", - "description": "The unit of time for the interval.", + } + } + }, + "catalog.AccountsMetastoreAssignment": { + "name": "AccountsMetastoreAssignment", + "package": "catalog", + "description": "The workspace metastore assignment was successfully returned.", + "fields": { + "metastore_assignment": { + "name": "metastore_assignment", + "type": "*MetastoreAssignment", + "description": "", "required": false } } }, - "jobs.PeriodicTriggerConfigurationTimeUnit": { - "name": "PeriodicTriggerConfigurationTimeUnit", - "package": "jobs", - "description": "periodic trigger configuration time unit configuration.", - "fields": {} + 
"catalog.AccountsStorageCredentialInfo": { + "name": "AccountsStorageCredentialInfo", + "package": "catalog", + "description": "The storage credential was successfully retrieved.", + "fields": { + "credential_info": { + "name": "credential_info", + "type": "*StorageCredentialInfo", + "description": "", + "required": false + } + } }, - "jobs.PipelineParams": { - "name": "PipelineParams", - "package": "jobs", - "description": "pipeline params configuration.", + "catalog.AccountsUpdateMetastore": { + "name": "AccountsUpdateMetastore", + "package": "catalog", + "description": "Properties of the metastore to change.", "fields": { - "full_refresh": { - "name": "full_refresh", - "type": "any", - "description": "If true, triggers a full refresh on the delta live table.", + "metastore_info": { + "name": "metastore_info", + "type": "*UpdateAccountsMetastore", + "description": "Properties of the metastore to change.", "required": false } } }, - "jobs.PipelineTask": { - "name": "PipelineTask", - "package": "jobs", - "description": "pipeline task configuration.", + "catalog.AccountsUpdateMetastoreAssignment": { + "name": "AccountsUpdateMetastoreAssignment", + "package": "catalog", + "description": "The metastore assignment to update.", "fields": { - "full_refresh": { - "name": "full_refresh", - "type": "any", - "description": "If true, triggers a full refresh on the delta live table.", + "metastore_assignment": { + "name": "metastore_assignment", + "type": "*UpdateMetastoreAssignment", + "description": "", "required": false - }, - "pipeline_id": { - "name": "pipeline_id", - "type": "string", - "description": "The full name of the pipeline task to execute.", + } + } + }, + "catalog.AccountsUpdateMetastoreResponse": { + "name": "AccountsUpdateMetastoreResponse", + "package": "catalog", + "description": "The metastore update request succeeded.", + "fields": { + "metastore_info": { + "name": "metastore_info", + "type": "*MetastoreInfo", + "description": "", "required": false } } }, - "jobs.PowerBiModel": { - "name": "PowerBiModel", - "package": "jobs", - "description": "power bi model configuration.", + "catalog.AccountsUpdateStorageCredential": { + "name": "AccountsUpdateStorageCredential", + "package": "catalog", + "description": "The storage credential to update.", "fields": { - "authentication_method": { - "name": "authentication_method", - "type": "any", - "description": "How the published Power BI model authenticates to Databricks", + "credential_info": { + "name": "credential_info", + "type": "*UpdateAccountsStorageCredential", + "description": "", "required": false }, - "model_name": { - "name": "model_name", - "type": "string", - "description": "The name of the Power BI model", + "skip_validation": { + "name": "skip_validation", + "type": "bool", + "description": "Optional. 
Supplying true to this argument skips validation of the updated\nset of credentials.", + "required": false + } + } + }, + "catalog.AccountsUpdateStorageCredentialResponse": { + "name": "AccountsUpdateStorageCredentialResponse", + "package": "catalog", + "description": "The storage credential was successfully updated.", + "fields": { + "credential_info": { + "name": "credential_info", + "type": "*StorageCredentialInfo", + "description": "", + "required": false + } + } + }, + "catalog.ArtifactAllowlistInfo": { + "name": "ArtifactAllowlistInfo", + "package": "catalog", + "description": "", + "fields": { + "artifact_matchers": { + "name": "artifact_matchers", + "type": "[]ArtifactMatcher", + "description": "A list of allowed artifact match patterns.", "required": false }, - "overwrite_existing": { - "name": "overwrite_existing", - "type": "any", - "description": "Whether to overwrite existing Power BI models", + "created_at": { + "name": "created_at", + "type": "int64", + "description": "Time at which this artifact allowlist was set, in epoch milliseconds.", "required": false }, - "storage_mode": { - "name": "storage_mode", - "type": "any", - "description": "The default storage mode of the Power BI model", + "created_by": { + "name": "created_by", + "type": "string", + "description": "Username of the user who set the artifact allowlist.", "required": false }, - "workspace_name": { - "name": "workspace_name", + "metastore_id": { + "name": "metastore_id", "type": "string", - "description": "The name of the Power BI workspace of the model", + "description": "Unique identifier of parent metastore.", "required": false } } }, - "jobs.PowerBiTable": { - "name": "PowerBiTable", - "package": "jobs", - "description": "power bi table configuration.", + "catalog.ArtifactMatcher": { + "name": "ArtifactMatcher", + "package": "catalog", + "description": "", "fields": { - "catalog": { - "name": "catalog", - "type": "any", - "description": "The catalog name in Databricks", + "artifact": { + "name": "artifact", + "type": "string", + "description": "The artifact path or maven coordinate", "required": false }, - "name": { - "name": "name", - "type": "any", - "description": "The table name in Databricks", - "required": false - }, - "schema": { - "name": "schema", - "type": "any", - "description": "The schema name in Databricks", - "required": false - }, - "storage_mode": { - "name": "storage_mode", - "type": "any", - "description": "The Power BI storage mode of the table", + "match_type": { + "name": "match_type", + "type": "MatchType", + "description": "The pattern matching type of the artifact", "required": false } } }, - "jobs.PowerBiTask": { - "name": "PowerBiTask", - "package": "jobs", - "description": "power bi task configuration.", + "catalog.AwsCredentials": { + "name": "AwsCredentials", + "package": "catalog", + "description": "AWS temporary credentials for API authentication. 
Read more at\nhttps://docs.aws.amazon.com/STS/latest/APIReference/API_Credentials.html.", "fields": { - "connection_resource_name": { - "name": "connection_resource_name", + "access_key_id": { + "name": "access_key_id", "type": "string", - "description": "The resource name of the UC connection to authenticate from Databricks to Power BI", - "required": false - }, - "power_bi_model": { - "name": "power_bi_model", - "type": "any", - "description": "The semantic model to update", + "description": "The access key ID that identifies the temporary credentials.", "required": false }, - "refresh_after_update": { - "name": "refresh_after_update", - "type": "any", - "description": "Whether the model should be refreshed after the update", + "access_point": { + "name": "access_point", + "type": "string", + "description": "The Amazon Resource Name (ARN) of the S3 access point for temporary\ncredentials related the external location.", "required": false }, - "tables": { - "name": "tables", - "type": "any", - "description": "The tables to be exported to Power BI", + "secret_access_key": { + "name": "secret_access_key", + "type": "string", + "description": "The secret access key that can be used to sign AWS API requests.", "required": false }, - "warehouse_id": { - "name": "warehouse_id", + "session_token": { + "name": "session_token", "type": "string", - "description": "The SQL warehouse ID to use as the Power BI data source", + "description": "The token that users must pass to AWS API to use the temporary\ncredentials.", "required": false } } }, - "jobs.PythonWheelTask": { - "name": "PythonWheelTask", - "package": "jobs", - "description": "python wheel task configuration.", + "catalog.AwsIamRole": { + "name": "AwsIamRole", + "package": "catalog", + "description": "The AWS IAM role configuration", "fields": { - "entry_point": { - "name": "entry_point", - "type": "any", - "description": "Named entry point to use, if it does not exist in the metadata of the package it executes the function from the package directly using `$packageName.$entryPoint()`", - "required": false - }, - "named_parameters": { - "name": "named_parameters", - "type": "any", - "description": "Command-line parameters passed to Python wheel task in the form of `[\"--name=task\", \"--data=dbfs:/path/to/data.json\"]`. Leave it empty if `parameters` is not null.", + "external_id": { + "name": "external_id", + "type": "string", + "description": "The external ID used in role assumption to prevent the confused deputy\nproblem.", "required": false }, - "package_name": { - "name": "package_name", + "role_arn": { + "name": "role_arn", "type": "string", - "description": "Name of the package to execute", + "description": "The Amazon Resource Name (ARN) of the AWS IAM role used to vend temporary\ncredentials.", "required": false }, - "parameters": { - "name": "parameters", - "type": "any", - "description": "Command-line parameters passed to Python wheel task. 
Leave it empty if `named_parameters` is not null.", + "unity_catalog_iam_arn": { + "name": "unity_catalog_iam_arn", + "type": "string", + "description": "The Amazon Resource Name (ARN) of the AWS IAM user managed by Databricks.\nThis is the identity that is going to assume the AWS IAM role.", "required": false } } }, - "jobs.QueueSettings": { - "name": "QueueSettings", - "package": "jobs", - "description": "Configuration settings for queue.", + "catalog.AwsIamRoleRequest": { + "name": "AwsIamRoleRequest", + "package": "catalog", + "description": "The AWS IAM role configuration", "fields": { - "enabled": { - "name": "enabled", - "type": "bool", - "description": "If true, enable queueing for the job. This is a required field.", + "role_arn": { + "name": "role_arn", + "type": "string", + "description": "The Amazon Resource Name (ARN) of the AWS IAM role used to vend temporary\ncredentials.", "required": false } } }, - "jobs.RunIf": { - "name": "RunIf", - "package": "jobs", - "description": "An optional value indicating the condition that determines whether the task should be run once its dependencies have been completed. When omitted, defaults to `ALL_SUCCESS`.\n\nPossible values are:\n* `ALL_SUCCESS`: All dependencies have executed and succeeded\n* `AT_LEAST_ONE_SUCCESS`: At least one dependency has succeeded\n* `NONE_FAILED`: None of the dependencies have failed and at least one was executed\n* `ALL_DONE`: All dependencies have been completed\n* `AT_LEAST_ONE_FAILED`: At least one dependency failed\n* `ALL_FAILED`: ALl dependencies have failed", - "fields": {} - }, - "jobs.RunJobTask": { - "name": "RunJobTask", - "package": "jobs", - "description": "run job task configuration.", + "catalog.AwsIamRoleResponse": { + "name": "AwsIamRoleResponse", + "package": "catalog", + "description": "The AWS IAM role configuration", "fields": { - "dbt_commands": { - "name": "dbt_commands", - "type": "any", - "description": "An array of commands to execute for jobs with the dbt task, for example `\"dbt_commands\": [\"dbt deps\", \"dbt seed\", \"dbt deps\", \"dbt seed\", \"dbt run\"]`\n\n⚠ **Deprecation note** Use [job parameters](https://docs.databricks.com/jobs/job-parameters.html#job-parameter-pushdown) to pass information down to tasks.", - "required": false, - "deprecated": true - }, - "jar_params": { - "name": "jar_params", - "type": "any", - "description": "A list of parameters for jobs with Spark JAR tasks, for example `\"jar_params\": [\"john doe\", \"35\"]`.\nThe parameters are used to invoke the main function of the main class specified in the Spark JAR task.\nIf not specified upon `run-now`, it defaults to an empty list.\njar_params cannot be specified in conjunction with notebook_params.\nThe JSON representation of this field (for example `{\"jar_params\":[\"john doe\",\"35\"]}`) cannot exceed 10,000 bytes.\n\n⚠ **Deprecation note** Use [job parameters](https://docs.databricks.com/jobs/job-parameters.html#job-parameter-pushdown) to pass information down to tasks.", - "required": false, - "deprecated": true - }, - "job_id": { - "name": "job_id", + "external_id": { + "name": "external_id", "type": "string", - "description": "ID of the job to trigger.", + "description": "The external ID used in role assumption to prevent the confused deputy\nproblem.", "required": false }, - "job_parameters": { - "name": "job_parameters", - "type": "any", - "description": "Job-level parameters used to trigger the job.", + "role_arn": { + "name": "role_arn", + "type": "string", + "description": "The Amazon Resource 
Name (ARN) of the AWS IAM role used to vend temporary\ncredentials.", "required": false }, - "notebook_params": { - "name": "notebook_params", - "type": "any", - "description": "A map from keys to values for jobs with notebook task, for example `\"notebook_params\": {\"name\": \"john doe\", \"age\": \"35\"}`.\nThe map is passed to the notebook and is accessible through the [dbutils.widgets.get](https://docs.databricks.com/dev-tools/databricks-utils.html) function.\n\nIf not specified upon `run-now`, the triggered run uses the job’s base parameters.\n\nnotebook_params cannot be specified in conjunction with jar_params.\n\n⚠ **Deprecation note** Use [job parameters](https://docs.databricks.com/jobs/job-parameters.html#job-parameter-pushdown) to pass information down to tasks.\n\nThe JSON representation of this field (for example `{\"notebook_params\":{\"name\":\"john doe\",\"age\":\"35\"}}`) cannot exceed 10,000 bytes.", - "required": false, - "deprecated": true - }, - "pipeline_params": { - "name": "pipeline_params", - "type": "any", - "description": "Controls whether the pipeline should perform a full refresh", + "unity_catalog_iam_arn": { + "name": "unity_catalog_iam_arn", + "type": "string", + "description": "The Amazon Resource Name (ARN) of the AWS IAM user managed by Databricks.\nThis is the identity that is going to assume the AWS IAM role.", "required": false - }, - "python_named_params": { - "name": "python_named_params", - "type": "any", - "description": "", - "required": false, - "deprecated": true - }, - "python_params": { - "name": "python_params", - "type": "any", - "description": "A list of parameters for jobs with Python tasks, for example `\"python_params\": [\"john doe\", \"35\"]`.\nThe parameters are passed to Python file as command-line parameters. If specified upon `run-now`, it would overwrite\nthe parameters specified in job setting. The JSON representation of this field (for example `{\"python_params\":[\"john doe\",\"35\"]}`)\ncannot exceed 10,000 bytes.\n\n⚠ **Deprecation note** Use [job parameters](https://docs.databricks.com/jobs/job-parameters.html#job-parameter-pushdown) to pass information down to tasks.\n\nImportant\n\nThese parameters accept only Latin characters (ASCII character set). Using non-ASCII characters returns an error.\nExamples of invalid, non-ASCII characters are Chinese, Japanese kanjis, and emojis.", - "required": false, - "deprecated": true - }, - "spark_submit_params": { - "name": "spark_submit_params", - "type": "any", - "description": "A list of parameters for jobs with spark submit task, for example `\"spark_submit_params\": [\"--class\", \"org.apache.spark.examples.SparkPi\"]`.\nThe parameters are passed to spark-submit script as command-line parameters. If specified upon `run-now`, it would overwrite the\nparameters specified in job setting. The JSON representation of this field (for example `{\"python_params\":[\"john doe\",\"35\"]}`)\ncannot exceed 10,000 bytes.\n\n⚠ **Deprecation note** Use [job parameters](https://docs.databricks.com/jobs/job-parameters.html#job-parameter-pushdown) to pass information down to tasks.\n\nImportant\n\nThese parameters accept only Latin characters (ASCII character set). 
Using non-ASCII characters returns an error.\nExamples of invalid, non-ASCII characters are Chinese, Japanese kanjis, and emojis.", - "required": false, - "deprecated": true - }, - "sql_params": { - "name": "sql_params", - "type": "any", - "description": "A map from keys to values for jobs with SQL task, for example `\"sql_params\": {\"name\": \"john doe\", \"age\": \"35\"}`. The SQL alert task does not support custom parameters.\n\n⚠ **Deprecation note** Use [job parameters](https://docs.databricks.com/jobs/job-parameters.html#job-parameter-pushdown) to pass information down to tasks.", - "required": false, - "deprecated": true } } }, - "jobs.Source": { - "name": "Source", - "package": "jobs", - "description": "Optional location type of the SQL file. When set to `WORKSPACE`, the SQL file will be retrieved\\\nfrom the local Databricks workspace. When set to `GIT`, the SQL file will be retrieved from a Git repository\ndefined in `git_source`. If the value is empty, the task will use `GIT` if `git_source` is defined and `WORKSPACE` otherwise.\n\n* `WORKSPACE`: SQL file is located in Databricks workspace.\n* `GIT`: SQL file is located in cloud Git provider.", - "fields": {} - }, - "jobs.SparkJarTask": { - "name": "SparkJarTask", - "package": "jobs", - "description": "spark jar task configuration.", + "catalog.AwsSqsQueue": { + "name": "AwsSqsQueue", + "package": "catalog", + "description": "", "fields": { - "jar_uri": { - "name": "jar_uri", - "type": "any", - "description": "Deprecated since 04/2016. For classic compute, provide a `jar` through the `libraries` field instead. For serverless compute, provide a `jar` though the `java_dependencies` field inside the `environments` list.\n\nSee the examples of classic and serverless compute usage at the top of the page.", - "required": false, - "deprecated": true - }, - "main_class_name": { - "name": "main_class_name", + "managed_resource_id": { + "name": "managed_resource_id", "type": "string", - "description": "The full name of the class containing the main method to be executed. This class must be contained in a JAR provided as a library.\n\nThe code must use `SparkContext.getOrCreate` to obtain a Spark context; otherwise, runs of the job fail.", + "description": "Unique identifier included in the name of file events managed cloud\nresources.", "required": false }, - "parameters": { - "name": "parameters", - "type": "any", - "description": "Parameters passed to the main method.\n\nUse [Task parameter variables](https://docs.databricks.com/jobs.html#parameter-variables) to set parameters containing information about job runs.", + "queue_url": { + "name": "queue_url", + "type": "string", + "description": "The AQS queue url in the format\nhttps://sqs.{region}.amazonaws.com/{account id}/{queue name} Required for\nprovided_sqs.", "required": false - }, - "run_as_repl": { - "name": "run_as_repl", - "type": "any", - "description": "Deprecated. A value of `false` is no longer supported.", - "required": false, - "deprecated": true } } }, - "jobs.SparkPythonTask": { - "name": "SparkPythonTask", - "package": "jobs", - "description": "spark python task configuration.", + "catalog.AzureActiveDirectoryToken": { + "name": "AzureActiveDirectoryToken", + "package": "catalog", + "description": "Azure Active Directory token, essentially the Oauth token for Azure Service\nPrincipal or Managed Identity. 
Read more at\nhttps://learn.microsoft.com/en-us/azure/databricks/dev-tools/api/latest/aad/service-prin-aad-token", "fields": { - "parameters": { - "name": "parameters", - "type": "any", - "description": "Command line parameters passed to the Python file.\n\nUse [Task parameter variables](https://docs.databricks.com/jobs.html#parameter-variables) to set parameters containing information about job runs.", + "aad_token": { + "name": "aad_token", + "type": "string", + "description": "Opaque token that contains claims that you can use in Azure Active\nDirectory to access cloud services.", + "required": false + } + } + }, + "catalog.AzureManagedIdentity": { + "name": "AzureManagedIdentity", + "package": "catalog", + "description": "The Azure managed identity configuration.", + "fields": { + "access_connector_id": { + "name": "access_connector_id", + "type": "string", + "description": "The Azure resource ID of the Azure Databricks Access Connector. Use the\nformat\n`/subscriptions/{guid}/resourceGroups/{rg-name}/providers/Microsoft.Databricks/accessConnectors/{connector-name}`.", "required": false }, - "python_file": { - "name": "python_file", - "type": "any", - "description": "The Python file to be executed. Cloud file URIs (such as dbfs:/, s3:/, adls:/, gcs:/) and workspace paths are supported. For python files stored in the Databricks workspace, the path must be absolute and begin with `/`. For files stored in a remote repository, the path must be relative. This field is required.", + "credential_id": { + "name": "credential_id", + "type": "string", + "description": "The Databricks internal ID that represents this managed identity.", "required": false }, - "source": { - "name": "source", - "type": "any", - "description": "Optional location type of the Python file. When set to `WORKSPACE` or not specified, the file will be retrieved from the local\nDatabricks workspace or cloud location (if the `python_file` has a URI format). When set to `GIT`,\nthe Python file will be retrieved from a Git repository defined in `git_source`.\n\n* `WORKSPACE`: The Python file is located in a Databricks workspace or at a cloud filesystem URI.\n* `GIT`: The Python file is located in a remote Git repository.", + "managed_identity_id": { + "name": "managed_identity_id", + "type": "string", + "description": "The Azure resource ID of the managed identity. Use the format,\n`/subscriptions/{guid}/resourceGroups/{rg-name}/providers/Microsoft.ManagedIdentity/userAssignedIdentities/{identity-name}`\nThis is only available for user-assgined identities. For system-assigned\nidentities, the access_connector_id is used to identify the identity. If\nthis field is not provided, then we assume the AzureManagedIdentity is\nusing the system-assigned identity.", "required": false } } }, - "jobs.SparkSubmitTask": { - "name": "SparkSubmitTask", - "package": "jobs", - "description": "spark submit task configuration.", + "catalog.AzureManagedIdentityRequest": { + "name": "AzureManagedIdentityRequest", + "package": "catalog", + "description": "The Azure managed identity configuration.", "fields": { - "parameters": { - "name": "parameters", - "type": "any", - "description": "Command-line parameters passed to spark submit.\n\nUse [Task parameter variables](https://docs.databricks.com/jobs.html#parameter-variables) to set parameters containing information about job runs.", + "access_connector_id": { + "name": "access_connector_id", + "type": "string", + "description": "The Azure resource ID of the Azure Databricks Access Connector. 
Use the\nformat\n`/subscriptions/{guid}/resourceGroups/{rg-name}/providers/Microsoft.Databricks/accessConnectors/{connector-name}`.", + "required": false + }, + "managed_identity_id": { + "name": "managed_identity_id", + "type": "string", + "description": "The Azure resource ID of the managed identity. Use the format,\n`/subscriptions/{guid}/resourceGroups/{rg-name}/providers/Microsoft.ManagedIdentity/userAssignedIdentities/{identity-name}`\nThis is only available for user-assgined identities. For system-assigned\nidentities, the access_connector_id is used to identify the identity. If\nthis field is not provided, then we assume the AzureManagedIdentity is\nusing the system-assigned identity.", "required": false } } }, - "jobs.SqlTask": { - "name": "SqlTask", - "package": "jobs", - "description": "sql task configuration.", + "catalog.AzureManagedIdentityResponse": { + "name": "AzureManagedIdentityResponse", + "package": "catalog", + "description": "The Azure managed identity configuration.", "fields": { - "alert": { - "name": "alert", - "type": "any", - "description": "If alert, indicates that this job must refresh a SQL alert.", - "required": false - }, - "dashboard": { - "name": "dashboard", - "type": "any", - "description": "If dashboard, indicates that this job must refresh a SQL dashboard.", - "required": false - }, - "file": { - "name": "file", - "type": "any", - "description": "If file, indicates that this job runs a SQL file in a remote Git repository.", - "required": false - }, - "parameters": { - "name": "parameters", - "type": "any", - "description": "Parameters to be used for each run of this job. The SQL alert task does not support custom parameters.", + "access_connector_id": { + "name": "access_connector_id", + "type": "string", + "description": "The Azure resource ID of the Azure Databricks Access Connector. Use the\nformat\n`/subscriptions/{guid}/resourceGroups/{rg-name}/providers/Microsoft.Databricks/accessConnectors/{connector-name}`.", "required": false }, - "query": { - "name": "query", - "type": "any", - "description": "If query, indicates that this job must execute a SQL query.", + "credential_id": { + "name": "credential_id", + "type": "string", + "description": "The Databricks internal ID that represents this managed identity.", "required": false }, - "warehouse_id": { - "name": "warehouse_id", + "managed_identity_id": { + "name": "managed_identity_id", "type": "string", - "description": "The canonical identifier of the SQL warehouse. Recommended to use with serverless or pro SQL warehouses. Classic SQL warehouses are only supported for SQL alert, dashboard and query tasks and are limited to scheduled single-task jobs.", + "description": "The Azure resource ID of the managed identity. Use the format,\n`/subscriptions/{guid}/resourceGroups/{rg-name}/providers/Microsoft.ManagedIdentity/userAssignedIdentities/{identity-name}`\nThis is only available for user-assgined identities. For system-assigned\nidentities, the access_connector_id is used to identify the identity. 
If\nthis field is not provided, then we assume the AzureManagedIdentity is\nusing the system-assigned identity.", "required": false } } }, - "jobs.SqlTaskAlert": { - "name": "SqlTaskAlert", - "package": "jobs", - "description": "sql task alert configuration.", + "catalog.AzureQueueStorage": { + "name": "AzureQueueStorage", + "package": "catalog", + "description": "", "fields": { - "alert_id": { - "name": "alert_id", + "managed_resource_id": { + "name": "managed_resource_id", "type": "string", - "description": "The canonical identifier of the SQL alert.", + "description": "Unique identifier included in the name of file events managed cloud\nresources.", "required": false }, - "pause_subscriptions": { - "name": "pause_subscriptions", - "type": "any", - "description": "If true, the alert notifications are not sent to subscribers.", + "queue_url": { + "name": "queue_url", + "type": "string", + "description": "The AQS queue url in the format https://{storage\naccount}.queue.core.windows.net/{queue name} Required for provided_aqs.", "required": false }, - "subscriptions": { - "name": "subscriptions", - "type": "any", - "description": "If specified, alert notifications are sent to subscribers.", + "resource_group": { + "name": "resource_group", + "type": "string", + "description": "The resource group for the queue, event grid subscription, and external\nlocation storage account. Only required for locations with a service\nprincipal storage credential", + "required": false + }, + "subscription_id": { + "name": "subscription_id", + "type": "string", + "description": "Optional subscription id for the queue, event grid subscription, and\nexternal location storage account. Required for locations with a service\nprincipal storage credential", "required": false } } }, - "jobs.SqlTaskDashboard": { - "name": "SqlTaskDashboard", - "package": "jobs", - "description": "sql task dashboard configuration.", + "catalog.AzureServicePrincipal": { + "name": "AzureServicePrincipal", + "package": "catalog", + "description": "The Azure service principal configuration. 
Only applicable when purpose is\n**STORAGE**.", "fields": { - "custom_subject": { - "name": "custom_subject", - "type": "any", - "description": "Subject of the email sent to subscribers of this task.", - "required": false - }, - "dashboard_id": { - "name": "dashboard_id", + "application_id": { + "name": "application_id", "type": "string", - "description": "The canonical identifier of the SQL dashboard.", + "description": "The application ID of the application registration within the referenced\nAAD tenant.", "required": false }, - "pause_subscriptions": { - "name": "pause_subscriptions", - "type": "any", - "description": "If true, the dashboard snapshot is not taken, and emails are not sent to subscribers.", + "client_secret": { + "name": "client_secret", + "type": "string", + "description": "The client secret generated for the above app ID in AAD.", "required": false }, - "subscriptions": { - "name": "subscriptions", - "type": "any", - "description": "If specified, dashboard snapshots are sent to subscriptions.", + "directory_id": { + "name": "directory_id", + "type": "string", + "description": "The directory ID corresponding to the Azure Active Directory (AAD) tenant\nof the application.", "required": false } } }, - "jobs.SqlTaskFile": { - "name": "SqlTaskFile", - "package": "jobs", - "description": "sql task file configuration.", + "catalog.AzureUserDelegationSas": { + "name": "AzureUserDelegationSas", + "package": "catalog", + "description": "Azure temporary credentials for API authentication. Read more at\nhttps://docs.microsoft.com/en-us/rest/api/storageservices/create-user-delegation-sas", "fields": { - "path": { - "name": "path", - "type": "any", - "description": "Path of the SQL file. Must be relative if the source is a remote Git repository and absolute for workspace paths.", - "required": false - }, - "source": { - "name": "source", - "type": "any", - "description": "Optional location type of the SQL file. When set to `WORKSPACE`, the SQL file will be retrieved\nfrom the local Databricks workspace. When set to `GIT`, the SQL file will be retrieved from a Git repository\ndefined in `git_source`. 
If the value is empty, the task will use `GIT` if `git_source` is defined and `WORKSPACE` otherwise.\n\n* `WORKSPACE`: SQL file is located in Databricks workspace.\n* `GIT`: SQL file is located in cloud Git provider.", + "sas_token": { + "name": "sas_token", + "type": "string", + "description": "The signed URI (SAS Token) used to access blob services for a given path", "required": false } } }, - "jobs.SqlTaskQuery": { - "name": "SqlTaskQuery", - "package": "jobs", - "description": "sql task query configuration.", + "catalog.BatchCreateAccessRequestsRequest": { + "name": "BatchCreateAccessRequestsRequest", + "package": "catalog", + "description": "", "fields": { - "query_id": { - "name": "query_id", - "type": "string", - "description": "The canonical identifier of the SQL query.", + "requests": { + "name": "requests", + "type": "[]CreateAccessRequest", + "description": "A list of individual access requests, where each request corresponds to a\nset of permissions being requested on a list of securables for a\nspecified principal.\n\nAt most 30 requests per API call.", "required": false } } }, - "jobs.SqlTaskSubscription": { - "name": "SqlTaskSubscription", - "package": "jobs", - "description": "sql task subscription configuration.", + "catalog.BatchCreateAccessRequestsResponse": { + "name": "BatchCreateAccessRequestsResponse", + "package": "catalog", + "description": "", "fields": { - "destination_id": { - "name": "destination_id", - "type": "string", - "description": "The canonical identifier of the destination to receive email notification. This parameter is mutually exclusive with user_name. You cannot set both destination_id and user_name for subscription notifications.", - "required": false - }, - "user_name": { - "name": "user_name", - "type": "string", - "description": "The user name to receive the subscription email. This parameter is mutually exclusive with destination_id. 
You cannot set both destination_id and user_name for subscription notifications.", + "responses": { + "name": "responses", + "type": "[]CreateAccessRequestResponse", + "description": "The access request destinations for each securable object the principal\nrequested.", "required": false } } }, - "jobs.StorageMode": { - "name": "StorageMode", - "package": "jobs", - "description": "storage mode configuration.", - "fields": {} - }, - "jobs.Subscription": { - "name": "Subscription", - "package": "jobs", - "description": "subscription configuration.", + "catalog.CatalogInfo": { + "name": "CatalogInfo", + "package": "catalog", + "description": "", "fields": { - "custom_subject": { - "name": "custom_subject", - "type": "any", - "description": "Optional: Allows users to specify a custom subject line on the email sent\nto subscribers.", + "browse_only": { + "name": "browse_only", + "type": "bool", + "description": "Indicates whether the principal is limited to retrieving metadata for the\nassociated object through the BROWSE privilege when include_browse is\nenabled in the request.", "required": false }, - "paused": { - "name": "paused", - "type": "any", - "description": "When true, the subscription will not send emails.", + "catalog_type": { + "name": "catalog_type", + "type": "CatalogType", + "description": "", "required": false }, - "subscribers": { - "name": "subscribers", - "type": "any", - "description": "The list of subscribers to send the snapshot of the dashboard to.", - "required": false - } - } - }, - "jobs.SubscriptionSubscriber": { - "name": "SubscriptionSubscriber", - "package": "jobs", - "description": "subscription subscriber configuration.", - "fields": { - "destination_id": { - "name": "destination_id", + "comment": { + "name": "comment", "type": "string", - "description": "A snapshot of the dashboard will be sent to the destination when the `destination_id` field is present.", + "description": "User-provided free-form text description.", "required": false }, - "user_name": { - "name": "user_name", + "connection_name": { + "name": "connection_name", "type": "string", - "description": "A snapshot of the dashboard will be sent to the user's email when the `user_name` field is present.", + "description": "The name of the connection to an external data source.", "required": false - } - } - }, - "jobs.TableUpdateTriggerConfiguration": { - "name": "TableUpdateTriggerConfiguration", - "package": "jobs", - "description": "table update trigger configuration configuration.", - "fields": { - "condition": { - "name": "condition", - "type": "any", - "description": "The table(s) condition based on which to trigger a job run.", + }, + "created_at": { + "name": "created_at", + "type": "int64", + "description": "Time at which this catalog was created, in epoch milliseconds.", "required": false }, - "min_time_between_triggers_seconds": { - "name": "min_time_between_triggers_seconds", - "type": "int", - "description": "If set, the trigger starts a run only after the specified amount of time has passed since\nthe last time the trigger fired. The minimum allowed value is 60 seconds.", + "created_by": { + "name": "created_by", + "type": "string", + "description": "Username of catalog creator.", "required": false }, - "table_names": { - "name": "table_names", - "type": "any", - "description": "A list of tables to monitor for changes. 
The table name must be in the format `catalog_name.schema_name.table_name`.", + "effective_predictive_optimization_flag": { + "name": "effective_predictive_optimization_flag", + "type": "*EffectivePredictiveOptimizationFlag", + "description": "", "required": false }, - "wait_after_last_change_seconds": { - "name": "wait_after_last_change_seconds", - "type": "int", - "description": "If set, the trigger starts a run only after no table updates have occurred for the specified time\nand can be used to wait for a series of table updates before triggering a run. The\nminimum allowed value is 60 seconds.", + "enable_predictive_optimization": { + "name": "enable_predictive_optimization", + "type": "EnablePredictiveOptimization", + "description": "Whether predictive optimization should be enabled for this object and\nobjects under it.", "required": false - } - } - }, - "jobs.Task": { - "name": "Task", - "package": "jobs", - "description": "task configuration.", - "fields": { - "clean_rooms_notebook_task": { - "name": "clean_rooms_notebook_task", - "type": "any", - "description": "The task runs a [clean rooms](https://docs.databricks.com/clean-rooms/index.html) notebook\nwhen the `clean_rooms_notebook_task` field is present.", + }, + "full_name": { + "name": "full_name", + "type": "string", + "description": "The full name of the catalog. Corresponds with the name field.", "required": false }, - "condition_task": { - "name": "condition_task", - "type": "any", - "description": "The task evaluates a condition that can be used to control the execution of other tasks when the `condition_task` field is present.\nThe condition task does not require a cluster to execute and does not support retries or notifications.", + "isolation_mode": { + "name": "isolation_mode", + "type": "CatalogIsolationMode", + "description": "Whether the current securable is accessible from all workspaces or a\nspecific set of workspaces.", "required": false }, - "dashboard_task": { - "name": "dashboard_task", - "type": "any", - "description": "The task refreshes a dashboard and sends a snapshot to subscribers.", + "metastore_id": { + "name": "metastore_id", + "type": "string", + "description": "Unique identifier of parent metastore.", "required": false }, - "dbt_cloud_task": { - "name": "dbt_cloud_task", - "type": "any", - "description": "Task type for dbt cloud, deprecated in favor of the new name dbt_platform_task", - "required": false, - "deprecated": true + "name": { + "name": "name", + "type": "string", + "description": "Name of catalog.", + "required": false }, - "dbt_platform_task": { - "name": "dbt_platform_task", - "type": "any", - "description": "", + "options": { + "name": "options", + "type": "map[string]string", + "description": "A map of key-value properties attached to the securable.", "required": false }, - "dbt_task": { - "name": "dbt_task", - "type": "any", - "description": "The task runs one or more dbt commands when the `dbt_task` field is present. The dbt task requires both Databricks SQL and the ability to use a serverless or a pro SQL warehouse.", + "owner": { + "name": "owner", + "type": "string", + "description": "Username of current owner of catalog.", "required": false }, - "depends_on": { - "name": "depends_on", - "type": "any", - "description": "An optional array of objects specifying the dependency graph of the task. All tasks specified in this field must complete before executing this task. 
The task will run only if the `run_if` condition is true.\nThe key is `task_key`, and the value is the name assigned to the dependent task.", + "properties": { + "name": "properties", + "type": "map[string]string", + "description": "A map of key-value properties attached to the securable.", "required": false }, - "description": { - "name": "description", + "provider_name": { + "name": "provider_name", "type": "string", - "description": "An optional description for this task.", + "description": "The name of delta sharing provider.\n\nA Delta Sharing catalog is a catalog that is based on a Delta share on a\nremote sharing server.", "required": false }, - "disable_auto_optimization": { - "name": "disable_auto_optimization", - "type": "any", - "description": "An option to disable auto optimization in serverless", + "provisioning_info": { + "name": "provisioning_info", + "type": "*ProvisioningInfo", + "description": "", "required": false }, - "disabled": { - "name": "disabled", - "type": "any", - "description": "An optional flag to disable the task. If set to true, the task will not run even if it is part of a job.", + "securable_type": { + "name": "securable_type", + "type": "SecurableType", + "description": "", "required": false }, - "email_notifications": { - "name": "email_notifications", - "type": "any", - "description": "An optional set of email addresses that is notified when runs of this task begin or complete as well as when this task is deleted. The default behavior is to not send any emails.", + "share_name": { + "name": "share_name", + "type": "string", + "description": "The name of the share under the share provider.", "required": false }, - "environment_key": { - "name": "environment_key", - "type": "any", - "description": "The key that references an environment spec in a job. This field is required for Python script, Python wheel and dbt tasks when using serverless compute.", + "storage_location": { + "name": "storage_location", + "type": "string", + "description": "Storage Location URL (full path) for managed tables within catalog.", "required": false }, - "existing_cluster_id": { - "name": "existing_cluster_id", + "storage_root": { + "name": "storage_root", "type": "string", - "description": "If existing_cluster_id, the ID of an existing cluster that is used for all runs.\nWhen running jobs or tasks on an existing cluster, you may need to manually restart\nthe cluster if it stops responding. We suggest running jobs and tasks on new clusters for\ngreater reliability", + "description": "Storage root URL for managed tables within catalog.", "required": false }, - "for_each_task": { - "name": "for_each_task", - "type": "any", - "description": "The task executes a nested task for every input provided when the `for_each_task` field is present.", + "updated_at": { + "name": "updated_at", + "type": "int64", + "description": "Time at which this catalog was last modified, in epoch milliseconds.", "required": false }, - "gen_ai_compute_task": { - "name": "gen_ai_compute_task", - "type": "any", - "description": "", + "updated_by": { + "name": "updated_by", + "type": "string", + "description": "Username of user who last modified catalog.", + "required": false + } + } + }, + "catalog.CloudflareApiToken": { + "name": "CloudflareApiToken", + "package": "catalog", + "description": "The Cloudflare API token configuration. 
Read more at\nhttps://developers.cloudflare.com/r2/api/s3/tokens/", + "fields": { + "access_key_id": { + "name": "access_key_id", + "type": "string", + "description": "The access key ID associated with the API token.", "required": false }, - "health": { - "name": "health", - "type": "any", - "description": "An optional set of health rules that can be defined for this job.", + "account_id": { + "name": "account_id", + "type": "string", + "description": "The ID of the account associated with the API token.", "required": false }, - "job_cluster_key": { - "name": "job_cluster_key", - "type": "any", - "description": "If job_cluster_key, this task is executed reusing the cluster specified in `job.settings.job_clusters`.", + "secret_access_key": { + "name": "secret_access_key", + "type": "string", + "description": "The secret access token generated for the above access key ID.", + "required": false + } + } + }, + "catalog.ColumnInfo": { + "name": "ColumnInfo", + "package": "catalog", + "description": "", + "fields": { + "comment": { + "name": "comment", + "type": "string", + "description": "User-provided free-form text description.", "required": false }, - "libraries": { - "name": "libraries", - "type": "any", - "description": "An optional list of libraries to be installed on the cluster.\nThe default value is an empty list.", + "mask": { + "name": "mask", + "type": "*ColumnMask", + "description": "", "required": false }, - "max_retries": { - "name": "max_retries", - "type": "any", - "description": "An optional maximum number of times to retry an unsuccessful run. A run is considered to be unsuccessful if it completes with the `FAILED` result_state or `INTERNAL_ERROR` `life_cycle_state`. The value `-1` means to retry indefinitely and the value `0` means to never retry.", + "name": { + "name": "name", + "type": "string", + "description": "Name of Column.", "required": false }, - "min_retry_interval_millis": { - "name": "min_retry_interval_millis", - "type": "any", - "description": "An optional minimal interval in milliseconds between the start of the failed run and the subsequent retry run. The default behavior is that unsuccessful runs are immediately retried.", + "nullable": { + "name": "nullable", + "type": "bool", + "description": "Whether field may be Null (default: true).", "required": false }, - "new_cluster": { - "name": "new_cluster", - "type": "any", - "description": "If new_cluster, a description of a new cluster that is created for each run.", + "partition_index": { + "name": "partition_index", + "type": "int", + "description": "Partition index for column.", "required": false }, - "notebook_task": { - "name": "notebook_task", - "type": "any", - "description": "The task runs a notebook when the `notebook_task` field is present.", + "position": { + "name": "position", + "type": "int", + "description": "Ordinal position of column (starting at position 0).", "required": false }, - "notification_settings": { - "name": "notification_settings", - "type": "any", - "description": "Optional notification settings that are used when sending notifications to each of the `email_notifications` and `webhook_notifications` for this task.", + "type_interval_type": { + "name": "type_interval_type", + "type": "string", + "description": "Format of IntervalType.", "required": false }, - "pipeline_task": { - "name": "pipeline_task", - "type": "any", - "description": "The task triggers a pipeline update when the `pipeline_task` field is present. 
Only pipelines configured to use triggered more are supported.", + "type_json": { + "name": "type_json", + "type": "string", + "description": "Full data type specification, JSON-serialized.", "required": false }, - "power_bi_task": { - "name": "power_bi_task", - "type": "any", - "description": "The task triggers a Power BI semantic model update when the `power_bi_task` field is present.", + "type_name": { + "name": "type_name", + "type": "ColumnTypeName", + "description": "", "required": false }, - "python_wheel_task": { - "name": "python_wheel_task", - "type": "any", - "description": "The task runs a Python wheel when the `python_wheel_task` field is present.", + "type_precision": { + "name": "type_precision", + "type": "int", + "description": "Digits of precision; required for DecimalTypes.", "required": false }, - "retry_on_timeout": { - "name": "retry_on_timeout", - "type": "any", - "description": "An optional policy to specify whether to retry a job when it times out. The default behavior\nis to not retry on timeout.", + "type_scale": { + "name": "type_scale", + "type": "int", + "description": "Digits to right of decimal; Required for DecimalTypes.", "required": false }, - "run_if": { - "name": "run_if", - "type": "any", - "description": "An optional value specifying the condition determining whether the task is run once its dependencies have been completed.\n\n* `ALL_SUCCESS`: All dependencies have executed and succeeded\n* `AT_LEAST_ONE_SUCCESS`: At least one dependency has succeeded\n* `NONE_FAILED`: None of the dependencies have failed and at least one was executed\n* `ALL_DONE`: All dependencies have been completed\n* `AT_LEAST_ONE_FAILED`: At least one dependency failed\n* `ALL_FAILED`: ALl dependencies have failed", + "type_text": { + "name": "type_text", + "type": "string", + "description": "Full data type specification as SQL/catalogString text.", "required": false - }, - "run_job_task": { - "name": "run_job_task", - "type": "any", - "description": "The task triggers another job when the `run_job_task` field is present.", + } + } + }, + "catalog.ColumnMask": { + "name": "ColumnMask", + "package": "catalog", + "description": "", + "fields": { + "function_name": { + "name": "function_name", + "type": "string", + "description": "The full name of the column mask SQL UDF.", "required": false }, - "spark_jar_task": { - "name": "spark_jar_task", - "type": "any", - "description": "The task runs a JAR when the `spark_jar_task` field is present.", + "using_column_names": { + "name": "using_column_names", + "type": "[]string", + "description": "The list of additional table columns to be passed as input to the column\nmask function. The first arg of the mask function should be of the type\nof the column being masked and the types of the rest of the args should\nmatch the types of columns in 'using_column_names'.", "required": false - }, - "spark_python_task": { - "name": "spark_python_task", - "type": "any", - "description": "The task runs a Python file when the `spark_python_task` field is present.", + } + } + }, + "catalog.ColumnMaskOptions": { + "name": "ColumnMaskOptions", + "package": "catalog", + "description": "", + "fields": { + "function_name": { + "name": "function_name", + "type": "string", + "description": "The fully qualified name of the column mask function. The function is\ncalled on each row of the target table. The function's first argument and\nits return type should match the type of the masked column. 
Required on\ncreate and update.", "required": false }, - "spark_submit_task": { - "name": "spark_submit_task", - "type": "any", - "description": "(Legacy) The task runs the spark-submit script when the spark_submit_task field is present. Databricks recommends using the spark_jar_task instead; see [Spark Submit task for jobs](/jobs/spark-submit).", - "required": false, - "deprecated": true - }, - "sql_task": { - "name": "sql_task", - "type": "any", - "description": "The task runs a SQL query or file, or it refreshes a SQL alert or a legacy SQL dashboard when the `sql_task` field is present.", + "on_column": { + "name": "on_column", + "type": "string", + "description": "The alias of the column to be masked. The alias must refer to one of\nmatched columns. The values of the column is passed to the column mask\nfunction as the first argument. Required on create and update.", "required": false }, - "task_key": { - "name": "task_key", - "type": "any", - "description": "A unique name for the task. This field is used to refer to this task from other tasks.\nThis field is required and must be unique within its parent job.\nOn Update or Reset, this field is used to reference the tasks to be updated or reset.", + "using": { + "name": "using", + "type": "[]FunctionArgument", + "description": "Optional list of column aliases or constant literals to be passed as\nadditional arguments to the column mask function. The type of each column\nshould match the positional argument of the column mask function.", "required": false - }, - "timeout_seconds": { - "name": "timeout_seconds", - "type": "int", - "description": "An optional timeout applied to each run of this job task. A value of `0` means no timeout.", + } + } + }, + "catalog.ColumnRelationship": { + "name": "ColumnRelationship", + "package": "catalog", + "description": "", + "fields": { + "source": { + "name": "source", + "type": "string", + "description": "", "required": false }, - "webhook_notifications": { - "name": "webhook_notifications", - "type": "any", - "description": "A collection of system notification IDs to notify when runs of this task begin or complete. The default behavior is to not send any system notifications.", + "target": { + "name": "target", + "type": "string", + "description": "", "required": false } } }, - "jobs.TaskDependency": { - "name": "TaskDependency", - "package": "jobs", - "description": "task dependency configuration.", + "catalog.ConnectionDependency": { + "name": "ConnectionDependency", + "package": "catalog", + "description": "A connection that is dependent on a SQL object.", "fields": { - "outcome": { - "name": "outcome", - "type": "any", - "description": "Can only be specified on condition task dependencies. 
The outcome of the dependent task that must be met for this task to run.", - "required": false - }, - "task_key": { - "name": "task_key", - "type": "any", - "description": "The name of the task this task depends on.", + "connection_name": { + "name": "connection_name", + "type": "string", + "description": "Full name of the dependent connection, in the form of\n__connection_name__.", "required": false } } }, - "jobs.TaskEmailNotifications": { - "name": "TaskEmailNotifications", - "package": "jobs", - "description": "task email notifications configuration.", + "catalog.ConnectionInfo": { + "name": "ConnectionInfo", + "package": "catalog", + "description": "Next ID: 23", "fields": { - "no_alert_for_skipped_runs": { - "name": "no_alert_for_skipped_runs", - "type": "any", - "description": "If true, do not send email to recipients specified in `on_failure` if the run is skipped.\nThis field is `deprecated`. Please use the `notification_settings.no_alert_for_skipped_runs` field.", - "required": false, - "deprecated": true + "comment": { + "name": "comment", + "type": "string", + "description": "User-provided free-form text description.", + "required": false }, - "on_duration_warning_threshold_exceeded": { - "name": "on_duration_warning_threshold_exceeded", - "type": "any", - "description": "A list of email addresses to be notified when the duration of a run exceeds the threshold specified for the `RUN_DURATION_SECONDS` metric in the `health` field. If no rule for the `RUN_DURATION_SECONDS` metric is specified in the `health` field for the job, notifications are not sent.", + "connection_id": { + "name": "connection_id", + "type": "string", + "description": "Unique identifier of the Connection.", "required": false }, - "on_failure": { - "name": "on_failure", - "type": "any", - "description": "A list of email addresses to be notified when a run unsuccessfully completes. A run is considered to have completed unsuccessfully if it ends with an `INTERNAL_ERROR` `life_cycle_state` or a `FAILED`, or `TIMED_OUT` result_state. If this is not specified on job creation, reset, or update the list is empty, and notifications are not sent.", + "connection_type": { + "name": "connection_type", + "type": "ConnectionType", + "description": "The type of connection.", "required": false }, - "on_start": { - "name": "on_start", - "type": "any", - "description": "A list of email addresses to be notified when a run begins. If not specified on job creation, reset, or update, the list is empty, and notifications are not sent.", + "created_at": { + "name": "created_at", + "type": "int64", + "description": "Time at which this connection was created, in epoch milliseconds.", "required": false }, - "on_streaming_backlog_exceeded": { - "name": "on_streaming_backlog_exceeded", - "type": "any", - "description": "A list of email addresses to notify when any streaming backlog thresholds are exceeded for any stream.\nStreaming backlog thresholds can be set in the `health` field using the following metrics: `STREAMING_BACKLOG_BYTES`, `STREAMING_BACKLOG_RECORDS`, `STREAMING_BACKLOG_SECONDS`, or `STREAMING_BACKLOG_FILES`.\nAlerting is based on the 10-minute average of these metrics. If the issue persists, notifications are resent every 30 minutes.", + "created_by": { + "name": "created_by", + "type": "string", + "description": "Username of connection creator.", "required": false }, - "on_success": { - "name": "on_success", - "type": "any", - "description": "A list of email addresses to be notified when a run successfully completes. 
A run is considered to have completed successfully if it ends with a `TERMINATED` `life_cycle_state` and a `SUCCESS` result_state. If not specified on job creation, reset, or update, the list is empty, and notifications are not sent.", + "credential_type": { + "name": "credential_type", + "type": "CredentialType", + "description": "The type of credential.", "required": false - } - } - }, - "jobs.TaskNotificationSettings": { - "name": "TaskNotificationSettings", - "package": "jobs", - "description": "Configuration settings for task notification.", - "fields": { - "alert_on_last_attempt": { - "name": "alert_on_last_attempt", - "type": "any", - "description": "If true, do not send notifications to recipients specified in `on_start` for the retried runs and do not send notifications to recipients specified in `on_failure` until the last retry of the run.", + }, + "full_name": { + "name": "full_name", + "type": "string", + "description": "Full name of connection.", "required": false }, - "no_alert_for_canceled_runs": { - "name": "no_alert_for_canceled_runs", - "type": "any", - "description": "If true, do not send notifications to recipients specified in `on_failure` if the run is canceled.", + "metastore_id": { + "name": "metastore_id", + "type": "string", + "description": "Unique identifier of parent metastore.", "required": false }, - "no_alert_for_skipped_runs": { - "name": "no_alert_for_skipped_runs", - "type": "any", - "description": "If true, do not send notifications to recipients specified in `on_failure` if the run is skipped.", + "name": { + "name": "name", + "type": "string", + "description": "Name of the connection.", "required": false - } - } - }, - "jobs.TaskRetryMode": { - "name": "TaskRetryMode", - "package": "jobs", - "description": "task retry mode of the continuous job\n* NEVER: The failed task will not be retried.\n* ON_FAILURE: Retry a failed task if at least one other task in the job is still running its first attempt.\nWhen this condition is no longer met or the retry limit is reached, the job run is cancelled and a new run is started.", - "fields": {} - }, - "jobs.TriggerSettings": { - "name": "TriggerSettings", - "package": "jobs", - "description": "Configuration settings for trigger.", - "fields": { - "file_arrival": { - "name": "file_arrival", - "type": "any", - "description": "File arrival trigger settings.", + }, + "options": { + "name": "options", + "type": "map[string]string", + "description": "A map of key-value properties attached to the securable.", "required": false }, - "model": { - "name": "model", - "type": "any", - "description": "", + "owner": { + "name": "owner", + "type": "string", + "description": "Username of current owner of the connection.", "required": false }, - "pause_status": { - "name": "pause_status", - "type": "any", - "description": "Whether this trigger is paused or not.", + "properties": { + "name": "properties", + "type": "map[string]string", + "description": "A map of key-value properties attached to the securable.", "required": false }, - "periodic": { - "name": "periodic", - "type": "any", - "description": "Periodic trigger settings.", + "provisioning_info": { + "name": "provisioning_info", + "type": "*ProvisioningInfo", + "description": "", "required": false }, - "table_update": { - "name": "table_update", - "type": "any", + "read_only": { + "name": "read_only", + "type": "bool", + "description": "If the connection is read only.", + "required": false + }, + "securable_type": { + "name": "securable_type", + "type": "SecurableType", 
"description": "", "required": false + }, + "updated_at": { + "name": "updated_at", + "type": "int64", + "description": "Time at which this connection was updated, in epoch milliseconds.", + "required": false + }, + "updated_by": { + "name": "updated_by", + "type": "string", + "description": "Username of user who last modified connection.", + "required": false + }, + "url": { + "name": "url", + "type": "string", + "description": "URL of the remote data source, extracted from options.", + "required": false } } }, - "jobs.Webhook": { - "name": "Webhook", - "package": "jobs", - "description": "webhook configuration.", + "catalog.ContinuousUpdateStatus": { + "name": "ContinuousUpdateStatus", + "package": "catalog", + "description": "Detailed status of an online table. Shown if the online table is in the\nONLINE_CONTINUOUS_UPDATE or the ONLINE_UPDATING_PIPELINE_RESOURCES state.", "fields": { - "id": { - "name": "id", - "type": "any", - "description": "", + "initial_pipeline_sync_progress": { + "name": "initial_pipeline_sync_progress", + "type": "*PipelineProgress", + "description": "Progress of the initial data synchronization.", + "required": false + }, + "last_processed_commit_version": { + "name": "last_processed_commit_version", + "type": "int64", + "description": "The last source table Delta version that was synced to the online table.\nNote that this Delta version may not be completely synced to the online\ntable yet.", + "required": false + }, + "timestamp": { + "name": "timestamp", + "type": "string", + "description": "The timestamp of the last time any data was synchronized from the source\ntable to the online table.", "required": false } } }, - "jobs.WebhookNotifications": { - "name": "WebhookNotifications", - "package": "jobs", - "description": "webhook notifications configuration.", + "catalog.CreateAccessRequest": { + "name": "CreateAccessRequest", + "package": "catalog", + "description": "", "fields": { - "on_duration_warning_threshold_exceeded": { - "name": "on_duration_warning_threshold_exceeded", - "type": "any", - "description": "An optional list of system notification IDs to call when the duration of a run exceeds the threshold specified for the `RUN_DURATION_SECONDS` metric in the `health` field. A maximum of 3 destinations can be specified for the `on_duration_warning_threshold_exceeded` property.", + "behalf_of": { + "name": "behalf_of", + "type": "*Principal", + "description": "Optional. The principal this request is for. Empty `behalf_of` defaults\nto the requester's identity.\n\nPrincipals must be unique across the API call.", "required": false }, - "on_failure": { - "name": "on_failure", - "type": "any", - "description": "An optional list of system notification IDs to call when the run fails. A maximum of 3 destinations can be specified for the `on_failure` property.", + "comment": { + "name": "comment", + "type": "string", + "description": "Optional. Comment associated with the request.\n\nAt most 200 characters, can only contain lowercase/uppercase letters\n(a-z, A-Z), numbers (0-9), punctuation, and spaces.", "required": false }, - "on_start": { - "name": "on_start", - "type": "any", - "description": "An optional list of system notification IDs to call when the run starts. 
A maximum of 3 destinations can be specified for the `on_start` property.", + "securable_permissions": { + "name": "securable_permissions", + "type": "[]SecurablePermissions", + "description": "List of securables and their corresponding requested UC privileges.\n\nAt most 30 securables can be requested for a principal per batched call.\nEach securable can only be requested once per principal.", "required": false - }, - "on_streaming_backlog_exceeded": { - "name": "on_streaming_backlog_exceeded", - "type": "any", - "description": "An optional list of system notification IDs to call when any streaming backlog thresholds are exceeded for any stream.\nStreaming backlog thresholds can be set in the `health` field using the following metrics: `STREAMING_BACKLOG_BYTES`, `STREAMING_BACKLOG_RECORDS`, `STREAMING_BACKLOG_SECONDS`, or `STREAMING_BACKLOG_FILES`.\nAlerting is based on the 10-minute average of these metrics. If the issue persists, notifications are resent every 30 minutes.\nA maximum of 3 destinations can be specified for the `on_streaming_backlog_exceeded` property.", + } + } + }, + "catalog.CreateAccessRequestResponse": { + "name": "CreateAccessRequestResponse", + "package": "catalog", + "description": "", + "fields": { + "behalf_of": { + "name": "behalf_of", + "type": "*Principal", + "description": "The principal the request was made on behalf of.", "required": false }, - "on_success": { - "name": "on_success", - "type": "any", - "description": "An optional list of system notification IDs to call when the run completes successfully. A maximum of 3 destinations can be specified for the `on_success` property.", + "request_destinations": { + "name": "request_destinations", + "type": "[]AccessRequestDestinations", + "description": "The access request destinations for all the securables the principal\nrequested.", "required": false } } }, - "ml.ExperimentTag": { - "name": "ExperimentTag", - "package": "ml", - "description": "A tag for an experiment.", + "catalog.CreateAccountsMetastore": { + "name": "CreateAccountsMetastore", + "package": "catalog", + "description": "", "fields": { - "key": { - "name": "key", - "type": "any", - "description": "The tag key.", + "external_access_enabled": { + "name": "external_access_enabled", + "type": "bool", + "description": "Whether to allow non-DBR clients to directly access entities under the\nmetastore.", "required": false }, - "value": { - "name": "value", - "type": "any", - "description": "The tag value.", + "name": { + "name": "name", + "type": "string", + "description": "The user-specified name of the metastore.", "required": false - } - } - }, - "ml.ModelTag": { - "name": "ModelTag", - "package": "ml", - "description": "Tag for a registered model", - "fields": { - "key": { - "name": "key", - "type": "any", - "description": "The tag key.", + }, + "region": { + "name": "region", + "type": "string", + "description": "Cloud region which the metastore serves (e.g., `us-west-2`, `westus`).", "required": false }, - "value": { - "name": "value", - "type": "any", - "description": "The tag value.", + "storage_root": { + "name": "storage_root", + "type": "string", + "description": "The storage root URL for metastore", "required": false } } }, - "pipelines.ConnectionParameters": { - "name": "ConnectionParameters", - "package": "pipelines", - "description": "connection parameters configuration.", + "catalog.CreateAccountsStorageCredential": { + "name": "CreateAccountsStorageCredential", + "package": "catalog", + "description": "", "fields": { - 
"source_catalog": { - "name": "source_catalog", - "type": "any", - "description": "Source catalog for initial connection.\nThis is necessary for schema exploration in some database systems like Oracle, and optional but nice-to-have\nin some other database systems like Postgres.\nFor Oracle databases, this maps to a service name.", + "aws_iam_role": { + "name": "aws_iam_role", + "type": "*AwsIamRoleRequest", + "description": "The AWS IAM role configuration.", "required": false - } - } - }, - "pipelines.CronTrigger": { - "name": "CronTrigger", - "package": "pipelines", - "description": "cron trigger configuration.", - "fields": { - "quartz_cron_schedule": { - "name": "quartz_cron_schedule", - "type": "any", - "description": "", + }, + "azure_managed_identity": { + "name": "azure_managed_identity", + "type": "*AzureManagedIdentityRequest", + "description": "The Azure managed identity configuration.", "required": false }, - "timezone_id": { - "name": "timezone_id", + "azure_service_principal": { + "name": "azure_service_principal", + "type": "*AzureServicePrincipal", + "description": "The Azure service principal configuration.", + "required": false + }, + "cloudflare_api_token": { + "name": "cloudflare_api_token", + "type": "*CloudflareApiToken", + "description": "The Cloudflare API token configuration.", + "required": false + }, + "comment": { + "name": "comment", "type": "string", - "description": "", + "description": "Comment associated with the credential.", "required": false - } - } - }, - "pipelines.DayOfWeek": { - "name": "DayOfWeek", - "package": "pipelines", - "description": "Days of week in which the window is allowed to happen.\nIf not specified all days of the week will be used.", - "fields": {} - }, - "pipelines.DeploymentKind": { - "name": "DeploymentKind", - "package": "pipelines", - "description": "The deployment method that manages the pipeline:\n- BUNDLE: The pipeline is managed by a Databricks Asset Bundle.", - "fields": {} - }, - "pipelines.EventLogSpec": { - "name": "EventLogSpec", - "package": "pipelines", - "description": "Configurable event log parameters.", - "fields": { - "catalog": { - "name": "catalog", - "type": "any", - "description": "The UC catalog the event log is published under.", + }, + "databricks_gcp_service_account": { + "name": "databricks_gcp_service_account", + "type": "*DatabricksGcpServiceAccountRequest", + "description": "The Databricks managed GCP service account configuration.", "required": false }, "name": { "name": "name", - "type": "any", - "description": "The name the event log is published to in UC.", + "type": "string", + "description": "The credential name. The name must be unique among storage and service\ncredentials within the metastore.", "required": false }, - "schema": { - "name": "schema", - "type": "any", - "description": "The UC schema the event log is published under.", + "read_only": { + "name": "read_only", + "type": "bool", + "description": "Whether the credential is usable only for read operations. 
Only\napplicable when purpose is **STORAGE**.", "required": false } } }, - "pipelines.FileLibrary": { - "name": "FileLibrary", - "package": "pipelines", - "description": "file library configuration.", + "catalog.CreateCatalog": { + "name": "CreateCatalog", + "package": "catalog", + "description": "", "fields": { - "path": { - "name": "path", - "type": "any", - "description": "The absolute path of the source code.", + "comment": { + "name": "comment", + "type": "string", + "description": "User-provided free-form text description.", "required": false - } - } - }, - "pipelines.Filters": { - "name": "Filters", - "package": "pipelines", - "description": "filters configuration.", - "fields": { - "exclude": { - "name": "exclude", - "type": "any", - "description": "Paths to exclude.", + }, + "connection_name": { + "name": "connection_name", + "type": "string", + "description": "The name of the connection to an external data source.", "required": false }, - "include": { - "name": "include", - "type": "any", - "description": "Paths to include.", + "name": { + "name": "name", + "type": "string", + "description": "Name of catalog.", "required": false - } - } - }, - "pipelines.IngestionConfig": { - "name": "IngestionConfig", - "package": "pipelines", - "description": "ingestion config configuration.", - "fields": { - "report": { - "name": "report", - "type": "any", - "description": "Select a specific source report.", + }, + "options": { + "name": "options", + "type": "map[string]string", + "description": "A map of key-value properties attached to the securable.", "required": false }, - "schema": { - "name": "schema", - "type": "any", - "description": "Select all tables from a specific source schema.", + "properties": { + "name": "properties", + "type": "map[string]string", + "description": "A map of key-value properties attached to the securable.", "required": false }, - "table": { - "name": "table", - "type": "any", - "description": "Select a specific source table.", + "provider_name": { + "name": "provider_name", + "type": "string", + "description": "The name of delta sharing provider.\n\nA Delta Sharing catalog is a catalog that is based on a Delta share on a\nremote sharing server.", + "required": false + }, + "share_name": { + "name": "share_name", + "type": "string", + "description": "The name of the share under the share provider.", + "required": false + }, + "storage_root": { + "name": "storage_root", + "type": "string", + "description": "Storage root URL for managed tables within catalog.", "required": false } } }, - "pipelines.IngestionGatewayPipelineDefinition": { - "name": "IngestionGatewayPipelineDefinition", - "package": "pipelines", - "description": "ingestion gateway pipeline definition configuration.", + "catalog.CreateConnection": { + "name": "CreateConnection", + "package": "catalog", + "description": "", "fields": { - "connection_id": { - "name": "connection_id", + "comment": { + "name": "comment", "type": "string", - "description": "[Deprecated, use connection_name instead] Immutable. The Unity Catalog connection that this gateway pipeline uses to communicate with the source.", - "required": false, - "deprecated": true + "description": "User-provided free-form text description.", + "required": false }, - "connection_name": { - "name": "connection_name", - "type": "string", - "description": "Immutable. 
The Unity Catalog connection that this gateway pipeline uses to communicate with the source.", + "connection_type": { + "name": "connection_type", + "type": "ConnectionType", + "description": "The type of connection.", "required": false }, - "connection_parameters": { - "name": "connection_parameters", - "type": "any", - "description": "Optional, Internal. Parameters required to establish an initial connection with the source.", + "name": { + "name": "name", + "type": "string", + "description": "Name of the connection.", "required": false }, - "gateway_storage_catalog": { - "name": "gateway_storage_catalog", - "type": "any", - "description": "Required, Immutable. The name of the catalog for the gateway pipeline's storage location.", + "options": { + "name": "options", + "type": "map[string]string", + "description": "A map of key-value properties attached to the securable.", "required": false }, - "gateway_storage_name": { - "name": "gateway_storage_name", - "type": "string", - "description": "Optional. The Unity Catalog-compatible name for the gateway storage location.\nThis is the destination to use for the data that is extracted by the gateway.\nSpark Declarative Pipelines system will automatically create the storage location under the catalog and schema.", + "properties": { + "name": "properties", + "type": "map[string]string", + "description": "A map of key-value properties attached to the securable.", "required": false }, - "gateway_storage_schema": { - "name": "gateway_storage_schema", - "type": "any", - "description": "Required, Immutable. The name of the schema for the gateway pipelines's storage location.", + "read_only": { + "name": "read_only", + "type": "bool", + "description": "If the connection is read only.", "required": false } } }, - "pipelines.IngestionPipelineDefinition": { - "name": "IngestionPipelineDefinition", - "package": "pipelines", - "description": "ingestion pipeline definition configuration.", + "catalog.CreateCredentialRequest": { + "name": "CreateCredentialRequest", + "package": "catalog", + "description": "", "fields": { - "connection_name": { - "name": "connection_name", - "type": "string", - "description": "Immutable. The Unity Catalog connection that this ingestion pipeline uses to communicate with the source. This is used with connectors for applications like Salesforce, Workday, and so on.", + "aws_iam_role": { + "name": "aws_iam_role", + "type": "*AwsIamRole", + "description": "The AWS IAM role configuration.", "required": false }, - "ingest_from_uc_foreign_catalog": { - "name": "ingest_from_uc_foreign_catalog", - "type": "any", - "description": "Immutable. If set to true, the pipeline will ingest tables from the\nUC foreign catalogs directly without the need to specify a UC connection or ingestion gateway.\nThe `source_catalog` fields in objects of IngestionConfig are interpreted as\nthe UC foreign catalogs to ingest from.", + "azure_managed_identity": { + "name": "azure_managed_identity", + "type": "*AzureManagedIdentity", + "description": "The Azure managed identity configuration.", "required": false }, - "ingestion_gateway_id": { - "name": "ingestion_gateway_id", - "type": "string", - "description": "Immutable. Identifier for the gateway that is used by this ingestion pipeline to communicate with the source database. 
This is used with connectors to databases like SQL Server.", + "azure_service_principal": { + "name": "azure_service_principal", + "type": "*AzureServicePrincipal", + "description": "The Azure service principal configuration.", "required": false }, - "netsuite_jar_path": { - "name": "netsuite_jar_path", + "comment": { + "name": "comment", "type": "string", - "description": "Netsuite only configuration. When the field is set for a netsuite connector,\nthe jar stored in the field will be validated and added to the classpath of\npipeline's cluster.", + "description": "Comment associated with the credential.", "required": false }, - "objects": { - "name": "objects", - "type": "any", - "description": "Required. Settings specifying tables to replicate and the destination for the replicated tables.", + "databricks_gcp_service_account": { + "name": "databricks_gcp_service_account", + "type": "*DatabricksGcpServiceAccount", + "description": "The Databricks managed GCP service account configuration.", "required": false }, - "source_configurations": { - "name": "source_configurations", - "type": "any", - "description": "Top-level source configurations", + "name": { + "name": "name", + "type": "string", + "description": "The credential name. The name must be unique among storage and service\ncredentials within the metastore.", "required": false }, - "source_type": { - "name": "source_type", - "type": "any", - "description": "The type of the foreign source.\nThe source type will be inferred from the source connection or ingestion gateway.\nThis field is output only and will be ignored if provided.", - "required": false, - "output_only": true - }, - "table_configuration": { - "name": "table_configuration", - "type": "any", - "description": "Configuration settings to control the ingestion of tables. These settings are applied to all tables in the pipeline.", - "required": false - } - } - }, - "pipelines.IngestionPipelineDefinitionTableSpecificConfigQueryBasedConnectorConfig": { - "name": "IngestionPipelineDefinitionTableSpecificConfigQueryBasedConnectorConfig", - "package": "pipelines", - "description": "Configurations that are only applicable for query-based ingestion connectors.", - "fields": { - "cursor_columns": { - "name": "cursor_columns", - "type": "any", - "description": "The names of the monotonically increasing columns in the source table that are used to enable\nthe table to be read and ingested incrementally through structured streaming.\nThe columns are allowed to have repeated values but have to be non-decreasing.\nIf the source data is merged into the destination (e.g., using SCD Type 1 or Type 2), these\ncolumns will implicitly define the `sequence_by` behavior. 
You can still explicitly set\n`sequence_by` to override this default.", + "purpose": { + "name": "purpose", + "type": "CredentialPurpose", + "description": "Indicates the purpose of the credential.", "required": false }, - "deletion_condition": { - "name": "deletion_condition", - "type": "any", - "description": "Specifies a SQL WHERE condition that specifies that the source row has been deleted.\nThis is sometimes referred to as \"soft-deletes\".\nFor example: \"Operation = 'DELETE'\" or \"is_deleted = true\".\nThis field is orthogonal to `hard_deletion_sync_interval_in_seconds`,\none for soft-deletes and the other for hard-deletes.\nSee also the hard_deletion_sync_min_interval_in_seconds field for\nhandling of \"hard deletes\" where the source rows are physically removed from the table.", + "read_only": { + "name": "read_only", + "type": "bool", + "description": "Whether the credential is usable only for read operations. Only\napplicable when purpose is **STORAGE**.", "required": false }, - "hard_deletion_sync_min_interval_in_seconds": { - "name": "hard_deletion_sync_min_interval_in_seconds", - "type": "int", - "description": "Specifies the minimum interval (in seconds) between snapshots on primary keys\nfor detecting and synchronizing hard deletions—i.e., rows that have been\nphysically removed from the source table.\nThis interval acts as a lower bound. If ingestion runs less frequently than\nthis value, hard deletion synchronization will align with the actual ingestion\nfrequency instead of happening more often.\nIf not set, hard deletion synchronization via snapshots is disabled.\nThis field is mutable and can be updated without triggering a full snapshot.", + "skip_validation": { + "name": "skip_validation", + "type": "bool", + "description": "Optional. Supplying true to this argument skips validation of the created\nset of credentials.", "required": false } } }, - "pipelines.IngestionPipelineDefinitionWorkdayReportParameters": { - "name": "IngestionPipelineDefinitionWorkdayReportParameters", - "package": "pipelines", - "description": "ingestion pipeline definition workday report parameters configuration.", + "catalog.CreateEntityTagAssignmentRequest": { + "name": "CreateEntityTagAssignmentRequest", + "package": "catalog", + "description": "", "fields": { - "incremental": { - "name": "incremental", - "type": "any", - "description": "(Optional) Marks the report as incremental.\nThis field is deprecated and should not be used. Use `parameters` instead. The incremental behavior is now\ncontrolled by the `parameters` field.", - "required": false, - "deprecated": true - }, - "parameters": { - "name": "parameters", - "type": "any", - "description": "Parameters for the Workday report. Each key represents the parameter name (e.g., \"start_date\", \"end_date\"),\nand the corresponding value is a SQL-like expression used to compute the parameter value at runtime.\nExample:\n{\n\"start_date\": \"{ coalesce(current_offset(), date(\\\"2025-02-01\\\")) }\",\n\"end_date\": \"{ current_date() - INTERVAL 1 DAY }\"\n}", + "tag_assignment": { + "name": "tag_assignment", + "type": "EntityTagAssignment", + "description": "", "required": false - }, - "report_parameters": { - "name": "report_parameters", - "type": "any", - "description": "(Optional) Additional custom parameters for Workday Report\nThis field is deprecated and should not be used. 
Use `parameters` instead.", - "required": false, - "deprecated": true } } }, - "pipelines.IngestionPipelineDefinitionWorkdayReportParametersQueryKeyValue": { - "name": "IngestionPipelineDefinitionWorkdayReportParametersQueryKeyValue", - "package": "pipelines", - "description": "ingestion pipeline definition workday report parameters query key value configuration.", + "catalog.CreateExternalLineageRelationshipRequest": { + "name": "CreateExternalLineageRelationshipRequest", + "package": "catalog", + "description": "", "fields": { - "key": { - "name": "key", - "type": "any", - "description": "Key for the report parameter, can be a column name or other metadata", - "required": false - }, - "value": { - "name": "value", - "type": "any", - "description": "Value for the report parameter.\nPossible values it can take are these sql functions:\n1. coalesce(current_offset(), date(\"YYYY-MM-DD\")) -\u003e if current_offset() is null, then the passed date, else current_offset()\n2. current_date()\n3. date_sub(current_date(), x) -\u003e subtract x (some non-negative integer) days from current date", + "external_lineage_relationship": { + "name": "external_lineage_relationship", + "type": "CreateRequestExternalLineage", + "description": "", "required": false } } }, - "pipelines.IngestionSourceType": { - "name": "IngestionSourceType", - "package": "pipelines", - "description": "ingestion source type configuration.", - "fields": {} - }, - "pipelines.ManualTrigger": { - "name": "ManualTrigger", - "package": "pipelines", - "description": "manual trigger configuration.", - "fields": {} - }, - "pipelines.NotebookLibrary": { - "name": "NotebookLibrary", - "package": "pipelines", - "description": "notebook library configuration.", + "catalog.CreateExternalLocation": { + "name": "CreateExternalLocation", + "package": "catalog", + "description": "", "fields": { - "path": { - "name": "path", - "type": "any", - "description": "The absolute path of the source code.", + "comment": { + "name": "comment", + "type": "string", + "description": "User-provided free-form text description.", "required": false - } - } - }, - "pipelines.Notifications": { - "name": "Notifications", - "package": "pipelines", - "description": "notifications configuration.", - "fields": { - "alerts": { - "name": "alerts", - "type": "any", - "description": "A list of alerts that trigger the sending of notifications to the configured\ndestinations. 
The supported alerts are:\n\n* `on-update-success`: A pipeline update completes successfully.\n* `on-update-failure`: Each time a pipeline update fails.\n* `on-update-fatal-failure`: A pipeline update fails with a non-retryable (fatal) error.\n* `on-flow-failure`: A single data flow fails.", + }, + "credential_name": { + "name": "credential_name", + "type": "string", + "description": "Name of the storage credential used with this location.", "required": false }, - "email_recipients": { - "name": "email_recipients", - "type": "any", - "description": "A list of email addresses notified when a configured alert is triggered.", + "enable_file_events": { + "name": "enable_file_events", + "type": "bool", + "description": "Whether to enable file events on this external location.", + "required": false + }, + "encryption_details": { + "name": "encryption_details", + "type": "*EncryptionDetails", + "description": "", + "required": false + }, + "fallback": { + "name": "fallback", + "type": "bool", + "description": "Indicates whether fallback mode is enabled for this external location.\nWhen fallback mode is enabled, the access to the location falls back to\ncluster credentials if UC credentials are not sufficient.", + "required": false + }, + "file_event_queue": { + "name": "file_event_queue", + "type": "*FileEventQueue", + "description": "File event queue settings. If `enable_file_events` is `true`, must be\ndefined and have exactly one of the documented properties.", + "required": false + }, + "name": { + "name": "name", + "type": "string", + "description": "Name of the external location.", + "required": false + }, + "read_only": { + "name": "read_only", + "type": "bool", + "description": "Indicates whether the external location is read-only.", + "required": false + }, + "skip_validation": { + "name": "skip_validation", + "type": "bool", + "description": "Skips validation of the storage credential associated with the external\nlocation.", + "required": false + }, + "url": { + "name": "url", + "type": "string", + "description": "Path URL of the external location.", "required": false } } }, - "pipelines.PathPattern": { - "name": "PathPattern", - "package": "pipelines", - "description": "path pattern configuration.", + "catalog.CreateExternalMetadataRequest": { + "name": "CreateExternalMetadataRequest", + "package": "catalog", + "description": "", "fields": { - "include": { - "name": "include", - "type": "any", - "description": "The source code to include for pipelines", + "external_metadata": { + "name": "external_metadata", + "type": "ExternalMetadata", + "description": "", "required": false } } }, - "pipelines.PipelineCluster": { - "name": "PipelineCluster", - "package": "pipelines", - "description": "pipeline cluster configuration.", + "catalog.CreateFunction": { + "name": "CreateFunction", + "package": "catalog", + "description": "", "fields": { - "apply_policy_default_values": { - "name": "apply_policy_default_values", - "type": "any", - "description": "Note: This field won't be persisted. 
Only API users will check this field.", + "catalog_name": { + "name": "catalog_name", + "type": "string", + "description": "Name of parent Catalog.", "required": false }, - "autoscale": { - "name": "autoscale", - "type": "any", - "description": "Parameters needed in order to automatically scale clusters up and down based on load.\nNote: autoscaling works best with DB runtime versions 3.0 or later.", + "comment": { + "name": "comment", + "type": "string", + "description": "User-provided free-form text description.", "required": false }, - "aws_attributes": { - "name": "aws_attributes", - "type": "any", - "description": "Attributes related to clusters running on Amazon Web Services.\nIf not specified at cluster creation, a set of default values will be used.", + "data_type": { + "name": "data_type", + "type": "ColumnTypeName", + "description": "Scalar function return data type.", "required": false }, - "azure_attributes": { - "name": "azure_attributes", - "type": "any", - "description": "Attributes related to clusters running on Microsoft Azure.\nIf not specified at cluster creation, a set of default values will be used.", + "external_language": { + "name": "external_language", + "type": "string", + "description": "External function language.", "required": false }, - "cluster_log_conf": { - "name": "cluster_log_conf", - "type": "any", - "description": "The configuration for delivering spark logs to a long-term storage destination.\nOnly dbfs destinations are supported. Only one destination can be specified\nfor one cluster. If the conf is given, the logs will be delivered to the destination every\n`5 mins`. The destination of driver logs is `$destination/$clusterId/driver`, while\nthe destination of executor logs is `$destination/$clusterId/executor`.", + "external_name": { + "name": "external_name", + "type": "string", + "description": "External function name.", "required": false }, - "custom_tags": { - "name": "custom_tags", - "type": "map[string]string", - "description": "Additional tags for cluster resources. Databricks will tag all cluster resources (e.g., AWS\ninstances and EBS volumes) with these tags in addition to `default_tags`. 
Notes:\n\n- Currently, Databricks allows at most 45 custom tags\n\n- Clusters can only reuse cloud resources if the resources' tags are a subset of the cluster tags", + "full_data_type": { + "name": "full_data_type", + "type": "string", + "description": "Pretty printed function data type.", "required": false }, - "driver_instance_pool_id": { - "name": "driver_instance_pool_id", - "type": "string", - "description": "The optional ID of the instance pool for the driver of the cluster belongs.\nThe pool cluster uses the instance pool with id (instance_pool_id) if the driver pool is not\nassigned.", + "input_params": { + "name": "input_params", + "type": "FunctionParameterInfos", + "description": "Function input parameters.", "required": false }, - "driver_node_type_id": { - "name": "driver_node_type_id", - "type": "string", - "description": "The node type of the Spark driver.\nNote that this field is optional; if unset, the driver node type will be set as the same value\nas `node_type_id` defined above.", + "is_deterministic": { + "name": "is_deterministic", + "type": "bool", + "description": "Whether the function is deterministic.", "required": false }, - "enable_local_disk_encryption": { - "name": "enable_local_disk_encryption", + "is_null_call": { + "name": "is_null_call", "type": "bool", - "description": "Whether to enable local disk encryption for the cluster.", + "description": "Function null call.", "required": false }, - "gcp_attributes": { - "name": "gcp_attributes", - "type": "any", - "description": "Attributes related to clusters running on Google Cloud Platform.\nIf not specified at cluster creation, a set of default values will be used.", + "name": { + "name": "name", + "type": "string", + "description": "Name of function, relative to parent schema.", "required": false }, - "init_scripts": { - "name": "init_scripts", - "type": "any", - "description": "The configuration for storing init scripts. Any number of destinations can be specified. The scripts are executed sequentially in the order provided. If `cluster_log_conf` is specified, init script logs are sent to `\u003cdestination\u003e/\u003ccluster-ID\u003e/init_scripts`.", + "parameter_style": { + "name": "parameter_style", + "type": "CreateFunctionParameterStyle", + "description": "Function parameter style. **S** is the value for SQL.", "required": false }, - "instance_pool_id": { - "name": "instance_pool_id", + "properties": { + "name": "properties", "type": "string", - "description": "The optional ID of the instance pool to which the cluster belongs.", + "description": "JSON-serialized key-value pair map, encoded (escaped) as a string.", "required": false }, - "label": { - "name": "label", - "type": "any", - "description": "A label for the cluster specification, either `default` to configure the default cluster, or `maintenance` to configure the maintenance cluster. This field is optional. The default value is `default`.", + "return_params": { + "name": "return_params", + "type": "*FunctionParameterInfos", + "description": "Table function return parameters.", "required": false }, - "node_type_id": { - "name": "node_type_id", + "routine_body": { + "name": "routine_body", + "type": "CreateFunctionRoutineBody", + "description": "Function language. 
When **EXTERNAL** is used, the language of the routine\nfunction should be specified in the **external_language** field, and the\n**return_params** of the function cannot be used (as **TABLE** return\ntype is not supported), and the **sql_data_access** field must be\n**NO_SQL**.", + "required": false + }, + "routine_definition": { + "name": "routine_definition", "type": "string", - "description": "This field encodes, through a single value, the resources available to each of\nthe Spark nodes in this cluster. For example, the Spark nodes can be provisioned\nand optimized for memory or compute intensive workloads. A list of available node\ntypes can be retrieved by using the :method:clusters/listNodeTypes API call.", + "description": "Function body.", "required": false }, - "num_workers": { - "name": "num_workers", - "type": "any", - "description": "Number of worker nodes that this cluster should have. A cluster has one Spark Driver\nand `num_workers` Executors for a total of `num_workers` + 1 Spark nodes.\n\nNote: When reading the properties of a cluster, this field reflects the desired number\nof workers rather than the actual current number of workers. For instance, if a cluster\nis resized from 5 to 10 workers, this field will immediately be updated to reflect\nthe target size of 10 workers, whereas the workers listed in `spark_info` will gradually\nincrease from 5 to 10 as the new nodes are provisioned.", + "routine_dependencies": { + "name": "routine_dependencies", + "type": "*DependencyList", + "description": "function dependencies.", "required": false }, - "policy_id": { - "name": "policy_id", + "schema_name": { + "name": "schema_name", "type": "string", - "description": "The ID of the cluster policy used to create the cluster if applicable.", + "description": "Name of parent Schema relative to its parent Catalog.", "required": false }, - "spark_conf": { - "name": "spark_conf", - "type": "any", - "description": "An object containing a set of optional, user-specified Spark configuration key-value pairs.\nSee :method:clusters/create for more details.", + "security_type": { + "name": "security_type", + "type": "CreateFunctionSecurityType", + "description": "Function security type.", "required": false }, - "spark_env_vars": { - "name": "spark_env_vars", - "type": "any", - "description": "An object containing a set of optional, user-specified environment variable key-value pairs.\nPlease note that key-value pair of the form (X,Y) will be exported as is (i.e.,\n`export X='Y'`) while launching the driver and workers.\n\nIn order to specify an additional set of `SPARK_DAEMON_JAVA_OPTS`, we recommend appending\nthem to `$SPARK_DAEMON_JAVA_OPTS` as shown in the example below. This ensures that all\ndefault databricks managed environmental variables are included as well.\n\nExample Spark environment variables:\n`{\"SPARK_WORKER_MEMORY\": \"28000m\", \"SPARK_LOCAL_DIRS\": \"/local_disk0\"}` or\n`{\"SPARK_DAEMON_JAVA_OPTS\": \"$SPARK_DAEMON_JAVA_OPTS -Dspark.shuffle.service.enabled=true\"}`", + "specific_name": { + "name": "specific_name", + "type": "string", + "description": "Specific name of the function; Reserved for future use.", "required": false }, - "ssh_public_keys": { - "name": "ssh_public_keys", - "type": "any", - "description": "SSH public key contents that will be added to each Spark node in this cluster. 
The\ncorresponding private keys can be used to login with the user name `ubuntu` on port `2200`.\nUp to 10 keys can be specified.", + "sql_data_access": { + "name": "sql_data_access", + "type": "CreateFunctionSqlDataAccess", + "description": "Function SQL data access.", + "required": false + }, + "sql_path": { + "name": "sql_path", + "type": "string", + "description": "List of schemes whose objects can be referenced without qualification.", "required": false } } }, - "pipelines.PipelineClusterAutoscale": { - "name": "PipelineClusterAutoscale", - "package": "pipelines", - "description": "pipeline cluster autoscale configuration.", + "catalog.CreateFunctionRequest": { + "name": "CreateFunctionRequest", + "package": "catalog", + "description": "", "fields": { - "max_workers": { - "name": "max_workers", - "type": "any", - "description": "The maximum number of workers to which the cluster can scale up when overloaded. `max_workers` must be strictly greater than `min_workers`.", + "function_info": { + "name": "function_info", + "type": "CreateFunction", + "description": "Partial __FunctionInfo__ specifying the function to be created.", + "required": false + } + } + }, + "catalog.CreateMetastore": { + "name": "CreateMetastore", + "package": "catalog", + "description": "", + "fields": { + "external_access_enabled": { + "name": "external_access_enabled", + "type": "bool", + "description": "Whether to allow non-DBR clients to directly access entities under the\nmetastore.", "required": false }, - "min_workers": { - "name": "min_workers", - "type": "any", - "description": "The minimum number of workers the cluster can scale down to when underutilized.\nIt is also the initial number of workers the cluster will have after creation.", + "name": { + "name": "name", + "type": "string", + "description": "The user-specified name of the metastore.", "required": false }, - "mode": { - "name": "mode", - "type": "any", - "description": "Databricks Enhanced Autoscaling optimizes cluster utilization by automatically\nallocating cluster resources based on workload volume, with minimal impact to\nthe data processing latency of your pipelines. Enhanced Autoscaling is available\nfor `updates` clusters only. The legacy autoscaling feature is used for `maintenance`\nclusters.", + "region": { + "name": "region", + "type": "string", + "description": "Cloud region which the metastore serves (e.g., `us-west-2`, `westus`).", + "required": false + }, + "storage_root": { + "name": "storage_root", + "type": "string", + "description": "The storage root URL for metastore", "required": false } } }, - "pipelines.PipelineClusterAutoscaleMode": { - "name": "PipelineClusterAutoscaleMode", - "package": "pipelines", - "description": "Databricks Enhanced Autoscaling optimizes cluster utilization by automatically\nallocating cluster resources based on workload volume, with minimal impact to\nthe data processing latency of your pipelines. Enhanced Autoscaling is available\nfor `updates` clusters only. 
The legacy autoscaling feature is used for `maintenance`\nclusters.", - "fields": {} - }, - "pipelines.PipelineDeployment": { - "name": "PipelineDeployment", - "package": "pipelines", - "description": "pipeline deployment configuration.", + "catalog.CreateMetastoreAssignment": { + "name": "CreateMetastoreAssignment", + "package": "catalog", + "description": "", "fields": { - "kind": { - "name": "kind", - "type": "any", - "description": "The deployment method that manages the pipeline.", + "default_catalog_name": { + "name": "default_catalog_name", + "type": "string", + "description": "The name of the default catalog in the metastore. This field is\ndeprecated. Please use \"Default Namespace API\" to configure the default\ncatalog for a Databricks workspace.", "required": false }, - "metadata_file_path": { - "name": "metadata_file_path", + "metastore_id": { + "name": "metastore_id", "type": "string", - "description": "The path to the file containing metadata about the deployment.", + "description": "The unique ID of the metastore.", "required": false } } }, - "pipelines.PipelineLibrary": { - "name": "PipelineLibrary", - "package": "pipelines", - "description": "pipeline library configuration.", + "catalog.CreateMonitor": { + "name": "CreateMonitor", + "package": "catalog", + "description": "", "fields": { - "file": { - "name": "file", - "type": "any", - "description": "The path to a file that defines a pipeline and is stored in the Databricks Repos.", + "assets_dir": { + "name": "assets_dir", + "type": "string", + "description": "[Create:REQ Update:IGN] Field for specifying the absolute path to a\ncustom directory to store data-monitoring assets. Normally prepopulated\nto a default user location via UI and Python APIs.", "required": false }, - "glob": { - "name": "glob", - "type": "any", - "description": "The unified field to include source codes.\nEach entry can be a notebook path, a file path, or a folder path that ends `/**`.\nThis field cannot be used together with `notebook` or `file`.", + "baseline_table_name": { + "name": "baseline_table_name", + "type": "string", + "description": "[Create:OPT Update:OPT] Baseline table name. Baseline data is used to\ncompute drift from the data in the monitored `table_name`. The baseline\ntable and the monitored table shall have the same schema.", "required": false }, - "jar": { - "name": "jar", - "type": "any", - "description": "URI of the jar to be installed. 
Currently only DBFS is supported.", + "custom_metrics": { + "name": "custom_metrics", + "type": "[]MonitorMetric", + "description": "[Create:OPT Update:OPT] Custom metrics.", "required": false }, - "maven": { - "name": "maven", - "type": "any", - "description": "Specification of a maven library to be installed.", + "data_classification_config": { + "name": "data_classification_config", + "type": "*MonitorDataClassificationConfig", + "description": "[Create:OPT Update:OPT] Data classification related config.", "required": false }, - "notebook": { - "name": "notebook", - "type": "any", - "description": "The path to a notebook that defines a pipeline and is stored in the Databricks workspace.", + "inference_log": { + "name": "inference_log", + "type": "*MonitorInferenceLog", + "description": "", "required": false }, - "whl": { - "name": "whl", - "type": "any", - "description": "URI of the whl to be installed.", - "required": false, - "deprecated": true - } - } - }, - "pipelines.PipelineTrigger": { - "name": "PipelineTrigger", - "package": "pipelines", - "description": "pipeline trigger configuration.", - "fields": { - "cron": { - "name": "cron", - "type": "any", - "description": "", + "latest_monitor_failure_msg": { + "name": "latest_monitor_failure_msg", + "type": "string", + "description": "[Create:ERR Update:IGN] The latest error message for a monitor failure.", "required": false }, - "manual": { - "name": "manual", - "type": "any", - "description": "", + "notifications": { + "name": "notifications", + "type": "*MonitorNotifications", + "description": "[Create:OPT Update:OPT] Field for specifying notification settings.", + "required": false + }, + "output_schema_name": { + "name": "output_schema_name", + "type": "string", + "description": "[Create:REQ Update:REQ] Schema where output tables are created. Needs to\nbe in 2-level format {catalog}.{schema}", + "required": false + }, + "schedule": { + "name": "schedule", + "type": "*MonitorCronSchedule", + "description": "[Create:OPT Update:OPT] The monitor schedule.", + "required": false + }, + "skip_builtin_dashboard": { + "name": "skip_builtin_dashboard", + "type": "bool", + "description": "Whether to skip creating a default dashboard summarizing data quality\nmetrics.", + "required": false + }, + "slicing_exprs": { + "name": "slicing_exprs", + "type": "[]string", + "description": "[Create:OPT Update:OPT] List of column expressions to slice data with for\ntargeted analysis. The data is grouped by each expression independently,\nresulting in a separate slice for each predicate and its complements. For\nexample `slicing_exprs=[“col_1”, “col_2 \u003e 10”]` will generate the\nfollowing slices: two slices for `col_2 \u003e 10` (True and False), and one\nslice per unique value in `col1`. For high-cardinality columns, only the\ntop 100 unique values by frequency will generate slices.", + "required": false + }, + "snapshot": { + "name": "snapshot", + "type": "*MonitorSnapshot", + "description": "Configuration for monitoring snapshot tables.", + "required": false + }, + "time_series": { + "name": "time_series", + "type": "*MonitorTimeSeries", + "description": "Configuration for monitoring time series tables.", + "required": false + }, + "warehouse_id": { + "name": "warehouse_id", + "type": "string", + "description": "Optional argument to specify the warehouse for dashboard creation. 
If not\nspecified, the first running warehouse will be used.", "required": false } } }, - "pipelines.PipelinesEnvironment": { - "name": "PipelinesEnvironment", - "package": "pipelines", - "description": "The environment entity used to preserve serverless environment side panel, jobs' environment for non-notebook task, and DLT's environment for classic and serverless pipelines.\nIn this minimal environment spec, only pip dependencies are supported.", + "catalog.CreateOnlineTableRequest": { + "name": "CreateOnlineTableRequest", + "package": "catalog", + "description": "", "fields": { - "dependencies": { - "name": "dependencies", - "type": "any", - "description": "List of pip dependencies, as supported by the version of pip in this environment.\nEach dependency is a pip requirement file line https://pip.pypa.io/en/stable/reference/requirements-file-format/\nAllowed dependency could be \u003crequirement specifier\u003e, \u003carchive url/path\u003e, \u003clocal project path\u003e(WSFS or Volumes in Databricks), \u003cvcs project url\u003e", + "table": { + "name": "table", + "type": "OnlineTable", + "description": "Specification of the online table to be created.", "required": false } } }, - "pipelines.PostgresCatalogConfig": { - "name": "PostgresCatalogConfig", - "package": "pipelines", - "description": "PG-specific catalog-level configuration parameters", + "catalog.CreatePolicyRequest": { + "name": "CreatePolicyRequest", + "package": "catalog", + "description": "", "fields": { - "slot_config": { - "name": "slot_config", - "type": "any", - "description": "Optional. The Postgres slot configuration to use for logical replication", + "policy_info": { + "name": "policy_info", + "type": "PolicyInfo", + "description": "Required. The policy to create.", "required": false } } }, - "pipelines.PostgresSlotConfig": { - "name": "PostgresSlotConfig", - "package": "pipelines", - "description": "PostgresSlotConfig contains the configuration for a Postgres logical replication slot", + "catalog.CreateRegisteredModelRequest": { + "name": "CreateRegisteredModelRequest", + "package": "catalog", + "description": "", "fields": { - "publication_name": { - "name": "publication_name", - "type": "string", - "description": "The name of the publication to use for the Postgres source", + "aliases": { + "name": "aliases", + "type": "[]RegisteredModelAlias", + "description": "List of aliases associated with the registered model", "required": false }, - "slot_name": { - "name": "slot_name", + "browse_only": { + "name": "browse_only", + "type": "bool", + "description": "Indicates whether the principal is limited to retrieving metadata for the\nassociated object through the BROWSE privilege when include_browse is\nenabled in the request.", + "required": false + }, + "catalog_name": { + "name": "catalog_name", "type": "string", - "description": "The name of the logical replication slot to use for the Postgres source", + "description": "The name of the catalog where the schema and the registered model reside", "required": false - } - } - }, - "pipelines.ReportSpec": { - "name": "ReportSpec", - "package": "pipelines", - "description": "Specification for report.", - "fields": { - "destination_catalog": { - "name": "destination_catalog", - "type": "any", - "description": "Required. 
Destination catalog to store table.", + }, + "comment": { + "name": "comment", + "type": "string", + "description": "The comment attached to the registered model", "required": false }, - "destination_schema": { - "name": "destination_schema", - "type": "any", - "description": "Required. Destination schema to store table.", + "created_at": { + "name": "created_at", + "type": "int64", + "description": "Creation timestamp of the registered model in milliseconds since the Unix\nepoch", "required": false }, - "destination_table": { - "name": "destination_table", - "type": "any", - "description": "Required. Destination table name. The pipeline fails if a table with that name already exists.", + "created_by": { + "name": "created_by", + "type": "string", + "description": "The identifier of the user who created the registered model", "required": false }, - "source_url": { - "name": "source_url", + "full_name": { + "name": "full_name", "type": "string", - "description": "Required. Report URL in the source system.", + "description": "The three-level (fully qualified) name of the registered model", "required": false }, - "table_configuration": { - "name": "table_configuration", - "type": "any", - "description": "Configuration settings to control the ingestion of tables. These settings override the table_configuration defined in the IngestionPipelineDefinition object.", + "metastore_id": { + "name": "metastore_id", + "type": "string", + "description": "The unique identifier of the metastore", "required": false - } - } - }, - "pipelines.RestartWindow": { - "name": "RestartWindow", - "package": "pipelines", - "description": "restart window configuration.", - "fields": { - "days_of_week": { - "name": "days_of_week", - "type": "any", - "description": "Days of week in which the restart is allowed to happen (within a five-hour window starting at start_hour).\nIf not specified all days of the week will be used.", + }, + "name": { + "name": "name", + "type": "string", + "description": "The name of the registered model", "required": false }, - "start_hour": { - "name": "start_hour", - "type": "any", - "description": "An integer between 0 and 23 denoting the start hour for the restart window in the 24-hour day.\nContinuous pipeline restart is triggered only within a five-hour window starting at this hour.", + "owner": { + "name": "owner", + "type": "string", + "description": "The identifier of the user who owns the registered model", "required": false }, - "time_zone_id": { - "name": "time_zone_id", + "schema_name": { + "name": "schema_name", "type": "string", - "description": "Time zone id of restart window. See https://docs.databricks.com/sql/language-manual/sql-ref-syntax-aux-conf-mgmt-set-timezone.html for details.\nIf not specified, UTC will be used.", + "description": "The name of the schema where the registered model resides", "required": false - } - } - }, - "pipelines.RunAs": { - "name": "RunAs", - "package": "pipelines", - "description": "Write-only setting, available only in Create/Update calls. Specifies the user or service principal that the pipeline runs as. If not specified, the pipeline runs as the user who created the pipeline.\n\nOnly `user_name` or `service_principal_name` can be specified. If both are specified, an error is thrown.", - "fields": { - "service_principal_name": { - "name": "service_principal_name", + }, + "storage_location": { + "name": "storage_location", "type": "string", - "description": "Application ID of an active service principal. 
Setting this field requires the `servicePrincipal/user` role.", + "description": "The storage location on the cloud under which model version data files\nare stored", "required": false }, - "user_name": { - "name": "user_name", + "updated_at": { + "name": "updated_at", + "type": "int64", + "description": "Last-update timestamp of the registered model in milliseconds since the\nUnix epoch", + "required": false + }, + "updated_by": { + "name": "updated_by", "type": "string", - "description": "The email of an active workspace user. Users can only set this field to their own email.", + "description": "The identifier of the user who updated the registered model last time", "required": false } } }, - "pipelines.SchemaSpec": { - "name": "SchemaSpec", - "package": "pipelines", - "description": "Specification for schema.", + "catalog.CreateRequestExternalLineage": { + "name": "CreateRequestExternalLineage", + "package": "catalog", + "description": "", "fields": { - "destination_catalog": { - "name": "destination_catalog", - "type": "any", - "description": "Required. Destination catalog to store tables.", + "columns": { + "name": "columns", + "type": "[]ColumnRelationship", + "description": "List of column relationships between source and target objects.", "required": false }, - "destination_schema": { - "name": "destination_schema", - "type": "any", - "description": "Required. Destination schema to store tables in. Tables with the same name as the source tables are created in this destination schema. The pipeline fails If a table with the same name already exists.", + "id": { + "name": "id", + "type": "string", + "description": "Unique identifier of the external lineage relationship.", "required": false }, - "source_catalog": { - "name": "source_catalog", - "type": "any", - "description": "The source catalog name. Might be optional depending on the type of source.", + "properties": { + "name": "properties", + "type": "map[string]string", + "description": "Key-value properties associated with the external lineage relationship.", "required": false }, - "source_schema": { - "name": "source_schema", - "type": "any", - "description": "Required. Schema name in the source database.", + "source": { + "name": "source", + "type": "ExternalLineageObject", + "description": "Source object of the external lineage relationship.", "required": false }, - "table_configuration": { - "name": "table_configuration", - "type": "any", - "description": "Configuration settings to control the ingestion of tables. 
These settings are applied to all tables in this schema and override the table_configuration defined in the IngestionPipelineDefinition object.", + "target": { + "name": "target", + "type": "ExternalLineageObject", + "description": "Target object of the external lineage relationship.", "required": false } } }, - "pipelines.SourceCatalogConfig": { - "name": "SourceCatalogConfig", - "package": "pipelines", - "description": "SourceCatalogConfig contains catalog-level custom configuration parameters for each source", + "catalog.CreateSchema": { + "name": "CreateSchema", + "package": "catalog", + "description": "", "fields": { - "postgres": { - "name": "postgres", - "type": "any", - "description": "Postgres-specific catalog-level configuration parameters", - "required": false - }, - "source_catalog": { - "name": "source_catalog", - "type": "any", - "description": "Source catalog name", - "required": false - } - } - }, - "pipelines.SourceConfig": { - "name": "SourceConfig", - "package": "pipelines", - "description": "source config configuration.", - "fields": { - "catalog": { - "name": "catalog", - "type": "any", - "description": "Catalog-level source configuration parameters", - "required": false - } - } - }, - "pipelines.TableSpec": { - "name": "TableSpec", - "package": "pipelines", - "description": "Specification for table.", - "fields": { - "destination_catalog": { - "name": "destination_catalog", - "type": "any", - "description": "Required. Destination catalog to store table.", - "required": false - }, - "destination_schema": { - "name": "destination_schema", - "type": "any", - "description": "Required. Destination schema to store table.", - "required": false - }, - "destination_table": { - "name": "destination_table", - "type": "any", - "description": "Optional. Destination table name. The pipeline fails if a table with that name already exists. If not set, the source table name is used.", + "catalog_name": { + "name": "catalog_name", + "type": "string", + "description": "Name of parent catalog.", "required": false }, - "source_catalog": { - "name": "source_catalog", - "type": "any", - "description": "Source catalog name. Might be optional depending on the type of source.", + "comment": { + "name": "comment", + "type": "string", + "description": "User-provided free-form text description.", "required": false }, - "source_schema": { - "name": "source_schema", - "type": "any", - "description": "Schema name in the source database. Might be optional depending on the type of source.", + "name": { + "name": "name", + "type": "string", + "description": "Name of schema, relative to parent catalog.", "required": false }, - "source_table": { - "name": "source_table", - "type": "any", - "description": "Required. Table name in the source database.", + "properties": { + "name": "properties", + "type": "map[string]string", + "description": "A map of key-value properties attached to the securable.", "required": false }, - "table_configuration": { - "name": "table_configuration", - "type": "any", - "description": "Configuration settings to control the ingestion of tables. 
These settings override the table_configuration defined in the IngestionPipelineDefinition object and the SchemaSpec.", + "storage_root": { + "name": "storage_root", + "type": "string", + "description": "Storage root URL for managed tables within schema.", "required": false } } }, - "pipelines.TableSpecificConfig": { - "name": "TableSpecificConfig", - "package": "pipelines", - "description": "table specific config configuration.", + "catalog.CreateStorageCredential": { + "name": "CreateStorageCredential", + "package": "catalog", + "description": "", "fields": { - "exclude_columns": { - "name": "exclude_columns", - "type": "any", - "description": "A list of column names to be excluded for the ingestion.\nWhen not specified, include_columns fully controls what columns to be ingested.\nWhen specified, all other columns including future ones will be automatically included for ingestion.\nThis field in mutually exclusive with `include_columns`.", + "aws_iam_role": { + "name": "aws_iam_role", + "type": "*AwsIamRoleRequest", + "description": "The AWS IAM role configuration.", "required": false }, - "include_columns": { - "name": "include_columns", - "type": "any", - "description": "A list of column names to be included for the ingestion.\nWhen not specified, all columns except ones in exclude_columns will be included. Future\ncolumns will be automatically included.\nWhen specified, all other future columns will be automatically excluded from ingestion.\nThis field in mutually exclusive with `exclude_columns`.", + "azure_managed_identity": { + "name": "azure_managed_identity", + "type": "*AzureManagedIdentityRequest", + "description": "The Azure managed identity configuration.", "required": false }, - "primary_keys": { - "name": "primary_keys", - "type": "any", - "description": "The primary key of the table used to apply changes.", + "azure_service_principal": { + "name": "azure_service_principal", + "type": "*AzureServicePrincipal", + "description": "The Azure service principal configuration.", "required": false }, - "query_based_connector_config": { - "name": "query_based_connector_config", - "type": "any", - "description": "Configurations that are only applicable for query-based ingestion connectors.", + "cloudflare_api_token": { + "name": "cloudflare_api_token", + "type": "*CloudflareApiToken", + "description": "The Cloudflare API token configuration.", "required": false }, - "row_filter": { - "name": "row_filter", - "type": "any", - "description": "(Optional, Immutable) The row filter condition to be applied to the table.\nIt must not contain the WHERE keyword, only the actual filter condition.\nIt must be in DBSQL format.", + "comment": { + "name": "comment", + "type": "string", + "description": "Comment associated with the credential.", "required": false }, - "salesforce_include_formula_fields": { - "name": "salesforce_include_formula_fields", - "type": "any", - "description": "If true, formula fields defined in the table are included in the ingestion. This setting is only valid for the Salesforce connector", + "databricks_gcp_service_account": { + "name": "databricks_gcp_service_account", + "type": "*DatabricksGcpServiceAccountRequest", + "description": "The Databricks managed GCP service account configuration.", "required": false }, - "scd_type": { - "name": "scd_type", - "type": "any", - "description": "The SCD type to use to ingest the table.", + "name": { + "name": "name", + "type": "string", + "description": "The credential name. 
The name must be unique among storage and service\ncredentials within the metastore.", "required": false }, - "sequence_by": { - "name": "sequence_by", - "type": "any", - "description": "The column names specifying the logical order of events in the source data. Spark Declarative Pipelines uses this sequencing to handle change events that arrive out of order.", + "read_only": { + "name": "read_only", + "type": "bool", + "description": "Whether the credential is usable only for read operations. Only\napplicable when purpose is **STORAGE**.", "required": false }, - "workday_report_parameters": { - "name": "workday_report_parameters", - "type": "any", - "description": "(Optional) Additional custom parameters for Workday Report", + "skip_validation": { + "name": "skip_validation", + "type": "bool", + "description": "Supplying true to this argument skips validation of the created\ncredential.", "required": false } } }, - "pipelines.TableSpecificConfigScdType": { - "name": "TableSpecificConfigScdType", - "package": "pipelines", - "description": "The SCD type to use to ingest the table.", - "fields": {} - }, - "serving.Ai21LabsConfig": { - "name": "Ai21LabsConfig", - "package": "serving", - "description": "ai21 labs config configuration.", + "catalog.CreateTableConstraint": { + "name": "CreateTableConstraint", + "package": "catalog", + "description": "", "fields": { - "ai21labs_api_key": { - "name": "ai21labs_api_key", - "type": "any", - "description": "The Databricks secret key reference for an AI21 Labs API key. If you\nprefer to paste your API key directly, see `ai21labs_api_key_plaintext`.\nYou must provide an API key using one of the following fields:\n`ai21labs_api_key` or `ai21labs_api_key_plaintext`.", + "constraint": { + "name": "constraint", + "type": "TableConstraint", + "description": "", "required": false }, - "ai21labs_api_key_plaintext": { - "name": "ai21labs_api_key_plaintext", - "type": "any", - "description": "An AI21 Labs API key provided as a plaintext string. If you prefer to\nreference your key using Databricks Secrets, see `ai21labs_api_key`. 
You\nmust provide an API key using one of the following fields:\n`ai21labs_api_key` or `ai21labs_api_key_plaintext`.", + "full_name_arg": { + "name": "full_name_arg", + "type": "string", + "description": "The full name of the table referenced by the constraint.", "required": false } } }, - "serving.AiGatewayConfig": { - "name": "AiGatewayConfig", - "package": "serving", - "description": "ai gateway config configuration.", + "catalog.CreateTableRequest": { + "name": "CreateTableRequest", + "package": "catalog", + "description": "", "fields": { - "fallback_config": { - "name": "fallback_config", - "type": "any", - "description": "Configuration for traffic fallback which auto fallbacks to other served entities if the request to a served\nentity fails with certain error codes, to increase availability.", - "required": false - }, - "guardrails": { - "name": "guardrails", - "type": "any", - "description": "Configuration for AI Guardrails to prevent unwanted data and unsafe data in requests and responses.", + "catalog_name": { + "name": "catalog_name", + "type": "string", + "description": "Name of parent catalog.", "required": false }, - "inference_table_config": { - "name": "inference_table_config", - "type": "any", - "description": "Configuration for payload logging using inference tables.\nUse these tables to monitor and audit data being sent to and received from model APIs and to improve model quality.", + "columns": { + "name": "columns", + "type": "[]ColumnInfo", + "description": "The array of __ColumnInfo__ definitions of the table's columns.", "required": false }, - "rate_limits": { - "name": "rate_limits", - "type": "any", - "description": "Configuration for rate limits which can be set to limit endpoint traffic.", + "data_source_format": { + "name": "data_source_format", + "type": "DataSourceFormat", + "description": "", "required": false }, - "usage_tracking_config": { - "name": "usage_tracking_config", - "type": "any", - "description": "Configuration to enable usage tracking using system tables.\nThese tables allow you to monitor operational usage on endpoints and their associated costs.", + "name": { + "name": "name", + "type": "string", + "description": "Name of table, relative to parent schema.", "required": false - } - } - }, - "serving.AiGatewayGuardrailParameters": { - "name": "AiGatewayGuardrailParameters", - "package": "serving", - "description": "ai gateway guardrail parameters configuration.", - "fields": { - "invalid_keywords": { - "name": "invalid_keywords", - "type": "any", - "description": "List of invalid keywords.\nAI guardrail uses keyword or string matching to decide if the keyword exists in the request or response content.", - "required": false, - "deprecated": true }, - "pii": { - "name": "pii", - "type": "any", - "description": "Configuration for guardrail PII filter.", + "properties": { + "name": "properties", + "type": "map[string]string", + "description": "A map of key-value properties attached to the securable.", "required": false }, - "safety": { - "name": "safety", - "type": "any", - "description": "Indicates whether the safety filter is enabled.", + "schema_name": { + "name": "schema_name", + "type": "string", + "description": "Name of parent schema relative to its parent catalog.", "required": false }, - "valid_topics": { - "name": "valid_topics", - "type": "any", - "description": "The list of allowed topics.\nGiven a chat request, this guardrail flags the request if its topic is not in the allowed topics.", - "required": false, - "deprecated": true - } - 
} - }, - "serving.AiGatewayGuardrailPiiBehavior": { - "name": "AiGatewayGuardrailPiiBehavior", - "package": "serving", - "description": "ai gateway guardrail pii behavior configuration.", - "fields": { - "behavior": { - "name": "behavior", - "type": "any", - "description": "Configuration for input guardrail filters.", - "required": false - } - } - }, - "serving.AiGatewayGuardrailPiiBehaviorBehavior": { - "name": "AiGatewayGuardrailPiiBehaviorBehavior", - "package": "serving", - "description": "ai gateway guardrail pii behavior behavior configuration.", - "fields": {} - }, - "serving.AiGatewayGuardrails": { - "name": "AiGatewayGuardrails", - "package": "serving", - "description": "ai gateway guardrails configuration.", - "fields": { - "input": { - "name": "input", - "type": "any", - "description": "Configuration for input guardrail filters.", + "storage_location": { + "name": "storage_location", + "type": "string", + "description": "Storage root URL for table (for **MANAGED**, **EXTERNAL** tables).", "required": false }, - "output": { - "name": "output", - "type": "any", - "description": "Configuration for output guardrail filters.", + "table_type": { + "name": "table_type", + "type": "TableType", + "description": "", "required": false } } }, - "serving.AiGatewayInferenceTableConfig": { - "name": "AiGatewayInferenceTableConfig", - "package": "serving", - "description": "ai gateway inference table config configuration.", + "catalog.CreateVolumeRequestContent": { + "name": "CreateVolumeRequestContent", + "package": "catalog", + "description": "", "fields": { "catalog_name": { "name": "catalog_name", "type": "string", - "description": "The name of the catalog in Unity Catalog. Required when enabling inference tables.\nNOTE: On update, you have to disable inference table first in order to change the catalog name.", + "description": "The name of the catalog where the schema and the volume are", "required": false }, - "enabled": { - "name": "enabled", - "type": "bool", - "description": "Indicates whether the inference table is enabled.", + "comment": { + "name": "comment", + "type": "string", + "description": "The comment attached to the volume", + "required": false + }, + "name": { + "name": "name", + "type": "string", + "description": "The name of the volume", "required": false }, "schema_name": { "name": "schema_name", "type": "string", - "description": "The name of the schema in Unity Catalog. Required when enabling inference tables.\nNOTE: On update, you have to disable inference table first in order to change the schema name.", + "description": "The name of the schema where the volume is", "required": false }, - "table_name_prefix": { - "name": "table_name_prefix", - "type": "any", - "description": "The prefix of the table in Unity Catalog.\nNOTE: On update, you have to disable inference table first in order to change the prefix name.", - "required": false - } - } - }, - "serving.AiGatewayRateLimit": { - "name": "AiGatewayRateLimit", - "package": "serving", - "description": "ai gateway rate limit configuration.", - "fields": { - "calls": { - "name": "calls", - "type": "any", - "description": "Used to specify how many calls are allowed for a key within the renewal_period.", - "required": false - }, - "key": { - "name": "key", - "type": "any", - "description": "Key field for a rate limit. 
Currently, 'user', 'user_group, 'service_principal', and 'endpoint' are supported,\nwith 'endpoint' being the default if not specified.", - "required": false - }, - "principal": { - "name": "principal", - "type": "any", - "description": "Principal field for a user, user group, or service principal to apply rate limiting to. Accepts a user email, group name, or service principal application ID.", - "required": false - }, - "renewal_period": { - "name": "renewal_period", - "type": "any", - "description": "Renewal period field for a rate limit. Currently, only 'minute' is supported.", + "storage_location": { + "name": "storage_location", + "type": "string", + "description": "The storage location on the cloud", "required": false }, - "tokens": { - "name": "tokens", - "type": "any", - "description": "Used to specify how many tokens are allowed for a key within the renewal_period.", + "volume_type": { + "name": "volume_type", + "type": "VolumeType", + "description": "The type of the volume. An external volume is located in the specified\nexternal location. A managed volume is located in the default location\nwhich is specified by the parent schema, or the parent catalog, or the\nMetastore. [Learn more]\n\n[Learn more]: https://docs.databricks.com/aws/en/volumes/managed-vs-external", "required": false } } }, - "serving.AiGatewayRateLimitKey": { - "name": "AiGatewayRateLimitKey", - "package": "serving", - "description": "ai gateway rate limit key configuration.", - "fields": {} - }, - "serving.AiGatewayRateLimitRenewalPeriod": { - "name": "AiGatewayRateLimitRenewalPeriod", - "package": "serving", - "description": "ai gateway rate limit renewal period configuration.", - "fields": {} - }, - "serving.AiGatewayUsageTrackingConfig": { - "name": "AiGatewayUsageTrackingConfig", - "package": "serving", - "description": "ai gateway usage tracking config configuration.", + "catalog.CredentialDependency": { + "name": "CredentialDependency", + "package": "catalog", + "description": "A credential that is dependent on a SQL object.", "fields": { - "enabled": { - "name": "enabled", - "type": "bool", - "description": "Whether to enable usage tracking.", + "credential_name": { + "name": "credential_name", + "type": "string", + "description": "Full name of the dependent credential, in the form of\n__credential_name__.", "required": false } } }, - "serving.AmazonBedrockConfig": { - "name": "AmazonBedrockConfig", - "package": "serving", - "description": "amazon bedrock config configuration.", + "catalog.CredentialInfo": { + "name": "CredentialInfo", + "package": "catalog", + "description": "", "fields": { - "aws_access_key_id": { - "name": "aws_access_key_id", - "type": "string", - "description": "The Databricks secret key reference for an AWS access key ID with\npermissions to interact with Bedrock services. If you prefer to paste\nyour API key directly, see `aws_access_key_id_plaintext`. You must provide an API\nkey using one of the following fields: `aws_access_key_id` or\n`aws_access_key_id_plaintext`.", + "aws_iam_role": { + "name": "aws_iam_role", + "type": "*AwsIamRole", + "description": "The AWS IAM role configuration.", "required": false }, - "aws_access_key_id_plaintext": { - "name": "aws_access_key_id_plaintext", - "type": "any", - "description": "An AWS access key ID with permissions to interact with Bedrock services\nprovided as a plaintext string. If you prefer to reference your key using\nDatabricks Secrets, see `aws_access_key_id`. 
You must provide an API key\nusing one of the following fields: `aws_access_key_id` or\n`aws_access_key_id_plaintext`.", + "azure_managed_identity": { + "name": "azure_managed_identity", + "type": "*AzureManagedIdentity", + "description": "The Azure managed identity configuration.", "required": false }, - "aws_region": { - "name": "aws_region", - "type": "any", - "description": "The AWS region to use. Bedrock has to be enabled there.", + "azure_service_principal": { + "name": "azure_service_principal", + "type": "*AzureServicePrincipal", + "description": "The Azure service principal configuration.", "required": false }, - "aws_secret_access_key": { - "name": "aws_secret_access_key", - "type": "any", - "description": "The Databricks secret key reference for an AWS secret access key paired\nwith the access key ID, with permissions to interact with Bedrock\nservices. If you prefer to paste your API key directly, see\n`aws_secret_access_key_plaintext`. You must provide an API key using one\nof the following fields: `aws_secret_access_key` or\n`aws_secret_access_key_plaintext`.", + "comment": { + "name": "comment", + "type": "string", + "description": "Comment associated with the credential.", "required": false }, - "aws_secret_access_key_plaintext": { - "name": "aws_secret_access_key_plaintext", - "type": "any", - "description": "An AWS secret access key paired with the access key ID, with permissions\nto interact with Bedrock services provided as a plaintext string. If you\nprefer to reference your key using Databricks Secrets, see\n`aws_secret_access_key`. You must provide an API key using one of the\nfollowing fields: `aws_secret_access_key` or\n`aws_secret_access_key_plaintext`.", + "created_at": { + "name": "created_at", + "type": "int64", + "description": "Time at which this credential was created, in epoch milliseconds.", "required": false }, - "bedrock_provider": { - "name": "bedrock_provider", - "type": "any", - "description": "The underlying provider in Amazon Bedrock. Supported values (case\ninsensitive) include: Anthropic, Cohere, AI21Labs, Amazon.", + "created_by": { + "name": "created_by", + "type": "string", + "description": "Username of credential creator.", "required": false }, - "instance_profile_arn": { - "name": "instance_profile_arn", - "type": "any", - "description": "ARN of the instance profile that the external model will use to access AWS resources.\nYou must authenticate using an instance profile or access keys.\nIf you prefer to authenticate using access keys, see `aws_access_key_id`,\n`aws_access_key_id_plaintext`, `aws_secret_access_key` and `aws_secret_access_key_plaintext`.", + "databricks_gcp_service_account": { + "name": "databricks_gcp_service_account", + "type": "*DatabricksGcpServiceAccount", + "description": "The Databricks managed GCP service account configuration.", "required": false - } - } - }, - "serving.AmazonBedrockConfigBedrockProvider": { - "name": "AmazonBedrockConfigBedrockProvider", - "package": "serving", - "description": "amazon bedrock config bedrock provider configuration.", - "fields": {} - }, - "serving.AnthropicConfig": { - "name": "AnthropicConfig", - "package": "serving", - "description": "anthropic config configuration.", - "fields": { - "anthropic_api_key": { - "name": "anthropic_api_key", - "type": "any", - "description": "The Databricks secret key reference for an Anthropic API key. 
If you\nprefer to paste your API key directly, see `anthropic_api_key_plaintext`.\nYou must provide an API key using one of the following fields:\n`anthropic_api_key` or `anthropic_api_key_plaintext`.", + }, + "full_name": { + "name": "full_name", + "type": "string", + "description": "The full name of the credential.", "required": false }, - "anthropic_api_key_plaintext": { - "name": "anthropic_api_key_plaintext", - "type": "any", - "description": "The Anthropic API key provided as a plaintext string. If you prefer to\nreference your key using Databricks Secrets, see `anthropic_api_key`. You\nmust provide an API key using one of the following fields:\n`anthropic_api_key` or `anthropic_api_key_plaintext`.", + "id": { + "name": "id", + "type": "string", + "description": "The unique identifier of the credential.", "required": false - } - } - }, - "serving.ApiKeyAuth": { - "name": "ApiKeyAuth", - "package": "serving", - "description": "api key auth configuration.", - "fields": { - "key": { - "name": "key", - "type": "any", - "description": "The name of the API key parameter used for authentication.", + }, + "isolation_mode": { + "name": "isolation_mode", + "type": "IsolationMode", + "description": "Whether the current securable is accessible from all workspaces or a\nspecific set of workspaces.", "required": false }, - "value": { - "name": "value", - "type": "any", - "description": "The Databricks secret key reference for an API Key.\nIf you prefer to paste your token directly, see `value_plaintext`.", + "metastore_id": { + "name": "metastore_id", + "type": "string", + "description": "Unique identifier of the parent metastore.", "required": false }, - "value_plaintext": { - "name": "value_plaintext", - "type": "any", - "description": "The API Key provided as a plaintext string. If you prefer to reference your\ntoken using Databricks Secrets, see `value`.", + "name": { + "name": "name", + "type": "string", + "description": "The credential name. The name must be unique among storage and service\ncredentials within the metastore.", "required": false - } - } - }, - "serving.AutoCaptureConfigInput": { - "name": "AutoCaptureConfigInput", - "package": "serving", - "description": "auto capture config input configuration.", - "fields": { - "catalog_name": { - "name": "catalog_name", + }, + "owner": { + "name": "owner", "type": "string", - "description": "The name of the catalog in Unity Catalog. NOTE: On update, you cannot change the catalog name if the inference table is already enabled.", + "description": "Username of current owner of credential.", "required": false }, - "enabled": { - "name": "enabled", + "purpose": { + "name": "purpose", + "type": "CredentialPurpose", + "description": "Indicates the purpose of the credential.", + "required": false + }, + "read_only": { + "name": "read_only", "type": "bool", - "description": "Indicates whether the inference table is enabled.", + "description": "Whether the credential is usable only for read operations. Only\napplicable when purpose is **STORAGE**.", "required": false }, - "schema_name": { - "name": "schema_name", + "updated_at": { + "name": "updated_at", + "type": "int64", + "description": "Time at which this credential was last modified, in epoch milliseconds.", + "required": false + }, + "updated_by": { + "name": "updated_by", "type": "string", - "description": "The name of the schema in Unity Catalog. 
NOTE: On update, you cannot change the schema name if the inference table is already enabled.", + "description": "Username of user who last modified the credential.", "required": false }, - "table_name_prefix": { - "name": "table_name_prefix", - "type": "any", - "description": "The prefix of the table in Unity Catalog. NOTE: On update, you cannot change the prefix name if the inference table is already enabled.", + "used_for_managed_storage": { + "name": "used_for_managed_storage", + "type": "bool", + "description": "Whether this credential is the current metastore's root storage\ncredential. Only applicable when purpose is **STORAGE**.", "required": false } } }, - "serving.BearerTokenAuth": { - "name": "BearerTokenAuth", - "package": "serving", - "description": "bearer token auth configuration.", + "catalog.CredentialValidationResult": { + "name": "CredentialValidationResult", + "package": "catalog", + "description": "", "fields": { - "token": { - "name": "token", - "type": "any", - "description": "The Databricks secret key reference for a token.\nIf you prefer to paste your token directly, see `token_plaintext`.", + "message": { + "name": "message", + "type": "string", + "description": "Error message would exist when the result does not equal to **PASS**.", "required": false }, - "token_plaintext": { - "name": "token_plaintext", - "type": "any", - "description": "The token provided as a plaintext string. If you prefer to reference your\ntoken using Databricks Secrets, see `token`.", + "result": { + "name": "result", + "type": "ValidateCredentialResult", + "description": "The results of the tested operation.", "required": false } } }, - "serving.CohereConfig": { - "name": "CohereConfig", - "package": "serving", - "description": "cohere config configuration.", + "catalog.DatabricksGcpServiceAccount": { + "name": "DatabricksGcpServiceAccount", + "package": "catalog", + "description": "GCP long-lived credential. Databricks-created Google Cloud Storage service\naccount.", "fields": { - "cohere_api_base": { - "name": "cohere_api_base", - "type": "any", - "description": "This is an optional field to provide a customized base URL for the Cohere\nAPI. If left unspecified, the standard Cohere base URL is used.", + "credential_id": { + "name": "credential_id", + "type": "string", + "description": "The Databricks internal ID that represents this managed identity.", "required": false }, - "cohere_api_key": { - "name": "cohere_api_key", - "type": "any", - "description": "The Databricks secret key reference for a Cohere API key. If you prefer\nto paste your API key directly, see `cohere_api_key_plaintext`. You must\nprovide an API key using one of the following fields: `cohere_api_key` or\n`cohere_api_key_plaintext`.", + "email": { + "name": "email", + "type": "string", + "description": "The email of the service account.", "required": false }, - "cohere_api_key_plaintext": { - "name": "cohere_api_key_plaintext", - "type": "any", - "description": "The Cohere API key provided as a plaintext string. If you prefer to\nreference your key using Databricks Secrets, see `cohere_api_key`. 
You\nmust provide an API key using one of the following fields:\n`cohere_api_key` or `cohere_api_key_plaintext`.", + "private_key_id": { + "name": "private_key_id", + "type": "string", + "description": "The ID that represents the private key for this Service Account", "required": false } } }, - "serving.CustomProviderConfig": { - "name": "CustomProviderConfig", - "package": "serving", - "description": "Configs needed to create a custom provider model route.", + "catalog.DatabricksGcpServiceAccountResponse": { + "name": "DatabricksGcpServiceAccountResponse", + "package": "catalog", + "description": "GCP long-lived credential. Databricks-created Google Cloud Storage service\naccount.", "fields": { - "api_key_auth": { - "name": "api_key_auth", - "type": "any", - "description": "This is a field to provide API key authentication for the custom provider API.\nYou can only specify one authentication method.", - "required": false - }, - "bearer_token_auth": { - "name": "bearer_token_auth", - "type": "any", - "description": "This is a field to provide bearer token authentication for the custom provider API.\nYou can only specify one authentication method.", + "credential_id": { + "name": "credential_id", + "type": "string", + "description": "The Databricks internal ID that represents this managed identity.", "required": false }, - "custom_provider_url": { - "name": "custom_provider_url", + "email": { + "name": "email", "type": "string", - "description": "This is a field to provide the URL of the custom provider API.", + "description": "The email of the service account.", "required": false } } }, - "serving.DatabricksModelServingConfig": { - "name": "DatabricksModelServingConfig", - "package": "serving", - "description": "databricks model serving config configuration.", + "catalog.DeleteRequestExternalLineage": { + "name": "DeleteRequestExternalLineage", + "package": "catalog", + "description": "", "fields": { - "databricks_api_token": { - "name": "databricks_api_token", - "type": "any", - "description": "The Databricks secret key reference for a Databricks API token that\ncorresponds to a user or service principal with Can Query access to the\nmodel serving endpoint pointed to by this external model. If you prefer\nto paste your API key directly, see `databricks_api_token_plaintext`. You\nmust provide an API key using one of the following fields:\n`databricks_api_token` or `databricks_api_token_plaintext`.", + "id": { + "name": "id", + "type": "string", + "description": "Unique identifier of the external lineage relationship.", "required": false }, - "databricks_api_token_plaintext": { - "name": "databricks_api_token_plaintext", - "type": "any", - "description": "The Databricks API token that corresponds to a user or service principal\nwith Can Query access to the model serving endpoint pointed to by this\nexternal model provided as a plaintext string. If you prefer to reference\nyour key using Databricks Secrets, see `databricks_api_token`. 
You must\nprovide an API key using one of the following fields:\n`databricks_api_token` or `databricks_api_token_plaintext`.", + "source": { + "name": "source", + "type": "ExternalLineageObject", + "description": "Source object of the external lineage relationship.", "required": false }, - "databricks_workspace_url": { - "name": "databricks_workspace_url", - "type": "string", - "description": "The URL of the Databricks workspace containing the model serving endpoint\npointed to by this external model.", + "target": { + "name": "target", + "type": "ExternalLineageObject", + "description": "Target object of the external lineage relationship.", "required": false } } }, - "serving.EmailNotifications": { - "name": "EmailNotifications", - "package": "serving", - "description": "email notifications configuration.", + "catalog.DeltaRuntimePropertiesKvPairs": { + "name": "DeltaRuntimePropertiesKvPairs", + "package": "catalog", + "description": "Properties pertaining to the current state of the delta table as given by the\ncommit server. This does not contain **delta.*** (input) properties in\n__TableInfo.properties__.", "fields": { - "on_update_failure": { - "name": "on_update_failure", - "type": "any", - "description": "A list of email addresses to be notified when an endpoint fails to update its configuration or state.", - "required": false - }, - "on_update_success": { - "name": "on_update_success", - "type": "any", - "description": "A list of email addresses to be notified when an endpoint successfully updates its configuration or state.", + "delta_runtime_properties": { + "name": "delta_runtime_properties", + "type": "map[string]string", + "description": "A map of key-value properties attached to the securable.", "required": false } } }, - "serving.EndpointCoreConfigInput": { - "name": "EndpointCoreConfigInput", - "package": "serving", - "description": "endpoint core config input configuration.", + "catalog.Dependency": { + "name": "Dependency", + "package": "catalog", + "description": "A dependency of a SQL object. 
One of the following fields must be defined:\n__table__, __function__, __connection__, or __credential__.", "fields": { - "auto_capture_config": { - "name": "auto_capture_config", - "type": "any", - "description": "Configuration for Inference Tables which automatically logs requests and responses to Unity Catalog.\nNote: this field is deprecated for creating new provisioned throughput endpoints,\nor updating existing provisioned throughput endpoints that never have inference table configured;\nin these cases please use AI Gateway to manage inference tables.", + "connection": { + "name": "connection", + "type": "*ConnectionDependency", + "description": "", "required": false }, - "served_entities": { - "name": "served_entities", - "type": "any", - "description": "The list of served entities under the serving endpoint config.", + "credential": { + "name": "credential", + "type": "*CredentialDependency", + "description": "", "required": false }, - "served_models": { - "name": "served_models", - "type": "any", - "description": "(Deprecated, use served_entities instead) The list of served models under the serving endpoint config.", + "function": { + "name": "function", + "type": "*FunctionDependency", + "description": "", "required": false }, - "traffic_config": { - "name": "traffic_config", - "type": "any", - "description": "The traffic configuration associated with the serving endpoint config.", + "table": { + "name": "table", + "type": "*TableDependency", + "description": "", "required": false } } }, - "serving.EndpointTag": { - "name": "EndpointTag", - "package": "serving", - "description": "endpoint tag configuration.", + "catalog.DependencyList": { + "name": "DependencyList", + "package": "catalog", + "description": "A list of dependencies.", "fields": { - "key": { - "name": "key", - "type": "any", - "description": "Key field for a serving endpoint tag.", + "dependencies": { + "name": "dependencies", + "type": "[]Dependency", + "description": "Array of dependencies.", + "required": false + } + } + }, + "catalog.EffectivePermissionsList": { + "name": "EffectivePermissionsList", + "package": "catalog", + "description": "", + "fields": { + "next_page_token": { + "name": "next_page_token", + "type": "string", + "description": "Opaque token to retrieve the next page of results. Absent if there are no\nmore pages. __page_token__ should be set to this value for the next\nrequest (for the next page of results).", "required": false }, - "value": { - "name": "value", - "type": "any", - "description": "Optional value field for a serving endpoint tag.", + "privilege_assignments": { + "name": "privilege_assignments", + "type": "[]EffectivePrivilegeAssignment", + "description": "The privileges conveyed to each principal (either directly or via\ninheritance)", "required": false } } }, - "serving.ExternalModel": { - "name": "ExternalModel", - "package": "serving", - "description": "external model configuration.", + "catalog.EffectivePredictiveOptimizationFlag": { + "name": "EffectivePredictiveOptimizationFlag", + "package": "catalog", + "description": "", "fields": { - "ai21labs_config": { - "name": "ai21labs_config", - "type": "any", - "description": "AI21Labs Config. Only required if the provider is 'ai21labs'.", + "inherited_from_name": { + "name": "inherited_from_name", + "type": "string", + "description": "The name of the object from which the flag was inherited. 
If there was no\ninheritance, this field is left blank.", "required": false }, - "amazon_bedrock_config": { - "name": "amazon_bedrock_config", - "type": "any", - "description": "Amazon Bedrock Config. Only required if the provider is 'amazon-bedrock'.", + "inherited_from_type": { + "name": "inherited_from_type", + "type": "EffectivePredictiveOptimizationFlagInheritedFromType", + "description": "The type of the object from which the flag was inherited. If there was no\ninheritance, this field is left blank.", "required": false }, - "anthropic_config": { - "name": "anthropic_config", - "type": "any", - "description": "Anthropic Config. Only required if the provider is 'anthropic'.", + "value": { + "name": "value", + "type": "EnablePredictiveOptimization", + "description": "Whether predictive optimization should be enabled for this object and\nobjects under it.", "required": false - }, - "cohere_config": { - "name": "cohere_config", - "type": "any", - "description": "Cohere Config. Only required if the provider is 'cohere'.", + } + } + }, + "catalog.EffectivePrivilege": { + "name": "EffectivePrivilege", + "package": "catalog", + "description": "", + "fields": { + "inherited_from_name": { + "name": "inherited_from_name", + "type": "string", + "description": "The full name of the object that conveys this privilege via inheritance.\nThis field is omitted when privilege is not inherited (it's assigned to\nthe securable itself).", "required": false }, - "custom_provider_config": { - "name": "custom_provider_config", - "type": "any", - "description": "Custom Provider Config. Only required if the provider is 'custom'.", + "inherited_from_type": { + "name": "inherited_from_type", + "type": "SecurableType", + "description": "The type of the object that conveys this privilege via inheritance. This\nfield is omitted when privilege is not inherited (it's assigned to the\nsecurable itself).", "required": false }, - "databricks_model_serving_config": { - "name": "databricks_model_serving_config", - "type": "any", - "description": "Databricks Model Serving Config. Only required if the provider is 'databricks-model-serving'.", + "privilege": { + "name": "privilege", + "type": "Privilege", + "description": "The privilege assigned to the principal.", "required": false - }, - "google_cloud_vertex_ai_config": { - "name": "google_cloud_vertex_ai_config", - "type": "any", - "description": "Google Cloud Vertex AI Config. Only required if the provider is 'google-cloud-vertex-ai'.", + } + } + }, + "catalog.EffectivePrivilegeAssignment": { + "name": "EffectivePrivilegeAssignment", + "package": "catalog", + "description": "", + "fields": { + "principal": { + "name": "principal", + "type": "string", + "description": "The principal (user email address or group name).", "required": false }, - "name": { - "name": "name", - "type": "any", - "description": "The name of the external model.", + "privileges": { + "name": "privileges", + "type": "[]EffectivePrivilege", + "description": "The privileges conveyed to the principal (either directly or via\ninheritance).", "required": false - }, - "openai_config": { - "name": "openai_config", - "type": "any", - "description": "OpenAI Config. 
Only required if the provider is 'openai'.", + } + } + }, + "catalog.EnableRequest": { + "name": "EnableRequest", + "package": "catalog", + "description": "", + "fields": { + "catalog_name": { + "name": "catalog_name", + "type": "string", + "description": "the catalog for which the system schema is to enabled in", + "required": false + } + } + }, + "catalog.EncryptionDetails": { + "name": "EncryptionDetails", + "package": "catalog", + "description": "Encryption options that apply to clients connecting to cloud storage.", + "fields": { + "sse_encryption_details": { + "name": "sse_encryption_details", + "type": "*SseEncryptionDetails", + "description": "Server-Side Encryption properties for clients communicating with AWS s3.", + "required": false + } + } + }, + "catalog.EntityTagAssignment": { + "name": "EntityTagAssignment", + "package": "catalog", + "description": "Represents a tag assignment to an entity", + "fields": { + "entity_name": { + "name": "entity_name", + "type": "string", + "description": "The fully qualified name of the entity to which the tag is assigned", "required": false }, - "palm_config": { - "name": "palm_config", - "type": "any", - "description": "PaLM Config. Only required if the provider is 'palm'.", + "entity_type": { + "name": "entity_type", + "type": "string", + "description": "The type of the entity to which the tag is assigned. Allowed values are:\ncatalogs, schemas, tables, columns, volumes.", "required": false }, - "provider": { - "name": "provider", - "type": "any", - "description": "The name of the provider for the external model. Currently, the supported providers are 'ai21labs', 'anthropic', 'amazon-bedrock', 'cohere', 'databricks-model-serving', 'google-cloud-vertex-ai', 'openai', 'palm', and 'custom'.", + "tag_key": { + "name": "tag_key", + "type": "string", + "description": "The key of the tag", "required": false }, - "task": { - "name": "task", - "type": "any", - "description": "The task type of the external model.", + "tag_value": { + "name": "tag_value", + "type": "string", + "description": "The value of the tag", "required": false } } }, - "serving.ExternalModelProvider": { - "name": "ExternalModelProvider", - "package": "serving", - "description": "external model provider configuration.", - "fields": {} - }, - "serving.FallbackConfig": { - "name": "FallbackConfig", - "package": "serving", - "description": "fallback config configuration.", + "catalog.ExternalLineageExternalMetadata": { + "name": "ExternalLineageExternalMetadata", + "package": "catalog", + "description": "", "fields": { - "enabled": { - "name": "enabled", - "type": "bool", - "description": "Whether to enable traffic fallback. When a served entity in the serving endpoint returns specific error\ncodes (e.g. 
500), the request will automatically be round-robin attempted with other served entities in the same\nendpoint, following the order of served entity list, until a successful response is returned.\nIf all attempts fail, return the last response with the error code.", + "name": { + "name": "name", + "type": "string", + "description": "", "required": false } } }, - "serving.GoogleCloudVertexAiConfig": { - "name": "GoogleCloudVertexAiConfig", - "package": "serving", - "description": "google cloud vertex ai config configuration.", + "catalog.ExternalLineageExternalMetadataInfo": { + "name": "ExternalLineageExternalMetadataInfo", + "package": "catalog", + "description": "Represents the external metadata object in the lineage event.", "fields": { - "private_key": { - "name": "private_key", - "type": "any", - "description": "The Databricks secret key reference for a private key for the service\naccount which has access to the Google Cloud Vertex AI Service. See [Best\npractices for managing service account keys]. If you prefer to paste your\nAPI key directly, see `private_key_plaintext`. You must provide an API\nkey using one of the following fields: `private_key` or\n`private_key_plaintext`\n\n[Best practices for managing service account keys]: https://cloud.google.com/iam/docs/best-practices-for-managing-service-account-keys", + "entity_type": { + "name": "entity_type", + "type": "string", + "description": "Type of entity represented by the external metadata object.", "required": false }, - "private_key_plaintext": { - "name": "private_key_plaintext", - "type": "any", - "description": "The private key for the service account which has access to the Google\nCloud Vertex AI Service provided as a plaintext secret. See [Best\npractices for managing service account keys]. If you prefer to reference\nyour key using Databricks Secrets, see `private_key`. You must provide an\nAPI key using one of the following fields: `private_key` or\n`private_key_plaintext`.\n\n[Best practices for managing service account keys]: https://cloud.google.com/iam/docs/best-practices-for-managing-service-account-keys", + "event_time": { + "name": "event_time", + "type": "string", + "description": "Timestamp of the lineage event.", "required": false }, - "project_id": { - "name": "project_id", + "name": { + "name": "name", "type": "string", - "description": "This is the Google Cloud project id that the service account is\nassociated with.", + "description": "Name of the external metadata object.", "required": false }, - "region": { - "name": "region", - "type": "any", - "description": "This is the region for the Google Cloud Vertex AI Service. See [supported\nregions] for more details. 
Some models are only available in specific\nregions.\n\n[supported regions]: https://cloud.google.com/vertex-ai/docs/general/locations", + "system_type": { + "name": "system_type", + "type": "SystemType", + "description": "Type of external system.", "required": false } } }, - "serving.OpenAiConfig": { - "name": "OpenAiConfig", - "package": "serving", - "description": "Configs needed to create an OpenAI model route.", + "catalog.ExternalLineageFileInfo": { + "name": "ExternalLineageFileInfo", + "package": "catalog", + "description": "Represents the path information in the lineage event.", "fields": { - "microsoft_entra_client_id": { - "name": "microsoft_entra_client_id", + "event_time": { + "name": "event_time", "type": "string", - "description": "This field is only required for Azure AD OpenAI and is the Microsoft\nEntra Client ID.", + "description": "Timestamp of the lineage event.", "required": false }, - "microsoft_entra_client_secret": { - "name": "microsoft_entra_client_secret", - "type": "any", - "description": "The Databricks secret key reference for a client secret used for\nMicrosoft Entra ID authentication. If you prefer to paste your client\nsecret directly, see `microsoft_entra_client_secret_plaintext`. You must\nprovide an API key using one of the following fields:\n`microsoft_entra_client_secret` or\n`microsoft_entra_client_secret_plaintext`.", + "path": { + "name": "path", + "type": "string", + "description": "URL of the path.", "required": false }, - "microsoft_entra_client_secret_plaintext": { - "name": "microsoft_entra_client_secret_plaintext", - "type": "any", - "description": "The client secret used for Microsoft Entra ID authentication provided as\na plaintext string. If you prefer to reference your key using Databricks\nSecrets, see `microsoft_entra_client_secret`. You must provide an API key\nusing one of the following fields: `microsoft_entra_client_secret` or\n`microsoft_entra_client_secret_plaintext`.", + "securable_name": { + "name": "securable_name", + "type": "string", + "description": "The full name of the securable on the path.", "required": false }, - "microsoft_entra_tenant_id": { - "name": "microsoft_entra_tenant_id", + "securable_type": { + "name": "securable_type", "type": "string", - "description": "This field is only required for Azure AD OpenAI and is the Microsoft\nEntra Tenant ID.", + "description": "The securable type of the securable on the path.", "required": false }, - "openai_api_base": { - "name": "openai_api_base", - "type": "any", - "description": "This is a field to provide a customized base URl for the OpenAI API. For\nAzure OpenAI, this field is required, and is the base URL for the Azure\nOpenAI API service provided by Azure. For other OpenAI API types, this\nfield is optional, and if left unspecified, the standard OpenAI base URL\nis used.", + "storage_location": { + "name": "storage_location", + "type": "string", + "description": "The storage location associated with securable on the path.", "required": false - }, - "openai_api_key": { - "name": "openai_api_key", - "type": "any", - "description": "The Databricks secret key reference for an OpenAI API key using the\nOpenAI or Azure service. If you prefer to paste your API key directly,\nsee `openai_api_key_plaintext`. 
You must provide an API key using one of\nthe following fields: `openai_api_key` or `openai_api_key_plaintext`.", + } + } + }, + "catalog.ExternalLineageInfo": { + "name": "ExternalLineageInfo", + "package": "catalog", + "description": "Lineage response containing lineage information of a data asset.", + "fields": { + "external_lineage_info": { + "name": "external_lineage_info", + "type": "*ExternalLineageRelationshipInfo", + "description": "Information about the edge metadata of the external lineage relationship.", "required": false }, - "openai_api_key_plaintext": { - "name": "openai_api_key_plaintext", - "type": "any", - "description": "The OpenAI API key using the OpenAI or Azure service provided as a\nplaintext string. If you prefer to reference your key using Databricks\nSecrets, see `openai_api_key`. You must provide an API key using one of\nthe following fields: `openai_api_key` or `openai_api_key_plaintext`.", + "external_metadata_info": { + "name": "external_metadata_info", + "type": "*ExternalLineageExternalMetadataInfo", + "description": "Information about external metadata involved in the lineage relationship.", "required": false }, - "openai_api_type": { - "name": "openai_api_type", - "type": "any", - "description": "This is an optional field to specify the type of OpenAI API to use. For\nAzure OpenAI, this field is required, and adjust this parameter to\nrepresent the preferred security access validation protocol. For access\ntoken validation, use azure. For authentication using Azure Active\nDirectory (Azure AD) use, azuread.", + "file_info": { + "name": "file_info", + "type": "*ExternalLineageFileInfo", + "description": "Information about the file involved in the lineage relationship.", "required": false }, - "openai_api_version": { - "name": "openai_api_version", - "type": "any", - "description": "This is an optional field to specify the OpenAI API version. 
For Azure\nOpenAI, this field is required, and is the version of the Azure OpenAI\nservice to utilize, specified by a date.", + "model_info": { + "name": "model_info", + "type": "*ExternalLineageModelVersionInfo", + "description": "Information about the model version involved in the lineage relationship.", "required": false }, - "openai_deployment_name": { - "name": "openai_deployment_name", + "table_info": { + "name": "table_info", + "type": "*ExternalLineageTableInfo", + "description": "Information about the table involved in the lineage relationship.", + "required": false + } + } + }, + "catalog.ExternalLineageModelVersion": { + "name": "ExternalLineageModelVersion", + "package": "catalog", + "description": "", + "fields": { + "name": { + "name": "name", "type": "string", - "description": "This field is only required for Azure OpenAI and is the name of the\ndeployment resource for the Azure OpenAI service.", + "description": "", "required": false }, - "openai_organization": { - "name": "openai_organization", - "type": "any", - "description": "This is an optional field to specify the organization in OpenAI or Azure\nOpenAI.", + "version": { + "name": "version", + "type": "string", + "description": "", "required": false } } }, - "serving.PaLmConfig": { - "name": "PaLmConfig", - "package": "serving", - "description": "pa lm config configuration.", + "catalog.ExternalLineageModelVersionInfo": { + "name": "ExternalLineageModelVersionInfo", + "package": "catalog", + "description": "Represents the model version information in the lineage event.", "fields": { - "palm_api_key": { - "name": "palm_api_key", - "type": "any", - "description": "The Databricks secret key reference for a PaLM API key. If you prefer to\npaste your API key directly, see `palm_api_key_plaintext`. You must\nprovide an API key using one of the following fields: `palm_api_key` or\n`palm_api_key_plaintext`.", + "event_time": { + "name": "event_time", + "type": "string", + "description": "Timestamp of the lineage event.", "required": false }, - "palm_api_key_plaintext": { - "name": "palm_api_key_plaintext", - "type": "any", - "description": "The PaLM API key provided as a plaintext string. If you prefer to\nreference your key using Databricks Secrets, see `palm_api_key`. You must\nprovide an API key using one of the following fields: `palm_api_key` or\n`palm_api_key_plaintext`.", + "model_name": { + "name": "model_name", + "type": "string", + "description": "Name of the model.", + "required": false + }, + "version": { + "name": "version", + "type": "int64", + "description": "Version number of the model.", "required": false } } }, - "serving.RateLimit": { - "name": "RateLimit", - "package": "serving", - "description": "rate limit configuration.", + "catalog.ExternalLineageObject": { + "name": "ExternalLineageObject", + "package": "catalog", + "description": "", "fields": { - "calls": { - "name": "calls", - "type": "any", - "description": "Used to specify how many calls are allowed for a key within the renewal_period.", + "external_metadata": { + "name": "external_metadata", + "type": "*ExternalLineageExternalMetadata", + "description": "", "required": false }, - "key": { - "name": "key", - "type": "any", - "description": "Key field for a serving endpoint rate limit. 
Currently, only 'user' and 'endpoint' are supported, with 'endpoint' being the default if not specified.", + "model_version": { + "name": "model_version", + "type": "*ExternalLineageModelVersion", + "description": "", "required": false }, - "renewal_period": { - "name": "renewal_period", - "type": "any", - "description": "Renewal period field for a serving endpoint rate limit. Currently, only 'minute' is supported.", + "path": { + "name": "path", + "type": "*ExternalLineagePath", + "description": "", + "required": false + }, + "table": { + "name": "table", + "type": "*ExternalLineageTable", + "description": "", "required": false } } }, - "serving.RateLimitKey": { - "name": "RateLimitKey", - "package": "serving", - "description": "rate limit key configuration.", - "fields": {} - }, - "serving.RateLimitRenewalPeriod": { - "name": "RateLimitRenewalPeriod", - "package": "serving", - "description": "rate limit renewal period configuration.", - "fields": {} - }, - "serving.Route": { - "name": "Route", - "package": "serving", - "description": "route configuration.", + "catalog.ExternalLineagePath": { + "name": "ExternalLineagePath", + "package": "catalog", + "description": "", "fields": { - "served_entity_name": { - "name": "served_entity_name", + "url": { + "name": "url", "type": "string", "description": "", "required": false - }, - "served_model_name": { - "name": "served_model_name", - "type": "string", - "description": "The name of the served model this route configures traffic for.", - "required": false - }, - "traffic_percentage": { - "name": "traffic_percentage", - "type": "any", - "description": "The percentage of endpoint traffic to send to this route. It must be an integer between 0 and 100 inclusive.", - "required": false } } }, - "serving.ServedEntityInput": { - "name": "ServedEntityInput", - "package": "serving", - "description": "served entity input configuration.", + "catalog.ExternalLineageRelationship": { + "name": "ExternalLineageRelationship", + "package": "catalog", + "description": "", "fields": { - "entity_name": { - "name": "entity_name", - "type": "string", - "description": "The name of the entity to be served. The entity may be a model in the Databricks Model Registry, a model in the Unity Catalog (UC), or a function of type FEATURE_SPEC in the UC. If it is a UC object, the full name of the object should be given in the form of **catalog_name.schema_name.model_name**.", + "columns": { + "name": "columns", + "type": "[]ColumnRelationship", + "description": "List of column relationships between source and target objects.", "required": false }, - "entity_version": { - "name": "entity_version", - "type": "any", - "description": "", + "id": { + "name": "id", + "type": "string", + "description": "Unique identifier of the external lineage relationship.", "required": false }, - "environment_vars": { - "name": "environment_vars", - "type": "any", - "description": "An object containing a set of optional, user-specified environment variable key-value pairs used for serving this entity. Note: this is an experimental feature and subject to change. 
Example entity environment variables that refer to Databricks secrets: `{\"OPENAI_API_KEY\": \"{{secrets/my_scope/my_key}}\", \"DATABRICKS_TOKEN\": \"{{secrets/my_scope2/my_key2}}\"}`", + "properties": { + "name": "properties", + "type": "map[string]string", + "description": "Key-value properties associated with the external lineage relationship.", "required": false }, - "external_model": { - "name": "external_model", - "type": "any", - "description": "The external model to be served. NOTE: Only one of external_model and (entity_name, entity_version, workload_size, workload_type, and scale_to_zero_enabled) can be specified with the latter set being used for custom model serving for a Databricks registered model. For an existing endpoint with external_model, it cannot be updated to an endpoint without external_model. If the endpoint is created without external_model, users cannot update it to add external_model later. The task type of all external models within an endpoint must be the same.", + "source": { + "name": "source", + "type": "ExternalLineageObject", + "description": "Source object of the external lineage relationship.", "required": false }, - "instance_profile_arn": { - "name": "instance_profile_arn", - "type": "any", - "description": "ARN of the instance profile that the served entity uses to access AWS resources.", + "target": { + "name": "target", + "type": "ExternalLineageObject", + "description": "Target object of the external lineage relationship.", "required": false - }, - "max_provisioned_concurrency": { - "name": "max_provisioned_concurrency", - "type": "any", - "description": "The maximum provisioned concurrency that the endpoint can scale up to. Do not use if workload_size is specified.", + } + } + }, + "catalog.ExternalLineageRelationshipInfo": { + "name": "ExternalLineageRelationshipInfo", + "package": "catalog", + "description": "", + "fields": { + "columns": { + "name": "columns", + "type": "[]ColumnRelationship", + "description": "List of column relationships between source and target objects.", "required": false }, - "max_provisioned_throughput": { - "name": "max_provisioned_throughput", - "type": "any", - "description": "The maximum tokens per second that the endpoint can scale up to.", + "id": { + "name": "id", + "type": "string", + "description": "Unique identifier of the external lineage relationship.", "required": false }, - "min_provisioned_concurrency": { - "name": "min_provisioned_concurrency", - "type": "any", - "description": "The minimum provisioned concurrency that the endpoint can scale down to. Do not use if workload_size is specified.", + "properties": { + "name": "properties", + "type": "map[string]string", + "description": "Key-value properties associated with the external lineage relationship.", "required": false }, - "min_provisioned_throughput": { - "name": "min_provisioned_throughput", - "type": "any", - "description": "The minimum tokens per second that the endpoint can scale down to.", + "source": { + "name": "source", + "type": "ExternalLineageObject", + "description": "Source object of the external lineage relationship.", "required": false }, + "target": { + "name": "target", + "type": "ExternalLineageObject", + "description": "Target object of the external lineage relationship.", + "required": false + } + } + }, + "catalog.ExternalLineageTable": { + "name": "ExternalLineageTable", + "package": "catalog", + "description": "", + "fields": { "name": { "name": "name", - "type": "any", - "description": "The name of a served entity. 
It must be unique across an endpoint. A served entity name can consist of alphanumeric characters, dashes, and underscores. If not specified for an external model, this field defaults to external_model.name, with '.' and ':' replaced with '-', and if not specified for other entities, it defaults to entity_name-entity_version.", + "type": "string", + "description": "", "required": false - }, - "provisioned_model_units": { - "name": "provisioned_model_units", - "type": "any", - "description": "The number of model units provisioned.", + } + } + }, + "catalog.ExternalLineageTableInfo": { + "name": "ExternalLineageTableInfo", + "package": "catalog", + "description": "Represents the table information in the lineage event.", + "fields": { + "catalog_name": { + "name": "catalog_name", + "type": "string", + "description": "Name of Catalog.", "required": false }, - "scale_to_zero_enabled": { - "name": "scale_to_zero_enabled", - "type": "bool", - "description": "Whether the compute resources for the served entity should scale down to zero.", + "event_time": { + "name": "event_time", + "type": "string", + "description": "Timestamp of the lineage event.", "required": false }, - "workload_size": { - "name": "workload_size", - "type": "int", - "description": "The workload size of the served entity. The workload size corresponds to a range of provisioned concurrency that the compute autoscales between. A single unit of provisioned concurrency can process one request at a time. Valid workload sizes are \"Small\" (4 - 4 provisioned concurrency), \"Medium\" (8 - 16 provisioned concurrency), and \"Large\" (16 - 64 provisioned concurrency). Additional custom workload sizes can also be used when available in the workspace. If scale-to-zero is enabled, the lower bound of the provisioned concurrency for each workload size is 0. Do not use if min_provisioned_concurrency and max_provisioned_concurrency are specified.", + "name": { + "name": "name", + "type": "string", + "description": "Name of Table.", "required": false }, - "workload_type": { - "name": "workload_type", - "type": "any", - "description": "The workload type of the served entity. The workload type selects which type of compute to use in the endpoint. The default value for this parameter is \"CPU\". For deep learning workloads, GPU acceleration is available by selecting workload types like GPU_SMALL and others. See the available [GPU types](https://docs.databricks.com/en/machine-learning/model-serving/create-manage-serving-endpoints.html#gpu-workload-types).", + "schema_name": { + "name": "schema_name", + "type": "string", + "description": "Name of Schema.", "required": false } } }, - "serving.ServedModelInput": { - "name": "ServedModelInput", - "package": "serving", - "description": "served model input configuration.", + "catalog.ExternalLocationInfo": { + "name": "ExternalLocationInfo", + "package": "catalog", + "description": "", "fields": { - "environment_vars": { - "name": "environment_vars", - "type": "any", - "description": "An object containing a set of optional, user-specified environment variable key-value pairs used for serving this entity. Note: this is an experimental feature and subject to change. 
Example entity environment variables that refer to Databricks secrets: `{\"OPENAI_API_KEY\": \"{{secrets/my_scope/my_key}}\", \"DATABRICKS_TOKEN\": \"{{secrets/my_scope2/my_key2}}\"}`", + "browse_only": { + "name": "browse_only", + "type": "bool", + "description": "Indicates whether the principal is limited to retrieving metadata for the\nassociated object through the BROWSE privilege when include_browse is\nenabled in the request.", "required": false }, - "instance_profile_arn": { - "name": "instance_profile_arn", - "type": "any", - "description": "ARN of the instance profile that the served entity uses to access AWS resources.", + "comment": { + "name": "comment", + "type": "string", + "description": "User-provided free-form text description.", "required": false }, - "max_provisioned_concurrency": { - "name": "max_provisioned_concurrency", - "type": "any", - "description": "The maximum provisioned concurrency that the endpoint can scale up to. Do not use if workload_size is specified.", + "created_at": { + "name": "created_at", + "type": "int64", + "description": "Time at which this external location was created, in epoch milliseconds.", "required": false }, - "max_provisioned_throughput": { - "name": "max_provisioned_throughput", - "type": "any", - "description": "The maximum tokens per second that the endpoint can scale up to.", + "created_by": { + "name": "created_by", + "type": "string", + "description": "Username of external location creator.", "required": false }, - "min_provisioned_concurrency": { - "name": "min_provisioned_concurrency", - "type": "any", - "description": "The minimum provisioned concurrency that the endpoint can scale down to. Do not use if workload_size is specified.", + "credential_id": { + "name": "credential_id", + "type": "string", + "description": "Unique ID of the location's storage credential.", "required": false }, - "min_provisioned_throughput": { - "name": "min_provisioned_throughput", - "type": "any", - "description": "The minimum tokens per second that the endpoint can scale down to.", + "credential_name": { + "name": "credential_name", + "type": "string", + "description": "Name of the storage credential used with this location.", "required": false }, - "model_name": { - "name": "model_name", - "type": "string", + "enable_file_events": { + "name": "enable_file_events", + "type": "bool", + "description": "Whether to enable file events on this external location.", + "required": false + }, + "encryption_details": { + "name": "encryption_details", + "type": "*EncryptionDetails", "description": "", "required": false }, - "model_version": { - "name": "model_version", - "type": "any", + "fallback": { + "name": "fallback", + "type": "bool", + "description": "Indicates whether fallback mode is enabled for this external location.\nWhen fallback mode is enabled, the access to the location falls back to\ncluster credentials if UC credentials are not sufficient.", + "required": false + }, + "file_event_queue": { + "name": "file_event_queue", + "type": "*FileEventQueue", + "description": "File event queue settings. 
If `enable_file_events` is `true`, must be\ndefined and have exactly one of the documented properties.", + "required": false + }, + "isolation_mode": { + "name": "isolation_mode", + "type": "IsolationMode", "description": "", "required": false }, + "metastore_id": { + "name": "metastore_id", + "type": "string", + "description": "Unique identifier of metastore hosting the external location.", + "required": false + }, "name": { "name": "name", - "type": "any", - "description": "The name of a served entity. It must be unique across an endpoint. A served entity name can consist of alphanumeric characters, dashes, and underscores. If not specified for an external model, this field defaults to external_model.name, with '.' and ':' replaced with '-', and if not specified for other entities, it defaults to entity_name-entity_version.", + "type": "string", + "description": "Name of the external location.", "required": false }, - "provisioned_model_units": { - "name": "provisioned_model_units", - "type": "any", - "description": "The number of model units provisioned.", + "owner": { + "name": "owner", + "type": "string", + "description": "The owner of the external location.", "required": false }, - "scale_to_zero_enabled": { - "name": "scale_to_zero_enabled", + "read_only": { + "name": "read_only", "type": "bool", - "description": "Whether the compute resources for the served entity should scale down to zero.", + "description": "Indicates whether the external location is read-only.", "required": false }, - "workload_size": { - "name": "workload_size", - "type": "int", - "description": "The workload size of the served entity. The workload size corresponds to a range of provisioned concurrency that the compute autoscales between. A single unit of provisioned concurrency can process one request at a time. Valid workload sizes are \"Small\" (4 - 4 provisioned concurrency), \"Medium\" (8 - 16 provisioned concurrency), and \"Large\" (16 - 64 provisioned concurrency). Additional custom workload sizes can also be used when available in the workspace. If scale-to-zero is enabled, the lower bound of the provisioned concurrency for each workload size is 0. Do not use if min_provisioned_concurrency and max_provisioned_concurrency are specified.", + "updated_at": { + "name": "updated_at", + "type": "int64", + "description": "Time at which external location this was last modified, in epoch\nmilliseconds.", "required": false }, - "workload_type": { - "name": "workload_type", - "type": "any", - "description": "The workload type of the served entity. The workload type selects which type of compute to use in the endpoint. The default value for this parameter is \"CPU\". For deep learning workloads, GPU acceleration is available by selecting workload types like GPU_SMALL and others. 
See the available [GPU types](https://docs.databricks.com/en/machine-learning/model-serving/create-manage-serving-endpoints.html#gpu-workload-types).", + "updated_by": { + "name": "updated_by", + "type": "string", + "description": "Username of user who last modified the external location.", + "required": false + }, + "url": { + "name": "url", + "type": "string", + "description": "Path URL of the external location.", "required": false } } }, + "catalog.ExternalMetadata": { + "name": "ExternalMetadata", + "package": "catalog", + "description": "", + "fields": { + "columns": { + "name": "columns", + "type": "[]string", + "description": "List of columns associated with the external metadata object.", + "required": false + }, + "create_time": { + "name": "create_time", + "type": "string", + "description": "Time at which this external metadata object was created.", + "required": false + }, + "created_by": { + "name": "created_by", + "type": "string", + "description": "Username of external metadata object creator.", + "required": false + }, + "description": { + "name": "description", + "type": "string", + "description": "User-provided free-form text description.", + "required": false + }, + "entity_type": { + "name": "entity_type", + "type": "string", + "description": "Type of entity within the external system.", + "required": false + }, + "id": { + "name": "id", + "type": "string", + "description": "Unique identifier of the external metadata object.", + "required": false + }, + "metastore_id": { + "name": "metastore_id", + "type": "string", + "description": "Unique identifier of parent metastore.", + "required": false + }, + "name": { + "name": "name", + "type": "string", + "description": "Name of the external metadata object.", + "required": false + }, + "owner": { + "name": "owner", + "type": "string", + "description": "Owner of the external metadata object.", + "required": false + }, + "properties": { + "name": "properties", + "type": "map[string]string", + "description": "A map of key-value properties attached to the external metadata object.", + "required": false + }, + "system_type": { + "name": "system_type", + "type": "SystemType", + "description": "Type of external system.", + "required": false + }, + "update_time": { + "name": "update_time", + "type": "string", + "description": "Time at which this external metadata object was last modified.", + "required": false + }, + "updated_by": { + "name": "updated_by", + "type": "string", + "description": "Username of user who last modified external metadata object.", + "required": false + }, + "url": { + "name": "url", + "type": "string", + "description": "URL associated with the external metadata object.", + "required": false + } + } + }, + "catalog.FailedStatus": { + "name": "FailedStatus", + "package": "catalog", + "description": "Detailed status of an online table. Shown if the online table is in the\nOFFLINE_FAILED or the ONLINE_PIPELINE_FAILED state.", + "fields": { + "last_processed_commit_version": { + "name": "last_processed_commit_version", + "type": "int64", + "description": "The last source table Delta version that was synced to the online table.\nNote that this Delta version may only be partially synced to the online\ntable. Only populated if the table is still online and available for\nserving.", + "required": false + }, + "timestamp": { + "name": "timestamp", + "type": "string", + "description": "The timestamp of the last time any data was synchronized from the source\ntable to the online table. 
Only populated if the table is still online\nand available for serving.", + "required": false + } + } + }, + "catalog.FileEventQueue": { + "name": "FileEventQueue", + "package": "catalog", + "description": "", + "fields": { + "managed_aqs": { + "name": "managed_aqs", + "type": "*AzureQueueStorage", + "description": "", + "required": false + }, + "managed_pubsub": { + "name": "managed_pubsub", + "type": "*GcpPubsub", + "description": "", + "required": false + }, + "managed_sqs": { + "name": "managed_sqs", + "type": "*AwsSqsQueue", + "description": "", + "required": false + }, + "provided_aqs": { + "name": "provided_aqs", + "type": "*AzureQueueStorage", + "description": "", + "required": false + }, + "provided_pubsub": { + "name": "provided_pubsub", + "type": "*GcpPubsub", + "description": "", + "required": false + }, + "provided_sqs": { + "name": "provided_sqs", + "type": "*AwsSqsQueue", + "description": "", + "required": false + } + } + }, + "catalog.ForeignKeyConstraint": { + "name": "ForeignKeyConstraint", + "package": "catalog", + "description": "", + "fields": { + "child_columns": { + "name": "child_columns", + "type": "[]string", + "description": "Column names for this constraint.", + "required": false + }, + "name": { + "name": "name", + "type": "string", + "description": "The name of the constraint.", + "required": false + }, + "parent_columns": { + "name": "parent_columns", + "type": "[]string", + "description": "Column names for this constraint.", + "required": false + }, + "parent_table": { + "name": "parent_table", + "type": "string", + "description": "The full name of the parent constraint.", + "required": false + }, + "rely": { + "name": "rely", + "type": "bool", + "description": "True if the constraint is RELY, false or unset if NORELY.", + "required": false + } + } + }, + "catalog.FunctionArgument": { + "name": "FunctionArgument", + "package": "catalog", + "description": "", + "fields": { + "alias": { + "name": "alias", + "type": "string", + "description": "The alias of a matched column.", + "required": false + }, + "constant": { + "name": "constant", + "type": "string", + "description": "A constant literal.", + "required": false + } + } + }, + "catalog.FunctionDependency": { + "name": "FunctionDependency", + "package": "catalog", + "description": "A function that is dependent on a SQL object.", + "fields": { + "function_full_name": { + "name": "function_full_name", + "type": "string", + "description": "Full name of the dependent function, in the form of\n__catalog_name__.__schema_name__.__function_name__.", + "required": false + } + } + }, + "catalog.FunctionInfo": { + "name": "FunctionInfo", + "package": "catalog", + "description": "", + "fields": { + "browse_only": { + "name": "browse_only", + "type": "bool", + "description": "Indicates whether the principal is limited to retrieving metadata for the\nassociated object through the BROWSE privilege when include_browse is\nenabled in the request.", + "required": false + }, + "catalog_name": { + "name": "catalog_name", + "type": "string", + "description": "Name of parent Catalog.", + "required": false + }, + "comment": { + "name": "comment", + "type": "string", + "description": "User-provided free-form text description.", + "required": false + }, + "created_at": { + "name": "created_at", + "type": "int64", + "description": "Time at which this function was created, in epoch milliseconds.", + "required": false + }, + "created_by": { + "name": "created_by", + "type": "string", + "description": "Username of function creator.", + 
"required": false + }, + "data_type": { + "name": "data_type", + "type": "ColumnTypeName", + "description": "Scalar function return data type.", + "required": false + }, + "external_language": { + "name": "external_language", + "type": "string", + "description": "External function language.", + "required": false + }, + "external_name": { + "name": "external_name", + "type": "string", + "description": "External function name.", + "required": false + }, + "full_data_type": { + "name": "full_data_type", + "type": "string", + "description": "Pretty printed function data type.", + "required": false + }, + "full_name": { + "name": "full_name", + "type": "string", + "description": "Full name of Function, in form of\n**catalog_name**.**schema_name**.**function_name**", + "required": false + }, + "function_id": { + "name": "function_id", + "type": "string", + "description": "Id of Function, relative to parent schema.", + "required": false + }, + "input_params": { + "name": "input_params", + "type": "*FunctionParameterInfos", + "description": "Function input parameters.", + "required": false + }, + "is_deterministic": { + "name": "is_deterministic", + "type": "bool", + "description": "Whether the function is deterministic.", + "required": false + }, + "is_null_call": { + "name": "is_null_call", + "type": "bool", + "description": "Function null call.", + "required": false + }, + "metastore_id": { + "name": "metastore_id", + "type": "string", + "description": "Unique identifier of parent metastore.", + "required": false + }, + "name": { + "name": "name", + "type": "string", + "description": "Name of function, relative to parent schema.", + "required": false + }, + "owner": { + "name": "owner", + "type": "string", + "description": "Username of current owner of the function.", + "required": false + }, + "parameter_style": { + "name": "parameter_style", + "type": "FunctionInfoParameterStyle", + "description": "Function parameter style. **S** is the value for SQL.", + "required": false + }, + "properties": { + "name": "properties", + "type": "string", + "description": "JSON-serialized key-value pair map, encoded (escaped) as a string.", + "required": false + }, + "return_params": { + "name": "return_params", + "type": "*FunctionParameterInfos", + "description": "Table function return parameters.", + "required": false + }, + "routine_body": { + "name": "routine_body", + "type": "FunctionInfoRoutineBody", + "description": "Function language. 
When **EXTERNAL** is used, the language of the routine\nfunction should be specified in the **external_language** field, and the\n**return_params** of the function cannot be used (as **TABLE** return\ntype is not supported), and the **sql_data_access** field must be\n**NO_SQL**.", + "required": false + }, + "routine_definition": { + "name": "routine_definition", + "type": "string", + "description": "Function body.", + "required": false + }, + "routine_dependencies": { + "name": "routine_dependencies", + "type": "*DependencyList", + "description": "function dependencies.", + "required": false + }, + "schema_name": { + "name": "schema_name", + "type": "string", + "description": "Name of parent Schema relative to its parent Catalog.", + "required": false + }, + "security_type": { + "name": "security_type", + "type": "FunctionInfoSecurityType", + "description": "Function security type.", + "required": false + }, + "specific_name": { + "name": "specific_name", + "type": "string", + "description": "Specific name of the function; Reserved for future use.", + "required": false + }, + "sql_data_access": { + "name": "sql_data_access", + "type": "FunctionInfoSqlDataAccess", + "description": "Function SQL data access.", + "required": false + }, + "sql_path": { + "name": "sql_path", + "type": "string", + "description": "List of schemes whose objects can be referenced without qualification.", + "required": false + }, + "updated_at": { + "name": "updated_at", + "type": "int64", + "description": "Time at which this function was last modified, in epoch milliseconds.", + "required": false + }, + "updated_by": { + "name": "updated_by", + "type": "string", + "description": "Username of user who last modified the function.", + "required": false + } + } + }, + "catalog.FunctionParameterInfo": { + "name": "FunctionParameterInfo", + "package": "catalog", + "description": "", + "fields": { + "comment": { + "name": "comment", + "type": "string", + "description": "User-provided free-form text description.", + "required": false + }, + "name": { + "name": "name", + "type": "string", + "description": "Name of Parameter.", + "required": false + }, + "parameter_default": { + "name": "parameter_default", + "type": "string", + "description": "Default value of the parameter.", + "required": false + }, + "parameter_mode": { + "name": "parameter_mode", + "type": "FunctionParameterMode", + "description": "Function parameter mode.", + "required": false + }, + "parameter_type": { + "name": "parameter_type", + "type": "FunctionParameterType", + "description": "Function parameter type.", + "required": false + }, + "position": { + "name": "position", + "type": "int", + "description": "Ordinal position of column (starting at position 0).", + "required": false + }, + "type_interval_type": { + "name": "type_interval_type", + "type": "string", + "description": "Format of IntervalType.", + "required": false + }, + "type_json": { + "name": "type_json", + "type": "string", + "description": "Full data type spec, JSON-serialized.", + "required": false + }, + "type_name": { + "name": "type_name", + "type": "ColumnTypeName", + "description": "Name of type (INT, STRUCT, MAP, etc.)", + "required": false + }, + "type_precision": { + "name": "type_precision", + "type": "int", + "description": "Digits of precision; required on Create for DecimalTypes.", + "required": false + }, + "type_scale": { + "name": "type_scale", + "type": "int", + "description": "Digits to right of decimal; Required on Create for DecimalTypes.", + "required": false + }, + 
"type_text": { + "name": "type_text", + "type": "string", + "description": "Full data type spec, SQL/catalogString text.", + "required": false + } + } + }, + "catalog.FunctionParameterInfos": { + "name": "FunctionParameterInfos", + "package": "catalog", + "description": "", + "fields": { + "parameters": { + "name": "parameters", + "type": "[]FunctionParameterInfo", + "description": "", + "required": false + } + } + }, + "catalog.GcpOauthToken": { + "name": "GcpOauthToken", + "package": "catalog", + "description": "GCP temporary credentials for API authentication. Read more at\nhttps://developers.google.com/identity/protocols/oauth2/service-account", + "fields": { + "oauth_token": { + "name": "oauth_token", + "type": "string", + "description": "", + "required": false + } + } + }, + "catalog.GcpPubsub": { + "name": "GcpPubsub", + "package": "catalog", + "description": "", + "fields": { + "managed_resource_id": { + "name": "managed_resource_id", + "type": "string", + "description": "Unique identifier included in the name of file events managed cloud\nresources.", + "required": false + }, + "subscription_name": { + "name": "subscription_name", + "type": "string", + "description": "The Pub/Sub subscription name in the format\nprojects/{project}/subscriptions/{subscription name} Required for\nprovided_pubsub.", + "required": false + } + } + }, + "catalog.GenerateTemporaryPathCredentialRequest": { + "name": "GenerateTemporaryPathCredentialRequest", + "package": "catalog", + "description": "", + "fields": { + "dry_run": { + "name": "dry_run", + "type": "bool", + "description": "Optional. When set to true, the service will not validate that the\ngenerated credentials can perform write operations, therefore no new\npaths will be created and the response will not contain valid\ncredentials. Defaults to false.", + "required": false + }, + "operation": { + "name": "operation", + "type": "PathOperation", + "description": "The operation being performed on the path.", + "required": false + }, + "url": { + "name": "url", + "type": "string", + "description": "URL for path-based access.", + "required": false + } + } + }, + "catalog.GenerateTemporaryPathCredentialResponse": { + "name": "GenerateTemporaryPathCredentialResponse", + "package": "catalog", + "description": "", + "fields": { + "aws_temp_credentials": { + "name": "aws_temp_credentials", + "type": "*AwsCredentials", + "description": "", + "required": false + }, + "azure_aad": { + "name": "azure_aad", + "type": "*AzureActiveDirectoryToken", + "description": "", + "required": false + }, + "azure_user_delegation_sas": { + "name": "azure_user_delegation_sas", + "type": "*AzureUserDelegationSas", + "description": "", + "required": false + }, + "expiration_time": { + "name": "expiration_time", + "type": "int64", + "description": "Server time when the credential will expire, in epoch milliseconds. 
The\nAPI client is advised to cache the credential given this expiration time.", + "required": false + }, + "gcp_oauth_token": { + "name": "gcp_oauth_token", + "type": "*GcpOauthToken", + "description": "", + "required": false + }, + "r2_temp_credentials": { + "name": "r2_temp_credentials", + "type": "*R2Credentials", + "description": "", + "required": false + }, + "url": { + "name": "url", + "type": "string", + "description": "The URL of the storage path accessible by the temporary credential.", + "required": false + } + } + }, + "catalog.GenerateTemporaryServiceCredentialAzureOptions": { + "name": "GenerateTemporaryServiceCredentialAzureOptions", + "package": "catalog", + "description": "The Azure cloud options to customize the requested temporary credential", + "fields": { + "resources": { + "name": "resources", + "type": "[]string", + "description": "The resources to which the temporary Azure credential should apply. These\nresources are the scopes that are passed to the token provider (see\nhttps://learn.microsoft.com/python/api/azure-core/azure.core.credentials.tokencredential?view=azure-python)", + "required": false + } + } + }, + "catalog.GenerateTemporaryServiceCredentialGcpOptions": { + "name": "GenerateTemporaryServiceCredentialGcpOptions", + "package": "catalog", + "description": "The GCP cloud options to customize the requested temporary credential", + "fields": { + "scopes": { + "name": "scopes", + "type": "[]string", + "description": "The scopes to which the temporary GCP credential should apply. These\nresources are the scopes that are passed to the token provider (see\nhttps://google-auth.readthedocs.io/en/latest/reference/google.auth.html#google.auth.credentials.Credentials)", + "required": false + } + } + }, + "catalog.GenerateTemporaryServiceCredentialRequest": { + "name": "GenerateTemporaryServiceCredentialRequest", + "package": "catalog", + "description": "", + "fields": { + "azure_options": { + "name": "azure_options", + "type": "*GenerateTemporaryServiceCredentialAzureOptions", + "description": "", + "required": false + }, + "credential_name": { + "name": "credential_name", + "type": "string", + "description": "The name of the service credential used to generate a temporary\ncredential", + "required": false + }, + "gcp_options": { + "name": "gcp_options", + "type": "*GenerateTemporaryServiceCredentialGcpOptions", + "description": "", + "required": false + } + } + }, + "catalog.GenerateTemporaryTableCredentialRequest": { + "name": "GenerateTemporaryTableCredentialRequest", + "package": "catalog", + "description": "", + "fields": { + "operation": { + "name": "operation", + "type": "TableOperation", + "description": "The operation performed against the table data, either READ or\nREAD_WRITE. 
If READ_WRITE is specified, the credentials returned will\nhave write permissions, otherwise, it will be read only.", + "required": false + }, + "table_id": { + "name": "table_id", + "type": "string", + "description": "UUID of the table to read or write.", + "required": false + } + } + }, + "catalog.GenerateTemporaryTableCredentialResponse": { + "name": "GenerateTemporaryTableCredentialResponse", + "package": "catalog", + "description": "", + "fields": { + "aws_temp_credentials": { + "name": "aws_temp_credentials", + "type": "*AwsCredentials", + "description": "", + "required": false + }, + "azure_aad": { + "name": "azure_aad", + "type": "*AzureActiveDirectoryToken", + "description": "", + "required": false + }, + "azure_user_delegation_sas": { + "name": "azure_user_delegation_sas", + "type": "*AzureUserDelegationSas", + "description": "", + "required": false + }, + "expiration_time": { + "name": "expiration_time", + "type": "int64", + "description": "Server time when the credential will expire, in epoch milliseconds. The\nAPI client is advised to cache the credential given this expiration time.", + "required": false + }, + "gcp_oauth_token": { + "name": "gcp_oauth_token", + "type": "*GcpOauthToken", + "description": "", + "required": false + }, + "r2_temp_credentials": { + "name": "r2_temp_credentials", + "type": "*R2Credentials", + "description": "", + "required": false + }, + "url": { + "name": "url", + "type": "string", + "description": "The URL of the storage path accessible by the temporary credential.", + "required": false + } + } + }, + "catalog.GetCatalogWorkspaceBindingsResponse": { + "name": "GetCatalogWorkspaceBindingsResponse", + "package": "catalog", + "description": "", + "fields": { + "workspaces": { + "name": "workspaces", + "type": "[]int64", + "description": "A list of workspace IDs", + "required": false + } + } + }, + "catalog.GetMetastoreSummaryResponse": { + "name": "GetMetastoreSummaryResponse", + "package": "catalog", + "description": "", + "fields": { + "cloud": { + "name": "cloud", + "type": "string", + "description": "Cloud vendor of the metastore home shard (e.g., `aws`, `azure`, `gcp`).", + "required": false + }, + "created_at": { + "name": "created_at", + "type": "int64", + "description": "Time at which this metastore was created, in epoch milliseconds.", + "required": false + }, + "created_by": { + "name": "created_by", + "type": "string", + "description": "Username of metastore creator.", + "required": false + }, + "default_data_access_config_id": { + "name": "default_data_access_config_id", + "type": "string", + "description": "Unique identifier of the metastore's (Default) Data Access Configuration.", + "required": false + }, + "delta_sharing_organization_name": { + "name": "delta_sharing_organization_name", + "type": "string", + "description": "The organization name of a Delta Sharing entity, to be used in\nDatabricks-to-Databricks Delta Sharing as the official name.", + "required": false + }, + "delta_sharing_recipient_token_lifetime_in_seconds": { + "name": "delta_sharing_recipient_token_lifetime_in_seconds", + "type": "int64", + "description": "The lifetime of delta sharing recipient token in seconds.", + "required": false + }, + "delta_sharing_scope": { + "name": "delta_sharing_scope", + "type": "DeltaSharingScopeEnum", + "description": "The scope of Delta Sharing enabled for the metastore.", + "required": false + }, + "external_access_enabled": { + "name": "external_access_enabled", + "type": "bool", + "description": "Whether to allow non-DBR clients 
to directly access entities under the\nmetastore.", + "required": false + }, + "global_metastore_id": { + "name": "global_metastore_id", + "type": "string", + "description": "Globally unique metastore ID across clouds and regions, of the form\n`cloud:region:metastore_id`.", + "required": false + }, + "metastore_id": { + "name": "metastore_id", + "type": "string", + "description": "Unique identifier of metastore.", + "required": false + }, + "name": { + "name": "name", + "type": "string", + "description": "The user-specified name of the metastore.", + "required": false + }, + "owner": { + "name": "owner", + "type": "string", + "description": "The owner of the metastore.", + "required": false + }, + "privilege_model_version": { + "name": "privilege_model_version", + "type": "string", + "description": "Privilege model version of the metastore, of the form `major.minor`\n(e.g., `1.0`).", + "required": false + }, + "region": { + "name": "region", + "type": "string", + "description": "Cloud region which the metastore serves (e.g., `us-west-2`, `westus`).", + "required": false + }, + "storage_root": { + "name": "storage_root", + "type": "string", + "description": "The storage root URL for metastore", + "required": false + }, + "storage_root_credential_id": { + "name": "storage_root_credential_id", + "type": "string", + "description": "UUID of storage credential to access the metastore storage_root.", + "required": false + }, + "storage_root_credential_name": { + "name": "storage_root_credential_name", + "type": "string", + "description": "Name of the storage credential to access the metastore storage_root.", + "required": false + }, + "updated_at": { + "name": "updated_at", + "type": "int64", + "description": "Time at which the metastore was last modified, in epoch milliseconds.", + "required": false + }, + "updated_by": { + "name": "updated_by", + "type": "string", + "description": "Username of user who last modified the metastore.", + "required": false + } + } + }, + "catalog.GetPermissionsResponse": { + "name": "GetPermissionsResponse", + "package": "catalog", + "description": "", + "fields": { + "next_page_token": { + "name": "next_page_token", + "type": "string", + "description": "Opaque token to retrieve the next page of results. Absent if there are no\nmore pages. __page_token__ should be set to this value for the next\nrequest (for the next page of results).", + "required": false + }, + "privilege_assignments": { + "name": "privilege_assignments", + "type": "[]PrivilegeAssignment", + "description": "The privileges assigned to each principal", + "required": false + } + } + }, + "catalog.GetQuotaResponse": { + "name": "GetQuotaResponse", + "package": "catalog", + "description": "", + "fields": { + "quota_info": { + "name": "quota_info", + "type": "*QuotaInfo", + "description": "The returned QuotaInfo.", + "required": false + } + } + }, + "catalog.GetWorkspaceBindingsResponse": { + "name": "GetWorkspaceBindingsResponse", + "package": "catalog", + "description": "", + "fields": { + "bindings": { + "name": "bindings", + "type": "[]WorkspaceBinding", + "description": "List of workspace bindings", + "required": false + }, + "next_page_token": { + "name": "next_page_token", + "type": "string", + "description": "Opaque token to retrieve the next page of results. Absent if there are no\nmore pages. 
__page_token__ should be set to this value for the next\nrequest (for the next page of results).", + "required": false + } + } + }, + "catalog.ListAccountMetastoreAssignmentsResponse": { + "name": "ListAccountMetastoreAssignmentsResponse", + "package": "catalog", + "description": "The metastore assignments were successfully returned.", + "fields": { + "workspace_ids": { + "name": "workspace_ids", + "type": "[]int64", + "description": "", + "required": false + } + } + }, + "catalog.ListAccountStorageCredentialsResponse": { + "name": "ListAccountStorageCredentialsResponse", + "package": "catalog", + "description": "The metastore storage credentials were successfully returned.", + "fields": { + "storage_credentials": { + "name": "storage_credentials", + "type": "[]StorageCredentialInfo", + "description": "An array of metastore storage credentials.", + "required": false + } + } + }, + "catalog.ListCatalogsResponse": { + "name": "ListCatalogsResponse", + "package": "catalog", + "description": "", + "fields": { + "catalogs": { + "name": "catalogs", + "type": "[]CatalogInfo", + "description": "An array of catalog information objects.", + "required": false + }, + "next_page_token": { + "name": "next_page_token", + "type": "string", + "description": "Opaque token to retrieve the next page of results. Absent if there are no\nmore pages. __page_token__ should be set to this value for the next\nrequest (for the next page of results).", + "required": false + } + } + }, + "catalog.ListConnectionsResponse": { + "name": "ListConnectionsResponse", + "package": "catalog", + "description": "", + "fields": { + "connections": { + "name": "connections", + "type": "[]ConnectionInfo", + "description": "An array of connection information objects.", + "required": false + }, + "next_page_token": { + "name": "next_page_token", + "type": "string", + "description": "Opaque token to retrieve the next page of results. Absent if there are no\nmore pages. __page_token__ should be set to this value for the next\nrequest (for the next page of results).", + "required": false + } + } + }, + "catalog.ListCredentialsResponse": { + "name": "ListCredentialsResponse", + "package": "catalog", + "description": "", + "fields": { + "credentials": { + "name": "credentials", + "type": "[]CredentialInfo", + "description": "", + "required": false + }, + "next_page_token": { + "name": "next_page_token", + "type": "string", + "description": "Opaque token to retrieve the next page of results. Absent if there are no\nmore pages. __page_token__ should be set to this value for the next\nrequest (for the next page of results).", + "required": false + } + } + }, + "catalog.ListEntityTagAssignmentsResponse": { + "name": "ListEntityTagAssignmentsResponse", + "package": "catalog", + "description": "", + "fields": { + "next_page_token": { + "name": "next_page_token", + "type": "string", + "description": "Optional. 
Pagination token for retrieving the next page of results", + "required": false + }, + "tag_assignments": { + "name": "tag_assignments", + "type": "[]EntityTagAssignment", + "description": "The list of tag assignments", + "required": false + } + } + }, + "catalog.ListExternalLineageRelationshipsResponse": { + "name": "ListExternalLineageRelationshipsResponse", + "package": "catalog", + "description": "", + "fields": { + "external_lineage_relationships": { + "name": "external_lineage_relationships", + "type": "[]ExternalLineageInfo", + "description": "", + "required": false + }, + "next_page_token": { + "name": "next_page_token", + "type": "string", + "description": "", + "required": false + } + } + }, + "catalog.ListExternalLocationsResponse": { + "name": "ListExternalLocationsResponse", + "package": "catalog", + "description": "", + "fields": { + "external_locations": { + "name": "external_locations", + "type": "[]ExternalLocationInfo", + "description": "An array of external locations.", + "required": false + }, + "next_page_token": { + "name": "next_page_token", + "type": "string", + "description": "Opaque token to retrieve the next page of results. Absent if there are no\nmore pages. __page_token__ should be set to this value for the next\nrequest (for the next page of results).", + "required": false + } + } + }, + "catalog.ListExternalMetadataResponse": { + "name": "ListExternalMetadataResponse", + "package": "catalog", + "description": "", + "fields": { + "external_metadata": { + "name": "external_metadata", + "type": "[]ExternalMetadata", + "description": "", + "required": false + }, + "next_page_token": { + "name": "next_page_token", + "type": "string", + "description": "", + "required": false + } + } + }, + "catalog.ListFunctionsResponse": { + "name": "ListFunctionsResponse", + "package": "catalog", + "description": "", + "fields": { + "functions": { + "name": "functions", + "type": "[]FunctionInfo", + "description": "An array of function information objects.", + "required": false + }, + "next_page_token": { + "name": "next_page_token", + "type": "string", + "description": "Opaque token to retrieve the next page of results. Absent if there are no\nmore pages. __page_token__ should be set to this value for the next\nrequest (for the next page of results).", + "required": false + } + } + }, + "catalog.ListMetastoresResponse": { + "name": "ListMetastoresResponse", + "package": "catalog", + "description": "", + "fields": { + "metastores": { + "name": "metastores", + "type": "[]MetastoreInfo", + "description": "An array of metastore information objects.", + "required": false + }, + "next_page_token": { + "name": "next_page_token", + "type": "string", + "description": "Opaque token to retrieve the next page of results. Absent if there are no\nmore pages. __page_token__ should be set to this value for the next\nrequest (for the next page of results).", + "required": false + } + } + }, + "catalog.ListModelVersionsResponse": { + "name": "ListModelVersionsResponse", + "package": "catalog", + "description": "", + "fields": { + "model_versions": { + "name": "model_versions", + "type": "[]ModelVersionInfo", + "description": "", + "required": false + }, + "next_page_token": { + "name": "next_page_token", + "type": "string", + "description": "Opaque token to retrieve the next page of results. Absent if there are no\nmore pages. 
__page_token__ should be set to this value for the next\nrequest (for the next page of results).", + "required": false + } + } + }, + "catalog.ListPoliciesResponse": { + "name": "ListPoliciesResponse", + "package": "catalog", + "description": "", + "fields": { + "next_page_token": { + "name": "next_page_token", + "type": "string", + "description": "Optional opaque token for continuing pagination. `page_token` should be\nset to this value for the next request to retrieve the next page of\nresults.", + "required": false + }, + "policies": { + "name": "policies", + "type": "[]PolicyInfo", + "description": "The list of retrieved policies.", + "required": false + } + } + }, + "catalog.ListQuotasResponse": { + "name": "ListQuotasResponse", + "package": "catalog", + "description": "", + "fields": { + "next_page_token": { + "name": "next_page_token", + "type": "string", + "description": "Opaque token to retrieve the next page of results. Absent if there are no\nmore pages. __page_token__ should be set to this value for the next\nrequest.", + "required": false + }, + "quotas": { + "name": "quotas", + "type": "[]QuotaInfo", + "description": "An array of returned QuotaInfos.", + "required": false + } + } + }, + "catalog.ListRegisteredModelsResponse": { + "name": "ListRegisteredModelsResponse", + "package": "catalog", + "description": "", + "fields": { + "next_page_token": { + "name": "next_page_token", + "type": "string", + "description": "Opaque token for pagination. Omitted if there are no more results.\npage_token should be set to this value for fetching the next page.", + "required": false + }, + "registered_models": { + "name": "registered_models", + "type": "[]RegisteredModelInfo", + "description": "", + "required": false + } + } + }, + "catalog.ListSchemasResponse": { + "name": "ListSchemasResponse", + "package": "catalog", + "description": "", + "fields": { + "next_page_token": { + "name": "next_page_token", + "type": "string", + "description": "Opaque token to retrieve the next page of results. Absent if there are no\nmore pages. __page_token__ should be set to this value for the next\nrequest (for the next page of results).", + "required": false + }, + "schemas": { + "name": "schemas", + "type": "[]SchemaInfo", + "description": "An array of schema information objects.", + "required": false + } + } + }, + "catalog.ListStorageCredentialsResponse": { + "name": "ListStorageCredentialsResponse", + "package": "catalog", + "description": "", + "fields": { + "next_page_token": { + "name": "next_page_token", + "type": "string", + "description": "Opaque token to retrieve the next page of results. Absent if there are no\nmore pages. __page_token__ should be set to this value for the next\nrequest (for the next page of results).", + "required": false + }, + "storage_credentials": { + "name": "storage_credentials", + "type": "[]StorageCredentialInfo", + "description": "", + "required": false + } + } + }, + "catalog.ListSystemSchemasResponse": { + "name": "ListSystemSchemasResponse", + "package": "catalog", + "description": "", + "fields": { + "next_page_token": { + "name": "next_page_token", + "type": "string", + "description": "Opaque token to retrieve the next page of results. Absent if there are no\nmore pages. 
__page_token__ should be set to this value for the next\nrequest (for the next page of results).", + "required": false + }, + "schemas": { + "name": "schemas", + "type": "[]SystemSchemaInfo", + "description": "An array of system schema information objects.", + "required": false + } + } + }, + "catalog.ListTableSummariesResponse": { + "name": "ListTableSummariesResponse", + "package": "catalog", + "description": "", + "fields": { + "next_page_token": { + "name": "next_page_token", + "type": "string", + "description": "Opaque token to retrieve the next page of results. Absent if there are no\nmore pages. __page_token__ should be set to this value for the next\nrequest (for the next page of results).", + "required": false + }, + "tables": { + "name": "tables", + "type": "[]TableSummary", + "description": "List of table summaries.", + "required": false + } + } + }, + "catalog.ListTablesResponse": { + "name": "ListTablesResponse", + "package": "catalog", + "description": "", + "fields": { + "next_page_token": { + "name": "next_page_token", + "type": "string", + "description": "Opaque token to retrieve the next page of results. Absent if there are no\nmore pages. __page_token__ should be set to this value for the next\nrequest (for the next page of results).", + "required": false + }, + "tables": { + "name": "tables", + "type": "[]TableInfo", + "description": "An array of table information objects.", + "required": false + } + } + }, + "catalog.ListVolumesResponseContent": { + "name": "ListVolumesResponseContent", + "package": "catalog", + "description": "", + "fields": { + "next_page_token": { + "name": "next_page_token", + "type": "string", + "description": "Opaque token to retrieve the next page of results. Absent if there are no\nmore pages. __page_token__ should be set to this value for the next\nrequest to retrieve the next page of results.", + "required": false + }, + "volumes": { + "name": "volumes", + "type": "[]VolumeInfo", + "description": "", + "required": false + } + } + }, + "catalog.MatchColumn": { + "name": "MatchColumn", + "package": "catalog", + "description": "", + "fields": { + "alias": { + "name": "alias", + "type": "string", + "description": "Optional alias of the matched column.", + "required": false + }, + "condition": { + "name": "condition", + "type": "string", + "description": "The condition expression used to match a table column.", + "required": false + } + } + }, + "catalog.MetastoreAssignment": { + "name": "MetastoreAssignment", + "package": "catalog", + "description": "", + "fields": { + "default_catalog_name": { + "name": "default_catalog_name", + "type": "string", + "description": "The name of the default catalog in the metastore. This field is\ndeprecated. 
Please use \"Default Namespace API\" to configure the default\ncatalog for a Databricks workspace.", + "required": false + }, + "metastore_id": { + "name": "metastore_id", + "type": "string", + "description": "The unique ID of the metastore.", + "required": false + }, + "workspace_id": { + "name": "workspace_id", + "type": "int64", + "description": "The unique ID of the Databricks workspace.", + "required": false + } + } + }, + "catalog.MetastoreInfo": { + "name": "MetastoreInfo", + "package": "catalog", + "description": "", + "fields": { + "cloud": { + "name": "cloud", + "type": "string", + "description": "Cloud vendor of the metastore home shard (e.g., `aws`, `azure`, `gcp`).", + "required": false + }, + "created_at": { + "name": "created_at", + "type": "int64", + "description": "Time at which this metastore was created, in epoch milliseconds.", + "required": false + }, + "created_by": { + "name": "created_by", + "type": "string", + "description": "Username of metastore creator.", + "required": false + }, + "default_data_access_config_id": { + "name": "default_data_access_config_id", + "type": "string", + "description": "Unique identifier of the metastore's (Default) Data Access Configuration.", + "required": false + }, + "delta_sharing_organization_name": { + "name": "delta_sharing_organization_name", + "type": "string", + "description": "The organization name of a Delta Sharing entity, to be used in\nDatabricks-to-Databricks Delta Sharing as the official name.", + "required": false + }, + "delta_sharing_recipient_token_lifetime_in_seconds": { + "name": "delta_sharing_recipient_token_lifetime_in_seconds", + "type": "int64", + "description": "The lifetime of delta sharing recipient token in seconds.", + "required": false + }, + "delta_sharing_scope": { + "name": "delta_sharing_scope", + "type": "DeltaSharingScopeEnum", + "description": "The scope of Delta Sharing enabled for the metastore.", + "required": false + }, + "external_access_enabled": { + "name": "external_access_enabled", + "type": "bool", + "description": "Whether to allow non-DBR clients to directly access entities under the\nmetastore.", + "required": false + }, + "global_metastore_id": { + "name": "global_metastore_id", + "type": "string", + "description": "Globally unique metastore ID across clouds and regions, of the form\n`cloud:region:metastore_id`.", + "required": false + }, + "metastore_id": { + "name": "metastore_id", + "type": "string", + "description": "Unique identifier of metastore.", + "required": false + }, + "name": { + "name": "name", + "type": "string", + "description": "The user-specified name of the metastore.", + "required": false + }, + "owner": { + "name": "owner", + "type": "string", + "description": "The owner of the metastore.", + "required": false + }, + "privilege_model_version": { + "name": "privilege_model_version", + "type": "string", + "description": "Privilege model version of the metastore, of the form `major.minor`\n(e.g., `1.0`).", + "required": false + }, + "region": { + "name": "region", + "type": "string", + "description": "Cloud region which the metastore serves (e.g., `us-west-2`, `westus`).", + "required": false + }, + "storage_root": { + "name": "storage_root", + "type": "string", + "description": "The storage root URL for metastore", + "required": false + }, + "storage_root_credential_id": { + "name": "storage_root_credential_id", + "type": "string", + "description": "UUID of storage credential to access the metastore storage_root.", + "required": false + }, + 
"storage_root_credential_name": { + "name": "storage_root_credential_name", + "type": "string", + "description": "Name of the storage credential to access the metastore storage_root.", + "required": false + }, + "updated_at": { + "name": "updated_at", + "type": "int64", + "description": "Time at which the metastore was last modified, in epoch milliseconds.", + "required": false + }, + "updated_by": { + "name": "updated_by", + "type": "string", + "description": "Username of user who last modified the metastore.", + "required": false + } + } + }, + "catalog.ModelVersionInfo": { + "name": "ModelVersionInfo", + "package": "catalog", + "description": "", + "fields": { + "aliases": { + "name": "aliases", + "type": "[]RegisteredModelAlias", + "description": "List of aliases associated with the model version", + "required": false + }, + "catalog_name": { + "name": "catalog_name", + "type": "string", + "description": "The name of the catalog containing the model version", + "required": false + }, + "comment": { + "name": "comment", + "type": "string", + "description": "The comment attached to the model version", + "required": false + }, + "created_at": { + "name": "created_at", + "type": "int64", + "description": "", + "required": false + }, + "created_by": { + "name": "created_by", + "type": "string", + "description": "The identifier of the user who created the model version", + "required": false + }, + "id": { + "name": "id", + "type": "string", + "description": "The unique identifier of the model version", + "required": false + }, + "metastore_id": { + "name": "metastore_id", + "type": "string", + "description": "The unique identifier of the metastore containing the model version", + "required": false + }, + "model_name": { + "name": "model_name", + "type": "string", + "description": "The name of the parent registered model of the model version, relative to\nparent schema", + "required": false + }, + "model_version_dependencies": { + "name": "model_version_dependencies", + "type": "*DependencyList", + "description": "Model version dependencies, for feature-store packaged models", + "required": false + }, + "run_id": { + "name": "run_id", + "type": "string", + "description": "MLflow run ID used when creating the model version, if ``source`` was\ngenerated by an experiment run stored in an MLflow tracking server", + "required": false + }, + "run_workspace_id": { + "name": "run_workspace_id", + "type": "int", + "description": "ID of the Databricks workspace containing the MLflow run that generated\nthis model version, if applicable", + "required": false + }, + "schema_name": { + "name": "schema_name", + "type": "string", + "description": "The name of the schema containing the model version, relative to parent\ncatalog", + "required": false + }, + "source": { + "name": "source", + "type": "string", + "description": "URI indicating the location of the source artifacts (files) for the model\nversion", + "required": false + }, + "status": { + "name": "status", + "type": "ModelVersionInfoStatus", + "description": "Current status of the model version. Newly created model versions start\nin PENDING_REGISTRATION status, then move to READY status once the model\nversion files are uploaded and the model version is finalized. 
Only model\nversions in READY status can be loaded for inference or served.", + "required": false + }, + "storage_location": { + "name": "storage_location", + "type": "string", + "description": "The storage location on the cloud under which model version data files\nare stored", + "required": false + }, + "updated_at": { + "name": "updated_at", + "type": "int64", + "description": "", + "required": false + }, + "updated_by": { + "name": "updated_by", + "type": "string", + "description": "The identifier of the user who updated the model version last time", + "required": false + }, + "version": { + "name": "version", + "type": "int", + "description": "Integer model version number, used to reference the model version in API\nrequests.", + "required": false + } + } + }, + "catalog.MonitorCronSchedule": { + "name": "MonitorCronSchedule", + "package": "catalog", + "description": "", + "fields": { + "pause_status": { + "name": "pause_status", + "type": "MonitorCronSchedulePauseStatus", + "description": "Read only field that indicates whether a schedule is paused or not.", + "required": false + }, + "quartz_cron_expression": { + "name": "quartz_cron_expression", + "type": "string", + "description": "The expression that determines when to run the monitor. See [examples].\n\n[examples]: https://www.quartz-scheduler.org/documentation/quartz-2.3.0/tutorials/crontrigger.html", + "required": false + }, + "timezone_id": { + "name": "timezone_id", + "type": "string", + "description": "The timezone id (e.g., ``PST``) in which to evaluate the quartz\nexpression.", + "required": false + } + } + }, + "catalog.MonitorCronSchedulePauseStatus": { + "name": "MonitorCronSchedulePauseStatus", + "package": "catalog", + "description": "Source link: https://src.dev.databricks.com/databricks/universe/-/blob/elastic-spark-common/api/messages/schedule.proto\nMonitoring workflow schedule pause status.", + "fields": {} + }, + "catalog.MonitorDataClassificationConfig": { + "name": "MonitorDataClassificationConfig", + "package": "catalog", + "description": "Data classification related configuration.", + "fields": { + "enabled": { + "name": "enabled", + "type": "bool", + "description": "Whether to enable data classification.", + "required": false + } + } + }, + "catalog.MonitorDestination": { + "name": "MonitorDestination", + "package": "catalog", + "description": "", + "fields": { + "email_addresses": { + "name": "email_addresses", + "type": "[]string", + "description": "The list of email addresses to send the notification to. 
A maximum of 5\nemail addresses is supported.", + "required": false + } + } + }, + "catalog.MonitorInferenceLog": { + "name": "MonitorInferenceLog", + "package": "catalog", + "description": "", + "fields": { + "granularities": { + "name": "granularities", + "type": "[]string", + "description": "List of granularities to use when aggregating data into time windows\nbased on their timestamp.", + "required": false + }, + "label_col": { + "name": "label_col", + "type": "string", + "description": "Column for the label.", + "required": false + }, + "model_id_col": { + "name": "model_id_col", + "type": "string", + "description": "Column for the model identifier.", + "required": false + }, + "prediction_col": { + "name": "prediction_col", + "type": "string", + "description": "Column for the prediction.", + "required": false + }, + "prediction_proba_col": { + "name": "prediction_proba_col", + "type": "string", + "description": "Column for prediction probabilities", + "required": false + }, + "problem_type": { + "name": "problem_type", + "type": "MonitorInferenceLogProblemType", + "description": "Problem type the model aims to solve.", + "required": false + }, + "timestamp_col": { + "name": "timestamp_col", + "type": "string", + "description": "Column for the timestamp.", + "required": false + } + } + }, + "catalog.MonitorInfo": { + "name": "MonitorInfo", + "package": "catalog", + "description": "", + "fields": { + "assets_dir": { + "name": "assets_dir", + "type": "string", + "description": "[Create:REQ Update:IGN] Field for specifying the absolute path to a\ncustom directory to store data-monitoring assets. Normally prepopulated\nto a default user location via UI and Python APIs.", + "required": false + }, + "baseline_table_name": { + "name": "baseline_table_name", + "type": "string", + "description": "[Create:OPT Update:OPT] Baseline table name. Baseline data is used to\ncompute drift from the data in the monitored `table_name`. The baseline\ntable and the monitored table shall have the same schema.", + "required": false + }, + "custom_metrics": { + "name": "custom_metrics", + "type": "[]MonitorMetric", + "description": "[Create:OPT Update:OPT] Custom metrics.", + "required": false + }, + "dashboard_id": { + "name": "dashboard_id", + "type": "string", + "description": "[Create:ERR Update:OPT] Id of dashboard that visualizes the computed\nmetrics. This can be empty if the monitor is in PENDING state.", + "required": false + }, + "data_classification_config": { + "name": "data_classification_config", + "type": "*MonitorDataClassificationConfig", + "description": "[Create:OPT Update:OPT] Data classification related config.", + "required": false + }, + "drift_metrics_table_name": { + "name": "drift_metrics_table_name", + "type": "string", + "description": "[Create:ERR Update:IGN] Table that stores drift metrics data. Format:\n`catalog.schema.table_name`.", + "required": false + }, + "inference_log": { + "name": "inference_log", + "type": "*MonitorInferenceLog", + "description": "", + "required": false + }, + "latest_monitor_failure_msg": { + "name": "latest_monitor_failure_msg", + "type": "string", + "description": "[Create:ERR Update:IGN] The latest error message for a monitor failure.", + "required": false + }, + "monitor_version": { + "name": "monitor_version", + "type": "int64", + "description": "[Create:ERR Update:IGN] Represents the current monitor configuration\nversion in use. The version will be represented in a numeric fashion\n(1,2,3...). 
The field has flexibility to take on negative values, which\ncan indicate corrupted monitor_version numbers.", + "required": false + }, + "notifications": { + "name": "notifications", + "type": "*MonitorNotifications", + "description": "[Create:OPT Update:OPT] Field for specifying notification settings.", + "required": false + }, + "output_schema_name": { + "name": "output_schema_name", + "type": "string", + "description": "[Create:REQ Update:REQ] Schema where output tables are created. Needs to\nbe in 2-level format {catalog}.{schema}", + "required": false + }, + "profile_metrics_table_name": { + "name": "profile_metrics_table_name", + "type": "string", + "description": "[Create:ERR Update:IGN] Table that stores profile metrics data. Format:\n`catalog.schema.table_name`.", + "required": false + }, + "schedule": { + "name": "schedule", + "type": "*MonitorCronSchedule", + "description": "[Create:OPT Update:OPT] The monitor schedule.", + "required": false + }, + "slicing_exprs": { + "name": "slicing_exprs", + "type": "[]string", + "description": "[Create:OPT Update:OPT] List of column expressions to slice data with for\ntargeted analysis. The data is grouped by each expression independently,\nresulting in a separate slice for each predicate and its complements. For\nexample `slicing_exprs=[“col_1”, “col_2 \u003e 10”]` will generate the\nfollowing slices: two slices for `col_2 \u003e 10` (True and False), and one\nslice per unique value in `col1`. For high-cardinality columns, only the\ntop 100 unique values by frequency will generate slices.", + "required": false + }, + "snapshot": { + "name": "snapshot", + "type": "*MonitorSnapshot", + "description": "Configuration for monitoring snapshot tables.", + "required": false + }, + "status": { + "name": "status", + "type": "MonitorInfoStatus", + "description": "[Create:ERR Update:IGN] The monitor status.", + "required": false + }, + "table_name": { + "name": "table_name", + "type": "string", + "description": "[Create:ERR Update:IGN] UC table to monitor. Format:\n`catalog.schema.table_name`", + "required": false + }, + "time_series": { + "name": "time_series", + "type": "*MonitorTimeSeries", + "description": "Configuration for monitoring time series tables.", + "required": false + } + } + }, + "catalog.MonitorMetric": { + "name": "MonitorMetric", + "package": "catalog", + "description": "Custom metric definition.", + "fields": { + "definition": { + "name": "definition", + "type": "string", + "description": "Jinja template for a SQL expression that specifies how to compute the\nmetric. See [create metric definition].\n\n[create metric definition]: https://docs.databricks.com/en/lakehouse-monitoring/custom-metrics.html#create-definition", + "required": false + }, + "input_columns": { + "name": "input_columns", + "type": "[]string", + "description": "A list of column names in the input table the metric should be computed\nfor. Can use ``\":table\"`` to indicate that the metric needs information\nfrom multiple columns.", + "required": false + }, + "name": { + "name": "name", + "type": "string", + "description": "Name of the metric in the output tables.", + "required": false + }, + "output_data_type": { + "name": "output_data_type", + "type": "string", + "description": "The output type of the custom metric.", + "required": false + }, + "type": { + "name": "type", + "type": "MonitorMetricType", + "description": "Can only be one of ``\"CUSTOM_METRIC_TYPE_AGGREGATE\"``,\n``\"CUSTOM_METRIC_TYPE_DERIVED\"``, or ``\"CUSTOM_METRIC_TYPE_DRIFT\"``. 
The\n``\"CUSTOM_METRIC_TYPE_AGGREGATE\"`` and ``\"CUSTOM_METRIC_TYPE_DERIVED\"``\nmetrics are computed on a single table, whereas the\n``\"CUSTOM_METRIC_TYPE_DRIFT\"`` compare metrics across baseline and input\ntable, or across the two consecutive time windows. -\nCUSTOM_METRIC_TYPE_AGGREGATE: only depend on the existing columns in your\ntable - CUSTOM_METRIC_TYPE_DERIVED: depend on...", + "required": false + } + } + }, + "catalog.MonitorMetricType": { + "name": "MonitorMetricType", + "package": "catalog", + "description": "Can only be one of ``\\\"CUSTOM_METRIC_TYPE_AGGREGATE\\\"``, ``\\\"CUSTOM_METRIC_TYPE_DERIVED\\\"``, or ``\\\"CUSTOM_METRIC_TYPE_DRIFT\\\"``.\nThe ``\\\"CUSTOM_METRIC_TYPE_AGGREGATE\\\"`` and ``\\\"CUSTOM_METRIC_TYPE_DERIVED\\\"`` metrics\nare computed on a single table, whereas the ``\\\"CUSTOM_METRIC_TYPE_DRIFT\\\"`` compare metrics across\nbaseline and input table, or across the two consecutive time windows.\n- CUSTOM_METRIC_TYPE_AGGREGATE: only depend on the existing columns in your table\n- CUSTOM_METRIC_TYPE_DERIVED: depend on previously computed aggregate metrics\n- CUSTOM_METRIC_TYPE_DRIFT: depend on previously computed aggregate or derived metrics", + "fields": {} + }, + "catalog.MonitorNotifications": { + "name": "MonitorNotifications", + "package": "catalog", + "description": "", + "fields": { + "on_failure": { + "name": "on_failure", + "type": "*MonitorDestination", + "description": "Destinations to send notifications on failure/timeout.", + "required": false + }, + "on_new_classification_tag_detected": { + "name": "on_new_classification_tag_detected", + "type": "*MonitorDestination", + "description": "Destinations to send notifications on new classification tag detected.", + "required": false + } + } + }, + "catalog.MonitorRefreshInfo": { + "name": "MonitorRefreshInfo", + "package": "catalog", + "description": "", + "fields": { + "end_time_ms": { + "name": "end_time_ms", + "type": "int64", + "description": "Time at which refresh operation completed (milliseconds since 1/1/1970\nUTC).", + "required": false + }, + "message": { + "name": "message", + "type": "string", + "description": "An optional message to give insight into the current state of the job\n(e.g. 
FAILURE messages).", + "required": false + }, + "refresh_id": { + "name": "refresh_id", + "type": "int64", + "description": "Unique id of the refresh operation.", + "required": false + }, + "start_time_ms": { + "name": "start_time_ms", + "type": "int64", + "description": "Time at which refresh operation was initiated (milliseconds since\n1/1/1970 UTC).", + "required": false + }, + "state": { + "name": "state", + "type": "MonitorRefreshInfoState", + "description": "The current state of the refresh.", + "required": false + }, + "trigger": { + "name": "trigger", + "type": "MonitorRefreshInfoTrigger", + "description": "The method by which the refresh was triggered.", + "required": false + } + } + }, + "catalog.MonitorRefreshListResponse": { + "name": "MonitorRefreshListResponse", + "package": "catalog", + "description": "", + "fields": { + "refreshes": { + "name": "refreshes", + "type": "[]MonitorRefreshInfo", + "description": "List of refreshes.", + "required": false + } + } + }, + "catalog.MonitorSnapshot": { + "name": "MonitorSnapshot", + "package": "catalog", + "description": "Snapshot analysis configuration", + "fields": {} + }, + "catalog.MonitorTimeSeries": { + "name": "MonitorTimeSeries", + "package": "catalog", + "description": "Time series analysis configuration.", + "fields": { + "granularities": { + "name": "granularities", + "type": "[]string", + "description": "Granularities for aggregating data into time windows based on their\ntimestamp. Currently the following static granularities are supported:\n{``\\\"5 minutes\\\"``, ``\\\"30 minutes\\\"``, ``\\\"1 hour\\\"``, ``\\\"1 day\\\"``,\n``\\\"\\u003cn\\u003e week(s)\\\"``, ``\\\"1 month\\\"``, ``\\\"1 year\\\"``}.", + "required": false + }, + "timestamp_col": { + "name": "timestamp_col", + "type": "string", + "description": "Column for the timestamp.", + "required": false + } + } + }, + "catalog.NamedTableConstraint": { + "name": "NamedTableConstraint", + "package": "catalog", + "description": "", + "fields": { + "name": { + "name": "name", + "type": "string", + "description": "The name of the constraint.", + "required": false + } + } + }, + "catalog.NotificationDestination": { + "name": "NotificationDestination", + "package": "catalog", + "description": "", + "fields": { + "destination_id": { + "name": "destination_id", + "type": "string", + "description": "The identifier for the destination. This is the email address for EMAIL\ndestinations, the URL for URL destinations, or the unique Databricks\nnotification destination ID for all other external destinations.", + "required": false + }, + "destination_type": { + "name": "destination_type", + "type": "DestinationType", + "description": "The type of the destination.", + "required": false + }, + "special_destination": { + "name": "special_destination", + "type": "SpecialDestination", + "description": "This field is used to denote whether the destination is the email of the\nowner of the securable object. The special destination cannot be assigned\nto a securable and only represents the default destination of the\nsecurable. The securable types that support default special destinations\nare: \"catalog\", \"external_location\", \"connection\", \"credential\", and\n\"metastore\". 
The **destination_type** of a **special_destination** is\nalways EMAIL.", + "required": false + } + } + }, + "catalog.OnlineTable": { + "name": "OnlineTable", + "package": "catalog", + "description": "Online Table information.", + "fields": { + "name": { + "name": "name", + "type": "string", + "description": "Full three-part (catalog, schema, table) name of the table.", + "required": false + }, + "spec": { + "name": "spec", + "type": "*OnlineTableSpec", + "description": "Specification of the online table.", + "required": false + }, + "status": { + "name": "status", + "type": "*OnlineTableStatus", + "description": "Online Table data synchronization status", + "required": false + }, + "table_serving_url": { + "name": "table_serving_url", + "type": "string", + "description": "Data serving REST API URL for this table", + "required": false + }, + "unity_catalog_provisioning_state": { + "name": "unity_catalog_provisioning_state", + "type": "ProvisioningInfoState", + "description": "The provisioning state of the online table entity in Unity Catalog. This\nis distinct from the state of the data synchronization pipeline (i.e. the\ntable may be in \"ACTIVE\" but the pipeline may be in \"PROVISIONING\" as it\nruns asynchronously).", + "required": false + } + } + }, + "catalog.OnlineTableSpec": { + "name": "OnlineTableSpec", + "package": "catalog", + "description": "Specification of an online table.", + "fields": { + "perform_full_copy": { + "name": "perform_full_copy", + "type": "bool", + "description": "Whether to create a full-copy pipeline -- a pipeline that stops after\ncreates a full copy of the source table upon initialization and does not\nprocess any change data feeds (CDFs) afterwards. The pipeline can still\nbe manually triggered afterwards, but it always perform a full copy of\nthe source table and there are no incremental updates. This mode is\nuseful for syncing views or tables without CDFs to online tables. Note\nthat the full-copy pipeline only supports \"triggered\" scheduling policy.", + "required": false + }, + "pipeline_id": { + "name": "pipeline_id", + "type": "string", + "description": "ID of the associated pipeline. 
Generated by the server - cannot be set by\nthe caller.", + "required": false + }, + "primary_key_columns": { + "name": "primary_key_columns", + "type": "[]string", + "description": "Primary Key columns to be used for data insert/update in the destination.", + "required": false + }, + "run_continuously": { + "name": "run_continuously", + "type": "*OnlineTableSpecContinuousSchedulingPolicy", + "description": "Pipeline runs continuously after generating the initial data.", + "required": false + }, + "run_triggered": { + "name": "run_triggered", + "type": "*OnlineTableSpecTriggeredSchedulingPolicy", + "description": "Pipeline stops after generating the initial data and can be triggered\nlater (manually, through a cron job or through data triggers)", + "required": false + }, + "source_table_full_name": { + "name": "source_table_full_name", + "type": "string", + "description": "Three-part (catalog, schema, table) name of the source Delta table.", + "required": false + }, + "timeseries_key": { + "name": "timeseries_key", + "type": "string", + "description": "Time series key to deduplicate (tie-break) rows with the same primary\nkey.", + "required": false + } + } + }, + "catalog.OnlineTableStatus": { + "name": "OnlineTableStatus", + "package": "catalog", + "description": "Status of an online table.", + "fields": { + "continuous_update_status": { + "name": "continuous_update_status", + "type": "*ContinuousUpdateStatus", + "description": "", + "required": false + }, + "detailed_state": { + "name": "detailed_state", + "type": "OnlineTableState", + "description": "The state of the online table.", + "required": false + }, + "failed_status": { + "name": "failed_status", + "type": "*FailedStatus", + "description": "", + "required": false + }, + "message": { + "name": "message", + "type": "string", + "description": "A text description of the current state of the online table.", + "required": false + }, + "provisioning_status": { + "name": "provisioning_status", + "type": "*ProvisioningStatus", + "description": "", + "required": false + }, + "triggered_update_status": { + "name": "triggered_update_status", + "type": "*TriggeredUpdateStatus", + "description": "", + "required": false + } + } + }, + "catalog.OptionSpec": { + "name": "OptionSpec", + "package": "catalog", + "description": "Spec of an allowed option on a securable kind and its attributes. This is\nmostly used by UI to provide user friendly hints and descriptions in order to\nfacilitate the securable creation process.", + "fields": { + "allowed_values": { + "name": "allowed_values", + "type": "[]string", + "description": "For drop down / radio button selections, UI will want to know the\npossible input values, it can also be used by other option types to limit\ninput selections.", + "required": false + }, + "default_value": { + "name": "default_value", + "type": "string", + "description": "The default value of the option, for example, value '443' for 'port'\noption.", + "required": false + }, + "description": { + "name": "description", + "type": "string", + "description": "A concise user facing description of what the input value of this option\nshould look like.", + "required": false + }, + "hint": { + "name": "hint", + "type": "string", + "description": "The hint is used on the UI to suggest what the input value can possibly\nbe like, for example: example.com for 'host' option. 
Unlike default\nvalue, it will not be applied automatically without user input.", + "required": false + }, + "is_copiable": { + "name": "is_copiable", + "type": "bool", + "description": "Indicates whether an option should be displayed with copy button on the\nUI.", + "required": false + }, + "is_creatable": { + "name": "is_creatable", + "type": "bool", + "description": "Indicates whether an option can be provided by users in the create/update\npath of an entity.", + "required": false + }, + "is_hidden": { + "name": "is_hidden", + "type": "bool", + "description": "Is the option value not user settable and is thus not shown on the UI.", + "required": false + }, + "is_loggable": { + "name": "is_loggable", + "type": "bool", + "description": "Specifies whether this option is safe to log, i.e. no sensitive\ninformation.", + "required": false + }, + "is_required": { + "name": "is_required", + "type": "bool", + "description": "Is the option required.", + "required": false + }, + "is_secret": { + "name": "is_secret", + "type": "bool", + "description": "Is the option value considered secret and thus redacted on the UI.", + "required": false + }, + "is_updatable": { + "name": "is_updatable", + "type": "bool", + "description": "Is the option updatable by users.", + "required": false + }, + "name": { + "name": "name", + "type": "string", + "description": "The unique name of the option.", + "required": false + }, + "oauth_stage": { + "name": "oauth_stage", + "type": "OptionSpecOauthStage", + "description": "Specifies when the option value is displayed on the UI within the OAuth\nflow.", + "required": false + }, + "type": { + "name": "type", + "type": "OptionSpecOptionType", + "description": "The type of the option.", + "required": false + } + } + }, + "catalog.PermissionsChange": { + "name": "PermissionsChange", + "package": "catalog", + "description": "", + "fields": { + "add": { + "name": "add", + "type": "[]Privilege", + "description": "The set of privileges to add.", + "required": false + }, + "principal": { + "name": "principal", + "type": "string", + "description": "The principal whose privileges we are changing. Only one of principal or\nprincipal_id should be specified, never both at the same time.", + "required": false + }, + "remove": { + "name": "remove", + "type": "[]Privilege", + "description": "The set of privileges to remove.", + "required": false + } + } + }, + "catalog.PipelineProgress": { + "name": "PipelineProgress", + "package": "catalog", + "description": "Progress information of the Online Table data synchronization pipeline.", + "fields": { + "estimated_completion_time_seconds": { + "name": "estimated_completion_time_seconds", + "type": "float64", + "description": "The estimated time remaining to complete this update in seconds.", + "required": false + }, + "latest_version_currently_processing": { + "name": "latest_version_currently_processing", + "type": "int64", + "description": "The source table Delta version that was last processed by the pipeline.\nThe pipeline may not have completely processed this version yet.", + "required": false + }, + "sync_progress_completion": { + "name": "sync_progress_completion", + "type": "float64", + "description": "The completion ratio of this update. 
This is a number between 0 and 1.", + "required": false + }, + "synced_row_count": { + "name": "synced_row_count", + "type": "int64", + "description": "The number of rows that have been synced in this update.", + "required": false + }, + "total_row_count": { + "name": "total_row_count", + "type": "int64", + "description": "The total number of rows that need to be synced in this update. This\nnumber may be an estimate.", + "required": false + } + } + }, + "catalog.PolicyInfo": { + "name": "PolicyInfo", + "package": "catalog", + "description": "", + "fields": { + "column_mask": { + "name": "column_mask", + "type": "*ColumnMaskOptions", + "description": "Options for column mask policies. Valid only if `policy_type` is\n`POLICY_TYPE_COLUMN_MASK`. Required on create and optional on update.\nWhen specified on update, the new options will replace the existing\noptions as a whole.", + "required": false + }, + "comment": { + "name": "comment", + "type": "string", + "description": "Optional description of the policy.", + "required": false + }, + "created_at": { + "name": "created_at", + "type": "int64", + "description": "Time at which the policy was created, in epoch milliseconds. Output only.", + "required": false + }, + "created_by": { + "name": "created_by", + "type": "string", + "description": "Username of the user who created the policy. Output only.", + "required": false + }, + "except_principals": { + "name": "except_principals", + "type": "[]string", + "description": "Optional list of user or group names that should be excluded from the\npolicy.", + "required": false + }, + "for_securable_type": { + "name": "for_securable_type", + "type": "SecurableType", + "description": "Type of securables that the policy should take effect on. Only `TABLE` is\nsupported at this moment. Required on create and optional on update.", + "required": false + }, + "id": { + "name": "id", + "type": "string", + "description": "Unique identifier of the policy. This field is output only and is\ngenerated by the system.", + "required": false + }, + "match_columns": { + "name": "match_columns", + "type": "[]MatchColumn", + "description": "Optional list of condition expressions used to match table columns. Only\nvalid when `for_securable_type` is `TABLE`. When specified, the policy\nonly applies to tables whose columns satisfy all match conditions.", + "required": false + }, + "name": { + "name": "name", + "type": "string", + "description": "Name of the policy. Required on create and optional on update. To rename\nthe policy, set `name` to a different value on update.", + "required": false + }, + "on_securable_fullname": { + "name": "on_securable_fullname", + "type": "string", + "description": "Full name of the securable on which the policy is defined. Required on\ncreate and ignored on update.", + "required": false + }, + "on_securable_type": { + "name": "on_securable_type", + "type": "SecurableType", + "description": "Type of the securable on which the policy is defined. Only `CATALOG`,\n`SCHEMA` and `TABLE` are supported at this moment. Required on create and\nignored on update.", + "required": false + }, + "policy_type": { + "name": "policy_type", + "type": "PolicyType", + "description": "Type of the policy. Required on create and ignored on update.", + "required": false + }, + "row_filter": { + "name": "row_filter", + "type": "*RowFilterOptions", + "description": "Options for row filter policies. Valid only if `policy_type` is\n`POLICY_TYPE_ROW_FILTER`. Required on create and optional on update. 
When\nspecified on update, the new options will replace the existing options as\na whole.", + "required": false + }, + "to_principals": { + "name": "to_principals", + "type": "[]string", + "description": "List of user or group names that the policy applies to. Required on\ncreate and optional on update.", + "required": false + }, + "updated_at": { + "name": "updated_at", + "type": "int64", + "description": "Time at which the policy was last modified, in epoch milliseconds. Output\nonly.", + "required": false + }, + "updated_by": { + "name": "updated_by", + "type": "string", + "description": "Username of the user who last modified the policy. Output only.", + "required": false + }, + "when_condition": { + "name": "when_condition", + "type": "string", + "description": "Optional condition when the policy should take effect.", + "required": false + } + } + }, + "catalog.PrimaryKeyConstraint": { + "name": "PrimaryKeyConstraint", + "package": "catalog", + "description": "", + "fields": { + "child_columns": { + "name": "child_columns", + "type": "[]string", + "description": "Column names for this constraint.", + "required": false + }, + "name": { + "name": "name", + "type": "string", + "description": "The name of the constraint.", + "required": false + }, + "rely": { + "name": "rely", + "type": "bool", + "description": "True if the constraint is RELY, false or unset if NORELY.", + "required": false + }, + "timeseries_columns": { + "name": "timeseries_columns", + "type": "[]string", + "description": "Column names that represent a timeseries.", + "required": false + } + } + }, + "catalog.Principal": { + "name": "Principal", + "package": "catalog", + "description": "", + "fields": { + "id": { + "name": "id", + "type": "string", + "description": "Databricks user, group or service principal ID.", + "required": false + }, + "principal_type": { + "name": "principal_type", + "type": "PrincipalType", + "description": "", + "required": false + } + } + }, + "catalog.PrivilegeAssignment": { + "name": "PrivilegeAssignment", + "package": "catalog", + "description": "", + "fields": { + "principal": { + "name": "principal", + "type": "string", + "description": "The principal (user email address or group name). For deleted principals,\n`principal` is empty while `principal_id` is populated.", + "required": false + }, + "privileges": { + "name": "privileges", + "type": "[]Privilege", + "description": "The privileges assigned to the principal.", + "required": false + } + } + }, + "catalog.ProvisioningInfo": { + "name": "ProvisioningInfo", + "package": "catalog", + "description": "Status of an asynchronously provisioned resource.", + "fields": { + "state": { + "name": "state", + "type": "ProvisioningInfoState", + "description": "The provisioning state of the resource.", + "required": false + } + } + }, + "catalog.ProvisioningStatus": { + "name": "ProvisioningStatus", + "package": "catalog", + "description": "Detailed status of an online table. Shown if the online table is in the\nPROVISIONING_PIPELINE_RESOURCES or the PROVISIONING_INITIAL_SNAPSHOT state.", + "fields": { + "initial_pipeline_sync_progress": { + "name": "initial_pipeline_sync_progress", + "type": "*PipelineProgress", + "description": "Details about initial data synchronization. 
Only populated when in the\nPROVISIONING_INITIAL_SNAPSHOT state.", + "required": false + } + } + }, + "catalog.QuotaInfo": { + "name": "QuotaInfo", + "package": "catalog", + "description": "", + "fields": { + "last_refreshed_at": { + "name": "last_refreshed_at", + "type": "int64", + "description": "The timestamp that indicates when the quota count was last updated.", + "required": false + }, + "parent_full_name": { + "name": "parent_full_name", + "type": "string", + "description": "Name of the parent resource. Returns metastore ID if the parent is a\nmetastore.", + "required": false + }, + "parent_securable_type": { + "name": "parent_securable_type", + "type": "SecurableType", + "description": "The quota parent securable type.", + "required": false + }, + "quota_count": { + "name": "quota_count", + "type": "int", + "description": "The current usage of the resource quota.", + "required": false + }, + "quota_limit": { + "name": "quota_limit", + "type": "int", + "description": "The current limit of the resource quota.", + "required": false + }, + "quota_name": { + "name": "quota_name", + "type": "string", + "description": "The name of the quota.", + "required": false + } + } + }, + "catalog.R2Credentials": { + "name": "R2Credentials", + "package": "catalog", + "description": "R2 temporary credentials for API authentication. Read more at\nhttps://developers.cloudflare.com/r2/api/s3/tokens/.", + "fields": { + "access_key_id": { + "name": "access_key_id", + "type": "string", + "description": "The access key ID that identifies the temporary credentials.", + "required": false + }, + "secret_access_key": { + "name": "secret_access_key", + "type": "string", + "description": "The secret access key associated with the access key.", + "required": false + }, + "session_token": { + "name": "session_token", + "type": "string", + "description": "The generated JWT that users must pass to use the temporary credentials.", + "required": false + } + } + }, + "catalog.RegenerateDashboardRequest": { + "name": "RegenerateDashboardRequest", + "package": "catalog", + "description": "", + "fields": { + "warehouse_id": { + "name": "warehouse_id", + "type": "string", + "description": "Optional argument to specify the warehouse for dashboard regeneration. If\nnot specified, the first running warehouse will be used.", + "required": false + } + } + }, + "catalog.RegenerateDashboardResponse": { + "name": "RegenerateDashboardResponse", + "package": "catalog", + "description": "", + "fields": { + "dashboard_id": { + "name": "dashboard_id", + "type": "string", + "description": "", + "required": false + }, + "parent_folder": { + "name": "parent_folder", + "type": "string", + "description": "Parent folder is equivalent to {assets_dir}/{tableName}", + "required": false + } + } + }, + "catalog.RegisteredModelAlias": { + "name": "RegisteredModelAlias", + "package": "catalog", + "description": "", + "fields": { + "alias_name": { + "name": "alias_name", + "type": "string", + "description": "Name of the alias, e.g. 
'champion' or 'latest_stable'", + "required": false + }, + "catalog_name": { + "name": "catalog_name", + "type": "string", + "description": "The name of the catalog containing the model version", + "required": false + }, + "id": { + "name": "id", + "type": "string", + "description": "The unique identifier of the alias", + "required": false + }, + "model_name": { + "name": "model_name", + "type": "string", + "description": "The name of the parent registered model of the model version, relative to\nparent schema", + "required": false + }, + "schema_name": { + "name": "schema_name", + "type": "string", + "description": "The name of the schema containing the model version, relative to parent\ncatalog", + "required": false + }, + "version_num": { + "name": "version_num", + "type": "int", + "description": "Integer version number of the model version to which this alias points.", + "required": false + } + } + }, + "catalog.RegisteredModelInfo": { + "name": "RegisteredModelInfo", + "package": "catalog", + "description": "", + "fields": { + "aliases": { + "name": "aliases", + "type": "[]RegisteredModelAlias", + "description": "List of aliases associated with the registered model", + "required": false + }, + "browse_only": { + "name": "browse_only", + "type": "bool", + "description": "Indicates whether the principal is limited to retrieving metadata for the\nassociated object through the BROWSE privilege when include_browse is\nenabled in the request.", + "required": false + }, + "catalog_name": { + "name": "catalog_name", + "type": "string", + "description": "The name of the catalog where the schema and the registered model reside", + "required": false + }, + "comment": { + "name": "comment", + "type": "string", + "description": "The comment attached to the registered model", + "required": false + }, + "created_at": { + "name": "created_at", + "type": "int64", + "description": "Creation timestamp of the registered model in milliseconds since the Unix\nepoch", + "required": false + }, + "created_by": { + "name": "created_by", + "type": "string", + "description": "The identifier of the user who created the registered model", + "required": false + }, + "full_name": { + "name": "full_name", + "type": "string", + "description": "The three-level (fully qualified) name of the registered model", + "required": false + }, + "metastore_id": { + "name": "metastore_id", + "type": "string", + "description": "The unique identifier of the metastore", + "required": false + }, + "name": { + "name": "name", + "type": "string", + "description": "The name of the registered model", + "required": false + }, + "owner": { + "name": "owner", + "type": "string", + "description": "The identifier of the user who owns the registered model", + "required": false + }, + "schema_name": { + "name": "schema_name", + "type": "string", + "description": "The name of the schema where the registered model resides", + "required": false + }, + "storage_location": { + "name": "storage_location", + "type": "string", + "description": "The storage location on the cloud under which model version data files\nare stored", + "required": false + }, + "updated_at": { + "name": "updated_at", + "type": "int64", + "description": "Last-update timestamp of the registered model in milliseconds since the\nUnix epoch", + "required": false + }, + "updated_by": { + "name": "updated_by", + "type": "string", + "description": "The identifier of the user who updated the registered model last time", + "required": false + } + } + }, + "catalog.RowFilterOptions": { + 
"name": "RowFilterOptions", + "package": "catalog", + "description": "", + "fields": { + "function_name": { + "name": "function_name", + "type": "string", + "description": "The fully qualified name of the row filter function. The function is\ncalled on each row of the target table. It should return a boolean value\nindicating whether the row should be visible to the user. Required on\ncreate and update.", + "required": false + }, + "using": { + "name": "using", + "type": "[]FunctionArgument", + "description": "Optional list of column aliases or constant literals to be passed as\narguments to the row filter function. The type of each column should\nmatch the positional argument of the row filter function.", + "required": false + } + } + }, + "catalog.SchemaInfo": { + "name": "SchemaInfo", + "package": "catalog", + "description": "Next ID: 44", + "fields": { + "browse_only": { + "name": "browse_only", + "type": "bool", + "description": "Indicates whether the principal is limited to retrieving metadata for the\nassociated object through the BROWSE privilege when include_browse is\nenabled in the request.", + "required": false + }, + "catalog_name": { + "name": "catalog_name", + "type": "string", + "description": "Name of parent catalog.", + "required": false + }, + "catalog_type": { + "name": "catalog_type", + "type": "CatalogType", + "description": "The type of the parent catalog.", + "required": false + }, + "comment": { + "name": "comment", + "type": "string", + "description": "User-provided free-form text description.", + "required": false + }, + "created_at": { + "name": "created_at", + "type": "int64", + "description": "Time at which this schema was created, in epoch milliseconds.", + "required": false + }, + "created_by": { + "name": "created_by", + "type": "string", + "description": "Username of schema creator.", + "required": false + }, + "effective_predictive_optimization_flag": { + "name": "effective_predictive_optimization_flag", + "type": "*EffectivePredictiveOptimizationFlag", + "description": "", + "required": false + }, + "enable_predictive_optimization": { + "name": "enable_predictive_optimization", + "type": "EnablePredictiveOptimization", + "description": "Whether predictive optimization should be enabled for this object and\nobjects under it.", + "required": false + }, + "full_name": { + "name": "full_name", + "type": "string", + "description": "Full name of schema, in form of __catalog_name__.__schema_name__.", + "required": false + }, + "metastore_id": { + "name": "metastore_id", + "type": "string", + "description": "Unique identifier of parent metastore.", + "required": false + }, + "name": { + "name": "name", + "type": "string", + "description": "Name of schema, relative to parent catalog.", + "required": false + }, + "owner": { + "name": "owner", + "type": "string", + "description": "Username of current owner of schema.", + "required": false + }, + "properties": { + "name": "properties", + "type": "map[string]string", + "description": "A map of key-value properties attached to the securable.", + "required": false + }, + "schema_id": { + "name": "schema_id", + "type": "string", + "description": "The unique identifier of the schema.", + "required": false + }, + "storage_location": { + "name": "storage_location", + "type": "string", + "description": "Storage location for managed tables within schema.", + "required": false + }, + "storage_root": { + "name": "storage_root", + "type": "string", + "description": "Storage root URL for managed tables within schema.", + 
"required": false + }, + "updated_at": { + "name": "updated_at", + "type": "int64", + "description": "Time at which this schema was created, in epoch milliseconds.", + "required": false + }, + "updated_by": { + "name": "updated_by", + "type": "string", + "description": "Username of user who last modified schema.", + "required": false + } + } + }, + "catalog.Securable": { + "name": "Securable", + "package": "catalog", + "description": "Generic definition of a securable, which is uniquely defined in a metastore\nby its type and full name.", + "fields": { + "full_name": { + "name": "full_name", + "type": "string", + "description": "Required. The full name of the catalog/schema/table. Optional if\nresource_name is present.", + "required": false + }, + "provider_share": { + "name": "provider_share", + "type": "string", + "description": "Optional. The name of the Share object that contains the securable when\nthe securable is getting shared in D2D Delta Sharing.", + "required": false + }, + "type": { + "name": "type", + "type": "SecurableType", + "description": "Required. The type of securable (catalog/schema/table). Optional if\nresource_name is present.", + "required": false + } + } + }, + "catalog.SecurableKindManifest": { + "name": "SecurableKindManifest", + "package": "catalog", + "description": "Manifest of a specific securable kind.", + "fields": { + "assignable_privileges": { + "name": "assignable_privileges", + "type": "[]string", + "description": "Privileges that can be assigned to the securable.", + "required": false + }, + "capabilities": { + "name": "capabilities", + "type": "[]string", + "description": "A list of capabilities in the securable kind.", + "required": false + }, + "options": { + "name": "options", + "type": "[]OptionSpec", + "description": "Detailed specs of allowed options.", + "required": false + }, + "securable_kind": { + "name": "securable_kind", + "type": "SecurableKind", + "description": "Securable kind to get manifest of.", + "required": false + }, + "securable_type": { + "name": "securable_type", + "type": "SecurableType", + "description": "Securable Type of the kind.", + "required": false + } + } + }, + "catalog.SecurablePermissions": { + "name": "SecurablePermissions", + "package": "catalog", + "description": "", + "fields": { + "permissions": { + "name": "permissions", + "type": "[]string", + "description": "List of requested Unity Catalog permissions.", + "required": false + }, + "securable": { + "name": "securable", + "type": "*Securable", + "description": "The securable for which the access request destinations are being\nrequested.", + "required": false + } + } + }, + "catalog.SetArtifactAllowlist": { + "name": "SetArtifactAllowlist", + "package": "catalog", + "description": "", + "fields": { + "artifact_matchers": { + "name": "artifact_matchers", + "type": "[]ArtifactMatcher", + "description": "A list of allowed artifact match patterns.", + "required": false + }, + "created_at": { + "name": "created_at", + "type": "int64", + "description": "Time at which this artifact allowlist was set, in epoch milliseconds.", + "required": false + }, + "created_by": { + "name": "created_by", + "type": "string", + "description": "Username of the user who set the artifact allowlist.", + "required": false + }, + "metastore_id": { + "name": "metastore_id", + "type": "string", + "description": "Unique identifier of parent metastore.", + "required": false + } + } + }, + "catalog.SetRegisteredModelAliasRequest": { + "name": "SetRegisteredModelAliasRequest", + "package": 
"catalog", + "description": "", + "fields": { + "version_num": { + "name": "version_num", + "type": "int", + "description": "The version number of the model version to which the alias points", + "required": false + } + } + }, + "catalog.SseEncryptionDetails": { + "name": "SseEncryptionDetails", + "package": "catalog", + "description": "Server-Side Encryption properties for clients communicating with AWS s3.", + "fields": { + "algorithm": { + "name": "algorithm", + "type": "SseEncryptionDetailsAlgorithm", + "description": "Sets the value of the 'x-amz-server-side-encryption' header in S3\nrequest.", + "required": false + }, + "aws_kms_key_arn": { + "name": "aws_kms_key_arn", + "type": "string", + "description": "Optional. The ARN of the SSE-KMS key used with the S3 location, when\nalgorithm = \"SSE-KMS\". Sets the value of the\n'x-amz-server-side-encryption-aws-kms-key-id' header.", + "required": false + } + } + }, + "catalog.StorageCredentialInfo": { + "name": "StorageCredentialInfo", + "package": "catalog", + "description": "", + "fields": { + "aws_iam_role": { + "name": "aws_iam_role", + "type": "*AwsIamRoleResponse", + "description": "The AWS IAM role configuration.", + "required": false + }, + "azure_managed_identity": { + "name": "azure_managed_identity", + "type": "*AzureManagedIdentityResponse", + "description": "The Azure managed identity configuration.", + "required": false + }, + "azure_service_principal": { + "name": "azure_service_principal", + "type": "*AzureServicePrincipal", + "description": "The Azure service principal configuration.", + "required": false + }, + "cloudflare_api_token": { + "name": "cloudflare_api_token", + "type": "*CloudflareApiToken", + "description": "The Cloudflare API token configuration.", + "required": false + }, + "comment": { + "name": "comment", + "type": "string", + "description": "Comment associated with the credential.", + "required": false + }, + "created_at": { + "name": "created_at", + "type": "int64", + "description": "Time at which this credential was created, in epoch milliseconds.", + "required": false + }, + "created_by": { + "name": "created_by", + "type": "string", + "description": "Username of credential creator.", + "required": false + }, + "databricks_gcp_service_account": { + "name": "databricks_gcp_service_account", + "type": "*DatabricksGcpServiceAccountResponse", + "description": "The Databricks managed GCP service account configuration.", + "required": false + }, + "full_name": { + "name": "full_name", + "type": "string", + "description": "The full name of the credential.", + "required": false + }, + "id": { + "name": "id", + "type": "string", + "description": "The unique identifier of the credential.", + "required": false + }, + "isolation_mode": { + "name": "isolation_mode", + "type": "IsolationMode", + "description": "Whether the current securable is accessible from all workspaces or a\nspecific set of workspaces.", + "required": false + }, + "metastore_id": { + "name": "metastore_id", + "type": "string", + "description": "Unique identifier of the parent metastore.", + "required": false + }, + "name": { + "name": "name", + "type": "string", + "description": "The credential name. 
The name must be unique among storage and service\ncredentials within the metastore.", + "required": false + }, + "owner": { + "name": "owner", + "type": "string", + "description": "Username of current owner of credential.", + "required": false + }, + "read_only": { + "name": "read_only", + "type": "bool", + "description": "Whether the credential is usable only for read operations. Only\napplicable when purpose is **STORAGE**.", + "required": false + }, + "updated_at": { + "name": "updated_at", + "type": "int64", + "description": "Time at which this credential was last modified, in epoch milliseconds.", + "required": false + }, + "updated_by": { + "name": "updated_by", + "type": "string", + "description": "Username of user who last modified the credential.", + "required": false + }, + "used_for_managed_storage": { + "name": "used_for_managed_storage", + "type": "bool", + "description": "Whether this credential is the current metastore's root storage\ncredential. Only applicable when purpose is **STORAGE**.", + "required": false + } + } + }, + "catalog.SystemSchemaInfo": { + "name": "SystemSchemaInfo", + "package": "catalog", + "description": "", + "fields": { + "schema": { + "name": "schema", + "type": "string", + "description": "Name of the system schema.", + "required": false + }, + "state": { + "name": "state", + "type": "string", + "description": "The current state of enablement for the system schema. An empty string\nmeans the system schema is available and ready for opt-in. Possible\nvalues: AVAILABLE | ENABLE_INITIALIZED | ENABLE_COMPLETED |\nDISABLE_INITIALIZED | UNAVAILABLE | MANAGED", + "required": false + } + } + }, + "catalog.TableConstraint": { + "name": "TableConstraint", + "package": "catalog", + "description": "A table constraint, as defined by *one* of the following fields being set:\n__primary_key_constraint__, __foreign_key_constraint__,\n__named_table_constraint__.", + "fields": { + "foreign_key_constraint": { + "name": "foreign_key_constraint", + "type": "*ForeignKeyConstraint", + "description": "", + "required": false + }, + "named_table_constraint": { + "name": "named_table_constraint", + "type": "*NamedTableConstraint", + "description": "", + "required": false + }, + "primary_key_constraint": { + "name": "primary_key_constraint", + "type": "*PrimaryKeyConstraint", + "description": "", + "required": false + } + } + }, + "catalog.TableDependency": { + "name": "TableDependency", + "package": "catalog", + "description": "A table that is dependent on a SQL object.", + "fields": { + "table_full_name": { + "name": "table_full_name", + "type": "string", + "description": "Full name of the dependent table, in the form of\n__catalog_name__.__schema_name__.__table_name__.", + "required": false + } + } + }, + "catalog.TableExistsResponse": { + "name": "TableExistsResponse", + "package": "catalog", + "description": "", + "fields": { + "table_exists": { + "name": "table_exists", + "type": "bool", + "description": "Whether the table exists or not.", + "required": false + } + } + }, + "catalog.TableInfo": { + "name": "TableInfo", + "package": "catalog", + "description": "", + "fields": { + "access_point": { + "name": "access_point", + "type": "string", + "description": "The AWS access point to use when accesing s3 for this external location.", + "required": false + }, + "browse_only": { + "name": "browse_only", + "type": "bool", + "description": "Indicates whether the principal is limited to retrieving metadata for the\nassociated object through the BROWSE privilege when 
include_browse is\nenabled in the request.", + "required": false + }, + "catalog_name": { + "name": "catalog_name", + "type": "string", + "description": "Name of parent catalog.", + "required": false + }, + "columns": { + "name": "columns", + "type": "[]ColumnInfo", + "description": "The array of __ColumnInfo__ definitions of the table's columns.", + "required": false + }, + "comment": { + "name": "comment", + "type": "string", + "description": "User-provided free-form text description.", + "required": false + }, + "created_at": { + "name": "created_at", + "type": "int64", + "description": "Time at which this table was created, in epoch milliseconds.", + "required": false + }, + "created_by": { + "name": "created_by", + "type": "string", + "description": "Username of table creator.", + "required": false + }, + "data_access_configuration_id": { + "name": "data_access_configuration_id", + "type": "string", + "description": "Unique ID of the Data Access Configuration to use with the table data.", + "required": false + }, + "data_source_format": { + "name": "data_source_format", + "type": "DataSourceFormat", + "description": "", + "required": false + }, + "deleted_at": { + "name": "deleted_at", + "type": "int64", + "description": "Time at which this table was deleted, in epoch milliseconds. Field is\nomitted if table is not deleted.", + "required": false + }, + "delta_runtime_properties_kvpairs": { + "name": "delta_runtime_properties_kvpairs", + "type": "*DeltaRuntimePropertiesKvPairs", + "description": "Information pertaining to current state of the delta table.", + "required": false + }, + "effective_predictive_optimization_flag": { + "name": "effective_predictive_optimization_flag", + "type": "*EffectivePredictiveOptimizationFlag", + "description": "", + "required": false + }, + "enable_predictive_optimization": { + "name": "enable_predictive_optimization", + "type": "EnablePredictiveOptimization", + "description": "", + "required": false + }, + "encryption_details": { + "name": "encryption_details", + "type": "*EncryptionDetails", + "description": "", + "required": false + }, + "full_name": { + "name": "full_name", + "type": "string", + "description": "Full name of table, in form of\n__catalog_name__.__schema_name__.__table_name__", + "required": false + }, + "metastore_id": { + "name": "metastore_id", + "type": "string", + "description": "Unique identifier of parent metastore.", + "required": false + }, + "name": { + "name": "name", + "type": "string", + "description": "Name of table, relative to parent schema.", + "required": false + }, + "owner": { + "name": "owner", + "type": "string", + "description": "Username of current owner of table.", + "required": false + }, + "pipeline_id": { + "name": "pipeline_id", + "type": "string", + "description": "The pipeline ID of the table. 
Applicable for tables created by pipelines\n(Materialized View, Streaming Table, etc.).", + "required": false + }, + "properties": { + "name": "properties", + "type": "map[string]string", + "description": "A map of key-value properties attached to the securable.", + "required": false + }, + "row_filter": { + "name": "row_filter", + "type": "*TableRowFilter", + "description": "", + "required": false + }, + "schema_name": { + "name": "schema_name", + "type": "string", + "description": "Name of parent schema relative to its parent catalog.", + "required": false + }, + "securable_kind_manifest": { + "name": "securable_kind_manifest", + "type": "*SecurableKindManifest", + "description": "SecurableKindManifest of table, including capabilities the table has.", + "required": false + }, + "sql_path": { + "name": "sql_path", + "type": "string", + "description": "List of schemes whose objects can be referenced without qualification.", + "required": false + }, + "storage_credential_name": { + "name": "storage_credential_name", + "type": "string", + "description": "Name of the storage credential, when a storage credential is configured\nfor use with this table.", + "required": false + }, + "storage_location": { + "name": "storage_location", + "type": "string", + "description": "Storage root URL for table (for **MANAGED**, **EXTERNAL** tables).", + "required": false + }, + "table_constraints": { + "name": "table_constraints", + "type": "[]TableConstraint", + "description": "List of table constraints. Note: this field is not set in the output of\nthe __listTables__ API.", + "required": false + }, + "table_id": { + "name": "table_id", + "type": "string", + "description": "The unique identifier of the table.", + "required": false + }, + "table_type": { + "name": "table_type", + "type": "TableType", + "description": "", + "required": false + }, + "updated_at": { + "name": "updated_at", + "type": "int64", + "description": "Time at which this table was last modified, in epoch milliseconds.", + "required": false + }, + "updated_by": { + "name": "updated_by", + "type": "string", + "description": "Username of user who last modified the table.", + "required": false + }, + "view_definition": { + "name": "view_definition", + "type": "string", + "description": "View definition SQL (when __table_type__ is **VIEW**,\n**MATERIALIZED_VIEW**, or **STREAMING_TABLE**)", + "required": false + }, + "view_dependencies": { + "name": "view_dependencies", + "type": "*DependencyList", + "description": "View dependencies (when table_type == **VIEW** or **MATERIALIZED_VIEW**,\n**STREAMING_TABLE**) - when DependencyList is None, the dependency is not\nprovided; - when DependencyList is an empty list, the dependency is\nprovided but is empty; - when DependencyList is not an empty list,\ndependencies are provided and recorded. Note: this field is not set in\nthe output of the __listTables__ API.", + "required": false + } + } + }, + "catalog.TableRowFilter": { + "name": "TableRowFilter", + "package": "catalog", + "description": "", + "fields": { + "function_name": { + "name": "function_name", + "type": "string", + "description": "The full name of the row filter SQL UDF.", + "required": false + }, + "input_column_names": { + "name": "input_column_names", + "type": "[]string", + "description": "The list of table columns to be passed as input to the row filter\nfunction. 
The column types should match the types of the filter function\narguments.", + "required": false + } + } + }, + "catalog.TableSummary": { + "name": "TableSummary", + "package": "catalog", + "description": "", + "fields": { + "full_name": { + "name": "full_name", + "type": "string", + "description": "The full name of the table.", + "required": false + }, + "securable_kind_manifest": { + "name": "securable_kind_manifest", + "type": "*SecurableKindManifest", + "description": "SecurableKindManifest of table, including capabilities the table has.", + "required": false + }, + "table_type": { + "name": "table_type", + "type": "TableType", + "description": "", + "required": false + } + } + }, + "catalog.TagKeyValue": { + "name": "TagKeyValue", + "package": "catalog", + "description": "", + "fields": { + "key": { + "name": "key", + "type": "string", + "description": "name of the tag", + "required": false + }, + "value": { + "name": "value", + "type": "string", + "description": "value of the tag associated with the key, could be optional", + "required": false + } + } + }, + "catalog.TemporaryCredentials": { + "name": "TemporaryCredentials", + "package": "catalog", + "description": "", + "fields": { + "aws_temp_credentials": { + "name": "aws_temp_credentials", + "type": "*AwsCredentials", + "description": "", + "required": false + }, + "azure_aad": { + "name": "azure_aad", + "type": "*AzureActiveDirectoryToken", + "description": "", + "required": false + }, + "expiration_time": { + "name": "expiration_time", + "type": "int64", + "description": "Server time when the credential will expire, in epoch milliseconds. The\nAPI client is advised to cache the credential given this expiration time.", + "required": false + }, + "gcp_oauth_token": { + "name": "gcp_oauth_token", + "type": "*GcpOauthToken", + "description": "", + "required": false + } + } + }, + "catalog.TriggeredUpdateStatus": { + "name": "TriggeredUpdateStatus", + "package": "catalog", + "description": "Detailed status of an online table. Shown if the online table is in the\nONLINE_TRIGGERED_UPDATE or the ONLINE_NO_PENDING_UPDATE state.", + "fields": { + "last_processed_commit_version": { + "name": "last_processed_commit_version", + "type": "int64", + "description": "The last source table Delta version that was synced to the online table.\nNote that this Delta version may not be completely synced to the online\ntable yet.", + "required": false + }, + "timestamp": { + "name": "timestamp", + "type": "string", + "description": "The timestamp of the last time any data was synchronized from the source\ntable to the online table.", + "required": false + }, + "triggered_update_progress": { + "name": "triggered_update_progress", + "type": "*PipelineProgress", + "description": "Progress of the active data synchronization pipeline.", + "required": false + } + } + }, + "catalog.UpdateAccessRequestDestinationsRequest": { + "name": "UpdateAccessRequestDestinationsRequest", + "package": "catalog", + "description": "", + "fields": { + "access_request_destinations": { + "name": "access_request_destinations", + "type": "AccessRequestDestinations", + "description": "The access request destinations to assign to the securable. 
For each\ndestination, a **destination_id** and **destination_type** must be\ndefined.", + "required": false + } + } + }, + "catalog.UpdateAccountsMetastore": { + "name": "UpdateAccountsMetastore", + "package": "catalog", + "description": "", + "fields": { + "delta_sharing_organization_name": { + "name": "delta_sharing_organization_name", + "type": "string", + "description": "The organization name of a Delta Sharing entity, to be used in\nDatabricks-to-Databricks Delta Sharing as the official name.", + "required": false + }, + "delta_sharing_recipient_token_lifetime_in_seconds": { + "name": "delta_sharing_recipient_token_lifetime_in_seconds", + "type": "int64", + "description": "The lifetime of delta sharing recipient token in seconds.", + "required": false + }, + "delta_sharing_scope": { + "name": "delta_sharing_scope", + "type": "DeltaSharingScopeEnum", + "description": "The scope of Delta Sharing enabled for the metastore.", + "required": false + }, + "external_access_enabled": { + "name": "external_access_enabled", + "type": "bool", + "description": "Whether to allow non-DBR clients to directly access entities under the\nmetastore.", + "required": false + }, + "owner": { + "name": "owner", + "type": "string", + "description": "The owner of the metastore.", + "required": false + }, + "privilege_model_version": { + "name": "privilege_model_version", + "type": "string", + "description": "Privilege model version of the metastore, of the form `major.minor`\n(e.g., `1.0`).", + "required": false + }, + "storage_root_credential_id": { + "name": "storage_root_credential_id", + "type": "string", + "description": "UUID of storage credential to access the metastore storage_root.", + "required": false + } + } + }, + "catalog.UpdateAccountsStorageCredential": { + "name": "UpdateAccountsStorageCredential", + "package": "catalog", + "description": "", + "fields": { + "aws_iam_role": { + "name": "aws_iam_role", + "type": "*AwsIamRoleRequest", + "description": "The AWS IAM role configuration.", + "required": false + }, + "azure_managed_identity": { + "name": "azure_managed_identity", + "type": "*AzureManagedIdentityResponse", + "description": "The Azure managed identity configuration.", + "required": false + }, + "azure_service_principal": { + "name": "azure_service_principal", + "type": "*AzureServicePrincipal", + "description": "The Azure service principal configuration.", + "required": false + }, + "cloudflare_api_token": { + "name": "cloudflare_api_token", + "type": "*CloudflareApiToken", + "description": "The Cloudflare API token configuration.", + "required": false + }, + "comment": { + "name": "comment", + "type": "string", + "description": "Comment associated with the credential.", + "required": false + }, + "databricks_gcp_service_account": { + "name": "databricks_gcp_service_account", + "type": "*DatabricksGcpServiceAccountRequest", + "description": "The Databricks managed GCP service account configuration.", + "required": false + }, + "isolation_mode": { + "name": "isolation_mode", + "type": "IsolationMode", + "description": "Whether the current securable is accessible from all workspaces or a\nspecific set of workspaces.", + "required": false + }, + "owner": { + "name": "owner", + "type": "string", + "description": "Username of current owner of credential.", + "required": false + }, + "read_only": { + "name": "read_only", + "type": "bool", + "description": "Whether the credential is usable only for read operations. 
Only\napplicable when purpose is **STORAGE**.", + "required": false + } + } + }, + "catalog.UpdateCatalog": { + "name": "UpdateCatalog", + "package": "catalog", + "description": "", + "fields": { + "comment": { + "name": "comment", + "type": "string", + "description": "User-provided free-form text description.", + "required": false + }, + "enable_predictive_optimization": { + "name": "enable_predictive_optimization", + "type": "EnablePredictiveOptimization", + "description": "Whether predictive optimization should be enabled for this object and\nobjects under it.", + "required": false + }, + "isolation_mode": { + "name": "isolation_mode", + "type": "CatalogIsolationMode", + "description": "Whether the current securable is accessible from all workspaces or a\nspecific set of workspaces.", + "required": false + }, + "new_name": { + "name": "new_name", + "type": "string", + "description": "New name for the catalog.", + "required": false + }, + "options": { + "name": "options", + "type": "map[string]string", + "description": "A map of key-value properties attached to the securable.", + "required": false + }, + "owner": { + "name": "owner", + "type": "string", + "description": "Username of current owner of catalog.", + "required": false + }, + "properties": { + "name": "properties", + "type": "map[string]string", + "description": "A map of key-value properties attached to the securable.", + "required": false + } + } + }, + "catalog.UpdateCatalogWorkspaceBindingsResponse": { + "name": "UpdateCatalogWorkspaceBindingsResponse", + "package": "catalog", + "description": "", + "fields": { + "workspaces": { + "name": "workspaces", + "type": "[]int64", + "description": "A list of workspace IDs", + "required": false + } + } + }, + "catalog.UpdateConnection": { + "name": "UpdateConnection", + "package": "catalog", + "description": "", + "fields": { + "new_name": { + "name": "new_name", + "type": "string", + "description": "New name for the connection.", + "required": false + }, + "options": { + "name": "options", + "type": "map[string]string", + "description": "A map of key-value properties attached to the securable.", + "required": false + }, + "owner": { + "name": "owner", + "type": "string", + "description": "Username of current owner of the connection.", + "required": false + } + } + }, + "catalog.UpdateCredentialRequest": { + "name": "UpdateCredentialRequest", + "package": "catalog", + "description": "", + "fields": { + "aws_iam_role": { + "name": "aws_iam_role", + "type": "*AwsIamRole", + "description": "The AWS IAM role configuration.", + "required": false + }, + "azure_managed_identity": { + "name": "azure_managed_identity", + "type": "*AzureManagedIdentity", + "description": "The Azure managed identity configuration.", + "required": false + }, + "azure_service_principal": { + "name": "azure_service_principal", + "type": "*AzureServicePrincipal", + "description": "The Azure service principal configuration.", + "required": false + }, + "comment": { + "name": "comment", + "type": "string", + "description": "Comment associated with the credential.", + "required": false + }, + "databricks_gcp_service_account": { + "name": "databricks_gcp_service_account", + "type": "*DatabricksGcpServiceAccount", + "description": "The Databricks managed GCP service account configuration.", + "required": false + }, + "force": { + "name": "force", + "type": "bool", + "description": "Force an update even if there are dependent services (when purpose is\n**SERVICE**) or dependent external locations and external tables 
(when\npurpose is **STORAGE**).", + "required": false + }, + "isolation_mode": { + "name": "isolation_mode", + "type": "IsolationMode", + "description": "Whether the current securable is accessible from all workspaces or a\nspecific set of workspaces.", + "required": false + }, + "new_name": { + "name": "new_name", + "type": "string", + "description": "New name of credential.", + "required": false + }, + "owner": { + "name": "owner", + "type": "string", + "description": "Username of current owner of credential.", + "required": false + }, + "read_only": { + "name": "read_only", + "type": "bool", + "description": "Whether the credential is usable only for read operations. Only\napplicable when purpose is **STORAGE**.", + "required": false + }, + "skip_validation": { + "name": "skip_validation", + "type": "bool", + "description": "Supply true to this argument to skip validation of the updated\ncredential.", + "required": false + } + } + }, + "catalog.UpdateEntityTagAssignmentRequest": { + "name": "UpdateEntityTagAssignmentRequest", + "package": "catalog", + "description": "", + "fields": { + "tag_assignment": { + "name": "tag_assignment", + "type": "EntityTagAssignment", + "description": "", + "required": false + } + } + }, + "catalog.UpdateExternalLineageRelationshipRequest": { + "name": "UpdateExternalLineageRelationshipRequest", + "package": "catalog", + "description": "", + "fields": { + "external_lineage_relationship": { + "name": "external_lineage_relationship", + "type": "UpdateRequestExternalLineage", + "description": "", + "required": false + } + } + }, + "catalog.UpdateExternalLocation": { + "name": "UpdateExternalLocation", + "package": "catalog", + "description": "", + "fields": { + "comment": { + "name": "comment", + "type": "string", + "description": "User-provided free-form text description.", + "required": false + }, + "credential_name": { + "name": "credential_name", + "type": "string", + "description": "Name of the storage credential used with this location.", + "required": false + }, + "enable_file_events": { + "name": "enable_file_events", + "type": "bool", + "description": "Whether to enable file events on this external location.", + "required": false + }, + "encryption_details": { + "name": "encryption_details", + "type": "*EncryptionDetails", + "description": "", + "required": false + }, + "fallback": { + "name": "fallback", + "type": "bool", + "description": "Indicates whether fallback mode is enabled for this external location.\nWhen fallback mode is enabled, the access to the location falls back to\ncluster credentials if UC credentials are not sufficient.", + "required": false + }, + "file_event_queue": { + "name": "file_event_queue", + "type": "*FileEventQueue", + "description": "File event queue settings. 
If `enable_file_events` is `true`, must be\ndefined and have exactly one of the documented properties.", + "required": false + }, + "force": { + "name": "force", + "type": "bool", + "description": "Force update even if changing url invalidates dependent external tables\nor mounts.", + "required": false + }, + "isolation_mode": { + "name": "isolation_mode", + "type": "IsolationMode", + "description": "", + "required": false + }, + "new_name": { + "name": "new_name", + "type": "string", + "description": "New name for the external location.", + "required": false + }, + "owner": { + "name": "owner", + "type": "string", + "description": "The owner of the external location.", + "required": false + }, + "read_only": { + "name": "read_only", + "type": "bool", + "description": "Indicates whether the external location is read-only.", + "required": false + }, + "skip_validation": { + "name": "skip_validation", + "type": "bool", + "description": "Skips validation of the storage credential associated with the external\nlocation.", + "required": false + }, + "url": { + "name": "url", + "type": "string", + "description": "Path URL of the external location.", + "required": false + } + } + }, + "catalog.UpdateExternalMetadataRequest": { + "name": "UpdateExternalMetadataRequest", + "package": "catalog", + "description": "", + "fields": { + "external_metadata": { + "name": "external_metadata", + "type": "ExternalMetadata", + "description": "", + "required": false + } + } + }, + "catalog.UpdateFunction": { + "name": "UpdateFunction", + "package": "catalog", + "description": "", + "fields": { + "owner": { + "name": "owner", + "type": "string", + "description": "Username of current owner of the function.", + "required": false + } + } + }, + "catalog.UpdateMetastore": { + "name": "UpdateMetastore", + "package": "catalog", + "description": "", + "fields": { + "delta_sharing_organization_name": { + "name": "delta_sharing_organization_name", + "type": "string", + "description": "The organization name of a Delta Sharing entity, to be used in\nDatabricks-to-Databricks Delta Sharing as the official name.", + "required": false + }, + "delta_sharing_recipient_token_lifetime_in_seconds": { + "name": "delta_sharing_recipient_token_lifetime_in_seconds", + "type": "int64", + "description": "The lifetime of delta sharing recipient token in seconds.", + "required": false + }, + "delta_sharing_scope": { + "name": "delta_sharing_scope", + "type": "DeltaSharingScopeEnum", + "description": "The scope of Delta Sharing enabled for the metastore.", + "required": false + }, + "external_access_enabled": { + "name": "external_access_enabled", + "type": "bool", + "description": "Whether to allow non-DBR clients to directly access entities under the\nmetastore.", + "required": false + }, + "new_name": { + "name": "new_name", + "type": "string", + "description": "New name for the metastore.", + "required": false + }, + "owner": { + "name": "owner", + "type": "string", + "description": "The owner of the metastore.", + "required": false + }, + "privilege_model_version": { + "name": "privilege_model_version", + "type": "string", + "description": "Privilege model version of the metastore, of the form `major.minor`\n(e.g., `1.0`).", + "required": false + }, + "storage_root_credential_id": { + "name": "storage_root_credential_id", + "type": "string", + "description": "UUID of storage credential to access the metastore storage_root.", + "required": false + } + } + }, + "catalog.UpdateMetastoreAssignment": { + "name": "UpdateMetastoreAssignment", 
+ "package": "catalog", + "description": "", + "fields": { + "default_catalog_name": { + "name": "default_catalog_name", + "type": "string", + "description": "The name of the default catalog in the metastore. This field is\ndeprecated. Please use \"Default Namespace API\" to configure the default\ncatalog for a Databricks workspace.", + "required": false + }, + "metastore_id": { + "name": "metastore_id", + "type": "string", + "description": "The unique ID of the metastore.", + "required": false + } + } + }, + "catalog.UpdateModelVersionRequest": { + "name": "UpdateModelVersionRequest", + "package": "catalog", + "description": "", + "fields": { + "aliases": { + "name": "aliases", + "type": "[]RegisteredModelAlias", + "description": "List of aliases associated with the model version", + "required": false + }, + "catalog_name": { + "name": "catalog_name", + "type": "string", + "description": "The name of the catalog containing the model version", + "required": false + }, + "comment": { + "name": "comment", + "type": "string", + "description": "The comment attached to the model version", + "required": false + }, + "created_at": { + "name": "created_at", + "type": "int64", + "description": "", + "required": false + }, + "created_by": { + "name": "created_by", + "type": "string", + "description": "The identifier of the user who created the model version", + "required": false + }, + "id": { + "name": "id", + "type": "string", + "description": "The unique identifier of the model version", + "required": false + }, + "metastore_id": { + "name": "metastore_id", + "type": "string", + "description": "The unique identifier of the metastore containing the model version", + "required": false + }, + "model_name": { + "name": "model_name", + "type": "string", + "description": "The name of the parent registered model of the model version, relative to\nparent schema", + "required": false + }, + "model_version_dependencies": { + "name": "model_version_dependencies", + "type": "*DependencyList", + "description": "Model version dependencies, for feature-store packaged models", + "required": false + }, + "run_id": { + "name": "run_id", + "type": "string", + "description": "MLflow run ID used when creating the model version, if ``source`` was\ngenerated by an experiment run stored in an MLflow tracking server", + "required": false + }, + "run_workspace_id": { + "name": "run_workspace_id", + "type": "int", + "description": "ID of the Databricks workspace containing the MLflow run that generated\nthis model version, if applicable", + "required": false + }, + "schema_name": { + "name": "schema_name", + "type": "string", + "description": "The name of the schema containing the model version, relative to parent\ncatalog", + "required": false + }, + "source": { + "name": "source", + "type": "string", + "description": "URI indicating the location of the source artifacts (files) for the model\nversion", + "required": false + }, + "status": { + "name": "status", + "type": "ModelVersionInfoStatus", + "description": "Current status of the model version. Newly created model versions start\nin PENDING_REGISTRATION status, then move to READY status once the model\nversion files are uploaded and the model version is finalized. 
Only model\nversions in READY status can be loaded for inference or served.", + "required": false + }, + "storage_location": { + "name": "storage_location", + "type": "string", + "description": "The storage location on the cloud under which model version data files\nare stored", + "required": false + }, + "updated_at": { + "name": "updated_at", + "type": "int64", + "description": "", + "required": false + }, + "updated_by": { + "name": "updated_by", + "type": "string", + "description": "The identifier of the user who updated the model version last time", + "required": false + } + } + }, + "catalog.UpdateMonitor": { + "name": "UpdateMonitor", + "package": "catalog", + "description": "", + "fields": { + "baseline_table_name": { + "name": "baseline_table_name", + "type": "string", + "description": "[Create:OPT Update:OPT] Baseline table name. Baseline data is used to\ncompute drift from the data in the monitored `table_name`. The baseline\ntable and the monitored table shall have the same schema.", + "required": false + }, + "custom_metrics": { + "name": "custom_metrics", + "type": "[]MonitorMetric", + "description": "[Create:OPT Update:OPT] Custom metrics.", + "required": false + }, + "dashboard_id": { + "name": "dashboard_id", + "type": "string", + "description": "[Create:ERR Update:OPT] Id of dashboard that visualizes the computed\nmetrics. This can be empty if the monitor is in PENDING state.", + "required": false + }, + "data_classification_config": { + "name": "data_classification_config", + "type": "*MonitorDataClassificationConfig", + "description": "[Create:OPT Update:OPT] Data classification related config.", + "required": false + }, + "inference_log": { + "name": "inference_log", + "type": "*MonitorInferenceLog", + "description": "", + "required": false + }, + "latest_monitor_failure_msg": { + "name": "latest_monitor_failure_msg", + "type": "string", + "description": "[Create:ERR Update:IGN] The latest error message for a monitor failure.", + "required": false + }, + "notifications": { + "name": "notifications", + "type": "*MonitorNotifications", + "description": "[Create:OPT Update:OPT] Field for specifying notification settings.", + "required": false + }, + "output_schema_name": { + "name": "output_schema_name", + "type": "string", + "description": "[Create:REQ Update:REQ] Schema where output tables are created. Needs to\nbe in 2-level format {catalog}.{schema}", + "required": false + }, + "schedule": { + "name": "schedule", + "type": "*MonitorCronSchedule", + "description": "[Create:OPT Update:OPT] The monitor schedule.", + "required": false + }, + "slicing_exprs": { + "name": "slicing_exprs", + "type": "[]string", + "description": "[Create:OPT Update:OPT] List of column expressions to slice data with for\ntargeted analysis. The data is grouped by each expression independently,\nresulting in a separate slice for each predicate and its complements. For\nexample `slicing_exprs=[“col_1”, “col_2 \u003e 10”]` will generate the\nfollowing slices: two slices for `col_2 \u003e 10` (True and False), and one\nslice per unique value in `col1`. 
For high-cardinality columns, only the\ntop 100 unique values by frequency will generate slices.", + "required": false + }, + "snapshot": { + "name": "snapshot", + "type": "*MonitorSnapshot", + "description": "Configuration for monitoring snapshot tables.", + "required": false + }, + "time_series": { + "name": "time_series", + "type": "*MonitorTimeSeries", + "description": "Configuration for monitoring time series tables.", + "required": false + } + } + }, + "catalog.UpdatePermissions": { + "name": "UpdatePermissions", + "package": "catalog", + "description": "", + "fields": { + "changes": { + "name": "changes", + "type": "[]PermissionsChange", + "description": "Array of permissions change objects.", + "required": false + } + } + }, + "catalog.UpdatePermissionsResponse": { + "name": "UpdatePermissionsResponse", + "package": "catalog", + "description": "", + "fields": { + "privilege_assignments": { + "name": "privilege_assignments", + "type": "[]PrivilegeAssignment", + "description": "The privileges assigned to each principal", + "required": false + } + } + }, + "catalog.UpdatePolicyRequest": { + "name": "UpdatePolicyRequest", + "package": "catalog", + "description": "", + "fields": { + "policy_info": { + "name": "policy_info", + "type": "PolicyInfo", + "description": "Optional fields to update. This is the request body for updating a\npolicy. Use `update_mask` field to specify which fields in the request is\nto be updated. - If `update_mask` is empty or \"*\", all specified fields\nwill be updated. - If `update_mask` is specified, only the fields\nspecified in the `update_mask` will be updated. If a field is specified\nin `update_mask` and not set in the request, the field will be cleared.\nUsers can use the update mask to explicitly unset optional fields such as\n...", + "required": false + } + } + }, + "catalog.UpdateRegisteredModelRequest": { + "name": "UpdateRegisteredModelRequest", + "package": "catalog", + "description": "", + "fields": { + "aliases": { + "name": "aliases", + "type": "[]RegisteredModelAlias", + "description": "List of aliases associated with the registered model", + "required": false + }, + "browse_only": { + "name": "browse_only", + "type": "bool", + "description": "Indicates whether the principal is limited to retrieving metadata for the\nassociated object through the BROWSE privilege when include_browse is\nenabled in the request.", + "required": false + }, + "catalog_name": { + "name": "catalog_name", + "type": "string", + "description": "The name of the catalog where the schema and the registered model reside", + "required": false + }, + "comment": { + "name": "comment", + "type": "string", + "description": "The comment attached to the registered model", + "required": false + }, + "created_at": { + "name": "created_at", + "type": "int64", + "description": "Creation timestamp of the registered model in milliseconds since the Unix\nepoch", + "required": false + }, + "created_by": { + "name": "created_by", + "type": "string", + "description": "The identifier of the user who created the registered model", + "required": false + }, + "metastore_id": { + "name": "metastore_id", + "type": "string", + "description": "The unique identifier of the metastore", + "required": false + }, + "name": { + "name": "name", + "type": "string", + "description": "The name of the registered model", + "required": false + }, + "new_name": { + "name": "new_name", + "type": "string", + "description": "New name for the registered model.", + "required": false + }, + "owner": { + "name": 
"owner", + "type": "string", + "description": "The identifier of the user who owns the registered model", + "required": false + }, + "schema_name": { + "name": "schema_name", + "type": "string", + "description": "The name of the schema where the registered model resides", + "required": false + }, + "storage_location": { + "name": "storage_location", + "type": "string", + "description": "The storage location on the cloud under which model version data files\nare stored", + "required": false + }, + "updated_at": { + "name": "updated_at", + "type": "int64", + "description": "Last-update timestamp of the registered model in milliseconds since the\nUnix epoch", + "required": false + }, + "updated_by": { + "name": "updated_by", + "type": "string", + "description": "The identifier of the user who updated the registered model last time", + "required": false + } + } + }, + "catalog.UpdateRequestExternalLineage": { + "name": "UpdateRequestExternalLineage", + "package": "catalog", + "description": "", + "fields": { + "columns": { + "name": "columns", + "type": "[]ColumnRelationship", + "description": "List of column relationships between source and target objects.", + "required": false + }, + "id": { + "name": "id", + "type": "string", + "description": "Unique identifier of the external lineage relationship.", + "required": false + }, + "properties": { + "name": "properties", + "type": "map[string]string", + "description": "Key-value properties associated with the external lineage relationship.", + "required": false + }, + "source": { + "name": "source", + "type": "ExternalLineageObject", + "description": "Source object of the external lineage relationship.", + "required": false + }, + "target": { + "name": "target", + "type": "ExternalLineageObject", + "description": "Target object of the external lineage relationship.", + "required": false + } + } + }, + "catalog.UpdateSchema": { + "name": "UpdateSchema", + "package": "catalog", + "description": "", + "fields": { + "comment": { + "name": "comment", + "type": "string", + "description": "User-provided free-form text description.", + "required": false + }, + "enable_predictive_optimization": { + "name": "enable_predictive_optimization", + "type": "EnablePredictiveOptimization", + "description": "Whether predictive optimization should be enabled for this object and\nobjects under it.", + "required": false + }, + "new_name": { + "name": "new_name", + "type": "string", + "description": "New name for the schema.", + "required": false + }, + "owner": { + "name": "owner", + "type": "string", + "description": "Username of current owner of schema.", + "required": false + }, + "properties": { + "name": "properties", + "type": "map[string]string", + "description": "A map of key-value properties attached to the securable.", + "required": false + } + } + }, + "catalog.UpdateStorageCredential": { + "name": "UpdateStorageCredential", + "package": "catalog", + "description": "", + "fields": { + "aws_iam_role": { + "name": "aws_iam_role", + "type": "*AwsIamRoleRequest", + "description": "The AWS IAM role configuration.", + "required": false + }, + "azure_managed_identity": { + "name": "azure_managed_identity", + "type": "*AzureManagedIdentityResponse", + "description": "The Azure managed identity configuration.", + "required": false + }, + "azure_service_principal": { + "name": "azure_service_principal", + "type": "*AzureServicePrincipal", + "description": "The Azure service principal configuration.", + "required": false + }, + "cloudflare_api_token": { + "name": 
"cloudflare_api_token", + "type": "*CloudflareApiToken", + "description": "The Cloudflare API token configuration.", + "required": false + }, + "comment": { + "name": "comment", + "type": "string", + "description": "Comment associated with the credential.", + "required": false + }, + "databricks_gcp_service_account": { + "name": "databricks_gcp_service_account", + "type": "*DatabricksGcpServiceAccountRequest", + "description": "The Databricks managed GCP service account configuration.", + "required": false + }, + "force": { + "name": "force", + "type": "bool", + "description": "Force update even if there are dependent external locations or external\ntables.", + "required": false + }, + "isolation_mode": { + "name": "isolation_mode", + "type": "IsolationMode", + "description": "Whether the current securable is accessible from all workspaces or a\nspecific set of workspaces.", + "required": false + }, + "new_name": { + "name": "new_name", + "type": "string", + "description": "New name for the storage credential.", + "required": false + }, + "owner": { + "name": "owner", + "type": "string", + "description": "Username of current owner of credential.", + "required": false + }, + "read_only": { + "name": "read_only", + "type": "bool", + "description": "Whether the credential is usable only for read operations. Only\napplicable when purpose is **STORAGE**.", + "required": false + }, + "skip_validation": { + "name": "skip_validation", + "type": "bool", + "description": "Supplying true to this argument skips validation of the updated\ncredential.", + "required": false + } + } + }, + "catalog.UpdateTableRequest": { + "name": "UpdateTableRequest", + "package": "catalog", + "description": "", + "fields": { + "owner": { + "name": "owner", + "type": "string", + "description": "Username of current owner of table.", + "required": false + } + } + }, + "catalog.UpdateVolumeRequestContent": { + "name": "UpdateVolumeRequestContent", + "package": "catalog", + "description": "", + "fields": { + "comment": { + "name": "comment", + "type": "string", + "description": "The comment attached to the volume", + "required": false + }, + "new_name": { + "name": "new_name", + "type": "string", + "description": "New name for the volume.", + "required": false + }, + "owner": { + "name": "owner", + "type": "string", + "description": "The identifier of the user who owns the volume", + "required": false + } + } + }, + "catalog.UpdateWorkspaceBindings": { + "name": "UpdateWorkspaceBindings", + "package": "catalog", + "description": "", + "fields": { + "assign_workspaces": { + "name": "assign_workspaces", + "type": "[]int64", + "description": "A list of workspace IDs.", + "required": false + }, + "unassign_workspaces": { + "name": "unassign_workspaces", + "type": "[]int64", + "description": "A list of workspace IDs.", + "required": false + } + } + }, + "catalog.UpdateWorkspaceBindingsParameters": { + "name": "UpdateWorkspaceBindingsParameters", + "package": "catalog", + "description": "", + "fields": { + "add": { + "name": "add", + "type": "[]WorkspaceBinding", + "description": "List of workspace bindings to add. 
If a binding for the workspace already\nexists with a different binding_type, adding it again with a new\nbinding_type will update the existing binding (e.g., from READ_WRITE to\nREAD_ONLY).", + "required": false + }, + "remove": { + "name": "remove", + "type": "[]WorkspaceBinding", + "description": "List of workspace bindings to remove.", + "required": false + } + } + }, + "catalog.UpdateWorkspaceBindingsResponse": { + "name": "UpdateWorkspaceBindingsResponse", + "package": "catalog", + "description": "A list of workspace IDs that are bound to the securable", + "fields": { + "bindings": { + "name": "bindings", + "type": "[]WorkspaceBinding", + "description": "List of workspace bindings.", + "required": false + } + } + }, + "catalog.ValidateCredentialRequest": { + "name": "ValidateCredentialRequest", + "package": "catalog", + "description": "Next ID: 17", + "fields": { + "aws_iam_role": { + "name": "aws_iam_role", + "type": "*AwsIamRole", + "description": "", + "required": false + }, + "azure_managed_identity": { + "name": "azure_managed_identity", + "type": "*AzureManagedIdentity", + "description": "", + "required": false + }, + "credential_name": { + "name": "credential_name", + "type": "string", + "description": "Required. The name of an existing credential or long-lived cloud\ncredential to validate.", + "required": false + }, + "databricks_gcp_service_account": { + "name": "databricks_gcp_service_account", + "type": "*DatabricksGcpServiceAccount", + "description": "", + "required": false + }, + "external_location_name": { + "name": "external_location_name", + "type": "string", + "description": "The name of an existing external location to validate. Only applicable\nfor storage credentials (purpose is **STORAGE**.)", + "required": false + }, + "purpose": { + "name": "purpose", + "type": "CredentialPurpose", + "description": "The purpose of the credential. This should only be used when the\ncredential is specified.", + "required": false + }, + "read_only": { + "name": "read_only", + "type": "bool", + "description": "Whether the credential is only usable for read operations. Only\napplicable for storage credentials (purpose is **STORAGE**.)", + "required": false + }, + "url": { + "name": "url", + "type": "string", + "description": "The external location url to validate. Only applicable when purpose is\n**STORAGE**.", + "required": false + } + } + }, + "catalog.ValidateCredentialResponse": { + "name": "ValidateCredentialResponse", + "package": "catalog", + "description": "", + "fields": { + "isDir": { + "name": "isDir", + "type": "bool", + "description": "Whether the tested location is a directory in cloud storage. 
Only\napplicable for when purpose is **STORAGE**.", + "required": false + }, + "results": { + "name": "results", + "type": "[]CredentialValidationResult", + "description": "The results of the validation check.", + "required": false + } + } + }, + "catalog.ValidateStorageCredential": { + "name": "ValidateStorageCredential", + "package": "catalog", + "description": "", + "fields": { + "aws_iam_role": { + "name": "aws_iam_role", + "type": "*AwsIamRoleRequest", + "description": "The AWS IAM role configuration.", + "required": false + }, + "azure_managed_identity": { + "name": "azure_managed_identity", + "type": "*AzureManagedIdentityRequest", + "description": "The Azure managed identity configuration.", + "required": false + }, + "azure_service_principal": { + "name": "azure_service_principal", + "type": "*AzureServicePrincipal", + "description": "The Azure service principal configuration.", + "required": false + }, + "cloudflare_api_token": { + "name": "cloudflare_api_token", + "type": "*CloudflareApiToken", + "description": "The Cloudflare API token configuration.", + "required": false + }, + "databricks_gcp_service_account": { + "name": "databricks_gcp_service_account", + "type": "*DatabricksGcpServiceAccountRequest", + "description": "The Databricks created GCP service account configuration.", + "required": false + }, + "external_location_name": { + "name": "external_location_name", + "type": "string", + "description": "The name of an existing external location to validate.", + "required": false + }, + "read_only": { + "name": "read_only", + "type": "bool", + "description": "Whether the storage credential is only usable for read operations.", + "required": false + }, + "storage_credential_name": { + "name": "storage_credential_name", + "type": "string", + "description": "Required. 
The name of an existing credential or long-lived cloud\ncredential to validate.", + "required": false + }, + "url": { + "name": "url", + "type": "string", + "description": "The external location url to validate.", + "required": false + } + } + }, + "catalog.ValidateStorageCredentialResponse": { + "name": "ValidateStorageCredentialResponse", + "package": "catalog", + "description": "", + "fields": { + "isDir": { + "name": "isDir", + "type": "bool", + "description": "Whether the tested location is a directory in cloud storage.", + "required": false + }, + "results": { + "name": "results", + "type": "[]ValidationResult", + "description": "The results of the validation check.", + "required": false + } + } + }, + "catalog.ValidationResult": { + "name": "ValidationResult", + "package": "catalog", + "description": "", + "fields": { + "message": { + "name": "message", + "type": "string", + "description": "Error message would exist when the result does not equal to **PASS**.", + "required": false + }, + "operation": { + "name": "operation", + "type": "ValidationResultOperation", + "description": "The operation tested.", + "required": false + }, + "result": { + "name": "result", + "type": "ValidationResultResult", + "description": "The results of the tested operation.", + "required": false + } + } + }, + "catalog.VolumeInfo": { + "name": "VolumeInfo", + "package": "catalog", + "description": "", + "fields": { + "access_point": { + "name": "access_point", + "type": "string", + "description": "The AWS access point to use when accesing s3 for this external location.", + "required": false + }, + "browse_only": { + "name": "browse_only", + "type": "bool", + "description": "Indicates whether the principal is limited to retrieving metadata for the\nassociated object through the BROWSE privilege when include_browse is\nenabled in the request.", + "required": false + }, + "catalog_name": { + "name": "catalog_name", + "type": "string", + "description": "The name of the catalog where the schema and the volume are", + "required": false + }, + "comment": { + "name": "comment", + "type": "string", + "description": "The comment attached to the volume", + "required": false + }, + "created_at": { + "name": "created_at", + "type": "int64", + "description": "", + "required": false + }, + "created_by": { + "name": "created_by", + "type": "string", + "description": "The identifier of the user who created the volume", + "required": false + }, + "encryption_details": { + "name": "encryption_details", + "type": "*EncryptionDetails", + "description": "", + "required": false + }, + "full_name": { + "name": "full_name", + "type": "string", + "description": "The three-level (fully qualified) name of the volume", + "required": false + }, + "metastore_id": { + "name": "metastore_id", + "type": "string", + "description": "The unique identifier of the metastore", + "required": false + }, + "name": { + "name": "name", + "type": "string", + "description": "The name of the volume", + "required": false + }, + "owner": { + "name": "owner", + "type": "string", + "description": "The identifier of the user who owns the volume", + "required": false + }, + "schema_name": { + "name": "schema_name", + "type": "string", + "description": "The name of the schema where the volume is", + "required": false + }, + "storage_location": { + "name": "storage_location", + "type": "string", + "description": "The storage location on the cloud", + "required": false + }, + "updated_at": { + "name": "updated_at", + "type": "int64", + "description": "", + 
"required": false + }, + "updated_by": { + "name": "updated_by", + "type": "string", + "description": "The identifier of the user who updated the volume last time", + "required": false + }, + "volume_id": { + "name": "volume_id", + "type": "string", + "description": "The unique identifier of the volume", + "required": false + }, + "volume_type": { + "name": "volume_type", + "type": "VolumeType", + "description": "The type of the volume. An external volume is located in the specified\nexternal location. A managed volume is located in the default location\nwhich is specified by the parent schema, or the parent catalog, or the\nMetastore. [Learn more]\n\n[Learn more]: https://docs.databricks.com/aws/en/volumes/managed-vs-external", + "required": false + } + } + }, + "catalog.WaitGetOnlineTableActive": { + "name": "WaitGetOnlineTableActive", + "package": "catalog", + "description": "WaitGetOnlineTableActive is a wrapper that calls [OnlineTablesAPI.WaitGetOnlineTableActive] and waits to reach ACTIVE state.", + "fields": { + "Poll": { + "name": "Poll", + "type": "func(time.Duration, func(*OnlineTable)) (*OnlineTable, error)", + "description": "", + "required": false + }, + "Response": { + "name": "Response", + "type": "*R", + "description": "", + "required": false + }, + "callback": { + "name": "callback", + "type": "func(*OnlineTable)", + "description": "", + "required": false + }, + "name": { + "name": "name", + "type": "string", + "description": "", + "required": false + }, + "timeout": { + "name": "timeout", + "type": "time.Duration", + "description": "", + "required": false + } + } + }, + "catalog.WorkspaceBinding": { + "name": "WorkspaceBinding", + "package": "catalog", + "description": "", + "fields": { + "binding_type": { + "name": "binding_type", + "type": "WorkspaceBindingBindingType", + "description": "One of READ_WRITE/READ_ONLY. 
Default is READ_WRITE.", + "required": false + }, + "workspace_id": { + "name": "workspace_id", + "type": "int64", + "description": "Required", + "required": false + } + } + }, + "catalog.accountMetastoreAssignmentsImpl": { + "name": "accountMetastoreAssignmentsImpl", + "package": "catalog", + "description": "unexported type that holds implementations of just AccountMetastoreAssignments API methods", + "fields": { + "client": { + "name": "client", + "type": "*client.DatabricksClient", + "description": "", + "required": false + } + } + }, + "catalog.accountMetastoresImpl": { + "name": "accountMetastoresImpl", + "package": "catalog", + "description": "unexported type that holds implementations of just AccountMetastores API methods", + "fields": { + "client": { + "name": "client", + "type": "*client.DatabricksClient", + "description": "", + "required": false + } + } + }, + "catalog.accountStorageCredentialsImpl": { + "name": "accountStorageCredentialsImpl", + "package": "catalog", + "description": "unexported type that holds implementations of just AccountStorageCredentials API methods", + "fields": { + "client": { + "name": "client", + "type": "*client.DatabricksClient", + "description": "", + "required": false + } + } + }, + "catalog.artifactAllowlistsImpl": { + "name": "artifactAllowlistsImpl", + "package": "catalog", + "description": "unexported type that holds implementations of just ArtifactAllowlists API methods", + "fields": { + "client": { + "name": "client", + "type": "*client.DatabricksClient", + "description": "", + "required": false + } + } + }, + "catalog.catalogsImpl": { + "name": "catalogsImpl", + "package": "catalog", + "description": "unexported type that holds implementations of just Catalogs API methods", + "fields": { + "client": { + "name": "client", + "type": "*client.DatabricksClient", + "description": "", + "required": false + } + } + }, + "catalog.connectionsImpl": { + "name": "connectionsImpl", + "package": "catalog", + "description": "unexported type that holds implementations of just Connections API methods", + "fields": { + "client": { + "name": "client", + "type": "*client.DatabricksClient", + "description": "", + "required": false + } + } + }, + "catalog.credentialsImpl": { + "name": "credentialsImpl", + "package": "catalog", + "description": "unexported type that holds implementations of just Credentials API methods", + "fields": { + "client": { + "name": "client", + "type": "*client.DatabricksClient", + "description": "", + "required": false + } + } + }, + "catalog.entityTagAssignmentsImpl": { + "name": "entityTagAssignmentsImpl", + "package": "catalog", + "description": "unexported type that holds implementations of just EntityTagAssignments API methods", + "fields": { + "client": { + "name": "client", + "type": "*client.DatabricksClient", + "description": "", + "required": false + } + } + }, + "catalog.externalLineageImpl": { + "name": "externalLineageImpl", + "package": "catalog", + "description": "unexported type that holds implementations of just ExternalLineage API methods", + "fields": { + "client": { + "name": "client", + "type": "*client.DatabricksClient", + "description": "", + "required": false + } + } + }, + "catalog.externalLocationsImpl": { + "name": "externalLocationsImpl", + "package": "catalog", + "description": "unexported type that holds implementations of just ExternalLocations API methods", + "fields": { + "client": { + "name": "client", + "type": "*client.DatabricksClient", + "description": "", + "required": false + } + } + }, + 
"catalog.externalMetadataImpl": { + "name": "externalMetadataImpl", + "package": "catalog", + "description": "unexported type that holds implementations of just ExternalMetadata API methods", + "fields": { + "client": { + "name": "client", + "type": "*client.DatabricksClient", + "description": "", + "required": false + } + } + }, + "catalog.functionsImpl": { + "name": "functionsImpl", + "package": "catalog", + "description": "unexported type that holds implementations of just Functions API methods", + "fields": { + "client": { + "name": "client", + "type": "*client.DatabricksClient", + "description": "", + "required": false + } + } + }, + "catalog.grantsImpl": { + "name": "grantsImpl", + "package": "catalog", + "description": "unexported type that holds implementations of just Grants API methods", + "fields": { + "client": { + "name": "client", + "type": "*client.DatabricksClient", + "description": "", + "required": false + } + } + }, + "catalog.metastoresImpl": { + "name": "metastoresImpl", + "package": "catalog", + "description": "unexported type that holds implementations of just Metastores API methods", + "fields": { + "client": { + "name": "client", + "type": "*client.DatabricksClient", + "description": "", + "required": false + } + } + }, + "catalog.modelVersionsImpl": { + "name": "modelVersionsImpl", + "package": "catalog", + "description": "unexported type that holds implementations of just ModelVersions API methods", + "fields": { + "client": { + "name": "client", + "type": "*client.DatabricksClient", + "description": "", + "required": false + } + } + }, + "catalog.onlineTablesImpl": { + "name": "onlineTablesImpl", + "package": "catalog", + "description": "unexported type that holds implementations of just OnlineTables API methods", + "fields": { + "client": { + "name": "client", + "type": "*client.DatabricksClient", + "description": "", + "required": false + } + } + }, + "catalog.policiesImpl": { + "name": "policiesImpl", + "package": "catalog", + "description": "unexported type that holds implementations of just Policies API methods", + "fields": { + "client": { + "name": "client", + "type": "*client.DatabricksClient", + "description": "", + "required": false + } + } + }, + "catalog.qualityMonitorsImpl": { + "name": "qualityMonitorsImpl", + "package": "catalog", + "description": "unexported type that holds implementations of just QualityMonitors API methods", + "fields": { + "client": { + "name": "client", + "type": "*client.DatabricksClient", + "description": "", + "required": false + } + } + }, + "catalog.registeredModelsImpl": { + "name": "registeredModelsImpl", + "package": "catalog", + "description": "unexported type that holds implementations of just RegisteredModels API methods", + "fields": { + "client": { + "name": "client", + "type": "*client.DatabricksClient", + "description": "", + "required": false + } + } + }, + "catalog.resourceQuotasImpl": { + "name": "resourceQuotasImpl", + "package": "catalog", + "description": "unexported type that holds implementations of just ResourceQuotas API methods", + "fields": { + "client": { + "name": "client", + "type": "*client.DatabricksClient", + "description": "", + "required": false + } + } + }, + "catalog.rfaImpl": { + "name": "rfaImpl", + "package": "catalog", + "description": "unexported type that holds implementations of just Rfa API methods", + "fields": { + "client": { + "name": "client", + "type": "*client.DatabricksClient", + "description": "", + "required": false + } + } + }, + "catalog.schemasImpl": { + "name": 
"schemasImpl", + "package": "catalog", + "description": "unexported type that holds implementations of just Schemas API methods", + "fields": { + "client": { + "name": "client", + "type": "*client.DatabricksClient", + "description": "", + "required": false + } + } + }, + "catalog.storageCredentialsImpl": { + "name": "storageCredentialsImpl", + "package": "catalog", + "description": "unexported type that holds implementations of just StorageCredentials API methods", + "fields": { + "client": { + "name": "client", + "type": "*client.DatabricksClient", + "description": "", + "required": false + } + } + }, + "catalog.systemSchemasImpl": { + "name": "systemSchemasImpl", + "package": "catalog", + "description": "unexported type that holds implementations of just SystemSchemas API methods", + "fields": { + "client": { + "name": "client", + "type": "*client.DatabricksClient", + "description": "", + "required": false + } + } + }, + "catalog.tableConstraintsImpl": { + "name": "tableConstraintsImpl", + "package": "catalog", + "description": "unexported type that holds implementations of just TableConstraints API methods", + "fields": { + "client": { + "name": "client", + "type": "*client.DatabricksClient", + "description": "", + "required": false + } + } + }, + "catalog.tablesImpl": { + "name": "tablesImpl", + "package": "catalog", + "description": "unexported type that holds implementations of just Tables API methods", + "fields": { + "client": { + "name": "client", + "type": "*client.DatabricksClient", + "description": "", + "required": false + } + } + }, + "catalog.temporaryPathCredentialsImpl": { + "name": "temporaryPathCredentialsImpl", + "package": "catalog", + "description": "unexported type that holds implementations of just TemporaryPathCredentials API methods", + "fields": { + "client": { + "name": "client", + "type": "*client.DatabricksClient", + "description": "", + "required": false + } + } + }, + "catalog.temporaryTableCredentialsImpl": { + "name": "temporaryTableCredentialsImpl", + "package": "catalog", + "description": "unexported type that holds implementations of just TemporaryTableCredentials API methods", + "fields": { + "client": { + "name": "client", + "type": "*client.DatabricksClient", + "description": "", + "required": false + } + } + }, + "catalog.volumesImpl": { + "name": "volumesImpl", + "package": "catalog", + "description": "unexported type that holds implementations of just Volumes API methods", + "fields": { + "client": { + "name": "client", + "type": "*client.DatabricksClient", + "description": "", + "required": false + } + } + }, + "catalog.workspaceBindingsImpl": { + "name": "workspaceBindingsImpl", + "package": "catalog", + "description": "unexported type that holds implementations of just WorkspaceBindings API methods", + "fields": { + "client": { + "name": "client", + "type": "*client.DatabricksClient", + "description": "", + "required": false + } + } + }, + "compute.AddInstanceProfile": { + "name": "AddInstanceProfile", + "package": "compute", + "description": "", + "fields": { + "iam_role_arn": { + "name": "iam_role_arn", + "type": "string", + "description": "The AWS IAM role ARN of the role associated with the instance profile.\nThis field is required if your role name and instance profile name do not\nmatch and you want to use the instance profile with [Databricks SQL\nServerless].\n\nOtherwise, this field is optional.\n\n[Databricks SQL Serverless]: https://docs.databricks.com/sql/admin/serverless.html", + "required": false + }, + "instance_profile_arn": { + 
"name": "instance_profile_arn", + "type": "string", + "description": "The AWS ARN of the instance profile to register with Databricks. This\nfield is required.", + "required": false + }, + "is_meta_instance_profile": { + "name": "is_meta_instance_profile", + "type": "bool", + "description": "Boolean flag indicating whether the instance profile should only be used\nin credential passthrough scenarios. If true, it means the instance\nprofile contains an meta IAM role which could assume a wide range of\nroles. Therefore it should always be used with authorization. This field\nis optional, the default value is `false`.", + "required": false + }, + "skip_validation": { + "name": "skip_validation", + "type": "bool", + "description": "By default, Databricks validates that it has sufficient permissions to\nlaunch instances with the instance profile. This validation uses AWS\ndry-run mode for the RunInstances API. If validation fails with an error\nmessage that does not indicate an IAM related permission issue, (e.g.\n“Your requested instance type is not supported in your requested\navailability zone”), you can pass this flag to skip the validation and\nforcibly add the instance profile.", + "required": false + } + } + }, + "compute.Adlsgen2Info": { + "name": "Adlsgen2Info", + "package": "compute", + "description": "A storage location in Adls Gen2", + "fields": { + "destination": { + "name": "destination", + "type": "string", + "description": "abfss destination, e.g.\n`abfss://\u003ccontainer-name\u003e@\u003cstorage-account-name\u003e.dfs.core.windows.net/\u003cdirectory-name\u003e`.", + "required": false + } + } + }, + "compute.AutoScale": { + "name": "AutoScale", + "package": "compute", + "description": "", + "fields": { + "max_workers": { + "name": "max_workers", + "type": "int", + "description": "The maximum number of workers to which the cluster can scale up when\noverloaded. Note that `max_workers` must be strictly greater than\n`min_workers`.", + "required": false + }, + "min_workers": { + "name": "min_workers", + "type": "int", + "description": "The minimum number of workers to which the cluster can scale down when\nunderutilized. It is also the initial number of workers the cluster will\nhave after creation.", + "required": false + } + } + }, + "compute.AwsAttributes": { + "name": "AwsAttributes", + "package": "compute", + "description": "Attributes set during cluster creation which are related to Amazon Web Services.", + "fields": { + "availability": { + "name": "availability", + "type": "AwsAvailability", + "description": "Availability type used for all subsequent nodes past the `first_on_demand` ones.\n\nNote: If `first_on_demand` is zero, this availability type will be used for the entire cluster.", + "required": false + }, + "ebs_volume_count": { + "name": "ebs_volume_count", + "type": "int", + "description": "The number of volumes launched for each instance. Users can choose up to\n10 volumes. This feature is only enabled for supported node types. Legacy\nnode types cannot specify custom EBS volumes. For node types with no\ninstance store, at least one EBS volume needs to be specified; otherwise,\ncluster creation will fail.\n\nThese EBS volumes will be mounted at `/ebs0`, `/ebs1`, and etc. Instance\nstore volumes will be mounted at `/local_disk0`, `/local_disk1`, and etc.\n\nIf EBS volumes are attached, D...", + "required": false + }, + "ebs_volume_iops": { + "name": "ebs_volume_iops", + "type": "int", + "description": "If using gp3 volumes, what IOPS to use for the disk. 
If this is not set,\nthe maximum performance of a gp2 volume with the same volume size will be\nused.", + "required": false + }, + "ebs_volume_size": { + "name": "ebs_volume_size", + "type": "int", + "description": "The size of each EBS volume (in GiB) launched for each instance. For\ngeneral purpose SSD, this value must be within the range 100 - 4096. For\nthroughput optimized HDD, this value must be within the range 500 - 4096.", + "required": false + }, + "ebs_volume_throughput": { + "name": "ebs_volume_throughput", + "type": "int", + "description": "If using gp3 volumes, what throughput to use for the disk. If this is not\nset, the maximum performance of a gp2 volume with the same volume size\nwill be used.", + "required": false + }, + "ebs_volume_type": { + "name": "ebs_volume_type", + "type": "EbsVolumeType", + "description": "The type of EBS volumes that will be launched with this cluster.", + "required": false + }, + "first_on_demand": { + "name": "first_on_demand", + "type": "int", + "description": "The first `first_on_demand` nodes of the cluster will be placed on\non-demand instances. If this value is greater than 0, the cluster driver\nnode in particular will be placed on an on-demand instance. If this value\nis greater than or equal to the current cluster size, all nodes will be\nplaced on on-demand instances. If this value is less than the current\ncluster size, `first_on_demand` nodes will be placed on on-demand\ninstances and the remainder will be placed on `availability` instances.\nNot...", + "required": false + }, + "instance_profile_arn": { + "name": "instance_profile_arn", + "type": "string", + "description": "Nodes for this cluster will only be placed on AWS instances with this\ninstance profile. If ommitted, nodes will be placed on instances without\nan IAM instance profile. The instance profile must have previously been\nadded to the Databricks environment by an account administrator.\n\nThis feature may only be available to certain customer plans.", + "required": false + }, + "spot_bid_price_percent": { + "name": "spot_bid_price_percent", + "type": "int", + "description": "The bid price for AWS spot instances, as a percentage of the\ncorresponding instance type's on-demand price. For example, if this field\nis set to 50, and the cluster needs a new `r3.xlarge` spot instance, then\nthe bid price is half of the price of on-demand `r3.xlarge` instances.\nSimilarly, if this field is set to 200, the bid price is twice the price\nof on-demand `r3.xlarge` instances. If not specified, the default value\nis 100. When spot instances are requested for this cluster, only spot\nin...", + "required": false + }, + "zone_id": { + "name": "zone_id", + "type": "string", + "description": "Identifier for the availability zone/datacenter in which the cluster\nresides. This string will be of a form like \"us-west-2a\". The provided\navailability zone must be in the same region as the Databricks\ndeployment. For example, \"us-west-2a\" is not a valid zone id if the\nDatabricks deployment resides in the \"us-east-1\" region. This is an\noptional field at cluster creation, and if not specified, the zone \"auto\"\nwill be used. 
If the zone specified is \"auto\", will try to place cluster\nin a zone w...", + "required": false + } + } + }, + "compute.AwsAvailability": { + "name": "AwsAvailability", + "package": "compute", + "description": "Availability type used for all subsequent nodes past the `first_on_demand` ones.\n\nNote: If `first_on_demand` is zero, this availability type will be used for the entire cluster.", + "fields": {} + }, + "compute.AzureAttributes": { + "name": "AzureAttributes", + "package": "compute", + "description": "Attributes set during cluster creation which are related to Microsoft Azure.", + "fields": { + "availability": { + "name": "availability", + "type": "AzureAvailability", + "description": "Availability type used for all subsequent nodes past the\n`first_on_demand` ones. Note: If `first_on_demand` is zero, this\navailability type will be used for the entire cluster.", + "required": false + }, + "first_on_demand": { + "name": "first_on_demand", + "type": "int", + "description": "The first `first_on_demand` nodes of the cluster will be placed on\non-demand instances. This value should be greater than 0, to make sure\nthe cluster driver node is placed on an on-demand instance. If this value\nis greater than or equal to the current cluster size, all nodes will be\nplaced on on-demand instances. If this value is less than the current\ncluster size, `first_on_demand` nodes will be placed on on-demand\ninstances and the remainder will be placed on `availability` instances.\nNote ...", + "required": false + }, + "log_analytics_info": { + "name": "log_analytics_info", + "type": "*LogAnalyticsInfo", + "description": "Defines values necessary to configure and run Azure Log Analytics agent", + "required": false + }, + "spot_bid_max_price": { + "name": "spot_bid_max_price", + "type": "float64", + "description": "The max bid price to be used for Azure spot instances. The Max price for\nthe bid cannot be higher than the on-demand price of the instance. If not\nspecified, the default value is -1, which specifies that the instance\ncannot be evicted on the basis of price, and only on the basis of\navailability. 
Further, the value should \u003e 0 or -1.", + "required": false + } + } + }, + "compute.AzureAvailability": { + "name": "AzureAvailability", + "package": "compute", + "description": "Availability type used for all subsequent nodes past the `first_on_demand` ones.\nNote: If `first_on_demand` is zero, this availability type will be used for the entire cluster.", + "fields": {} + }, + "compute.CancelCommand": { + "name": "CancelCommand", + "package": "compute", + "description": "", + "fields": { + "clusterId": { + "name": "clusterId", + "type": "string", + "description": "", + "required": false + }, + "commandId": { + "name": "commandId", + "type": "string", + "description": "", + "required": false + }, + "contextId": { + "name": "contextId", + "type": "string", + "description": "", + "required": false + } + } + }, + "compute.ChangeClusterOwner": { + "name": "ChangeClusterOwner", + "package": "compute", + "description": "", + "fields": { + "cluster_id": { + "name": "cluster_id", + "type": "string", + "description": "", + "required": false + }, + "owner_username": { + "name": "owner_username", + "type": "string", + "description": "New owner of the cluster_id after this RPC.", + "required": false + } + } + }, + "compute.ClientsTypes": { + "name": "ClientsTypes", + "package": "compute", + "description": "", + "fields": { + "jobs": { + "name": "jobs", + "type": "bool", + "description": "With jobs set, the cluster can be used for jobs", + "required": false + }, + "notebooks": { + "name": "notebooks", + "type": "bool", + "description": "With notebooks set, this cluster can be used for notebooks", + "required": false + } + } + }, + "compute.CloneCluster": { + "name": "CloneCluster", + "package": "compute", + "description": "", + "fields": { + "source_cluster_id": { + "name": "source_cluster_id", + "type": "string", + "description": "The cluster that is being cloned.", + "required": false + } + } + }, + "compute.CloudProviderNodeInfo": { + "name": "CloudProviderNodeInfo", + "package": "compute", + "description": "", + "fields": { + "status": { + "name": "status", + "type": "[]CloudProviderNodeStatus", + "description": "Status as reported by the cloud provider", + "required": false + } + } + }, + "compute.ClusterAccessControlRequest": { + "name": "ClusterAccessControlRequest", + "package": "compute", + "description": "", + "fields": { + "group_name": { + "name": "group_name", + "type": "string", + "description": "name of the group", + "required": false + }, + "permission_level": { + "name": "permission_level", + "type": "ClusterPermissionLevel", + "description": "", + "required": false + }, + "service_principal_name": { + "name": "service_principal_name", + "type": "string", + "description": "application ID of a service principal", + "required": false + }, + "user_name": { + "name": "user_name", + "type": "string", + "description": "name of the user", + "required": false + } + } + }, + "compute.ClusterAccessControlResponse": { + "name": "ClusterAccessControlResponse", + "package": "compute", + "description": "", + "fields": { + "all_permissions": { + "name": "all_permissions", + "type": "[]ClusterPermission", + "description": "All permissions.", + "required": false + }, + "display_name": { + "name": "display_name", + "type": "string", + "description": "Display name of the user or service principal.", + "required": false + }, + "group_name": { + "name": "group_name", + "type": "string", + "description": "name of the group", + "required": false + }, + "service_principal_name": { + "name": 
"service_principal_name", + "type": "string", + "description": "Name of the service principal.", + "required": false + }, + "user_name": { + "name": "user_name", + "type": "string", + "description": "name of the user", + "required": false + } + } + }, + "compute.ClusterAttributes": { + "name": "ClusterAttributes", + "package": "compute", + "description": "Common set of attributes set during cluster creation. These attributes cannot\nbe changed over the lifetime of a cluster.", + "fields": { + "autotermination_minutes": { + "name": "autotermination_minutes", + "type": "int", + "description": "Automatically terminates the cluster after it is inactive for this time\nin minutes. If not set, this cluster will not be automatically\nterminated. If specified, the threshold must be between 10 and 10000\nminutes. Users can also set this value to 0 to explicitly disable\nautomatic termination.", + "required": false + }, + "aws_attributes": { + "name": "aws_attributes", + "type": "*AwsAttributes", + "description": "Attributes related to clusters running on Amazon Web Services. If not\nspecified at cluster creation, a set of default values will be used.", + "required": false + }, + "azure_attributes": { + "name": "azure_attributes", + "type": "*AzureAttributes", + "description": "Attributes related to clusters running on Microsoft Azure. If not\nspecified at cluster creation, a set of default values will be used.", + "required": false + }, + "cluster_log_conf": { + "name": "cluster_log_conf", + "type": "*ClusterLogConf", + "description": "The configuration for delivering spark logs to a long-term storage\ndestination. Three kinds of destinations (DBFS, S3 and Unity Catalog\nvolumes) are supported. Only one destination can be specified for one\ncluster. If the conf is given, the logs will be delivered to the\ndestination every `5 mins`. The destination of driver logs is\n`$destination/$clusterId/driver`, while the destination of executor logs\nis `$destination/$clusterId/executor`.", + "required": false + }, + "cluster_name": { + "name": "cluster_name", + "type": "string", + "description": "Cluster name requested by the user. This doesn't have to be unique. If\nnot specified at creation, the cluster name will be an empty string. For\njob clusters, the cluster name is automatically set based on the job and\njob run IDs.", + "required": false + }, + "custom_tags": { + "name": "custom_tags", + "type": "map[string]string", + "description": "Additional tags for cluster resources. Databricks will tag all cluster\nresources (e.g., AWS instances and EBS volumes) with these tags in\naddition to `default_tags`. Notes:\n\n- Currently, Databricks allows at most 45 custom tags\n\n- Clusters can only reuse cloud resources if the resources' tags are a\nsubset of the cluster tags", + "required": false + }, + "data_security_mode": { + "name": "data_security_mode", + "type": "DataSecurityMode", + "description": "", + "required": false + }, + "docker_image": { + "name": "docker_image", + "type": "*DockerImage", + "description": "Custom docker image BYOC", + "required": false + }, + "driver_instance_pool_id": { + "name": "driver_instance_pool_id", + "type": "string", + "description": "The optional ID of the instance pool for the driver of the cluster\nbelongs. The pool cluster uses the instance pool with id\n(instance_pool_id) if the driver pool is not assigned.", + "required": false + }, + "driver_node_type_id": { + "name": "driver_node_type_id", + "type": "string", + "description": "The node type of the Spark driver. 
Note that this field is optional; if\nunset, the driver node type will be set as the same value as\n`node_type_id` defined above.\n\nThis field, along with node_type_id, should not be set if\nvirtual_cluster_size is set. If both driver_node_type_id, node_type_id,\nand virtual_cluster_size are specified, driver_node_type_id and\nnode_type_id take precedence.", + "required": false + }, + "enable_elastic_disk": { + "name": "enable_elastic_disk", + "type": "bool", + "description": "Autoscaling Local Storage: when enabled, this cluster will dynamically\nacquire additional disk space when its Spark workers are running low on\ndisk space.", + "required": false + }, + "enable_local_disk_encryption": { + "name": "enable_local_disk_encryption", + "type": "bool", + "description": "Whether to enable LUKS on cluster VMs' local disks", + "required": false + }, + "gcp_attributes": { + "name": "gcp_attributes", + "type": "*GcpAttributes", + "description": "Attributes related to clusters running on Google Cloud Platform. If not\nspecified at cluster creation, a set of default values will be used.", + "required": false + }, + "init_scripts": { + "name": "init_scripts", + "type": "[]InitScriptInfo", + "description": "The configuration for storing init scripts. Any number of destinations\ncan be specified. The scripts are executed sequentially in the order\nprovided. If `cluster_log_conf` is specified, init script logs are sent\nto `\u003cdestination\u003e/\u003ccluster-ID\u003e/init_scripts`.", + "required": false + }, + "instance_pool_id": { + "name": "instance_pool_id", + "type": "string", + "description": "The optional ID of the instance pool to which the cluster belongs.", + "required": false + }, + "is_single_node": { + "name": "is_single_node", + "type": "bool", + "description": "This field can only be used when `kind = CLASSIC_PREVIEW`.\n\nWhen set to true, Databricks will automatically set single node related\n`custom_tags`, `spark_conf`, and `num_workers`", + "required": false + }, + "kind": { + "name": "kind", + "type": "Kind", + "description": "", + "required": false + }, + "node_type_id": { + "name": "node_type_id", + "type": "string", + "description": "This field encodes, through a single value, the resources available to\neach of the Spark nodes in this cluster. For example, the Spark nodes can\nbe provisioned and optimized for memory or compute intensive workloads. A\nlist of available node types can be retrieved by using the\n:method:clusters/listNodeTypes API call.", + "required": false + }, + "policy_id": { + "name": "policy_id", + "type": "string", + "description": "The ID of the cluster policy used to create the cluster if applicable.", + "required": false + }, + "remote_disk_throughput": { + "name": "remote_disk_throughput", + "type": "int", + "description": "If set, what the configurable throughput (in Mb/s) for the remote disk\nis. Currently only supported for GCP HYPERDISK_BALANCED disks.", + "required": false + }, + "runtime_engine": { + "name": "runtime_engine", + "type": "RuntimeEngine", + "description": "Determines the cluster's runtime engine, either standard or Photon.\n\nThis field is not compatible with legacy `spark_version` values that\ncontain `-photon-`. 
Remove `-photon-` from the `spark_version` and set\n`runtime_engine` to `PHOTON`.\n\nIf left unspecified, the runtime engine defaults to standard unless the\nspark_version contains -photon-, in which case Photon will be used.", + "required": false + }, + "single_user_name": { + "name": "single_user_name", + "type": "string", + "description": "Single user name if data_security_mode is `SINGLE_USER`", + "required": false + }, + "spark_conf": { + "name": "spark_conf", + "type": "map[string]string", + "description": "An object containing a set of optional, user-specified Spark\nconfiguration key-value pairs. Users can also pass in a string of extra\nJVM options to the driver and the executors via\n`spark.driver.extraJavaOptions` and `spark.executor.extraJavaOptions`\nrespectively.", + "required": false + }, + "spark_env_vars": { + "name": "spark_env_vars", + "type": "map[string]string", + "description": "An object containing a set of optional, user-specified environment\nvariable key-value pairs. Please note that key-value pair of the form\n(X,Y) will be exported as is (i.e., `export X='Y'`) while launching the\ndriver and workers.\n\nIn order to specify an additional set of `SPARK_DAEMON_JAVA_OPTS`, we\nrecommend appending them to `$SPARK_DAEMON_JAVA_OPTS` as shown in the\nexample below. This ensures that all default databricks managed\nenvironmental variables are included as well.\n\nExample Spark en...", + "required": false + }, + "spark_version": { + "name": "spark_version", + "type": "string", + "description": "The Spark version of the cluster, e.g. `3.3.x-scala2.11`. A list of\navailable Spark versions can be retrieved by using the\n:method:clusters/sparkVersions API call.", + "required": false + }, + "ssh_public_keys": { + "name": "ssh_public_keys", + "type": "[]string", + "description": "SSH public key contents that will be added to each Spark node in this\ncluster. The corresponding private keys can be used to login with the\nuser name `ubuntu` on port `2200`. Up to 10 keys can be specified.", + "required": false + }, + "total_initial_remote_disk_size": { + "name": "total_initial_remote_disk_size", + "type": "int", + "description": "If set, what the total initial volume size (in GB) of the remote disks\nshould be. Currently only supported for GCP HYPERDISK_BALANCED disks.", + "required": false + }, + "use_ml_runtime": { + "name": "use_ml_runtime", + "type": "bool", + "description": "This field can only be used when `kind = CLASSIC_PREVIEW`.\n\n`effective_spark_version` is determined by `spark_version` (DBR release),\nthis field `use_ml_runtime`, and whether `node_type_id` is gpu node or\nnot.", + "required": false + }, + "workload_type": { + "name": "workload_type", + "type": "*WorkloadType", + "description": "", + "required": false + } + } + }, + "compute.ClusterCompliance": { + "name": "ClusterCompliance", + "package": "compute", + "description": "", + "fields": { + "cluster_id": { + "name": "cluster_id", + "type": "string", + "description": "Canonical unique identifier for a cluster.", + "required": false + }, + "is_compliant": { + "name": "is_compliant", + "type": "bool", + "description": "Whether this cluster is in compliance with the latest version of its\npolicy.", + "required": false + }, + "violations": { + "name": "violations", + "type": "map[string]string", + "description": "An object containing key-value mappings representing the first 200 policy\nvalidation errors. The keys indicate the path where the policy validation\nerror is occurring. 
The values indicate an error message describing the\npolicy validation error.", + "required": false + } + } + }, + "compute.ClusterDetails": { + "name": "ClusterDetails", + "package": "compute", + "description": "Describes all of the metadata about a single Spark cluster in Databricks.", + "fields": { + "autoscale": { + "name": "autoscale", + "type": "*AutoScale", + "description": "Parameters needed in order to automatically scale clusters up and down\nbased on load. Note: autoscaling works best with DB runtime versions 3.0\nor later.", + "required": false + }, + "autotermination_minutes": { + "name": "autotermination_minutes", + "type": "int", + "description": "Automatically terminates the cluster after it is inactive for this time\nin minutes. If not set, this cluster will not be automatically\nterminated. If specified, the threshold must be between 10 and 10000\nminutes. Users can also set this value to 0 to explicitly disable\nautomatic termination.", + "required": false + }, + "aws_attributes": { + "name": "aws_attributes", + "type": "*AwsAttributes", + "description": "Attributes related to clusters running on Amazon Web Services. If not\nspecified at cluster creation, a set of default values will be used.", + "required": false + }, + "azure_attributes": { + "name": "azure_attributes", + "type": "*AzureAttributes", + "description": "Attributes related to clusters running on Microsoft Azure. If not\nspecified at cluster creation, a set of default values will be used.", + "required": false + }, + "cluster_cores": { + "name": "cluster_cores", + "type": "float64", + "description": "Number of CPU cores available for this cluster. Note that this can be\nfractional, e.g. 7.5 cores, since certain node types are configured to\nshare cores between Spark nodes on the same instance.", + "required": false + }, + "cluster_id": { + "name": "cluster_id", + "type": "string", + "description": "Canonical identifier for the cluster. This id is retained during cluster\nrestarts and resizes, while each new cluster has a globally unique id.", + "required": false + }, + "cluster_log_conf": { + "name": "cluster_log_conf", + "type": "*ClusterLogConf", + "description": "The configuration for delivering spark logs to a long-term storage\ndestination. Three kinds of destinations (DBFS, S3 and Unity Catalog\nvolumes) are supported. Only one destination can be specified for one\ncluster. If the conf is given, the logs will be delivered to the\ndestination every `5 mins`. The destination of driver logs is\n`$destination/$clusterId/driver`, while the destination of executor logs\nis `$destination/$clusterId/executor`.", + "required": false + }, + "cluster_log_status": { + "name": "cluster_log_status", + "type": "*LogSyncStatus", + "description": "Cluster log delivery status.", + "required": false + }, + "cluster_memory_mb": { + "name": "cluster_memory_mb", + "type": "int64", + "description": "Total amount of cluster memory, in megabytes", + "required": false + }, + "cluster_name": { + "name": "cluster_name", + "type": "string", + "description": "Cluster name requested by the user. This doesn't have to be unique. If\nnot specified at creation, the cluster name will be an empty string. 
For\njob clusters, the cluster name is automatically set based on the job and\njob run IDs.", + "required": false + }, + "cluster_source": { + "name": "cluster_source", + "type": "ClusterSource", + "description": "Determines whether the cluster was created by a user through the UI,\ncreated by the Databricks Jobs Scheduler, or through an API request.", + "required": false + }, + "creator_user_name": { + "name": "creator_user_name", + "type": "string", + "description": "Creator user name. The field won't be included in the response if the\nuser has already been deleted.", + "required": false + }, + "custom_tags": { + "name": "custom_tags", + "type": "map[string]string", + "description": "Additional tags for cluster resources. Databricks will tag all cluster\nresources (e.g., AWS instances and EBS volumes) with these tags in\naddition to `default_tags`. Notes:\n\n- Currently, Databricks allows at most 45 custom tags\n\n- Clusters can only reuse cloud resources if the resources' tags are a\nsubset of the cluster tags", + "required": false + }, + "data_security_mode": { + "name": "data_security_mode", + "type": "DataSecurityMode", + "description": "", + "required": false + }, + "default_tags": { + "name": "default_tags", + "type": "map[string]string", + "description": "Tags that are added by Databricks regardless of any `custom_tags`,\nincluding:\n\n- Vendor: Databricks\n\n- Creator: \u003cusername_of_creator\u003e\n\n- ClusterName: \u003cname_of_cluster\u003e\n\n- ClusterId: \u003cid_of_cluster\u003e\n\n- Name: \u003cDatabricks internal use\u003e", + "required": false + }, + "docker_image": { + "name": "docker_image", + "type": "*DockerImage", + "description": "Custom docker image BYOC", + "required": false + }, + "driver": { + "name": "driver", + "type": "*SparkNode", + "description": "Node on which the Spark driver resides. The driver node contains the\nSpark master and the Databricks application that manages the per-notebook\nSpark REPLs.", + "required": false + }, + "driver_instance_pool_id": { + "name": "driver_instance_pool_id", + "type": "string", + "description": "The optional ID of the instance pool for the driver of the cluster\nbelongs. The pool cluster uses the instance pool with id\n(instance_pool_id) if the driver pool is not assigned.", + "required": false + }, + "driver_node_type_id": { + "name": "driver_node_type_id", + "type": "string", + "description": "The node type of the Spark driver. Note that this field is optional; if\nunset, the driver node type will be set as the same value as\n`node_type_id` defined above.\n\nThis field, along with node_type_id, should not be set if\nvirtual_cluster_size is set. 
If both driver_node_type_id, node_type_id,\nand virtual_cluster_size are specified, driver_node_type_id and\nnode_type_id take precedence.", + "required": false + }, + "enable_elastic_disk": { + "name": "enable_elastic_disk", + "type": "bool", + "description": "Autoscaling Local Storage: when enabled, this cluster will dynamically\nacquire additional disk space when its Spark workers are running low on\ndisk space.", + "required": false + }, + "enable_local_disk_encryption": { + "name": "enable_local_disk_encryption", + "type": "bool", + "description": "Whether to enable LUKS on cluster VMs' local disks", + "required": false + }, + "executors": { + "name": "executors", + "type": "[]SparkNode", + "description": "Nodes on which the Spark executors reside.", + "required": false + }, + "gcp_attributes": { + "name": "gcp_attributes", + "type": "*GcpAttributes", + "description": "Attributes related to clusters running on Google Cloud Platform. If not\nspecified at cluster creation, a set of default values will be used.", + "required": false + }, + "init_scripts": { + "name": "init_scripts", + "type": "[]InitScriptInfo", + "description": "The configuration for storing init scripts. Any number of destinations\ncan be specified. The scripts are executed sequentially in the order\nprovided. If `cluster_log_conf` is specified, init script logs are sent\nto `\u003cdestination\u003e/\u003ccluster-ID\u003e/init_scripts`.", + "required": false + }, + "instance_pool_id": { + "name": "instance_pool_id", + "type": "string", + "description": "The optional ID of the instance pool to which the cluster belongs.", + "required": false + }, + "is_single_node": { + "name": "is_single_node", + "type": "bool", + "description": "This field can only be used when `kind = CLASSIC_PREVIEW`.\n\nWhen set to true, Databricks will automatically set single node related\n`custom_tags`, `spark_conf`, and `num_workers`", + "required": false + }, + "jdbc_port": { + "name": "jdbc_port", + "type": "int", + "description": "Port on which Spark JDBC server is listening, in the driver node. No\nservice will be listening on this port in executor nodes.", + "required": false + }, + "kind": { + "name": "kind", + "type": "Kind", + "description": "", + "required": false + }, + "last_restarted_time": { + "name": "last_restarted_time", + "type": "int64", + "description": "the timestamp that the cluster was started/restarted", + "required": false + }, + "last_state_loss_time": { + "name": "last_state_loss_time", + "type": "int64", + "description": "Time when the cluster driver last lost its state (due to a restart or\ndriver failure).", + "required": false + }, + "node_type_id": { + "name": "node_type_id", + "type": "string", + "description": "This field encodes, through a single value, the resources available to\neach of the Spark nodes in this cluster. For example, the Spark nodes can\nbe provisioned and optimized for memory or compute intensive workloads. A\nlist of available node types can be retrieved by using the\n:method:clusters/listNodeTypes API call.", + "required": false + }, + "num_workers": { + "name": "num_workers", + "type": "int", + "description": "Number of worker nodes that this cluster should have. A cluster has one\nSpark Driver and `num_workers` Executors for a total of `num_workers` + 1\nSpark nodes.\n\nNote: When reading the properties of a cluster, this field reflects the\ndesired number of workers rather than the actual current number of\nworkers. 
For instance, if a cluster is resized from 5 to 10 workers, this\nfield will immediately be updated to reflect the target size of 10\nworkers, whereas the workers listed in `spark_info` will ...", + "required": false + }, + "policy_id": { + "name": "policy_id", + "type": "string", + "description": "The ID of the cluster policy used to create the cluster if applicable.", + "required": false + }, + "remote_disk_throughput": { + "name": "remote_disk_throughput", + "type": "int", + "description": "If set, what the configurable throughput (in Mb/s) for the remote disk\nis. Currently only supported for GCP HYPERDISK_BALANCED disks.", + "required": false + }, + "runtime_engine": { + "name": "runtime_engine", + "type": "RuntimeEngine", + "description": "Determines the cluster's runtime engine, either standard or Photon.\n\nThis field is not compatible with legacy `spark_version` values that\ncontain `-photon-`. Remove `-photon-` from the `spark_version` and set\n`runtime_engine` to `PHOTON`.\n\nIf left unspecified, the runtime engine defaults to standard unless the\nspark_version contains -photon-, in which case Photon will be used.", + "required": false + }, + "single_user_name": { + "name": "single_user_name", + "type": "string", + "description": "Single user name if data_security_mode is `SINGLE_USER`", + "required": false + }, + "spark_conf": { + "name": "spark_conf", + "type": "map[string]string", + "description": "An object containing a set of optional, user-specified Spark\nconfiguration key-value pairs. Users can also pass in a string of extra\nJVM options to the driver and the executors via\n`spark.driver.extraJavaOptions` and `spark.executor.extraJavaOptions`\nrespectively.", + "required": false + }, + "spark_context_id": { + "name": "spark_context_id", + "type": "int64", + "description": "A canonical SparkContext identifier. This value *does* change when the\nSpark driver restarts. The pair `(cluster_id, spark_context_id)` is a\nglobally unique identifier over all Spark contexts.", + "required": false + }, + "spark_env_vars": { + "name": "spark_env_vars", + "type": "map[string]string", + "description": "An object containing a set of optional, user-specified environment\nvariable key-value pairs. Please note that key-value pair of the form\n(X,Y) will be exported as is (i.e., `export X='Y'`) while launching the\ndriver and workers.\n\nIn order to specify an additional set of `SPARK_DAEMON_JAVA_OPTS`, we\nrecommend appending them to `$SPARK_DAEMON_JAVA_OPTS` as shown in the\nexample below. This ensures that all default databricks managed\nenvironmental variables are included as well.\n\nExample Spark en...", + "required": false + }, + "spark_version": { + "name": "spark_version", + "type": "string", + "description": "The Spark version of the cluster, e.g. `3.3.x-scala2.11`. A list of\navailable Spark versions can be retrieved by using the\n:method:clusters/sparkVersions API call.", + "required": false + }, + "spec": { + "name": "spec", + "type": "*ClusterSpec", + "description": "The spec contains a snapshot of the latest user specified settings that\nwere used to create/edit the cluster. Note: not included in the response\nof the ListClusters API.", + "required": false + }, + "ssh_public_keys": { + "name": "ssh_public_keys", + "type": "[]string", + "description": "SSH public key contents that will be added to each Spark node in this\ncluster. The corresponding private keys can be used to login with the\nuser name `ubuntu` on port `2200`. 
Up to 10 keys can be specified.", + "required": false + }, + "start_time": { + "name": "start_time", + "type": "int64", + "description": "Time (in epoch milliseconds) when the cluster creation request was\nreceived (when the cluster entered a `PENDING` state).", + "required": false + }, + "state": { + "name": "state", + "type": "State", + "description": "Current state of the cluster.", + "required": false + }, + "state_message": { + "name": "state_message", + "type": "string", + "description": "A message associated with the most recent state transition (e.g., the\nreason why the cluster entered a `TERMINATED` state).", + "required": false + }, + "terminated_time": { + "name": "terminated_time", + "type": "int64", + "description": "Time (in epoch milliseconds) when the cluster was terminated, if\napplicable.", + "required": false + }, + "termination_reason": { + "name": "termination_reason", + "type": "*TerminationReason", + "description": "Information about why the cluster was terminated. This field only appears\nwhen the cluster is in a `TERMINATING` or `TERMINATED` state.", + "required": false + }, + "total_initial_remote_disk_size": { + "name": "total_initial_remote_disk_size", + "type": "int", + "description": "If set, what the total initial volume size (in GB) of the remote disks\nshould be. Currently only supported for GCP HYPERDISK_BALANCED disks.", + "required": false + }, + "use_ml_runtime": { + "name": "use_ml_runtime", + "type": "bool", + "description": "This field can only be used when `kind = CLASSIC_PREVIEW`.\n\n`effective_spark_version` is determined by `spark_version` (DBR release),\nthis field `use_ml_runtime`, and whether `node_type_id` is gpu node or\nnot.", + "required": false + }, + "workload_type": { + "name": "workload_type", + "type": "*WorkloadType", + "description": "", + "required": false + } + } + }, + "compute.ClusterEvent": { + "name": "ClusterEvent", + "package": "compute", + "description": "", + "fields": { + "cluster_id": { + "name": "cluster_id", + "type": "string", + "description": "", + "required": false + }, + "data_plane_event_details": { + "name": "data_plane_event_details", + "type": "*DataPlaneEventDetails", + "description": "", + "required": false + }, + "details": { + "name": "details", + "type": "*EventDetails", + "description": "", + "required": false + }, + "timestamp": { + "name": "timestamp", + "type": "int64", + "description": "The timestamp when the event occurred, stored as the number of\nmilliseconds since the Unix epoch. If not provided, this will be assigned\nby the Timeline service.", + "required": false + }, + "type": { + "name": "type", + "type": "EventType", + "description": "", + "required": false + } + } + }, + "compute.ClusterLibraryStatuses": { + "name": "ClusterLibraryStatuses", + "package": "compute", + "description": "", + "fields": { + "cluster_id": { + "name": "cluster_id", + "type": "string", + "description": "Unique identifier for the cluster.", + "required": false + }, + "library_statuses": { + "name": "library_statuses", + "type": "[]LibraryFullStatus", + "description": "Status of all libraries on the cluster.", + "required": false + } + } + }, + "compute.ClusterLogConf": { + "name": "ClusterLogConf", + "package": "compute", + "description": "Cluster log delivery config", + "fields": { + "dbfs": { + "name": "dbfs", + "type": "*DbfsStorageInfo", + "description": "destination needs to be provided. e.g. 
`{ \"dbfs\" : { \"destination\" :\n\"dbfs:/home/cluster_log\" } }`", + "required": false + }, + "s3": { + "name": "s3", + "type": "*S3StorageInfo", + "description": "destination and either the region or endpoint need to be provided. e.g.\n`{ \"s3\": { \"destination\" : \"s3://cluster_log_bucket/prefix\", \"region\" :\n\"us-west-2\" } }` Cluster iam role is used to access s3, please make sure\nthe cluster iam role in `instance_profile_arn` has permission to write\ndata to the s3 destination.", + "required": false + }, + "volumes": { + "name": "volumes", + "type": "*VolumesStorageInfo", + "description": "destination needs to be provided, e.g. `{ \"volumes\": { \"destination\":\n\"/Volumes/catalog/schema/volume/cluster_log\" } }`", + "required": false + } + } + }, + "compute.ClusterPermission": { + "name": "ClusterPermission", + "package": "compute", + "description": "", + "fields": { + "inherited": { + "name": "inherited", + "type": "bool", + "description": "", + "required": false + }, + "inherited_from_object": { + "name": "inherited_from_object", + "type": "[]string", + "description": "", + "required": false + }, + "permission_level": { + "name": "permission_level", + "type": "ClusterPermissionLevel", + "description": "", + "required": false + } + } + }, + "compute.ClusterPermissions": { + "name": "ClusterPermissions", + "package": "compute", + "description": "", + "fields": { + "access_control_list": { + "name": "access_control_list", + "type": "[]ClusterAccessControlResponse", + "description": "", + "required": false + }, + "object_id": { + "name": "object_id", + "type": "string", + "description": "", + "required": false + }, + "object_type": { + "name": "object_type", + "type": "string", + "description": "", + "required": false + } + } + }, + "compute.ClusterPermissionsDescription": { + "name": "ClusterPermissionsDescription", + "package": "compute", + "description": "", + "fields": { + "description": { + "name": "description", + "type": "string", + "description": "", + "required": false + }, + "permission_level": { + "name": "permission_level", + "type": "ClusterPermissionLevel", + "description": "", + "required": false + } + } + }, + "compute.ClusterPermissionsRequest": { + "name": "ClusterPermissionsRequest", + "package": "compute", + "description": "", + "fields": { + "access_control_list": { + "name": "access_control_list", + "type": "[]ClusterAccessControlRequest", + "description": "", + "required": false + } + } + }, + "compute.ClusterPolicyAccessControlRequest": { + "name": "ClusterPolicyAccessControlRequest", + "package": "compute", + "description": "", + "fields": { + "group_name": { + "name": "group_name", + "type": "string", + "description": "name of the group", + "required": false + }, + "permission_level": { + "name": "permission_level", + "type": "ClusterPolicyPermissionLevel", + "description": "", + "required": false + }, + "service_principal_name": { + "name": "service_principal_name", + "type": "string", + "description": "application ID of a service principal", + "required": false + }, + "user_name": { + "name": "user_name", + "type": "string", + "description": "name of the user", + "required": false + } + } + }, + "compute.ClusterPolicyAccessControlResponse": { + "name": "ClusterPolicyAccessControlResponse", + "package": "compute", + "description": "", + "fields": { + "all_permissions": { + "name": "all_permissions", + "type": "[]ClusterPolicyPermission", + "description": "All permissions.", + "required": false + }, + "display_name": { + "name": "display_name", + 
"type": "string", + "description": "Display name of the user or service principal.", + "required": false + }, + "group_name": { + "name": "group_name", + "type": "string", + "description": "name of the group", + "required": false + }, + "service_principal_name": { + "name": "service_principal_name", + "type": "string", + "description": "Name of the service principal.", + "required": false + }, + "user_name": { + "name": "user_name", + "type": "string", + "description": "name of the user", + "required": false + } + } + }, + "compute.ClusterPolicyPermission": { + "name": "ClusterPolicyPermission", + "package": "compute", + "description": "", + "fields": { + "inherited": { + "name": "inherited", + "type": "bool", + "description": "", + "required": false + }, + "inherited_from_object": { + "name": "inherited_from_object", + "type": "[]string", + "description": "", + "required": false + }, + "permission_level": { + "name": "permission_level", + "type": "ClusterPolicyPermissionLevel", + "description": "", + "required": false + } + } + }, + "compute.ClusterPolicyPermissions": { + "name": "ClusterPolicyPermissions", + "package": "compute", + "description": "", + "fields": { + "access_control_list": { + "name": "access_control_list", + "type": "[]ClusterPolicyAccessControlResponse", + "description": "", + "required": false + }, + "object_id": { + "name": "object_id", + "type": "string", + "description": "", + "required": false + }, + "object_type": { + "name": "object_type", + "type": "string", + "description": "", + "required": false + } + } + }, + "compute.ClusterPolicyPermissionsDescription": { + "name": "ClusterPolicyPermissionsDescription", + "package": "compute", + "description": "", + "fields": { + "description": { + "name": "description", + "type": "string", + "description": "", + "required": false + }, + "permission_level": { + "name": "permission_level", + "type": "ClusterPolicyPermissionLevel", + "description": "", + "required": false + } + } + }, + "compute.ClusterPolicyPermissionsRequest": { + "name": "ClusterPolicyPermissionsRequest", + "package": "compute", + "description": "", + "fields": { + "access_control_list": { + "name": "access_control_list", + "type": "[]ClusterPolicyAccessControlRequest", + "description": "", + "required": false + } + } + }, + "compute.ClusterSettingsChange": { + "name": "ClusterSettingsChange", + "package": "compute", + "description": "Represents a change to the cluster settings required for the cluster to\nbecome compliant with its policy.", + "fields": { + "field": { + "name": "field", + "type": "string", + "description": "The field where this change would be made.", + "required": false + }, + "new_value": { + "name": "new_value", + "type": "string", + "description": "The new value of this field after enforcing policy compliance (either a\nnumber, a boolean, or a string) converted to a string. This is intended\nto be read by a human. The typed new value of this field can be retrieved\nby reading the settings field in the API response.", + "required": false + }, + "previous_value": { + "name": "previous_value", + "type": "string", + "description": "The previous value of this field before enforcing policy compliance\n(either a number, a boolean, or a string) converted to a string. This is\nintended to be read by a human. 
The type of the field can be retrieved by\nreading the settings field in the API response.", + "required": false + } + } + }, + "compute.ClusterSize": { + "name": "ClusterSize", + "package": "compute", + "description": "", + "fields": { + "autoscale": { + "name": "autoscale", + "type": "*AutoScale", + "description": "Parameters needed in order to automatically scale clusters up and down\nbased on load. Note: autoscaling works best with DB runtime versions 3.0\nor later.", + "required": false + }, + "num_workers": { + "name": "num_workers", + "type": "int", + "description": "Number of worker nodes that this cluster should have. A cluster has one\nSpark Driver and `num_workers` Executors for a total of `num_workers` + 1\nSpark nodes.\n\nNote: When reading the properties of a cluster, this field reflects the\ndesired number of workers rather than the actual current number of\nworkers. For instance, if a cluster is resized from 5 to 10 workers, this\nfield will immediately be updated to reflect the target size of 10\nworkers, whereas the workers listed in `spark_info` will ...", + "required": false + } + } + }, + "compute.ClusterSpec": { + "name": "ClusterSpec", + "package": "compute", + "description": "Contains a snapshot of the latest user specified settings that were used to create/edit the cluster.", + "fields": { + "apply_policy_default_values": { + "name": "apply_policy_default_values", + "type": "bool", + "description": "When set to true, fixed and default values from the policy will be used\nfor fields that are omitted. When set to false, only fixed values from\nthe policy will be applied.", + "required": false + }, + "autoscale": { + "name": "autoscale", + "type": "*AutoScale", + "description": "Parameters needed in order to automatically scale clusters up and down\nbased on load. Note: autoscaling works best with DB runtime versions 3.0\nor later.", + "required": false + }, + "autotermination_minutes": { + "name": "autotermination_minutes", + "type": "int", + "description": "Automatically terminates the cluster after it is inactive for this time\nin minutes. If not set, this cluster will not be automatically\nterminated. If specified, the threshold must be between 10 and 10000\nminutes. Users can also set this value to 0 to explicitly disable\nautomatic termination.", + "required": false + }, + "aws_attributes": { + "name": "aws_attributes", + "type": "*AwsAttributes", + "description": "Attributes related to clusters running on Amazon Web Services. If not\nspecified at cluster creation, a set of default values will be used.", + "required": false + }, + "azure_attributes": { + "name": "azure_attributes", + "type": "*AzureAttributes", + "description": "Attributes related to clusters running on Microsoft Azure. If not\nspecified at cluster creation, a set of default values will be used.", + "required": false + }, + "cluster_log_conf": { + "name": "cluster_log_conf", + "type": "*ClusterLogConf", + "description": "The configuration for delivering spark logs to a long-term storage\ndestination. Three kinds of destinations (DBFS, S3 and Unity Catalog\nvolumes) are supported. Only one destination can be specified for one\ncluster. If the conf is given, the logs will be delivered to the\ndestination every `5 mins`. 
The destination of driver logs is\n`$destination/$clusterId/driver`, while the destination of executor logs\nis `$destination/$clusterId/executor`.", + "required": false + }, + "cluster_name": { + "name": "cluster_name", + "type": "string", + "description": "Cluster name requested by the user. This doesn't have to be unique. If\nnot specified at creation, the cluster name will be an empty string. For\njob clusters, the cluster name is automatically set based on the job and\njob run IDs.", + "required": false + }, + "custom_tags": { + "name": "custom_tags", + "type": "map[string]string", + "description": "Additional tags for cluster resources. Databricks will tag all cluster\nresources (e.g., AWS instances and EBS volumes) with these tags in\naddition to `default_tags`. Notes:\n\n- Currently, Databricks allows at most 45 custom tags\n\n- Clusters can only reuse cloud resources if the resources' tags are a\nsubset of the cluster tags", + "required": false + }, + "data_security_mode": { + "name": "data_security_mode", + "type": "DataSecurityMode", + "description": "Data security mode decides what data governance model to use when accessing data\nfrom a cluster.\n\nThe following modes can only be used when `kind = CLASSIC_PREVIEW`.\n* `DATA_SECURITY_MODE_AUTO`: Databricks will choose the most appropriate access mode depending on your compute configuration.\n* `DATA_SECURITY_MODE_STANDARD`: Alias for `USER_ISOLATION`.\n* `DATA_SECURITY_MODE_DEDICATED`: Alias for `SINGLE_USER`.\n\nThe following modes can be used regardless of `kind`.\n* `NONE`: No security isolation for multiple users sharing the cluster. Data governance features are not available in this mode.\n* `SINGLE_USER`: A secure cluster that can only be exclusively used by a single user specified in `single_user_name`. Most programming languages, cluster features and data governance features are available in this mode.\n* `USER_ISOLATION`: A secure cluster that can be shared by multiple users. Cluster users are fully isolated so that they cannot see each other's data and credentials. Most data governance features are supported in this mode. But programming languages and cluster features might be limited.\n\nThe following modes are deprecated starting with Databricks Runtime 15.0 and\nwill be removed for future Databricks Runtime versions:\n\n* `LEGACY_TABLE_ACL`: This mode is for users migrating from legacy Table ACL clusters.\n* `LEGACY_PASSTHROUGH`: This mode is for users migrating from legacy Passthrough on high concurrency clusters.\n* `LEGACY_SINGLE_USER`: This mode is for users migrating from legacy Passthrough on standard clusters.\n* `LEGACY_SINGLE_USER_STANDARD`: This mode provides a way that doesn’t have UC nor passthrough enabled.", + "required": false + }, + "docker_image": { + "name": "docker_image", + "type": "*DockerImage", + "description": "Custom docker image BYOC", + "required": false + }, + "driver_instance_pool_id": { + "name": "driver_instance_pool_id", + "type": "string", + "description": "The optional ID of the instance pool for the driver of the cluster\nbelongs. The pool cluster uses the instance pool with id\n(instance_pool_id) if the driver pool is not assigned.", + "required": false + }, + "driver_node_type_id": { + "name": "driver_node_type_id", + "type": "string", + "description": "The node type of the Spark driver. 
Note that this field is optional; if\nunset, the driver node type will be set as the same value as\n`node_type_id` defined above.\n\nThis field, along with node_type_id, should not be set if\nvirtual_cluster_size is set. If both driver_node_type_id, node_type_id,\nand virtual_cluster_size are specified, driver_node_type_id and\nnode_type_id take precedence.", + "required": false + }, + "enable_elastic_disk": { + "name": "enable_elastic_disk", + "type": "bool", + "description": "Autoscaling Local Storage: when enabled, this cluster will dynamically\nacquire additional disk space when its Spark workers are running low on\ndisk space.", + "required": false + }, + "enable_local_disk_encryption": { + "name": "enable_local_disk_encryption", + "type": "bool", + "description": "Whether to enable LUKS on cluster VMs' local disks", + "required": false + }, + "gcp_attributes": { + "name": "gcp_attributes", + "type": "*GcpAttributes", + "description": "Attributes related to clusters running on Google Cloud Platform. If not\nspecified at cluster creation, a set of default values will be used.", + "required": false + }, + "init_scripts": { + "name": "init_scripts", + "type": "[]InitScriptInfo", + "description": "The configuration for storing init scripts. Any number of destinations\ncan be specified. The scripts are executed sequentially in the order\nprovided. If `cluster_log_conf` is specified, init script logs are sent\nto `\u003cdestination\u003e/\u003ccluster-ID\u003e/init_scripts`.", + "required": false + }, + "instance_pool_id": { + "name": "instance_pool_id", + "type": "string", + "description": "The optional ID of the instance pool to which the cluster belongs.", + "required": false + }, + "is_single_node": { + "name": "is_single_node", + "type": "bool", + "description": "This field can only be used when `kind = CLASSIC_PREVIEW`.\n\nWhen set to true, Databricks will automatically set single node related\n`custom_tags`, `spark_conf`, and `num_workers`", + "required": false + }, + "kind": { + "name": "kind", + "type": "Kind", + "description": "The kind of compute described by this compute specification.\n\nDepending on `kind`, different validations and default values will be applied.\n\nClusters with `kind = CLASSIC_PREVIEW` support the following fields, whereas clusters with no specified `kind` do not.\n* [is_single_node](/api/workspace/clusters/create#is_single_node)\n* [use_ml_runtime](/api/workspace/clusters/create#use_ml_runtime)\n* [data_security_mode](/api/workspace/clusters/create#data_security_mode) set to `DATA_SECURITY_MODE_AUTO`, `DATA_SECURITY_MODE_DEDICATED`, or `DATA_SECURITY_MODE_STANDARD`\n\nBy using the [simple form](https://docs.databricks.com/compute/simple-form.html), your clusters are automatically using `kind = CLASSIC_PREVIEW`.", + "required": false + }, + "node_type_id": { + "name": "node_type_id", + "type": "string", + "description": "This field encodes, through a single value, the resources available to\neach of the Spark nodes in this cluster. For example, the Spark nodes can\nbe provisioned and optimized for memory or compute intensive workloads. A\nlist of available node types can be retrieved by using the\n:method:clusters/listNodeTypes API call.", + "required": false + }, + "num_workers": { + "name": "num_workers", + "type": "int", + "description": "Number of worker nodes that this cluster should have. 
A cluster has one\nSpark Driver and `num_workers` Executors for a total of `num_workers` + 1\nSpark nodes.\n\nNote: When reading the properties of a cluster, this field reflects the\ndesired number of workers rather than the actual current number of\nworkers. For instance, if a cluster is resized from 5 to 10 workers, this\nfield will immediately be updated to reflect the target size of 10\nworkers, whereas the workers listed in `spark_info` will ...", + "required": false + }, + "policy_id": { + "name": "policy_id", + "type": "string", + "description": "The ID of the cluster policy used to create the cluster if applicable.", + "required": false + }, + "remote_disk_throughput": { + "name": "remote_disk_throughput", + "type": "int", + "description": "If set, what the configurable throughput (in Mb/s) for the remote disk\nis. Currently only supported for GCP HYPERDISK_BALANCED disks.", + "required": false + }, + "runtime_engine": { + "name": "runtime_engine", + "type": "RuntimeEngine", + "description": "Determines the cluster's runtime engine, either standard or Photon.\n\nThis field is not compatible with legacy `spark_version` values that\ncontain `-photon-`. Remove `-photon-` from the `spark_version` and set\n`runtime_engine` to `PHOTON`.\n\nIf left unspecified, the runtime engine defaults to standard unless the\nspark_version contains -photon-, in which case Photon will be used.", + "required": false + }, + "single_user_name": { + "name": "single_user_name", + "type": "string", + "description": "Single user name if data_security_mode is `SINGLE_USER`", + "required": false + }, + "spark_conf": { + "name": "spark_conf", + "type": "map[string]string", + "description": "An object containing a set of optional, user-specified Spark\nconfiguration key-value pairs. Users can also pass in a string of extra\nJVM options to the driver and the executors via\n`spark.driver.extraJavaOptions` and `spark.executor.extraJavaOptions`\nrespectively.", + "required": false + }, + "spark_env_vars": { + "name": "spark_env_vars", + "type": "map[string]string", + "description": "An object containing a set of optional, user-specified environment\nvariable key-value pairs. Please note that key-value pair of the form\n(X,Y) will be exported as is (i.e., `export X='Y'`) while launching the\ndriver and workers.\n\nIn order to specify an additional set of `SPARK_DAEMON_JAVA_OPTS`, we\nrecommend appending them to `$SPARK_DAEMON_JAVA_OPTS` as shown in the\nexample below. This ensures that all default databricks managed\nenvironmental variables are included as well.\n\nExample Spark en...", + "required": false + }, + "spark_version": { + "name": "spark_version", + "type": "string", + "description": "The Spark version of the cluster, e.g. `3.3.x-scala2.11`. A list of\navailable Spark versions can be retrieved by using the\n:method:clusters/sparkVersions API call.", + "required": false + }, + "ssh_public_keys": { + "name": "ssh_public_keys", + "type": "[]string", + "description": "SSH public key contents that will be added to each Spark node in this\ncluster. The corresponding private keys can be used to login with the\nuser name `ubuntu` on port `2200`. Up to 10 keys can be specified.", + "required": false + }, + "total_initial_remote_disk_size": { + "name": "total_initial_remote_disk_size", + "type": "int", + "description": "If set, what the total initial volume size (in GB) of the remote disks\nshould be. 
Currently only supported for GCP HYPERDISK_BALANCED disks.", + "required": false + }, + "use_ml_runtime": { + "name": "use_ml_runtime", + "type": "bool", + "description": "This field can only be used when `kind = CLASSIC_PREVIEW`.\n\n`effective_spark_version` is determined by `spark_version` (DBR release),\nthis field `use_ml_runtime`, and whether `node_type_id` is gpu node or\nnot.", + "required": false + }, + "workload_type": { + "name": "workload_type", + "type": "*WorkloadType", + "description": "Cluster Attributes showing for clusters workload types.", + "required": false + } + } + }, + "compute.Command": { + "name": "Command", + "package": "compute", + "description": "", + "fields": { + "clusterId": { + "name": "clusterId", + "type": "string", + "description": "Running cluster id", + "required": false + }, + "command": { + "name": "command", + "type": "string", + "description": "Executable code", + "required": false + }, + "contextId": { + "name": "contextId", + "type": "string", + "description": "Running context id", + "required": false + }, + "language": { + "name": "language", + "type": "Language", + "description": "", + "required": false + } + } + }, + "compute.CommandExecutorV2": { + "name": "CommandExecutorV2", + "package": "compute", + "description": "", + "fields": { + "clusterID": { + "name": "clusterID", + "type": "string", + "description": "", + "required": false + }, + "clustersAPI": { + "name": "clustersAPI", + "type": "*ClustersAPI", + "description": "", + "required": false + }, + "contextID": { + "name": "contextID", + "type": "string", + "description": "", + "required": false + }, + "executionAPI": { + "name": "executionAPI", + "type": "*CommandExecutionAPI", + "description": "", + "required": false + }, + "language": { + "name": "language", + "type": "Language", + "description": "", + "required": false + } + } + }, + "compute.CommandStatusResponse": { + "name": "CommandStatusResponse", + "package": "compute", + "description": "", + "fields": { + "id": { + "name": "id", + "type": "string", + "description": "", + "required": false + }, + "results": { + "name": "results", + "type": "*Results", + "description": "", + "required": false + }, + "status": { + "name": "status", + "type": "CommandStatus", + "description": "", + "required": false + } + } + }, + "compute.CommandsHighLevelAPI": { + "name": "CommandsHighLevelAPI", + "package": "compute", + "description": "CommandsHighLevelAPI exposes more friendly wrapper over command execution", + "fields": { + "clusters": { + "name": "clusters", + "type": "*ClustersAPI", + "description": "", + "required": false + }, + "execution": { + "name": "execution", + "type": "*CommandExecutionAPI", + "description": "", + "required": false + } + } + }, + "compute.ContextStatusResponse": { + "name": "ContextStatusResponse", + "package": "compute", + "description": "", + "fields": { + "id": { + "name": "id", + "type": "string", + "description": "", + "required": false + }, + "status": { + "name": "status", + "type": "ContextStatus", + "description": "", + "required": false + } + } + }, + "compute.CreateCluster": { + "name": "CreateCluster", + "package": "compute", + "description": "", + "fields": { + "apply_policy_default_values": { + "name": "apply_policy_default_values", + "type": "bool", + "description": "When set to true, fixed and default values from the policy will be used\nfor fields that are omitted. 
When set to false, only fixed values from\nthe policy will be applied.", + "required": false + }, + "autoscale": { + "name": "autoscale", + "type": "*AutoScale", + "description": "Parameters needed in order to automatically scale clusters up and down\nbased on load. Note: autoscaling works best with DB runtime versions 3.0\nor later.", + "required": false + }, + "autotermination_minutes": { + "name": "autotermination_minutes", + "type": "int", + "description": "Automatically terminates the cluster after it is inactive for this time\nin minutes. If not set, this cluster will not be automatically\nterminated. If specified, the threshold must be between 10 and 10000\nminutes. Users can also set this value to 0 to explicitly disable\nautomatic termination.", + "required": false + }, + "aws_attributes": { + "name": "aws_attributes", + "type": "*AwsAttributes", + "description": "Attributes related to clusters running on Amazon Web Services. If not\nspecified at cluster creation, a set of default values will be used.", + "required": false + }, + "azure_attributes": { + "name": "azure_attributes", + "type": "*AzureAttributes", + "description": "Attributes related to clusters running on Microsoft Azure. If not\nspecified at cluster creation, a set of default values will be used.", + "required": false + }, + "clone_from": { + "name": "clone_from", + "type": "*CloneCluster", + "description": "When specified, this clones libraries from a source cluster during the\ncreation of a new cluster.", + "required": false + }, + "cluster_log_conf": { + "name": "cluster_log_conf", + "type": "*ClusterLogConf", + "description": "The configuration for delivering spark logs to a long-term storage\ndestination. Three kinds of destinations (DBFS, S3 and Unity Catalog\nvolumes) are supported. Only one destination can be specified for one\ncluster. If the conf is given, the logs will be delivered to the\ndestination every `5 mins`. The destination of driver logs is\n`$destination/$clusterId/driver`, while the destination of executor logs\nis `$destination/$clusterId/executor`.", + "required": false + }, + "cluster_name": { + "name": "cluster_name", + "type": "string", + "description": "Cluster name requested by the user. This doesn't have to be unique. If\nnot specified at creation, the cluster name will be an empty string. For\njob clusters, the cluster name is automatically set based on the job and\njob run IDs.", + "required": false + }, + "custom_tags": { + "name": "custom_tags", + "type": "map[string]string", + "description": "Additional tags for cluster resources. Databricks will tag all cluster\nresources (e.g., AWS instances and EBS volumes) with these tags in\naddition to `default_tags`. Notes:\n\n- Currently, Databricks allows at most 45 custom tags\n\n- Clusters can only reuse cloud resources if the resources' tags are a\nsubset of the cluster tags", + "required": false + }, + "data_security_mode": { + "name": "data_security_mode", + "type": "DataSecurityMode", + "description": "", + "required": false + }, + "docker_image": { + "name": "docker_image", + "type": "*DockerImage", + "description": "Custom docker image BYOC", + "required": false + }, + "driver_instance_pool_id": { + "name": "driver_instance_pool_id", + "type": "string", + "description": "The optional ID of the instance pool for the driver of the cluster\nbelongs. 
The pool cluster uses the instance pool with id\n(instance_pool_id) if the driver pool is not assigned.", + "required": false + }, + "driver_node_type_id": { + "name": "driver_node_type_id", + "type": "string", + "description": "The node type of the Spark driver. Note that this field is optional; if\nunset, the driver node type will be set as the same value as\n`node_type_id` defined above.\n\nThis field, along with node_type_id, should not be set if\nvirtual_cluster_size is set. If both driver_node_type_id, node_type_id,\nand virtual_cluster_size are specified, driver_node_type_id and\nnode_type_id take precedence.", + "required": false + }, + "enable_elastic_disk": { + "name": "enable_elastic_disk", + "type": "bool", + "description": "Autoscaling Local Storage: when enabled, this cluster will dynamically\nacquire additional disk space when its Spark workers are running low on\ndisk space.", + "required": false + }, + "enable_local_disk_encryption": { + "name": "enable_local_disk_encryption", + "type": "bool", + "description": "Whether to enable LUKS on cluster VMs' local disks", + "required": false + }, + "gcp_attributes": { + "name": "gcp_attributes", + "type": "*GcpAttributes", + "description": "Attributes related to clusters running on Google Cloud Platform. If not\nspecified at cluster creation, a set of default values will be used.", + "required": false + }, + "init_scripts": { + "name": "init_scripts", + "type": "[]InitScriptInfo", + "description": "The configuration for storing init scripts. Any number of destinations\ncan be specified. The scripts are executed sequentially in the order\nprovided. If `cluster_log_conf` is specified, init script logs are sent\nto `\u003cdestination\u003e/\u003ccluster-ID\u003e/init_scripts`.", + "required": false + }, + "instance_pool_id": { + "name": "instance_pool_id", + "type": "string", + "description": "The optional ID of the instance pool to which the cluster belongs.", + "required": false + }, + "is_single_node": { + "name": "is_single_node", + "type": "bool", + "description": "This field can only be used when `kind = CLASSIC_PREVIEW`.\n\nWhen set to true, Databricks will automatically set single node related\n`custom_tags`, `spark_conf`, and `num_workers`", + "required": false + }, + "kind": { + "name": "kind", + "type": "Kind", + "description": "", + "required": false + }, + "node_type_id": { + "name": "node_type_id", + "type": "string", + "description": "This field encodes, through a single value, the resources available to\neach of the Spark nodes in this cluster. For example, the Spark nodes can\nbe provisioned and optimized for memory or compute intensive workloads. A\nlist of available node types can be retrieved by using the\n:method:clusters/listNodeTypes API call.", + "required": false + }, + "num_workers": { + "name": "num_workers", + "type": "int", + "description": "Number of worker nodes that this cluster should have. A cluster has one\nSpark Driver and `num_workers` Executors for a total of `num_workers` + 1\nSpark nodes.\n\nNote: When reading the properties of a cluster, this field reflects the\ndesired number of workers rather than the actual current number of\nworkers. 
For instance, if a cluster is resized from 5 to 10 workers, this\nfield will immediately be updated to reflect the target size of 10\nworkers, whereas the workers listed in `spark_info` will ...", + "required": false + }, + "policy_id": { + "name": "policy_id", + "type": "string", + "description": "The ID of the cluster policy used to create the cluster if applicable.", + "required": false + }, + "remote_disk_throughput": { + "name": "remote_disk_throughput", + "type": "int", + "description": "If set, what the configurable throughput (in Mb/s) for the remote disk\nis. Currently only supported for GCP HYPERDISK_BALANCED disks.", + "required": false + }, + "runtime_engine": { + "name": "runtime_engine", + "type": "RuntimeEngine", + "description": "Determines the cluster's runtime engine, either standard or Photon.\n\nThis field is not compatible with legacy `spark_version` values that\ncontain `-photon-`. Remove `-photon-` from the `spark_version` and set\n`runtime_engine` to `PHOTON`.\n\nIf left unspecified, the runtime engine defaults to standard unless the\nspark_version contains -photon-, in which case Photon will be used.", + "required": false + }, + "single_user_name": { + "name": "single_user_name", + "type": "string", + "description": "Single user name if data_security_mode is `SINGLE_USER`", + "required": false + }, + "spark_conf": { + "name": "spark_conf", + "type": "map[string]string", + "description": "An object containing a set of optional, user-specified Spark\nconfiguration key-value pairs. Users can also pass in a string of extra\nJVM options to the driver and the executors via\n`spark.driver.extraJavaOptions` and `spark.executor.extraJavaOptions`\nrespectively.", + "required": false + }, + "spark_env_vars": { + "name": "spark_env_vars", + "type": "map[string]string", + "description": "An object containing a set of optional, user-specified environment\nvariable key-value pairs. Please note that key-value pair of the form\n(X,Y) will be exported as is (i.e., `export X='Y'`) while launching the\ndriver and workers.\n\nIn order to specify an additional set of `SPARK_DAEMON_JAVA_OPTS`, we\nrecommend appending them to `$SPARK_DAEMON_JAVA_OPTS` as shown in the\nexample below. This ensures that all default databricks managed\nenvironmental variables are included as well.\n\nExample Spark en...", + "required": false + }, + "spark_version": { + "name": "spark_version", + "type": "string", + "description": "The Spark version of the cluster, e.g. `3.3.x-scala2.11`. A list of\navailable Spark versions can be retrieved by using the\n:method:clusters/sparkVersions API call.", + "required": false + }, + "ssh_public_keys": { + "name": "ssh_public_keys", + "type": "[]string", + "description": "SSH public key contents that will be added to each Spark node in this\ncluster. The corresponding private keys can be used to login with the\nuser name `ubuntu` on port `2200`. Up to 10 keys can be specified.", + "required": false + }, + "total_initial_remote_disk_size": { + "name": "total_initial_remote_disk_size", + "type": "int", + "description": "If set, what the total initial volume size (in GB) of the remote disks\nshould be. 
Currently only supported for GCP HYPERDISK_BALANCED disks.", + "required": false + }, + "use_ml_runtime": { + "name": "use_ml_runtime", + "type": "bool", + "description": "This field can only be used when `kind = CLASSIC_PREVIEW`.\n\n`effective_spark_version` is determined by `spark_version` (DBR release),\nthis field `use_ml_runtime`, and whether `node_type_id` is gpu node or\nnot.", + "required": false + }, + "workload_type": { + "name": "workload_type", + "type": "*WorkloadType", + "description": "", + "required": false + } + } + }, + "compute.CreateClusterResponse": { + "name": "CreateClusterResponse", + "package": "compute", + "description": "", + "fields": { + "cluster_id": { + "name": "cluster_id", + "type": "string", + "description": "", + "required": false + } + } + }, + "compute.CreateContext": { + "name": "CreateContext", + "package": "compute", + "description": "", + "fields": { + "clusterId": { + "name": "clusterId", + "type": "string", + "description": "Running cluster id", + "required": false + }, + "language": { + "name": "language", + "type": "Language", + "description": "", + "required": false + } + } + }, + "compute.CreateInstancePool": { + "name": "CreateInstancePool", + "package": "compute", + "description": "", + "fields": { + "aws_attributes": { + "name": "aws_attributes", + "type": "*InstancePoolAwsAttributes", + "description": "Attributes related to instance pools running on Amazon Web Services. If\nnot specified at pool creation, a set of default values will be used.", + "required": false + }, + "azure_attributes": { + "name": "azure_attributes", + "type": "*InstancePoolAzureAttributes", + "description": "Attributes related to instance pools running on Azure. If not specified\nat pool creation, a set of default values will be used.", + "required": false + }, + "custom_tags": { + "name": "custom_tags", + "type": "map[string]string", + "description": "Additional tags for pool resources. Databricks will tag all pool\nresources (e.g., AWS instances and EBS volumes) with these tags in\naddition to `default_tags`. Notes:\n\n- Currently, Databricks allows at most 45 custom tags", + "required": false + }, + "disk_spec": { + "name": "disk_spec", + "type": "*DiskSpec", + "description": "Defines the specification of the disks that will be attached to all spark\ncontainers.", + "required": false + }, + "enable_elastic_disk": { + "name": "enable_elastic_disk", + "type": "bool", + "description": "Autoscaling Local Storage: when enabled, the instances in this pool will\ndynamically acquire additional disk space when their Spark workers are\nrunning low on disk space. In AWS, this feature requires specific AWS\npermissions to function correctly - refer to the User Guide for more\ndetails.", + "required": false + }, + "gcp_attributes": { + "name": "gcp_attributes", + "type": "*InstancePoolGcpAttributes", + "description": "Attributes related to instance pools running on Google Cloud Platform. If\nnot specified at pool creation, a set of default values will be used.", + "required": false + }, + "idle_instance_autotermination_minutes": { + "name": "idle_instance_autotermination_minutes", + "type": "int", + "description": "Automatically terminates the extra instances in the pool cache after they\nare inactive for this time in minutes if min_idle_instances requirement\nis already met. If not set, the extra pool instances will be\nautomatically terminated after a default timeout. If specified, the\nthreshold must be between 0 and 10000 minutes. 
Users can also set this\nvalue to 0 to instantly remove idle instances from the cache if min cache\nsize could still hold.", + "required": false + }, + "instance_pool_name": { + "name": "instance_pool_name", + "type": "string", + "description": "Pool name requested by the user. Pool name must be unique. Length must be\nbetween 1 and 100 characters.", + "required": false + }, + "max_capacity": { + "name": "max_capacity", + "type": "int", + "description": "Maximum number of outstanding instances to keep in the pool, including\nboth instances used by clusters and idle instances. Clusters that require\nfurther instance provisioning will fail during upsize requests.", + "required": false + }, + "min_idle_instances": { + "name": "min_idle_instances", + "type": "int", + "description": "Minimum number of idle instances to keep in the instance pool", + "required": false + }, + "node_type_id": { + "name": "node_type_id", + "type": "string", + "description": "This field encodes, through a single value, the resources available to\neach of the Spark nodes in this cluster. For example, the Spark nodes can\nbe provisioned and optimized for memory or compute intensive workloads. A\nlist of available node types can be retrieved by using the\n:method:clusters/listNodeTypes API call.", + "required": false + }, + "preloaded_docker_images": { + "name": "preloaded_docker_images", + "type": "[]DockerImage", + "description": "Custom Docker Image BYOC", + "required": false + }, + "preloaded_spark_versions": { + "name": "preloaded_spark_versions", + "type": "[]string", + "description": "A list containing at most one preloaded Spark image version for the pool.\nPool-backed clusters started with the preloaded Spark version will start\nfaster. A list of available Spark versions can be retrieved by using the\n:method:clusters/sparkVersions API call.", + "required": false + }, + "remote_disk_throughput": { + "name": "remote_disk_throughput", + "type": "int", + "description": "If set, what the configurable throughput (in Mb/s) for the remote disk\nis. Currently only supported for GCP HYPERDISK_BALANCED types.", + "required": false + }, + "total_initial_remote_disk_size": { + "name": "total_initial_remote_disk_size", + "type": "int", + "description": "If set, what the total initial volume size (in GB) of the remote disks\nshould be. Currently only supported for GCP HYPERDISK_BALANCED types.", + "required": false + } + } + }, + "compute.CreateInstancePoolResponse": { + "name": "CreateInstancePoolResponse", + "package": "compute", + "description": "", + "fields": { + "instance_pool_id": { + "name": "instance_pool_id", + "type": "string", + "description": "The ID of the created instance pool.", + "required": false + } + } + }, + "compute.CreatePolicy": { + "name": "CreatePolicy", + "package": "compute", + "description": "", + "fields": { + "definition": { + "name": "definition", + "type": "string", + "description": "Policy definition document expressed in [Databricks Cluster Policy\nDefinition Language].\n\n[Databricks Cluster Policy Definition Language]: https://docs.databricks.com/administration-guide/clusters/policy-definition.html", + "required": false + }, + "description": { + "name": "description", + "type": "string", + "description": "Additional human-readable description of the cluster policy.", + "required": false + }, + "libraries": { + "name": "libraries", + "type": "[]Library", + "description": "A list of libraries to be installed on the next cluster restart that uses\nthis policy. 
The maximum number of libraries is 500.", + "required": false + }, + "max_clusters_per_user": { + "name": "max_clusters_per_user", + "type": "int64", + "description": "Max number of clusters per user that can be active using this policy. If\nnot present, there is no max limit.", + "required": false + }, + "name": { + "name": "name", + "type": "string", + "description": "Cluster Policy name requested by the user. This has to be unique. Length\nmust be between 1 and 100 characters.", + "required": false + }, + "policy_family_definition_overrides": { + "name": "policy_family_definition_overrides", + "type": "string", + "description": "Policy definition JSON document expressed in [Databricks Policy\nDefinition Language]. The JSON document must be passed as a string and\ncannot be embedded in the requests.\n\nYou can use this to customize the policy definition inherited from the\npolicy family. Policy rules specified here are merged into the inherited\npolicy definition.\n\n[Databricks Policy Definition Language]: https://docs.databricks.com/administration-guide/clusters/policy-definition.html", + "required": false + }, + "policy_family_id": { + "name": "policy_family_id", + "type": "string", + "description": "ID of the policy family. The cluster policy's policy definition inherits\nthe policy family's policy definition.\n\nCannot be used with `definition`. Use\n`policy_family_definition_overrides` instead to customize the policy\ndefinition.", + "required": false + } + } + }, + "compute.CreatePolicyResponse": { + "name": "CreatePolicyResponse", + "package": "compute", + "description": "", + "fields": { + "policy_id": { + "name": "policy_id", + "type": "string", + "description": "Canonical unique identifier for the cluster policy.", + "required": false + } + } + }, + "compute.CreateResponse": { + "name": "CreateResponse", + "package": "compute", + "description": "", + "fields": { + "script_id": { + "name": "script_id", + "type": "string", + "description": "The global init script ID.", + "required": false + } + } + }, + "compute.Created": { + "name": "Created", + "package": "compute", + "description": "", + "fields": { + "id": { + "name": "id", + "type": "string", + "description": "", + "required": false + } + } + }, + "compute.CustomPolicyTag": { + "name": "CustomPolicyTag", + "package": "compute", + "description": "", + "fields": { + "key": { + "name": "key", + "type": "string", + "description": "The key of the tag. 
- Must be unique among all custom tags of the same\npolicy - Cannot be “budget-policy-name”, “budget-policy-id” or\n\"budget-policy-resolution-result\" - these tags are preserved.", + "required": false + }, + "value": { + "name": "value", + "type": "string", + "description": "The value of the tag.", + "required": false + } + } + }, + "compute.DataPlaneEventDetails": { + "name": "DataPlaneEventDetails", + "package": "compute", + "description": "", + "fields": { + "event_type": { + "name": "event_type", + "type": "DataPlaneEventDetailsEventType", + "description": "", + "required": false + }, + "executor_failures": { + "name": "executor_failures", + "type": "int", + "description": "", + "required": false + }, + "host_id": { + "name": "host_id", + "type": "string", + "description": "", + "required": false + }, + "timestamp": { + "name": "timestamp", + "type": "int64", + "description": "", + "required": false + } + } + }, + "compute.DataSecurityMode": { + "name": "DataSecurityMode", + "package": "compute", + "description": "Data security mode decides what data governance model to use when accessing data\nfrom a cluster.\n\nThe following modes can only be used when `kind = CLASSIC_PREVIEW`.\n* `DATA_SECURITY_MODE_AUTO`: Databricks will choose the most appropriate access mode depending on your compute configuration.\n* `DATA_SECURITY_MODE_STANDARD`: Alias for `USER_ISOLATION`.\n* `DATA_SECURITY_MODE_DEDICATED`: Alias for `SINGLE_USER`.\n\nThe following modes can be used regardless of `kind`.\n* `NONE`: No security isolation for multiple users sharing the cluster. Data governance features are not available in this mode.\n* `SINGLE_USER`: A secure cluster that can only be exclusively used by a single user specified in `single_user_name`. Most programming languages, cluster features and data governance features are available in this mode.\n* `USER_ISOLATION`: A secure cluster that can be shared by multiple users. Cluster users are fully isolated so that they cannot see each other's data and credentials. Most data governance features are supported in this mode. But programming languages and cluster features might be limited.\n\nThe following modes are deprecated starting with Databricks Runtime 15.0 and\nwill be removed for future Databricks Runtime versions:\n\n* `LEGACY_TABLE_ACL`: This mode is for users migrating from legacy Table ACL clusters.\n* `LEGACY_PASSTHROUGH`: This mode is for users migrating from legacy Passthrough on high concurrency clusters.\n* `LEGACY_SINGLE_USER`: This mode is for users migrating from legacy Passthrough on standard clusters.\n* `LEGACY_SINGLE_USER_STANDARD`: This mode provides a way that doesn’t have UC nor passthrough enabled.", + "fields": {} + }, + "compute.DbfsStorageInfo": { + "name": "DbfsStorageInfo", + "package": "compute", + "description": "A storage location in DBFS", + "fields": { + "destination": { + "name": "destination", + "type": "string", + "description": "dbfs destination, e.g. 
`dbfs:/my/path`", + "required": false + } + } + }, + "compute.DeleteCluster": { + "name": "DeleteCluster", + "package": "compute", + "description": "", + "fields": { + "cluster_id": { + "name": "cluster_id", + "type": "string", + "description": "The cluster to be terminated.", + "required": false + } + } + }, + "compute.DeleteInstancePool": { + "name": "DeleteInstancePool", + "package": "compute", + "description": "", + "fields": { + "instance_pool_id": { + "name": "instance_pool_id", + "type": "string", + "description": "The instance pool to be terminated.", + "required": false + } + } + }, + "compute.DeletePolicy": { + "name": "DeletePolicy", + "package": "compute", + "description": "", + "fields": { + "policy_id": { + "name": "policy_id", + "type": "string", + "description": "The ID of the policy to delete.", + "required": false + } + } + }, + "compute.DestroyContext": { + "name": "DestroyContext", + "package": "compute", + "description": "", + "fields": { + "clusterId": { + "name": "clusterId", + "type": "string", + "description": "", + "required": false + }, + "contextId": { + "name": "contextId", + "type": "string", + "description": "", + "required": false + } + } + }, + "compute.DiskSpec": { + "name": "DiskSpec", + "package": "compute", + "description": "Describes the disks that are launched for each instance in the spark cluster.\nFor example, if the cluster has 3 instances, each instance is configured to\nlaunch 2 disks, 100 GiB each, then Databricks will launch a total of 6 disks,\n100 GiB each, for this cluster.", + "fields": { + "disk_count": { + "name": "disk_count", + "type": "int", + "description": "The number of disks launched for each instance: - This feature is only\nenabled for supported node types. - Users can choose up to the limit of\nthe disks supported by the node type. - For node types with no OS disk,\nat least one disk must be specified; otherwise, cluster creation will\nfail.\n\nIf disks are attached, Databricks will configure Spark to use only the\ndisks for scratch storage, because heterogenously sized scratch devices\ncan lead to inefficient disk utilization. If no disks are atta...", + "required": false + }, + "disk_iops": { + "name": "disk_iops", + "type": "int", + "description": "", + "required": false + }, + "disk_size": { + "name": "disk_size", + "type": "int", + "description": "The size of each disk (in GiB) launched for each instance. 
Values must\nfall into the supported range for a particular instance type.\n\nFor AWS: - General Purpose SSD: 100 - 4096 GiB - Throughput Optimized\nHDD: 500 - 4096 GiB\n\nFor Azure: - Premium LRS (SSD): 1 - 1023 GiB - Standard LRS (HDD): 1-\n1023 GiB", + "required": false + }, + "disk_throughput": { + "name": "disk_throughput", + "type": "int", + "description": "", + "required": false + }, + "disk_type": { + "name": "disk_type", + "type": "*DiskType", + "description": "The type of disks that will be launched with this cluster.", + "required": false + } + } + }, + "compute.DiskType": { + "name": "DiskType", + "package": "compute", + "description": "Describes the disk type.", + "fields": { + "azure_disk_volume_type": { + "name": "azure_disk_volume_type", + "type": "DiskTypeAzureDiskVolumeType", + "description": "", + "required": false + }, + "ebs_volume_type": { + "name": "ebs_volume_type", + "type": "DiskTypeEbsVolumeType", + "description": "", + "required": false + } + } + }, + "compute.DockerBasicAuth": { + "name": "DockerBasicAuth", + "package": "compute", + "description": "", + "fields": { + "password": { + "name": "password", + "type": "string", + "description": "Password of the user", + "required": false + }, + "username": { + "name": "username", + "type": "string", + "description": "Name of the user", + "required": false + } + } + }, + "compute.DockerImage": { + "name": "DockerImage", + "package": "compute", + "description": "", + "fields": { + "basic_auth": { + "name": "basic_auth", + "type": "*DockerBasicAuth", + "description": "Basic auth with username and password", + "required": false + }, + "url": { + "name": "url", + "type": "string", + "description": "URL of the docker image.", + "required": false + } + } + }, + "compute.EbsVolumeType": { + "name": "EbsVolumeType", + "package": "compute", + "description": "All EBS volume types that Databricks supports.\nSee https://aws.amazon.com/ebs/details/ for details.", + "fields": {} + }, + "compute.EditCluster": { + "name": "EditCluster", + "package": "compute", + "description": "", + "fields": { + "apply_policy_default_values": { + "name": "apply_policy_default_values", + "type": "bool", + "description": "When set to true, fixed and default values from the policy will be used\nfor fields that are omitted. When set to false, only fixed values from\nthe policy will be applied.", + "required": false + }, + "autoscale": { + "name": "autoscale", + "type": "*AutoScale", + "description": "Parameters needed in order to automatically scale clusters up and down\nbased on load. Note: autoscaling works best with DB runtime versions 3.0\nor later.", + "required": false + }, + "autotermination_minutes": { + "name": "autotermination_minutes", + "type": "int", + "description": "Automatically terminates the cluster after it is inactive for this time\nin minutes. If not set, this cluster will not be automatically\nterminated. If specified, the threshold must be between 10 and 10000\nminutes. Users can also set this value to 0 to explicitly disable\nautomatic termination.", + "required": false + }, + "aws_attributes": { + "name": "aws_attributes", + "type": "*AwsAttributes", + "description": "Attributes related to clusters running on Amazon Web Services. If not\nspecified at cluster creation, a set of default values will be used.", + "required": false + }, + "azure_attributes": { + "name": "azure_attributes", + "type": "*AzureAttributes", + "description": "Attributes related to clusters running on Microsoft Azure. 
If not\nspecified at cluster creation, a set of default values will be used.", + "required": false + }, + "cluster_id": { + "name": "cluster_id", + "type": "string", + "description": "ID of the cluster", + "required": false + }, + "cluster_log_conf": { + "name": "cluster_log_conf", + "type": "*ClusterLogConf", + "description": "The configuration for delivering spark logs to a long-term storage\ndestination. Three kinds of destinations (DBFS, S3 and Unity Catalog\nvolumes) are supported. Only one destination can be specified for one\ncluster. If the conf is given, the logs will be delivered to the\ndestination every `5 mins`. The destination of driver logs is\n`$destination/$clusterId/driver`, while the destination of executor logs\nis `$destination/$clusterId/executor`.", + "required": false + }, + "cluster_name": { + "name": "cluster_name", + "type": "string", + "description": "Cluster name requested by the user. This doesn't have to be unique. If\nnot specified at creation, the cluster name will be an empty string. For\njob clusters, the cluster name is automatically set based on the job and\njob run IDs.", + "required": false + }, + "custom_tags": { + "name": "custom_tags", + "type": "map[string]string", + "description": "Additional tags for cluster resources. Databricks will tag all cluster\nresources (e.g., AWS instances and EBS volumes) with these tags in\naddition to `default_tags`. Notes:\n\n- Currently, Databricks allows at most 45 custom tags\n\n- Clusters can only reuse cloud resources if the resources' tags are a\nsubset of the cluster tags", + "required": false + }, + "data_security_mode": { + "name": "data_security_mode", + "type": "DataSecurityMode", + "description": "", + "required": false + }, + "docker_image": { + "name": "docker_image", + "type": "*DockerImage", + "description": "Custom docker image BYOC", + "required": false + }, + "driver_instance_pool_id": { + "name": "driver_instance_pool_id", + "type": "string", + "description": "The optional ID of the instance pool for the driver of the cluster\nbelongs. The pool cluster uses the instance pool with id\n(instance_pool_id) if the driver pool is not assigned.", + "required": false + }, + "driver_node_type_id": { + "name": "driver_node_type_id", + "type": "string", + "description": "The node type of the Spark driver. Note that this field is optional; if\nunset, the driver node type will be set as the same value as\n`node_type_id` defined above.\n\nThis field, along with node_type_id, should not be set if\nvirtual_cluster_size is set. If both driver_node_type_id, node_type_id,\nand virtual_cluster_size are specified, driver_node_type_id and\nnode_type_id take precedence.", + "required": false + }, + "enable_elastic_disk": { + "name": "enable_elastic_disk", + "type": "bool", + "description": "Autoscaling Local Storage: when enabled, this cluster will dynamically\nacquire additional disk space when its Spark workers are running low on\ndisk space.", + "required": false + }, + "enable_local_disk_encryption": { + "name": "enable_local_disk_encryption", + "type": "bool", + "description": "Whether to enable LUKS on cluster VMs' local disks", + "required": false + }, + "gcp_attributes": { + "name": "gcp_attributes", + "type": "*GcpAttributes", + "description": "Attributes related to clusters running on Google Cloud Platform. 
If not\nspecified at cluster creation, a set of default values will be used.", + "required": false + }, + "init_scripts": { + "name": "init_scripts", + "type": "[]InitScriptInfo", + "description": "The configuration for storing init scripts. Any number of destinations\ncan be specified. The scripts are executed sequentially in the order\nprovided. If `cluster_log_conf` is specified, init script logs are sent\nto `\u003cdestination\u003e/\u003ccluster-ID\u003e/init_scripts`.", + "required": false + }, + "instance_pool_id": { + "name": "instance_pool_id", + "type": "string", + "description": "The optional ID of the instance pool to which the cluster belongs.", + "required": false + }, + "is_single_node": { + "name": "is_single_node", + "type": "bool", + "description": "This field can only be used when `kind = CLASSIC_PREVIEW`.\n\nWhen set to true, Databricks will automatically set single node related\n`custom_tags`, `spark_conf`, and `num_workers`", + "required": false + }, + "kind": { + "name": "kind", + "type": "Kind", + "description": "", + "required": false + }, + "node_type_id": { + "name": "node_type_id", + "type": "string", + "description": "This field encodes, through a single value, the resources available to\neach of the Spark nodes in this cluster. For example, the Spark nodes can\nbe provisioned and optimized for memory or compute intensive workloads. A\nlist of available node types can be retrieved by using the\n:method:clusters/listNodeTypes API call.", + "required": false + }, + "num_workers": { + "name": "num_workers", + "type": "int", + "description": "Number of worker nodes that this cluster should have. A cluster has one\nSpark Driver and `num_workers` Executors for a total of `num_workers` + 1\nSpark nodes.\n\nNote: When reading the properties of a cluster, this field reflects the\ndesired number of workers rather than the actual current number of\nworkers. For instance, if a cluster is resized from 5 to 10 workers, this\nfield will immediately be updated to reflect the target size of 10\nworkers, whereas the workers listed in `spark_info` will ...", + "required": false + }, + "policy_id": { + "name": "policy_id", + "type": "string", + "description": "The ID of the cluster policy used to create the cluster if applicable.", + "required": false + }, + "remote_disk_throughput": { + "name": "remote_disk_throughput", + "type": "int", + "description": "If set, what the configurable throughput (in Mb/s) for the remote disk\nis. Currently only supported for GCP HYPERDISK_BALANCED disks.", + "required": false + }, + "runtime_engine": { + "name": "runtime_engine", + "type": "RuntimeEngine", + "description": "Determines the cluster's runtime engine, either standard or Photon.\n\nThis field is not compatible with legacy `spark_version` values that\ncontain `-photon-`. Remove `-photon-` from the `spark_version` and set\n`runtime_engine` to `PHOTON`.\n\nIf left unspecified, the runtime engine defaults to standard unless the\nspark_version contains -photon-, in which case Photon will be used.", + "required": false + }, + "single_user_name": { + "name": "single_user_name", + "type": "string", + "description": "Single user name if data_security_mode is `SINGLE_USER`", + "required": false + }, + "spark_conf": { + "name": "spark_conf", + "type": "map[string]string", + "description": "An object containing a set of optional, user-specified Spark\nconfiguration key-value pairs. 
Users can also pass in a string of extra\nJVM options to the driver and the executors via\n`spark.driver.extraJavaOptions` and `spark.executor.extraJavaOptions`\nrespectively.", + "required": false + }, + "spark_env_vars": { + "name": "spark_env_vars", + "type": "map[string]string", + "description": "An object containing a set of optional, user-specified environment\nvariable key-value pairs. Please note that key-value pair of the form\n(X,Y) will be exported as is (i.e., `export X='Y'`) while launching the\ndriver and workers.\n\nIn order to specify an additional set of `SPARK_DAEMON_JAVA_OPTS`, we\nrecommend appending them to `$SPARK_DAEMON_JAVA_OPTS` as shown in the\nexample below. This ensures that all default databricks managed\nenvironmental variables are included as well.\n\nExample Spark en...", + "required": false + }, + "spark_version": { + "name": "spark_version", + "type": "string", + "description": "The Spark version of the cluster, e.g. `3.3.x-scala2.11`. A list of\navailable Spark versions can be retrieved by using the\n:method:clusters/sparkVersions API call.", + "required": false + }, + "ssh_public_keys": { + "name": "ssh_public_keys", + "type": "[]string", + "description": "SSH public key contents that will be added to each Spark node in this\ncluster. The corresponding private keys can be used to login with the\nuser name `ubuntu` on port `2200`. Up to 10 keys can be specified.", + "required": false + }, + "total_initial_remote_disk_size": { + "name": "total_initial_remote_disk_size", + "type": "int", + "description": "If set, what the total initial volume size (in GB) of the remote disks\nshould be. Currently only supported for GCP HYPERDISK_BALANCED disks.", + "required": false + }, + "use_ml_runtime": { + "name": "use_ml_runtime", + "type": "bool", + "description": "This field can only be used when `kind = CLASSIC_PREVIEW`.\n\n`effective_spark_version` is determined by `spark_version` (DBR release),\nthis field `use_ml_runtime`, and whether `node_type_id` is gpu node or\nnot.", + "required": false + }, + "workload_type": { + "name": "workload_type", + "type": "*WorkloadType", + "description": "", + "required": false + } + } + }, + "compute.EditInstancePool": { + "name": "EditInstancePool", + "package": "compute", + "description": "", + "fields": { + "custom_tags": { + "name": "custom_tags", + "type": "map[string]string", + "description": "Additional tags for pool resources. Databricks will tag all pool\nresources (e.g., AWS instances and EBS volumes) with these tags in\naddition to `default_tags`. Notes:\n\n- Currently, Databricks allows at most 45 custom tags", + "required": false + }, + "idle_instance_autotermination_minutes": { + "name": "idle_instance_autotermination_minutes", + "type": "int", + "description": "Automatically terminates the extra instances in the pool cache after they\nare inactive for this time in minutes if min_idle_instances requirement\nis already met. If not set, the extra pool instances will be\nautomatically terminated after a default timeout. If specified, the\nthreshold must be between 0 and 10000 minutes. Users can also set this\nvalue to 0 to instantly remove idle instances from the cache if min cache\nsize could still hold.", + "required": false + }, + "instance_pool_id": { + "name": "instance_pool_id", + "type": "string", + "description": "Instance pool ID", + "required": false + }, + "instance_pool_name": { + "name": "instance_pool_name", + "type": "string", + "description": "Pool name requested by the user. Pool name must be unique. 
Length must be\nbetween 1 and 100 characters.", + "required": false + }, + "max_capacity": { + "name": "max_capacity", + "type": "int", + "description": "Maximum number of outstanding instances to keep in the pool, including\nboth instances used by clusters and idle instances. Clusters that require\nfurther instance provisioning will fail during upsize requests.", + "required": false + }, + "min_idle_instances": { + "name": "min_idle_instances", + "type": "int", + "description": "Minimum number of idle instances to keep in the instance pool", + "required": false + }, + "node_type_id": { + "name": "node_type_id", + "type": "string", + "description": "This field encodes, through a single value, the resources available to\neach of the Spark nodes in this cluster. For example, the Spark nodes can\nbe provisioned and optimized for memory or compute intensive workloads. A\nlist of available node types can be retrieved by using the\n:method:clusters/listNodeTypes API call.", + "required": false + }, + "remote_disk_throughput": { + "name": "remote_disk_throughput", + "type": "int", + "description": "If set, what the configurable throughput (in Mb/s) for the remote disk\nis. Currently only supported for GCP HYPERDISK_BALANCED types.", + "required": false + }, + "total_initial_remote_disk_size": { + "name": "total_initial_remote_disk_size", + "type": "int", + "description": "If set, what the total initial volume size (in GB) of the remote disks\nshould be. Currently only supported for GCP HYPERDISK_BALANCED types.", + "required": false + } + } + }, + "compute.EditPolicy": { + "name": "EditPolicy", + "package": "compute", + "description": "", + "fields": { + "definition": { + "name": "definition", + "type": "string", + "description": "Policy definition document expressed in [Databricks Cluster Policy\nDefinition Language].\n\n[Databricks Cluster Policy Definition Language]: https://docs.databricks.com/administration-guide/clusters/policy-definition.html", + "required": false + }, + "description": { + "name": "description", + "type": "string", + "description": "Additional human-readable description of the cluster policy.", + "required": false + }, + "libraries": { + "name": "libraries", + "type": "[]Library", + "description": "A list of libraries to be installed on the next cluster restart that uses\nthis policy. The maximum number of libraries is 500.", + "required": false + }, + "max_clusters_per_user": { + "name": "max_clusters_per_user", + "type": "int64", + "description": "Max number of clusters per user that can be active using this policy. If\nnot present, there is no max limit.", + "required": false + }, + "name": { + "name": "name", + "type": "string", + "description": "Cluster Policy name requested by the user. This has to be unique. Length\nmust be between 1 and 100 characters.", + "required": false + }, + "policy_family_definition_overrides": { + "name": "policy_family_definition_overrides", + "type": "string", + "description": "Policy definition JSON document expressed in [Databricks Policy\nDefinition Language]. The JSON document must be passed as a string and\ncannot be embedded in the requests.\n\nYou can use this to customize the policy definition inherited from the\npolicy family. 
Policy rules specified here are merged into the inherited\npolicy definition.\n\n[Databricks Policy Definition Language]: https://docs.databricks.com/administration-guide/clusters/policy-definition.html", + "required": false + }, + "policy_family_id": { + "name": "policy_family_id", + "type": "string", + "description": "ID of the policy family. The cluster policy's policy definition inherits\nthe policy family's policy definition.\n\nCannot be used with `definition`. Use\n`policy_family_definition_overrides` instead to customize the policy\ndefinition.", + "required": false + }, + "policy_id": { + "name": "policy_id", + "type": "string", + "description": "The ID of the policy to update.", + "required": false + } + } + }, + "compute.EnforceClusterComplianceRequest": { + "name": "EnforceClusterComplianceRequest", + "package": "compute", + "description": "", + "fields": { + "cluster_id": { + "name": "cluster_id", + "type": "string", + "description": "The ID of the cluster you want to enforce policy compliance on.", + "required": false + }, + "validate_only": { + "name": "validate_only", + "type": "bool", + "description": "If set, previews the changes that would be made to a cluster to enforce\ncompliance but does not update the cluster.", + "required": false + } + } + }, + "compute.EnforceClusterComplianceResponse": { + "name": "EnforceClusterComplianceResponse", + "package": "compute", + "description": "", + "fields": { + "changes": { + "name": "changes", + "type": "[]ClusterSettingsChange", + "description": "A list of changes that have been made to the cluster settings for the\ncluster to become compliant with its policy.", + "required": false + }, + "has_changes": { + "name": "has_changes", + "type": "bool", + "description": "Whether any changes have been made to the cluster settings for the\ncluster to become compliant with its policy.", + "required": false + } + } + }, + "compute.Environment": { + "name": "Environment", + "package": "compute", + "description": "The environment entity used to preserve serverless environment side panel, jobs' environment for non-notebook task, and DLT's environment for classic and serverless pipelines.\nIn this minimal environment spec, only pip dependencies are supported.", + "fields": { + "client": { + "name": "client", + "type": "string", + "description": "Use `environment_version` instead.", + "required": false + }, + "dependencies": { + "name": "dependencies", + "type": "[]string", + "description": "List of pip dependencies, as supported by the version of pip in this\nenvironment. Each dependency is a valid pip requirements file line per\nhttps://pip.pypa.io/en/stable/reference/requirements-file-format/.\nAllowed dependencies include a requirement specifier, an archive URL, a\nlocal project path (such as WSFS or UC Volumes in Databricks), or a VCS\nproject URL.", + "required": false + }, + "environment_version": { + "name": "environment_version", + "type": "string", + "description": "Required. Environment version used by the environment. Each version comes\nwith a specific Python version and a set of Python packages. The version\nis a string, consisting of an integer.", + "required": false + }, + "java_dependencies": { + "name": "java_dependencies", + "type": "[]string", + "description": "List of java dependencies. Each dependency is a string representing a\njava library path. 
For example: `/Volumes/path/to/test.jar`.", + "required": false + } + } + }, + "compute.EventDetails": { + "name": "EventDetails", + "package": "compute", + "description": "", + "fields": { + "attributes": { + "name": "attributes", + "type": "*ClusterAttributes", + "description": "* For created clusters, the attributes of the cluster. * For edited\nclusters, the new attributes of the cluster.", + "required": false + }, + "cause": { + "name": "cause", + "type": "EventDetailsCause", + "description": "The cause of a change in target size.", + "required": false + }, + "cluster_size": { + "name": "cluster_size", + "type": "*ClusterSize", + "description": "The actual cluster size that was set in the cluster creation or edit.", + "required": false + }, + "current_num_vcpus": { + "name": "current_num_vcpus", + "type": "int", + "description": "The current number of vCPUs in the cluster.", + "required": false + }, + "current_num_workers": { + "name": "current_num_workers", + "type": "int", + "description": "The current number of nodes in the cluster.", + "required": false + }, + "did_not_expand_reason": { + "name": "did_not_expand_reason", + "type": "string", + "description": "", + "required": false + }, + "disk_size": { + "name": "disk_size", + "type": "int64", + "description": "Current disk size in bytes", + "required": false + }, + "driver_state_message": { + "name": "driver_state_message", + "type": "string", + "description": "More details about the change in driver's state", + "required": false + }, + "enable_termination_for_node_blocklisted": { + "name": "enable_termination_for_node_blocklisted", + "type": "bool", + "description": "Whether or not a blocklisted node should be terminated. For\nClusterEventType NODE_BLACKLISTED.", + "required": false + }, + "free_space": { + "name": "free_space", + "type": "int64", + "description": "", + "required": false + }, + "init_scripts": { + "name": "init_scripts", + "type": "*InitScriptEventDetails", + "description": "List of global and cluster init scripts associated with this cluster\nevent.", + "required": false + }, + "instance_id": { + "name": "instance_id", + "type": "string", + "description": "Instance Id where the event originated from", + "required": false + }, + "job_run_name": { + "name": "job_run_name", + "type": "string", + "description": "Unique identifier of the specific job run associated with this cluster\nevent * For clusters created for jobs, this will be the same as the\ncluster name", + "required": false + }, + "previous_attributes": { + "name": "previous_attributes", + "type": "*ClusterAttributes", + "description": "The cluster attributes before a cluster was edited.", + "required": false + }, + "previous_cluster_size": { + "name": "previous_cluster_size", + "type": "*ClusterSize", + "description": "The size of the cluster before an edit or resize.", + "required": false + }, + "previous_disk_size": { + "name": "previous_disk_size", + "type": "int64", + "description": "Previous disk size in bytes", + "required": false + }, + "reason": { + "name": "reason", + "type": "*TerminationReason", + "description": "A termination reason: * On a TERMINATED event, this is the reason of the\ntermination. 
* On a RESIZE_COMPLETE event, this indicates the reason that\nwe failed to acquire some nodes.", + "required": false + }, + "target_num_vcpus": { + "name": "target_num_vcpus", + "type": "int", + "description": "The targeted number of vCPUs in the cluster.", + "required": false + }, + "target_num_workers": { + "name": "target_num_workers", + "type": "int", + "description": "The targeted number of nodes in the cluster.", + "required": false + }, + "user": { + "name": "user", + "type": "string", + "description": "The user that caused the event to occur. (Empty if it was done by the\ncontrol plane.)", + "required": false + } + } + }, + "compute.GcpAttributes": { + "name": "GcpAttributes", + "package": "compute", + "description": "Attributes set during cluster creation which are related to GCP.", + "fields": { + "availability": { + "name": "availability", + "type": "GcpAvailability", + "description": "This field determines whether the spark executors will be scheduled to\nrun on preemptible VMs, on-demand VMs, or preemptible VMs with a fallback\nto on-demand VMs if the former is unavailable.", + "required": false + }, + "boot_disk_size": { + "name": "boot_disk_size", + "type": "int", + "description": "Boot disk size in GB", + "required": false + }, + "first_on_demand": { + "name": "first_on_demand", + "type": "int", + "description": "The first `first_on_demand` nodes of the cluster will be placed on\non-demand instances. This value should be greater than 0, to make sure\nthe cluster driver node is placed on an on-demand instance. If this value\nis greater than or equal to the current cluster size, all nodes will be\nplaced on on-demand instances. If this value is less than the current\ncluster size, `first_on_demand` nodes will be placed on on-demand\ninstances and the remainder will be placed on `availability` instances.\nNote ...", + "required": false + }, + "google_service_account": { + "name": "google_service_account", + "type": "string", + "description": "If provided, the cluster will impersonate the google service account when\naccessing gcloud services (like GCS). The google service account must\nhave previously been added to the Databricks environment by an account\nadministrator.", + "required": false + }, + "local_ssd_count": { + "name": "local_ssd_count", + "type": "int", + "description": "If provided, each node (workers and driver) in the cluster will have this\nnumber of local SSDs attached. Each local SSD is 375GB in size. Refer to\n[GCP documentation] for the supported number of local SSDs for each\ninstance type.\n\n[GCP documentation]: https://cloud.google.com/compute/docs/disks/local-ssd#choose_number_local_ssds", + "required": false + }, + "use_preemptible_executors": { + "name": "use_preemptible_executors", + "type": "bool", + "description": "This field determines whether the spark executors will be scheduled to\nrun on preemptible VMs (when set to true) versus standard compute engine\nVMs (when set to false; default). Note: Soon to be deprecated, use the\n'availability' field instead.", + "required": false + }, + "zone_id": { + "name": "zone_id", + "type": "string", + "description": "Identifier for the availability zone in which the cluster resides. This\ncan be one of the following: - \"HA\" =\u003e High availability, spread nodes\nacross availability zones for a Databricks deployment region [default]. -\n\"AUTO\" =\u003e Databricks picks an availability zone to schedule the cluster\non. 
- A GCP availability zone =\u003e Pick One of the available zones for\n(machine type + region) from\nhttps://cloud.google.com/compute/docs/regions-zones.", + "required": false + } + } + }, + "compute.GcpAvailability": { + "name": "GcpAvailability", + "package": "compute", + "description": "This field determines whether the instance pool will contain preemptible\nVMs, on-demand VMs, or preemptible VMs with a fallback to on-demand VMs if the former is unavailable.", + "fields": {} + }, + "compute.GcsStorageInfo": { + "name": "GcsStorageInfo", + "package": "compute", + "description": "A storage location in Google Cloud Platform's GCS", + "fields": { + "destination": { + "name": "destination", + "type": "string", + "description": "GCS destination/URI, e.g. `gs://my-bucket/some-prefix`", + "required": false + } + } + }, + "compute.GetClusterComplianceResponse": { + "name": "GetClusterComplianceResponse", + "package": "compute", + "description": "", + "fields": { + "is_compliant": { + "name": "is_compliant", + "type": "bool", + "description": "Whether the cluster is compliant with its policy or not. Clusters could\nbe out of compliance if the policy was updated after the cluster was last\nedited.", + "required": false + }, + "violations": { + "name": "violations", + "type": "map[string]string", + "description": "An object containing key-value mappings representing the first 200 policy\nvalidation errors. The keys indicate the path where the policy validation\nerror is occurring. The values indicate an error message describing the\npolicy validation error.", + "required": false + } + } + }, + "compute.GetClusterPermissionLevelsResponse": { + "name": "GetClusterPermissionLevelsResponse", + "package": "compute", + "description": "", + "fields": { + "permission_levels": { + "name": "permission_levels", + "type": "[]ClusterPermissionsDescription", + "description": "Specific permission levels", + "required": false + } + } + }, + "compute.GetClusterPolicyPermissionLevelsResponse": { + "name": "GetClusterPolicyPermissionLevelsResponse", + "package": "compute", + "description": "", + "fields": { + "permission_levels": { + "name": "permission_levels", + "type": "[]ClusterPolicyPermissionsDescription", + "description": "Specific permission levels", + "required": false + } + } + }, + "compute.GetEvents": { + "name": "GetEvents", + "package": "compute", + "description": "", + "fields": { + "cluster_id": { + "name": "cluster_id", + "type": "string", + "description": "The ID of the cluster to retrieve events about.", + "required": false + }, + "end_time": { + "name": "end_time", + "type": "int64", + "description": "The end time in epoch milliseconds. If empty, returns events up to the\ncurrent time.", + "required": false + }, + "event_types": { + "name": "event_types", + "type": "[]EventType", + "description": "An optional set of event types to filter on. If empty, all event types\nare returned.", + "required": false + }, + "limit": { + "name": "limit", + "type": "int64", + "description": "Deprecated: use page_token in combination with page_size instead.\n\nThe maximum number of events to include in a page of events. Defaults to\n50, and maximum allowed value is 500.", + "required": false + }, + "offset": { + "name": "offset", + "type": "int64", + "description": "Deprecated: use page_token in combination with page_size instead.\n\nThe offset in the result set. Defaults to 0 (no offset). 
When an offset\nis specified and the results are requested in descending order, the\nend_time field is required.", + "required": false + }, + "order": { + "name": "order", + "type": "GetEventsOrder", + "description": "The order to list events in; either \"ASC\" or \"DESC\". Defaults to \"DESC\".", + "required": false + }, + "page_size": { + "name": "page_size", + "type": "int", + "description": "The maximum number of events to include in a page of events. The server\nmay further constrain the maximum number of results returned in a single\npage. If the page_size is empty or 0, the server will decide the number\nof results to be returned. The field has to be in the range [0,500]. If\nthe value is outside the range, the server enforces 0 or 500.", + "required": false + }, + "page_token": { + "name": "page_token", + "type": "string", + "description": "Use next_page_token or prev_page_token returned from the previous request\nto list the next or previous page of events respectively. If page_token\nis empty, the first page is returned.", + "required": false + }, + "start_time": { + "name": "start_time", + "type": "int64", + "description": "The start time in epoch milliseconds. If empty, returns events starting\nfrom the beginning of time.", + "required": false + } + } + }, + "compute.GetEventsResponse": { + "name": "GetEventsResponse", + "package": "compute", + "description": "", + "fields": { + "events": { + "name": "events", + "type": "[]ClusterEvent", + "description": "", + "required": false + }, + "next_page": { + "name": "next_page", + "type": "*GetEvents", + "description": "Deprecated: use next_page_token or prev_page_token instead.\n\nThe parameters required to retrieve the next page of events. Omitted if\nthere are no more events to read.", + "required": false + }, + "next_page_token": { + "name": "next_page_token", + "type": "string", + "description": "This field represents the pagination token to retrieve the next page of\nresults. If the value is \"\", it means no further results for the request.", + "required": false + }, + "prev_page_token": { + "name": "prev_page_token", + "type": "string", + "description": "This field represents the pagination token to retrieve the previous page\nof results. If the value is \"\", it means no further results for the\nrequest.", + "required": false + }, + "total_count": { + "name": "total_count", + "type": "int64", + "description": "Deprecated: Returns 0 when request uses page_token. Will start returning\nzero when request uses offset/limit soon.\n\nThe total number of events filtered by the start_time, end_time, and\nevent_types.", + "required": false + } + } + }, + "compute.GetInstancePool": { + "name": "GetInstancePool", + "package": "compute", + "description": "", + "fields": { + "aws_attributes": { + "name": "aws_attributes", + "type": "*InstancePoolAwsAttributes", + "description": "Attributes related to instance pools running on Amazon Web Services. If\nnot specified at pool creation, a set of default values will be used.", + "required": false + }, + "azure_attributes": { + "name": "azure_attributes", + "type": "*InstancePoolAzureAttributes", + "description": "Attributes related to instance pools running on Azure. If not specified\nat pool creation, a set of default values will be used.", + "required": false + }, + "custom_tags": { + "name": "custom_tags", + "type": "map[string]string", + "description": "Additional tags for pool resources. 
Databricks will tag all pool\nresources (e.g., AWS instances and EBS volumes) with these tags in\naddition to `default_tags`. Notes:\n\n- Currently, Databricks allows at most 45 custom tags", + "required": false + }, + "default_tags": { + "name": "default_tags", + "type": "map[string]string", + "description": "Tags that are added by Databricks regardless of any ``custom_tags``,\nincluding:\n\n- Vendor: Databricks\n\n- InstancePoolCreator: \u003cuser_id_of_creator\u003e\n\n- InstancePoolName: \u003cname_of_pool\u003e\n\n- InstancePoolId: \u003cid_of_pool\u003e", + "required": false + }, + "disk_spec": { + "name": "disk_spec", + "type": "*DiskSpec", + "description": "Defines the specification of the disks that will be attached to all spark\ncontainers.", + "required": false + }, + "enable_elastic_disk": { + "name": "enable_elastic_disk", + "type": "bool", + "description": "Autoscaling Local Storage: when enabled, this instances in this pool will\ndynamically acquire additional disk space when its Spark workers are\nrunning low on disk space. In AWS, this feature requires specific AWS\npermissions to function correctly - refer to the User Guide for more\ndetails.", + "required": false + }, + "gcp_attributes": { + "name": "gcp_attributes", + "type": "*InstancePoolGcpAttributes", + "description": "Attributes related to instance pools running on Google Cloud Platform. If\nnot specified at pool creation, a set of default values will be used.", + "required": false + }, + "idle_instance_autotermination_minutes": { + "name": "idle_instance_autotermination_minutes", + "type": "int", + "description": "Automatically terminates the extra instances in the pool cache after they\nare inactive for this time in minutes if min_idle_instances requirement\nis already met. If not set, the extra pool instances will be\nautomatically terminated after a default timeout. If specified, the\nthreshold must be between 0 and 10000 minutes. Users can also set this\nvalue to 0 to instantly remove idle instances from the cache if min cache\nsize could still hold.", + "required": false + }, + "instance_pool_id": { + "name": "instance_pool_id", + "type": "string", + "description": "Canonical unique identifier for the pool.", + "required": false + }, + "instance_pool_name": { + "name": "instance_pool_name", + "type": "string", + "description": "Pool name requested by the user. Pool name must be unique. Length must be\nbetween 1 and 100 characters.", + "required": false + }, + "max_capacity": { + "name": "max_capacity", + "type": "int", + "description": "Maximum number of outstanding instances to keep in the pool, including\nboth instances used by clusters and idle instances. Clusters that require\nfurther instance provisioning will fail during upsize requests.", + "required": false + }, + "min_idle_instances": { + "name": "min_idle_instances", + "type": "int", + "description": "Minimum number of idle instances to keep in the instance pool", + "required": false + }, + "node_type_id": { + "name": "node_type_id", + "type": "string", + "description": "This field encodes, through a single value, the resources available to\neach of the Spark nodes in this cluster. For example, the Spark nodes can\nbe provisioned and optimized for memory or compute intensive workloads. 
A\nlist of available node types can be retrieved by using the\n:method:clusters/listNodeTypes API call.", + "required": false + }, + "preloaded_docker_images": { + "name": "preloaded_docker_images", + "type": "[]DockerImage", + "description": "Custom Docker Image BYOC", + "required": false + }, + "preloaded_spark_versions": { + "name": "preloaded_spark_versions", + "type": "[]string", + "description": "A list containing at most one preloaded Spark image version for the pool.\nPool-backed clusters started with the preloaded Spark version will start\nfaster. A list of available Spark versions can be retrieved by using the\n:method:clusters/sparkVersions API call.", + "required": false + }, + "remote_disk_throughput": { + "name": "remote_disk_throughput", + "type": "int", + "description": "If set, what the configurable throughput (in Mb/s) for the remote disk\nis. Currently only supported for GCP HYPERDISK_BALANCED types.", + "required": false + }, + "state": { + "name": "state", + "type": "InstancePoolState", + "description": "Current state of the instance pool.", + "required": false + }, + "stats": { + "name": "stats", + "type": "*InstancePoolStats", + "description": "Usage statistics about the instance pool.", + "required": false + }, + "status": { + "name": "status", + "type": "*InstancePoolStatus", + "description": "Status of failed pending instances in the pool.", + "required": false + }, + "total_initial_remote_disk_size": { + "name": "total_initial_remote_disk_size", + "type": "int", + "description": "If set, what the total initial volume size (in GB) of the remote disks\nshould be. Currently only supported for GCP HYPERDISK_BALANCED types.", + "required": false + } + } + }, + "compute.GetInstancePoolPermissionLevelsResponse": { + "name": "GetInstancePoolPermissionLevelsResponse", + "package": "compute", + "description": "", + "fields": { + "permission_levels": { + "name": "permission_levels", + "type": "[]InstancePoolPermissionsDescription", + "description": "Specific permission levels", + "required": false + } + } + }, + "compute.GetSparkVersionsResponse": { + "name": "GetSparkVersionsResponse", + "package": "compute", + "description": "", + "fields": { + "versions": { + "name": "versions", + "type": "[]SparkVersion", + "description": "All the available Spark versions.", + "required": false + } + } + }, + "compute.GlobalInitScriptCreateRequest": { + "name": "GlobalInitScriptCreateRequest", + "package": "compute", + "description": "", + "fields": { + "enabled": { + "name": "enabled", + "type": "bool", + "description": "Specifies whether the script is enabled. The script runs only if enabled.", + "required": false + }, + "name": { + "name": "name", + "type": "string", + "description": "The name of the script", + "required": false + }, + "position": { + "name": "position", + "type": "int", + "description": "The position of a global init script, where 0 represents the first script\nto run, 1 is the second script to run, in ascending order.\n\nIf you omit the numeric position for a new global init script, it\ndefaults to last position. It will run after all current scripts. Setting\nany value greater than the position of the last script is equivalent to\nthe last position. Example: Take three existing scripts with positions 0,\n1, and 2. 
Any position of (3) or greater puts the script in the last\nposition...", + "required": false + }, + "script": { + "name": "script", + "type": "string", + "description": "The Base64-encoded content of the script.", + "required": false + } + } + }, + "compute.GlobalInitScriptDetails": { + "name": "GlobalInitScriptDetails", + "package": "compute", + "description": "", + "fields": { + "created_at": { + "name": "created_at", + "type": "int", + "description": "Time when the script was created, represented as a Unix timestamp in\nmilliseconds.", + "required": false + }, + "created_by": { + "name": "created_by", + "type": "string", + "description": "The username of the user who created the script.", + "required": false + }, + "enabled": { + "name": "enabled", + "type": "bool", + "description": "Specifies whether the script is enabled. The script runs only if enabled.", + "required": false + }, + "name": { + "name": "name", + "type": "string", + "description": "The name of the script", + "required": false + }, + "position": { + "name": "position", + "type": "int", + "description": "The position of a script, where 0 represents the first script to run, 1\nis the second script to run, in ascending order.", + "required": false + }, + "script_id": { + "name": "script_id", + "type": "string", + "description": "The global init script ID.", + "required": false + }, + "updated_at": { + "name": "updated_at", + "type": "int", + "description": "Time when the script was updated, represented as a Unix timestamp in\nmilliseconds.", + "required": false + }, + "updated_by": { + "name": "updated_by", + "type": "string", + "description": "The username of the user who last updated the script", + "required": false + } + } + }, + "compute.GlobalInitScriptDetailsWithContent": { + "name": "GlobalInitScriptDetailsWithContent", + "package": "compute", + "description": "", + "fields": { + "created_at": { + "name": "created_at", + "type": "int", + "description": "Time when the script was created, represented as a Unix timestamp in\nmilliseconds.", + "required": false + }, + "created_by": { + "name": "created_by", + "type": "string", + "description": "The username of the user who created the script.", + "required": false + }, + "enabled": { + "name": "enabled", + "type": "bool", + "description": "Specifies whether the script is enabled. 
The script runs only if enabled.", + "required": false + }, + "name": { + "name": "name", + "type": "string", + "description": "The name of the script", + "required": false + }, + "position": { + "name": "position", + "type": "int", + "description": "The position of a script, where 0 represents the first script to run, 1\nis the second script to run, in ascending order.", + "required": false + }, + "script": { + "name": "script", + "type": "string", + "description": "The Base64-encoded content of the script.", + "required": false + }, + "script_id": { + "name": "script_id", + "type": "string", + "description": "The global init script ID.", + "required": false + }, + "updated_at": { + "name": "updated_at", + "type": "int", + "description": "Time when the script was updated, represented as a Unix timestamp in\nmilliseconds.", + "required": false + }, + "updated_by": { + "name": "updated_by", + "type": "string", + "description": "The username of the user who last updated the script", + "required": false + } + } + }, + "compute.GlobalInitScriptUpdateRequest": { + "name": "GlobalInitScriptUpdateRequest", + "package": "compute", + "description": "", + "fields": { + "enabled": { + "name": "enabled", + "type": "bool", + "description": "Specifies whether the script is enabled. The script runs only if enabled.", + "required": false + }, + "name": { + "name": "name", + "type": "string", + "description": "The name of the script", + "required": false + }, + "position": { + "name": "position", + "type": "int", + "description": "The position of a script, where 0 represents the first script to run, 1\nis the second script to run, in ascending order. To move the script to\nrun first, set its position to 0.\n\nTo move the script to the end, set its position to any value greater or\nequal to the position of the last script. Example, three existing scripts\nwith positions 0, 1, and 2. 
Any position value of 2 or greater puts the\nscript in the last position (2).\n\nIf an explicit position value conflicts with an existing script, yo...", + "required": false + }, + "script": { + "name": "script", + "type": "string", + "description": "The Base64-encoded content of the script.", + "required": false + } + } + }, + "compute.InitScriptEventDetails": { + "name": "InitScriptEventDetails", + "package": "compute", + "description": "", + "fields": { + "cluster": { + "name": "cluster", + "type": "[]InitScriptInfoAndExecutionDetails", + "description": "The cluster scoped init scripts associated with this cluster event.", + "required": false + }, + "global": { + "name": "global", + "type": "[]InitScriptInfoAndExecutionDetails", + "description": "The global init scripts associated with this cluster event.", + "required": false + }, + "reported_for_node": { + "name": "reported_for_node", + "type": "string", + "description": "The private ip of the node we are reporting init script execution details\nfor (we will select the execution details from only one node rather than\nreporting the execution details from every node to keep these event\ndetails small)\n\nThis should only be defined for the INIT_SCRIPTS_FINISHED event", + "required": false + } + } + }, + "compute.InitScriptInfo": { + "name": "InitScriptInfo", + "package": "compute", + "description": "Config for an individual init script\nNext ID: 11", + "fields": { + "abfss": { + "name": "abfss", + "type": "*Adlsgen2Info", + "description": "destination needs to be provided, e.g.\n`abfss://\u003ccontainer-name\u003e@\u003cstorage-account-name\u003e.dfs.core.windows.net/\u003cdirectory-name\u003e`", + "required": false + }, + "dbfs": { + "name": "dbfs", + "type": "*DbfsStorageInfo", + "description": "destination needs to be provided. e.g. `{ \"dbfs\": { \"destination\" :\n\"dbfs:/home/cluster_log\" } }`", + "required": false + }, + "file": { + "name": "file", + "type": "*LocalFileInfo", + "description": "destination needs to be provided, e.g. `{ \"file\": { \"destination\":\n\"file:/my/local/file.sh\" } }`", + "required": false + }, + "gcs": { + "name": "gcs", + "type": "*GcsStorageInfo", + "description": "destination needs to be provided, e.g. `{ \"gcs\": { \"destination\":\n\"gs://my-bucket/file.sh\" } }`", + "required": false + }, + "s3": { + "name": "s3", + "type": "*S3StorageInfo", + "description": "destination and either the region or endpoint need to be provided. e.g.\n`{ \\\"s3\\\": { \\\"destination\\\": \\\"s3://cluster_log_bucket/prefix\\\",\n\\\"region\\\": \\\"us-west-2\\\" } }` Cluster iam role is used to access s3,\nplease make sure the cluster iam role in `instance_profile_arn` has\npermission to write data to the s3 destination.", + "required": false + }, + "volumes": { + "name": "volumes", + "type": "*VolumesStorageInfo", + "description": "destination needs to be provided. e.g. `{ \\\"volumes\\\" : { \\\"destination\\\"\n: \\\"/Volumes/my-init.sh\\\" } }`", + "required": false + }, + "workspace": { + "name": "workspace", + "type": "*WorkspaceStorageInfo", + "description": "destination needs to be provided, e.g. 
`{ \"workspace\": { \"destination\":\n\"/cluster-init-scripts/setup-datadog.sh\" } }`", + "required": false + } + } + }, + "compute.InitScriptInfoAndExecutionDetails": { + "name": "InitScriptInfoAndExecutionDetails", + "package": "compute", + "description": "", + "fields": { + "abfss": { + "name": "abfss", + "type": "*Adlsgen2Info", + "description": "destination needs to be provided, e.g.\n`abfss://\u003ccontainer-name\u003e@\u003cstorage-account-name\u003e.dfs.core.windows.net/\u003cdirectory-name\u003e`", + "required": false + }, + "dbfs": { + "name": "dbfs", + "type": "*DbfsStorageInfo", + "description": "destination needs to be provided. e.g. `{ \"dbfs\": { \"destination\" :\n\"dbfs:/home/cluster_log\" } }`", + "required": false + }, + "error_message": { + "name": "error_message", + "type": "string", + "description": "Additional details regarding errors (such as a file not found message if\nthe status is FAILED_FETCH). This field should only be used to provide\n*additional* information to the status field, not duplicate it.", + "required": false + }, + "execution_duration_seconds": { + "name": "execution_duration_seconds", + "type": "int", + "description": "The number duration of the script execution in seconds", + "required": false + }, + "file": { + "name": "file", + "type": "*LocalFileInfo", + "description": "destination needs to be provided, e.g. `{ \"file\": { \"destination\":\n\"file:/my/local/file.sh\" } }`", + "required": false + }, + "gcs": { + "name": "gcs", + "type": "*GcsStorageInfo", + "description": "destination needs to be provided, e.g. `{ \"gcs\": { \"destination\":\n\"gs://my-bucket/file.sh\" } }`", + "required": false + }, + "s3": { + "name": "s3", + "type": "*S3StorageInfo", + "description": "destination and either the region or endpoint need to be provided. e.g.\n`{ \\\"s3\\\": { \\\"destination\\\": \\\"s3://cluster_log_bucket/prefix\\\",\n\\\"region\\\": \\\"us-west-2\\\" } }` Cluster iam role is used to access s3,\nplease make sure the cluster iam role in `instance_profile_arn` has\npermission to write data to the s3 destination.", + "required": false + }, + "status": { + "name": "status", + "type": "InitScriptExecutionDetailsInitScriptExecutionStatus", + "description": "The current status of the script", + "required": false + }, + "volumes": { + "name": "volumes", + "type": "*VolumesStorageInfo", + "description": "destination needs to be provided. e.g. `{ \\\"volumes\\\" : { \\\"destination\\\"\n: \\\"/Volumes/my-init.sh\\\" } }`", + "required": false + }, + "workspace": { + "name": "workspace", + "type": "*WorkspaceStorageInfo", + "description": "destination needs to be provided, e.g. 
`{ \"workspace\": { \"destination\":\n\"/cluster-init-scripts/setup-datadog.sh\" } }`", + "required": false + } + } + }, + "compute.InstallLibraries": { + "name": "InstallLibraries", + "package": "compute", + "description": "", + "fields": { + "cluster_id": { + "name": "cluster_id", + "type": "string", + "description": "Unique identifier for the cluster on which to install these libraries.", + "required": false + }, + "libraries": { + "name": "libraries", + "type": "[]Library", + "description": "The libraries to install.", + "required": false + } + } + }, + "compute.InstancePoolAccessControlRequest": { + "name": "InstancePoolAccessControlRequest", + "package": "compute", + "description": "", + "fields": { + "group_name": { + "name": "group_name", + "type": "string", + "description": "name of the group", + "required": false + }, + "permission_level": { + "name": "permission_level", + "type": "InstancePoolPermissionLevel", + "description": "", + "required": false + }, + "service_principal_name": { + "name": "service_principal_name", + "type": "string", + "description": "application ID of a service principal", + "required": false + }, + "user_name": { + "name": "user_name", + "type": "string", + "description": "name of the user", + "required": false + } + } + }, + "compute.InstancePoolAccessControlResponse": { + "name": "InstancePoolAccessControlResponse", + "package": "compute", + "description": "", + "fields": { + "all_permissions": { + "name": "all_permissions", + "type": "[]InstancePoolPermission", + "description": "All permissions.", + "required": false + }, + "display_name": { + "name": "display_name", + "type": "string", + "description": "Display name of the user or service principal.", + "required": false + }, + "group_name": { + "name": "group_name", + "type": "string", + "description": "name of the group", + "required": false + }, + "service_principal_name": { + "name": "service_principal_name", + "type": "string", + "description": "Name of the service principal.", + "required": false + }, + "user_name": { + "name": "user_name", + "type": "string", + "description": "name of the user", + "required": false + } + } + }, + "compute.InstancePoolAndStats": { + "name": "InstancePoolAndStats", + "package": "compute", + "description": "", + "fields": { + "aws_attributes": { + "name": "aws_attributes", + "type": "*InstancePoolAwsAttributes", + "description": "Attributes related to instance pools running on Amazon Web Services. If\nnot specified at pool creation, a set of default values will be used.", + "required": false + }, + "azure_attributes": { + "name": "azure_attributes", + "type": "*InstancePoolAzureAttributes", + "description": "Attributes related to instance pools running on Azure. If not specified\nat pool creation, a set of default values will be used.", + "required": false + }, + "custom_tags": { + "name": "custom_tags", + "type": "map[string]string", + "description": "Additional tags for pool resources. Databricks will tag all pool\nresources (e.g., AWS instances and EBS volumes) with these tags in\naddition to `default_tags`. 
Notes:\n\n- Currently, Databricks allows at most 45 custom tags", + "required": false + }, + "default_tags": { + "name": "default_tags", + "type": "map[string]string", + "description": "Tags that are added by Databricks regardless of any ``custom_tags``,\nincluding:\n\n- Vendor: Databricks\n\n- InstancePoolCreator: \u003cuser_id_of_creator\u003e\n\n- InstancePoolName: \u003cname_of_pool\u003e\n\n- InstancePoolId: \u003cid_of_pool\u003e", + "required": false + }, + "disk_spec": { + "name": "disk_spec", + "type": "*DiskSpec", + "description": "Defines the specification of the disks that will be attached to all spark\ncontainers.", + "required": false + }, + "enable_elastic_disk": { + "name": "enable_elastic_disk", + "type": "bool", + "description": "Autoscaling Local Storage: when enabled, this instances in this pool will\ndynamically acquire additional disk space when its Spark workers are\nrunning low on disk space. In AWS, this feature requires specific AWS\npermissions to function correctly - refer to the User Guide for more\ndetails.", + "required": false + }, + "gcp_attributes": { + "name": "gcp_attributes", + "type": "*InstancePoolGcpAttributes", + "description": "Attributes related to instance pools running on Google Cloud Platform. If\nnot specified at pool creation, a set of default values will be used.", + "required": false + }, + "idle_instance_autotermination_minutes": { + "name": "idle_instance_autotermination_minutes", + "type": "int", + "description": "Automatically terminates the extra instances in the pool cache after they\nare inactive for this time in minutes if min_idle_instances requirement\nis already met. If not set, the extra pool instances will be\nautomatically terminated after a default timeout. If specified, the\nthreshold must be between 0 and 10000 minutes. Users can also set this\nvalue to 0 to instantly remove idle instances from the cache if min cache\nsize could still hold.", + "required": false + }, + "instance_pool_id": { + "name": "instance_pool_id", + "type": "string", + "description": "Canonical unique identifier for the pool.", + "required": false + }, + "instance_pool_name": { + "name": "instance_pool_name", + "type": "string", + "description": "Pool name requested by the user. Pool name must be unique. Length must be\nbetween 1 and 100 characters.", + "required": false + }, + "max_capacity": { + "name": "max_capacity", + "type": "int", + "description": "Maximum number of outstanding instances to keep in the pool, including\nboth instances used by clusters and idle instances. Clusters that require\nfurther instance provisioning will fail during upsize requests.", + "required": false + }, + "min_idle_instances": { + "name": "min_idle_instances", + "type": "int", + "description": "Minimum number of idle instances to keep in the instance pool", + "required": false + }, + "node_type_id": { + "name": "node_type_id", + "type": "string", + "description": "This field encodes, through a single value, the resources available to\neach of the Spark nodes in this cluster. For example, the Spark nodes can\nbe provisioned and optimized for memory or compute intensive workloads. 
A\nlist of available node types can be retrieved by using the\n:method:clusters/listNodeTypes API call.", + "required": false + }, + "preloaded_docker_images": { + "name": "preloaded_docker_images", + "type": "[]DockerImage", + "description": "Custom Docker Image BYOC", + "required": false + }, + "preloaded_spark_versions": { + "name": "preloaded_spark_versions", + "type": "[]string", + "description": "A list containing at most one preloaded Spark image version for the pool.\nPool-backed clusters started with the preloaded Spark version will start\nfaster. A list of available Spark versions can be retrieved by using the\n:method:clusters/sparkVersions API call.", + "required": false + }, + "remote_disk_throughput": { + "name": "remote_disk_throughput", + "type": "int", + "description": "If set, what the configurable throughput (in Mb/s) for the remote disk\nis. Currently only supported for GCP HYPERDISK_BALANCED types.", + "required": false + }, + "state": { + "name": "state", + "type": "InstancePoolState", + "description": "Current state of the instance pool.", + "required": false + }, + "stats": { + "name": "stats", + "type": "*InstancePoolStats", + "description": "Usage statistics about the instance pool.", + "required": false + }, + "status": { + "name": "status", + "type": "*InstancePoolStatus", + "description": "Status of failed pending instances in the pool.", + "required": false + }, + "total_initial_remote_disk_size": { + "name": "total_initial_remote_disk_size", + "type": "int", + "description": "If set, what the total initial volume size (in GB) of the remote disks\nshould be. Currently only supported for GCP HYPERDISK_BALANCED types.", + "required": false + } + } + }, + "compute.InstancePoolAwsAttributes": { + "name": "InstancePoolAwsAttributes", + "package": "compute", + "description": "Attributes set during instance pool creation which are related to Amazon Web\nServices.", + "fields": { + "availability": { + "name": "availability", + "type": "InstancePoolAwsAttributesAvailability", + "description": "Availability type used for the spot nodes.", + "required": false + }, + "instance_profile_arn": { + "name": "instance_profile_arn", + "type": "string", + "description": "All AWS instances belonging to the instance pool will have this instance\nprofile. If omitted, instances will initially be launched with the\nworkspace's default instance profile. If defined, clusters that use the\npool will inherit the instance profile, and must not specify their own\ninstance profile on cluster creation or update. If the pool does not\nspecify an instance profile, clusters using the pool may specify any\ninstance profile. The instance profile must have previously been added to\nth...", + "required": false + }, + "spot_bid_price_percent": { + "name": "spot_bid_price_percent", + "type": "int", + "description": "Calculates the bid price for AWS spot instances, as a percentage of the\ncorresponding instance type's on-demand price. For example, if this field\nis set to 50, and the cluster needs a new `r3.xlarge` spot instance, then\nthe bid price is half of the price of on-demand `r3.xlarge` instances.\nSimilarly, if this field is set to 200, the bid price is twice the price\nof on-demand `r3.xlarge` instances. If not specified, the default value\nis 100. When spot instances are requested for this cluster, o...", + "required": false + }, + "zone_id": { + "name": "zone_id", + "type": "string", + "description": "Identifier for the availability zone/datacenter in which the cluster\nresides. 
This string will be of a form like \"us-west-2a\". The provided\navailability zone must be in the same region as the Databricks\ndeployment. For example, \"us-west-2a\" is not a valid zone id if the\nDatabricks deployment resides in the \"us-east-1\" region. This is an\noptional field at cluster creation, and if not specified, a default zone\nwill be used. The list of available zones as well as the default value\ncan be found b...", + "required": false + } + } + }, + "compute.InstancePoolAzureAttributes": { + "name": "InstancePoolAzureAttributes", + "package": "compute", + "description": "Attributes set during instance pool creation which are related to Azure.", + "fields": { + "availability": { + "name": "availability", + "type": "InstancePoolAzureAttributesAvailability", + "description": "Availability type used for the spot nodes.", + "required": false + }, + "spot_bid_max_price": { + "name": "spot_bid_max_price", + "type": "float64", + "description": "With variable pricing, you have option to set a max price, in US dollars\n(USD) For example, the value 2 would be a max price of $2.00 USD per\nhour. If you set the max price to be -1, the VM won't be evicted based on\nprice. The price for the VM will be the current price for spot or the\nprice for a standard VM, which ever is less, as long as there is capacity\nand quota available.", + "required": false + } + } + }, + "compute.InstancePoolGcpAttributes": { + "name": "InstancePoolGcpAttributes", + "package": "compute", + "description": "Attributes set during instance pool creation which are related to GCP.", + "fields": { + "gcp_availability": { + "name": "gcp_availability", + "type": "GcpAvailability", + "description": "", + "required": false + }, + "local_ssd_count": { + "name": "local_ssd_count", + "type": "int", + "description": "If provided, each node in the instance pool will have this number of\nlocal SSDs attached. Each local SSD is 375GB in size. Refer to [GCP\ndocumentation] for the supported number of local SSDs for each instance\ntype.\n\n[GCP documentation]: https://cloud.google.com/compute/docs/disks/local-ssd#choose_number_local_ssds", + "required": false + }, + "zone_id": { + "name": "zone_id", + "type": "string", + "description": "Identifier for the availability zone/datacenter in which the cluster\nresides. This string will be of a form like \"us-west1-a\". The provided\navailability zone must be in the same region as the Databricks workspace.\nFor example, \"us-west1-a\" is not a valid zone id if the Databricks\nworkspace resides in the \"us-east1\" region. 
This is an optional field at\ninstance pool creation, and if not specified, a default zone will be\nused.\n\nThis field can be one of the following: - \"HA\" =\u003e High availability...", + "required": false + } + } + }, + "compute.InstancePoolPermission": { + "name": "InstancePoolPermission", + "package": "compute", + "description": "", + "fields": { + "inherited": { + "name": "inherited", + "type": "bool", + "description": "", + "required": false + }, + "inherited_from_object": { + "name": "inherited_from_object", + "type": "[]string", + "description": "", + "required": false + }, + "permission_level": { + "name": "permission_level", + "type": "InstancePoolPermissionLevel", + "description": "", + "required": false + } + } + }, + "compute.InstancePoolPermissions": { + "name": "InstancePoolPermissions", + "package": "compute", + "description": "", + "fields": { + "access_control_list": { + "name": "access_control_list", + "type": "[]InstancePoolAccessControlResponse", + "description": "", + "required": false + }, + "object_id": { + "name": "object_id", + "type": "string", + "description": "", + "required": false + }, + "object_type": { + "name": "object_type", + "type": "string", + "description": "", + "required": false + } + } + }, + "compute.InstancePoolPermissionsDescription": { + "name": "InstancePoolPermissionsDescription", + "package": "compute", + "description": "", + "fields": { + "description": { + "name": "description", + "type": "string", + "description": "", + "required": false + }, + "permission_level": { + "name": "permission_level", + "type": "InstancePoolPermissionLevel", + "description": "", + "required": false + } + } + }, + "compute.InstancePoolPermissionsRequest": { + "name": "InstancePoolPermissionsRequest", + "package": "compute", + "description": "", + "fields": { + "access_control_list": { + "name": "access_control_list", + "type": "[]InstancePoolAccessControlRequest", + "description": "", + "required": false + } + } + }, + "compute.InstancePoolStats": { + "name": "InstancePoolStats", + "package": "compute", + "description": "", + "fields": { + "idle_count": { + "name": "idle_count", + "type": "int", + "description": "Number of active instances in the pool that are NOT part of a cluster.", + "required": false + }, + "pending_idle_count": { + "name": "pending_idle_count", + "type": "int", + "description": "Number of pending instances in the pool that are NOT part of a cluster.", + "required": false + }, + "pending_used_count": { + "name": "pending_used_count", + "type": "int", + "description": "Number of pending instances in the pool that are part of a cluster.", + "required": false + }, + "used_count": { + "name": "used_count", + "type": "int", + "description": "Number of active instances in the pool that are part of a cluster.", + "required": false + } + } + }, + "compute.InstancePoolStatus": { + "name": "InstancePoolStatus", + "package": "compute", + "description": "", + "fields": { + "pending_instance_errors": { + "name": "pending_instance_errors", + "type": "[]PendingInstanceError", + "description": "List of error messages for the failed pending instances. The\npending_instance_errors follows FIFO with maximum length of the min_idle\nof the pool. 
The pending_instance_errors is emptied once the number of\nexiting available instances reaches the min_idle of the pool.", + "required": false + } + } + }, + "compute.InstanceProfile": { + "name": "InstanceProfile", + "package": "compute", + "description": "", + "fields": { + "iam_role_arn": { + "name": "iam_role_arn", + "type": "string", + "description": "The AWS IAM role ARN of the role associated with the instance profile.\nThis field is required if your role name and instance profile name do not\nmatch and you want to use the instance profile with [Databricks SQL\nServerless].\n\nOtherwise, this field is optional.\n\n[Databricks SQL Serverless]: https://docs.databricks.com/sql/admin/serverless.html", + "required": false + }, + "instance_profile_arn": { + "name": "instance_profile_arn", + "type": "string", + "description": "The AWS ARN of the instance profile to register with Databricks. This\nfield is required.", + "required": false + }, + "is_meta_instance_profile": { + "name": "is_meta_instance_profile", + "type": "bool", + "description": "Boolean flag indicating whether the instance profile should only be used\nin credential passthrough scenarios. If true, it means the instance\nprofile contains an meta IAM role which could assume a wide range of\nroles. Therefore it should always be used with authorization. This field\nis optional, the default value is `false`.", + "required": false + } + } + }, + "compute.Library": { + "name": "Library", + "package": "compute", + "description": "", + "fields": { + "cran": { + "name": "cran", + "type": "*RCranLibrary", + "description": "Specification of a CRAN library to be installed as part of the library", + "required": false + }, + "egg": { + "name": "egg", + "type": "string", + "description": "Deprecated. URI of the egg library to install. Installing Python egg\nfiles is deprecated and is not supported in Databricks Runtime 14.0 and\nabove.", + "required": false + }, + "jar": { + "name": "jar", + "type": "string", + "description": "URI of the JAR library to install. Supported URIs include Workspace\npaths, Unity Catalog Volumes paths, and S3 URIs. For example: `{ \"jar\":\n\"/Workspace/path/to/library.jar\" }`, `{ \"jar\" :\n\"/Volumes/path/to/library.jar\" }` or `{ \"jar\":\n\"s3://my-bucket/library.jar\" }`. If S3 is used, please make sure the\ncluster has read access on the library. You may need to launch the\ncluster with an IAM role to access the S3 URI.", + "required": false + }, + "maven": { + "name": "maven", + "type": "*MavenLibrary", + "description": "Specification of a maven library to be installed. For example: `{\n\"coordinates\": \"org.jsoup:jsoup:1.7.2\" }`", + "required": false + }, + "pypi": { + "name": "pypi", + "type": "*PythonPyPiLibrary", + "description": "Specification of a PyPi library to be installed. For example: `{\n\"package\": \"simplejson\" }`", + "required": false + }, + "requirements": { + "name": "requirements", + "type": "string", + "description": "URI of the requirements.txt file to install. Only Workspace paths and\nUnity Catalog Volumes paths are supported. For example: `{\n\"requirements\": \"/Workspace/path/to/requirements.txt\" }` or `{\n\"requirements\" : \"/Volumes/path/to/requirements.txt\" }`", + "required": false + }, + "whl": { + "name": "whl", + "type": "string", + "description": "URI of the wheel library to install. Supported URIs include Workspace\npaths, Unity Catalog Volumes paths, and S3 URIs. 
For example: `{ \"whl\":\n\"/Workspace/path/to/library.whl\" }`, `{ \"whl\" :\n\"/Volumes/path/to/library.whl\" }` or `{ \"whl\":\n\"s3://my-bucket/library.whl\" }`. If S3 is used, please make sure the\ncluster has read access on the library. You may need to launch the\ncluster with an IAM role to access the S3 URI.", + "required": false + } + } + }, + "compute.LibraryFullStatus": { + "name": "LibraryFullStatus", + "package": "compute", + "description": "The status of the library on a specific cluster.", + "fields": { + "is_library_for_all_clusters": { + "name": "is_library_for_all_clusters", + "type": "bool", + "description": "Whether the library was set to be installed on all clusters via the\nlibraries UI.", + "required": false + }, + "library": { + "name": "library", + "type": "*Library", + "description": "Unique identifier for the library.", + "required": false + }, + "messages": { + "name": "messages", + "type": "[]string", + "description": "All the info and warning messages that have occurred so far for this\nlibrary.", + "required": false + }, + "status": { + "name": "status", + "type": "LibraryInstallStatus", + "description": "Status of installing the library on the cluster.", + "required": false + } + } + }, + "compute.ListAllClusterLibraryStatusesResponse": { + "name": "ListAllClusterLibraryStatusesResponse", + "package": "compute", + "description": "", + "fields": { + "statuses": { + "name": "statuses", + "type": "[]ClusterLibraryStatuses", + "description": "A list of cluster statuses.", + "required": false + } + } + }, + "compute.ListAvailableZonesResponse": { + "name": "ListAvailableZonesResponse", + "package": "compute", + "description": "", + "fields": { + "default_zone": { + "name": "default_zone", + "type": "string", + "description": "The availability zone if no ``zone_id`` is provided in the cluster\ncreation request.", + "required": false + }, + "zones": { + "name": "zones", + "type": "[]string", + "description": "The list of available zones (e.g., ['us-west-2c', 'us-east-2']).", + "required": false + } + } + }, + "compute.ListClusterCompliancesResponse": { + "name": "ListClusterCompliancesResponse", + "package": "compute", + "description": "", + "fields": { + "clusters": { + "name": "clusters", + "type": "[]ClusterCompliance", + "description": "A list of clusters and their policy compliance statuses.", + "required": false + }, + "next_page_token": { + "name": "next_page_token", + "type": "string", + "description": "This field represents the pagination token to retrieve the next page of\nresults. If the value is \"\", it means no further results for the request.", + "required": false + }, + "prev_page_token": { + "name": "prev_page_token", + "type": "string", + "description": "This field represents the pagination token to retrieve the previous page\nof results. 
If the value is \"\", it means no further results for the\nrequest.", + "required": false + } + } + }, + "compute.ListClustersFilterBy": { + "name": "ListClustersFilterBy", + "package": "compute", + "description": "", + "fields": { + "cluster_sources": { + "name": "cluster_sources", + "type": "[]ClusterSource", + "description": "The source of cluster creation.", + "required": false + }, + "cluster_states": { + "name": "cluster_states", + "type": "[]State", + "description": "The current state of the clusters.", + "required": false + }, + "is_pinned": { + "name": "is_pinned", + "type": "bool", + "description": "Whether the clusters are pinned or not.", + "required": false + }, + "policy_id": { + "name": "policy_id", + "type": "string", + "description": "The ID of the cluster policy used to create the cluster if applicable.", + "required": false + } + } + }, + "compute.ListClustersResponse": { + "name": "ListClustersResponse", + "package": "compute", + "description": "", + "fields": { + "clusters": { + "name": "clusters", + "type": "[]ClusterDetails", + "description": "", + "required": false + }, + "next_page_token": { + "name": "next_page_token", + "type": "string", + "description": "This field represents the pagination token to retrieve the next page of\nresults. If the value is \"\", it means no further results for the request.", + "required": false + }, + "prev_page_token": { + "name": "prev_page_token", + "type": "string", + "description": "This field represents the pagination token to retrieve the previous page\nof results. If the value is \"\", it means no further results for the\nrequest.", + "required": false + } + } + }, + "compute.ListClustersSortBy": { + "name": "ListClustersSortBy", + "package": "compute", + "description": "", + "fields": { + "direction": { + "name": "direction", + "type": "ListClustersSortByDirection", + "description": "The direction to sort by.", + "required": false + }, + "field": { + "name": "field", + "type": "ListClustersSortByField", + "description": "The sorting criteria. 
By default, clusters are sorted by 3 columns from\nhighest to lowest precedence: cluster state, pinned or unpinned, then\ncluster name.", + "required": false + } + } + }, + "compute.ListGlobalInitScriptsResponse": { + "name": "ListGlobalInitScriptsResponse", + "package": "compute", + "description": "", + "fields": { + "scripts": { + "name": "scripts", + "type": "[]GlobalInitScriptDetails", + "description": "", + "required": false + } + } + }, + "compute.ListInstancePools": { + "name": "ListInstancePools", + "package": "compute", + "description": "", + "fields": { + "instance_pools": { + "name": "instance_pools", + "type": "[]InstancePoolAndStats", + "description": "", + "required": false + } + } + }, + "compute.ListInstanceProfilesResponse": { + "name": "ListInstanceProfilesResponse", + "package": "compute", + "description": "", + "fields": { + "instance_profiles": { + "name": "instance_profiles", + "type": "[]InstanceProfile", + "description": "A list of instance profiles that the user can access.", + "required": false + } + } + }, + "compute.ListNodeTypesResponse": { + "name": "ListNodeTypesResponse", + "package": "compute", + "description": "", + "fields": { + "node_types": { + "name": "node_types", + "type": "[]NodeType", + "description": "The list of available Spark node types.", + "required": false + } + } + }, + "compute.ListPoliciesResponse": { + "name": "ListPoliciesResponse", + "package": "compute", + "description": "", + "fields": { + "policies": { + "name": "policies", + "type": "[]Policy", + "description": "List of policies.", + "required": false + } + } + }, + "compute.ListPolicyFamiliesResponse": { + "name": "ListPolicyFamiliesResponse", + "package": "compute", + "description": "", + "fields": { + "next_page_token": { + "name": "next_page_token", + "type": "string", + "description": "A token that can be used to get the next page of results. If not present,\nthere are no more results to show.", + "required": false + }, + "policy_families": { + "name": "policy_families", + "type": "[]PolicyFamily", + "description": "List of policy families.", + "required": false + } + } + }, + "compute.LocalFileInfo": { + "name": "LocalFileInfo", + "package": "compute", + "description": "", + "fields": { + "destination": { + "name": "destination", + "type": "string", + "description": "local file destination, e.g. `file:/my/local/file.sh`", + "required": false + } + } + }, + "compute.LogAnalyticsInfo": { + "name": "LogAnalyticsInfo", + "package": "compute", + "description": "", + "fields": { + "log_analytics_primary_key": { + "name": "log_analytics_primary_key", + "type": "string", + "description": "", + "required": false + }, + "log_analytics_workspace_id": { + "name": "log_analytics_workspace_id", + "type": "string", + "description": "", + "required": false + } + } + }, + "compute.LogSyncStatus": { + "name": "LogSyncStatus", + "package": "compute", + "description": "The log delivery status", + "fields": { + "last_attempted": { + "name": "last_attempted", + "type": "int64", + "description": "The timestamp of last attempt. 
If the last attempt fails,\n`last_exception` will contain the exception in the last attempt.", + "required": false + }, + "last_exception": { + "name": "last_exception", + "type": "string", + "description": "The exception thrown in the last attempt, it would be null (omitted in\nthe response) if there is no exception in last attempted.", + "required": false + } + } + }, + "compute.MavenLibrary": { + "name": "MavenLibrary", + "package": "compute", + "description": "", + "fields": { + "coordinates": { + "name": "coordinates", + "type": "string", + "description": "Gradle-style maven coordinates. For example: \"org.jsoup:jsoup:1.7.2\".", + "required": false + }, + "exclusions": { + "name": "exclusions", + "type": "[]string", + "description": "List of dependences to exclude. For example: `[\"slf4j:slf4j\",\n\"*:hadoop-client\"]`.\n\nMaven dependency exclusions:\nhttps://maven.apache.org/guides/introduction/introduction-to-optional-and-excludes-dependencies.html.", + "required": false + }, + "repo": { + "name": "repo", + "type": "string", + "description": "Maven repo to install the Maven package from. If omitted, both Maven\nCentral Repository and Spark Packages are searched.", + "required": false + } + } + }, + "compute.NodeInstanceType": { + "name": "NodeInstanceType", + "package": "compute", + "description": "This structure embodies the machine type that hosts spark containers Note:\nthis should be an internal data structure for now It is defined in proto in\ncase we want to send it over the wire in the future (which is likely)", + "fields": { + "instance_type_id": { + "name": "instance_type_id", + "type": "string", + "description": "Unique identifier across instance types", + "required": false + }, + "local_disk_size_gb": { + "name": "local_disk_size_gb", + "type": "int", + "description": "Size of the individual local disks attached to this instance (i.e. per\nlocal disk).", + "required": false + }, + "local_disks": { + "name": "local_disks", + "type": "int", + "description": "Number of local disks that are present on this instance.", + "required": false + }, + "local_nvme_disk_size_gb": { + "name": "local_nvme_disk_size_gb", + "type": "int", + "description": "Size of the individual local nvme disks attached to this instance (i.e.\nper local disk).", + "required": false + }, + "local_nvme_disks": { + "name": "local_nvme_disks", + "type": "int", + "description": "Number of local nvme disks that are present on this instance.", + "required": false + } + } + }, + "compute.NodeType": { + "name": "NodeType", + "package": "compute", + "description": "A description of a Spark node type including both the dimensions of the node\nand the instance type on which it will be hosted.", + "fields": { + "category": { + "name": "category", + "type": "string", + "description": "A descriptive category for this node type. Examples include \"Memory\nOptimized\" and \"Compute Optimized\".", + "required": false + }, + "description": { + "name": "description", + "type": "string", + "description": "A string description associated with this node type, e.g., \"r3.xlarge\".", + "required": false + }, + "display_order": { + "name": "display_order", + "type": "int", + "description": "An optional hint at the display order of node types in the UI. 
Within a\nnode type category, lowest numbers come first.", + "required": false + }, + "instance_type_id": { + "name": "instance_type_id", + "type": "string", + "description": "An identifier for the type of hardware that this node runs on, e.g.,\n\"r3.2xlarge\" in AWS.", + "required": false + }, + "is_deprecated": { + "name": "is_deprecated", + "type": "bool", + "description": "Whether the node type is deprecated. Non-deprecated node types offer\ngreater performance.", + "required": false + }, + "is_encrypted_in_transit": { + "name": "is_encrypted_in_transit", + "type": "bool", + "description": "AWS specific, whether this instance supports encryption in transit, used\nfor hipaa and pci workloads.", + "required": false + }, + "is_graviton": { + "name": "is_graviton", + "type": "bool", + "description": "Whether this is an Arm-based instance.", + "required": false + }, + "is_hidden": { + "name": "is_hidden", + "type": "bool", + "description": "Whether this node is hidden from presentation in the UI.", + "required": false + }, + "is_io_cache_enabled": { + "name": "is_io_cache_enabled", + "type": "bool", + "description": "Whether this node comes with IO cache enabled by default.", + "required": false + }, + "memory_mb": { + "name": "memory_mb", + "type": "int", + "description": "Memory (in MB) available for this node type.", + "required": false + }, + "node_info": { + "name": "node_info", + "type": "*CloudProviderNodeInfo", + "description": "A collection of node type info reported by the cloud provider", + "required": false + }, + "node_instance_type": { + "name": "node_instance_type", + "type": "*NodeInstanceType", + "description": "The NodeInstanceType object corresponding to instance_type_id", + "required": false + }, + "node_type_id": { + "name": "node_type_id", + "type": "string", + "description": "Unique identifier for this node type.", + "required": false + }, + "num_cores": { + "name": "num_cores", + "type": "float64", + "description": "Number of CPU cores available for this node type. Note that this can be\nfractional, e.g., 2.5 cores, if the the number of cores on a machine\ninstance is not divisible by the number of Spark nodes on that machine.", + "required": false + }, + "num_gpus": { + "name": "num_gpus", + "type": "int", + "description": "Number of GPUs available for this node type.", + "required": false + }, + "photon_driver_capable": { + "name": "photon_driver_capable", + "type": "bool", + "description": "", + "required": false + }, + "photon_worker_capable": { + "name": "photon_worker_capable", + "type": "bool", + "description": "", + "required": false + }, + "support_cluster_tags": { + "name": "support_cluster_tags", + "type": "bool", + "description": "Whether this node type support cluster tags.", + "required": false + }, + "support_ebs_volumes": { + "name": "support_ebs_volumes", + "type": "bool", + "description": "Whether this node type support EBS volumes. 
EBS volumes is disabled for\nnode types that we could place multiple corresponding containers on the\nsame hosting instance.", + "required": false + }, + "support_port_forwarding": { + "name": "support_port_forwarding", + "type": "bool", + "description": "Whether this node type supports port forwarding.", + "required": false + } + } + }, + "compute.NodeTypeRequest": { + "name": "NodeTypeRequest", + "package": "compute", + "description": "NodeTypeRequest is a wrapper for local filtering of node types", + "fields": { + "category": { + "name": "category", + "type": "string", + "description": "", + "required": false + }, + "fleet": { + "name": "fleet", + "type": "bool", + "description": "", + "required": false + }, + "gb_per_core": { + "name": "gb_per_core", + "type": "int32", + "description": "", + "required": false + }, + "graviton": { + "name": "graviton", + "type": "bool", + "description": "", + "required": false + }, + "id": { + "name": "id", + "type": "string", + "description": "", + "required": false + }, + "is_io_cache_enabled": { + "name": "is_io_cache_enabled", + "type": "bool", + "description": "", + "required": false + }, + "local_disk": { + "name": "local_disk", + "type": "bool", + "description": "", + "required": false + }, + "local_disk_min_size": { + "name": "local_disk_min_size", + "type": "int32", + "description": "", + "required": false + }, + "min_cores": { + "name": "min_cores", + "type": "int32", + "description": "", + "required": false + }, + "min_gpus": { + "name": "min_gpus", + "type": "int32", + "description": "", + "required": false + }, + "min_memory_gb": { + "name": "min_memory_gb", + "type": "int32", + "description": "", + "required": false + }, + "photon_driver_capable": { + "name": "photon_driver_capable", + "type": "bool", + "description": "", + "required": false + }, + "photon_worker_capable": { + "name": "photon_worker_capable", + "type": "bool", + "description": "", + "required": false + }, + "support_port_forwarding": { + "name": "support_port_forwarding", + "type": "bool", + "description": "", + "required": false + } + } + }, + "compute.PendingInstanceError": { + "name": "PendingInstanceError", + "package": "compute", + "description": "Error message of a failed pending instances", + "fields": { + "instance_id": { + "name": "instance_id", + "type": "string", + "description": "", + "required": false + }, + "message": { + "name": "message", + "type": "string", + "description": "", + "required": false + } + } + }, + "compute.PermanentDeleteCluster": { + "name": "PermanentDeleteCluster", + "package": "compute", + "description": "", + "fields": { + "cluster_id": { + "name": "cluster_id", + "type": "string", + "description": "The cluster to be deleted.", + "required": false + } + } + }, + "compute.PinCluster": { + "name": "PinCluster", + "package": "compute", + "description": "", + "fields": { + "cluster_id": { + "name": "cluster_id", + "type": "string", + "description": "", + "required": false + } + } + }, + "compute.Policy": { + "name": "Policy", + "package": "compute", + "description": "Describes a Cluster Policy entity.", + "fields": { + "created_at_timestamp": { + "name": "created_at_timestamp", + "type": "int64", + "description": "Creation time. The timestamp (in millisecond) when this Cluster Policy\nwas created.", + "required": false + }, + "creator_user_name": { + "name": "creator_user_name", + "type": "string", + "description": "Creator user name. 
The field won't be included in the response if the\nuser has already been deleted.", + "required": false + }, + "definition": { + "name": "definition", + "type": "string", + "description": "Policy definition document expressed in [Databricks Cluster Policy\nDefinition Language].\n\n[Databricks Cluster Policy Definition Language]: https://docs.databricks.com/administration-guide/clusters/policy-definition.html", + "required": false + }, + "description": { + "name": "description", + "type": "string", + "description": "Additional human-readable description of the cluster policy.", + "required": false + }, + "is_default": { + "name": "is_default", + "type": "bool", + "description": "If true, policy is a default policy created and managed by Databricks.\nDefault policies cannot be deleted, and their policy families cannot be\nchanged.", + "required": false + }, + "libraries": { + "name": "libraries", + "type": "[]Library", + "description": "A list of libraries to be installed on the next cluster restart that uses\nthis policy. The maximum number of libraries is 500.", + "required": false + }, + "max_clusters_per_user": { + "name": "max_clusters_per_user", + "type": "int64", + "description": "Max number of clusters per user that can be active using this policy. If\nnot present, there is no max limit.", + "required": false + }, + "name": { + "name": "name", + "type": "string", + "description": "Cluster Policy name requested by the user. This has to be unique. Length\nmust be between 1 and 100 characters.", + "required": false + }, + "policy_family_definition_overrides": { + "name": "policy_family_definition_overrides", + "type": "string", + "description": "Policy definition JSON document expressed in [Databricks Policy\nDefinition Language]. The JSON document must be passed as a string and\ncannot be embedded in the requests.\n\nYou can use this to customize the policy definition inherited from the\npolicy family. Policy rules specified here are merged into the inherited\npolicy definition.\n\n[Databricks Policy Definition Language]: https://docs.databricks.com/administration-guide/clusters/policy-definition.html", + "required": false + }, + "policy_family_id": { + "name": "policy_family_id", + "type": "string", + "description": "ID of the policy family. The cluster policy's policy definition inherits\nthe policy family's policy definition.\n\nCannot be used with `definition`. 
Use\n`policy_family_definition_overrides` instead to customize the policy\ndefinition.", + "required": false + }, + "policy_id": { + "name": "policy_id", + "type": "string", + "description": "Canonical unique identifier for the Cluster Policy.", + "required": false + } + } + }, + "compute.PolicyFamily": { + "name": "PolicyFamily", + "package": "compute", + "description": "", + "fields": { + "definition": { + "name": "definition", + "type": "string", + "description": "Policy definition document expressed in [Databricks Cluster Policy\nDefinition Language].\n\n[Databricks Cluster Policy Definition Language]: https://docs.databricks.com/administration-guide/clusters/policy-definition.html", + "required": false + }, + "description": { + "name": "description", + "type": "string", + "description": "Human-readable description of the purpose of the policy family.", + "required": false + }, + "name": { + "name": "name", + "type": "string", + "description": "Name of the policy family.", + "required": false + }, + "policy_family_id": { + "name": "policy_family_id", + "type": "string", + "description": "Unique identifier for the policy family.", + "required": false + } + } + }, + "compute.PythonPyPiLibrary": { + "name": "PythonPyPiLibrary", + "package": "compute", + "description": "", + "fields": { + "package": { + "name": "package", + "type": "string", + "description": "The name of the pypi package to install. An optional exact version\nspecification is also supported. Examples: \"simplejson\" and\n\"simplejson==3.8.0\".", + "required": false + }, + "repo": { + "name": "repo", + "type": "string", + "description": "The repository where the package can be found. If not specified, the\ndefault pip index is used.", + "required": false + } + } + }, + "compute.RCranLibrary": { + "name": "RCranLibrary", + "package": "compute", + "description": "", + "fields": { + "package": { + "name": "package", + "type": "string", + "description": "The name of the CRAN package to install.", + "required": false + }, + "repo": { + "name": "repo", + "type": "string", + "description": "The repository where the package can be found. If not specified, the\ndefault CRAN repo is used.", + "required": false + } + } + }, + "compute.RemoveInstanceProfile": { + "name": "RemoveInstanceProfile", + "package": "compute", + "description": "", + "fields": { + "instance_profile_arn": { + "name": "instance_profile_arn", + "type": "string", + "description": "The ARN of the instance profile to remove. This field is required.", + "required": false + } + } + }, + "compute.ResizeCluster": { + "name": "ResizeCluster", + "package": "compute", + "description": "", + "fields": { + "autoscale": { + "name": "autoscale", + "type": "*AutoScale", + "description": "Parameters needed in order to automatically scale clusters up and down\nbased on load. Note: autoscaling works best with DB runtime versions 3.0\nor later.", + "required": false + }, + "cluster_id": { + "name": "cluster_id", + "type": "string", + "description": "The cluster to be resized.", + "required": false + }, + "num_workers": { + "name": "num_workers", + "type": "int", + "description": "Number of worker nodes that this cluster should have. A cluster has one\nSpark Driver and `num_workers` Executors for a total of `num_workers` + 1\nSpark nodes.\n\nNote: When reading the properties of a cluster, this field reflects the\ndesired number of workers rather than the actual current number of\nworkers. 
For instance, if a cluster is resized from 5 to 10 workers, this\nfield will immediately be updated to reflect the target size of 10\nworkers, whereas the workers listed in `spark_info` will ...", + "required": false + } + } + }, + "compute.RestartCluster": { + "name": "RestartCluster", + "package": "compute", + "description": "", + "fields": { + "cluster_id": { + "name": "cluster_id", + "type": "string", + "description": "The cluster to be started.", + "required": false + }, + "restart_user": { + "name": "restart_user", + "type": "string", + "description": "", + "required": false + } + } + }, + "compute.Results": { + "name": "Results", + "package": "compute", + "description": "", + "fields": { + "cause": { + "name": "cause", + "type": "string", + "description": "The cause of the error", + "required": false + }, + "data": { + "name": "data", + "type": "any", + "description": "", + "required": false + }, + "fileName": { + "name": "fileName", + "type": "string", + "description": "The image data in one of the following formats:\n\n1. A Data URL with base64-encoded image data:\n`data:image/{type};base64,{base64-data}`. Example:\n`data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAAAUA...`\n\n2. A FileStore file path for large images: `/plots/{filename}.png`.\nExample: `/plots/b6a7ad70-fb2c-4353-8aed-3f1e015174a4.png`", + "required": false + }, + "fileNames": { + "name": "fileNames", + "type": "[]string", + "description": "List of image data for multiple images. Each element follows the same\nformat as file_name.", + "required": false + }, + "isJsonSchema": { + "name": "isJsonSchema", + "type": "bool", + "description": "true if a JSON schema is returned instead of a string representation of\nthe Hive type.", + "required": false + }, + "pos": { + "name": "pos", + "type": "int", + "description": "internal field used by SDK", + "required": false + }, + "resultType": { + "name": "resultType", + "type": "ResultType", + "description": "", + "required": false + }, + "schema": { + "name": "schema", + "type": "[]map[string]any", + "description": "The table schema", + "required": false + }, + "summary": { + "name": "summary", + "type": "string", + "description": "The summary of the error", + "required": false + }, + "truncated": { + "name": "truncated", + "type": "bool", + "description": "true if partial results are returned.", + "required": false + } + } + }, + "compute.S3StorageInfo": { + "name": "S3StorageInfo", + "package": "compute", + "description": "A storage location in Amazon S3", + "fields": { + "canned_acl": { + "name": "canned_acl", + "type": "string", + "description": "(Optional) Set canned access control list for the logs, e.g.\n`bucket-owner-full-control`. If `canned_cal` is set, please make sure the\ncluster iam role has `s3:PutObjectAcl` permission on the destination\nbucket and prefix. The full list of possible canned acl can be found at\nhttp://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html#canned-acl.\nPlease also note that by default only the object owner gets full\ncontrols. If you are using cross account role for writing data, you may\nwant to...", + "required": false + }, + "destination": { + "name": "destination", + "type": "string", + "description": "S3 destination, e.g. `s3://my-bucket/some-prefix` Note that logs will be\ndelivered using cluster iam role, please make sure you set cluster iam\nrole and the role has write access to the destination. 
Please also note\nthat you cannot use AWS keys to deliver logs.", + "required": false + }, + "enable_encryption": { + "name": "enable_encryption", + "type": "bool", + "description": "(Optional) Flag to enable server side encryption, `false` by default.", + "required": false + }, + "encryption_type": { + "name": "encryption_type", + "type": "string", + "description": "(Optional) The encryption type, it could be `sse-s3` or `sse-kms`. It\nwill be used only when encryption is enabled and the default type is\n`sse-s3`.", + "required": false + }, + "endpoint": { + "name": "endpoint", + "type": "string", + "description": "S3 endpoint, e.g. `https://s3-us-west-2.amazonaws.com`. Either region or\nendpoint needs to be set. If both are set, endpoint will be used.", + "required": false + }, + "kms_key": { + "name": "kms_key", + "type": "string", + "description": "(Optional) Kms key which will be used if encryption is enabled and\nencryption type is set to `sse-kms`.", + "required": false + }, + "region": { + "name": "region", + "type": "string", + "description": "S3 region, e.g. `us-west-2`. Either region or endpoint needs to be set.\nIf both are set, endpoint will be used.", + "required": false + } + } + }, + "compute.SparkNode": { + "name": "SparkNode", + "package": "compute", + "description": "Describes a specific Spark driver or executor.", + "fields": { + "host_private_ip": { + "name": "host_private_ip", + "type": "string", + "description": "The private IP address of the host instance.", + "required": false + }, + "instance_id": { + "name": "instance_id", + "type": "string", + "description": "Globally unique identifier for the host instance from the cloud provider.", + "required": false + }, + "node_aws_attributes": { + "name": "node_aws_attributes", + "type": "*SparkNodeAwsAttributes", + "description": "Attributes specific to AWS for a Spark node.", + "required": false + }, + "node_id": { + "name": "node_id", + "type": "string", + "description": "Globally unique identifier for this node.", + "required": false + }, + "private_ip": { + "name": "private_ip", + "type": "string", + "description": "Private IP address (typically a 10.x.x.x address) of the Spark node. Note\nthat this is different from the private IP address of the host instance.", + "required": false + }, + "public_dns": { + "name": "public_dns", + "type": "string", + "description": "Public DNS address of this node. This address can be used to access the\nSpark JDBC server on the driver node. To communicate with the JDBC\nserver, traffic must be manually authorized by adding security group\nrules to the \"worker-unmanaged\" security group via the AWS console.", + "required": false + }, + "start_timestamp": { + "name": "start_timestamp", + "type": "int64", + "description": "The timestamp (in millisecond) when the Spark node is launched.", + "required": false + } + } + }, + "compute.SparkNodeAwsAttributes": { + "name": "SparkNodeAwsAttributes", + "package": "compute", + "description": "Attributes specific to AWS for a Spark node.", + "fields": { + "is_spot": { + "name": "is_spot", + "type": "bool", + "description": "Whether this node is on an Amazon spot instance.", + "required": false + } + } + }, + "compute.SparkVersion": { + "name": "SparkVersion", + "package": "compute", + "description": "", + "fields": { + "key": { + "name": "key", + "type": "string", + "description": "Spark version key, for example \"2.1.x-scala2.11\". 
This is the value which\nshould be provided as the \"spark_version\" when creating a new cluster.\nNote that the exact Spark version may change over time for a \"wildcard\"\nversion (i.e., \"2.1.x-scala2.11\" is a \"wildcard\" version) with minor bug\nfixes.", + "required": false + }, + "name": { + "name": "name", + "type": "string", + "description": "A descriptive name for this Spark version, for example \"Spark 2.1\".", + "required": false + } + } + }, + "compute.SparkVersionRequest": { + "name": "SparkVersionRequest", + "package": "compute", + "description": "SparkVersionRequest - filtering request", + "fields": { + "beta": { + "name": "beta", + "type": "bool", + "description": "", + "required": false + }, + "genomics": { + "name": "genomics", + "type": "bool", + "description": "", + "required": false + }, + "gpu": { + "name": "gpu", + "type": "bool", + "description": "", + "required": false + }, + "graviton": { + "name": "graviton", + "type": "bool", + "description": "", + "required": false + }, + "id": { + "name": "id", + "type": "string", + "description": "", + "required": false + }, + "latest": { + "name": "latest", + "type": "bool", + "description": "", + "required": false + }, + "long_term_support": { + "name": "long_term_support", + "type": "bool", + "description": "", + "required": false + }, + "ml": { + "name": "ml", + "type": "bool", + "description": "", + "required": false + }, + "photon": { + "name": "photon", + "type": "bool", + "description": "", + "required": false + }, + "scala": { + "name": "scala", + "type": "string", + "description": "", + "required": false + }, + "spark_version": { + "name": "spark_version", + "type": "string", + "description": "", + "required": false + } + } + }, + "compute.StartCluster": { + "name": "StartCluster", + "package": "compute", + "description": "", + "fields": { + "cluster_id": { + "name": "cluster_id", + "type": "string", + "description": "The cluster to be started.", + "required": false + } + } + }, + "compute.TerminationReason": { + "name": "TerminationReason", + "package": "compute", + "description": "", + "fields": { + "code": { + "name": "code", + "type": "TerminationReasonCode", + "description": "status code indicating why the cluster was terminated", + "required": false + }, + "parameters": { + "name": "parameters", + "type": "map[string]string", + "description": "list of parameters that provide additional information about why the\ncluster was terminated", + "required": false + }, + "type": { + "name": "type", + "type": "TerminationReasonType", + "description": "type of the termination", + "required": false + } + } + }, + "compute.UninstallLibraries": { + "name": "UninstallLibraries", + "package": "compute", + "description": "", + "fields": { + "cluster_id": { + "name": "cluster_id", + "type": "string", + "description": "Unique identifier for the cluster on which to uninstall these libraries.", + "required": false + }, + "libraries": { + "name": "libraries", + "type": "[]Library", + "description": "The libraries to uninstall.", + "required": false + } + } + }, + "compute.UnpinCluster": { + "name": "UnpinCluster", + "package": "compute", + "description": "", + "fields": { + "cluster_id": { + "name": "cluster_id", + "type": "string", + "description": "", + "required": false + } + } + }, + "compute.Update": { + "name": "Update", + "package": "compute", + "description": "", + "fields": { + "ClusterId": { + "name": "ClusterId", + "type": "string", + "description": "", + "required": false + }, + "Install": { + "name": "Install", + 
"type": "[]Library", + "description": "The libraries to install.", + "required": false + }, + "Uninstall": { + "name": "Uninstall", + "type": "[]Library", + "description": "The libraries to install.", + "required": false + } + } + }, + "compute.UpdateCluster": { + "name": "UpdateCluster", + "package": "compute", + "description": "", + "fields": { + "cluster": { + "name": "cluster", + "type": "*UpdateClusterResource", + "description": "The cluster to be updated.", + "required": false + }, + "cluster_id": { + "name": "cluster_id", + "type": "string", + "description": "ID of the cluster.", + "required": false + }, + "update_mask": { + "name": "update_mask", + "type": "string", + "description": "Used to specify which cluster attributes and size fields to update. See\nhttps://google.aip.dev/161 for more details.\n\nThe field mask must be a single string, with multiple fields separated by\ncommas (no spaces). The field path is relative to the resource object,\nusing a dot (`.`) to navigate sub-fields (e.g., `author.given_name`).\nSpecification of elements in sequence or map fields is not allowed, as\nonly the entire collection field can be specified. Field names must\nexactly match the resourc...", + "required": false + } + } + }, + "compute.UpdateClusterResource": { + "name": "UpdateClusterResource", + "package": "compute", + "description": "", + "fields": { + "autoscale": { + "name": "autoscale", + "type": "*AutoScale", + "description": "Parameters needed in order to automatically scale clusters up and down\nbased on load. Note: autoscaling works best with DB runtime versions 3.0\nor later.", + "required": false + }, + "autotermination_minutes": { + "name": "autotermination_minutes", + "type": "int", + "description": "Automatically terminates the cluster after it is inactive for this time\nin minutes. If not set, this cluster will not be automatically\nterminated. If specified, the threshold must be between 10 and 10000\nminutes. Users can also set this value to 0 to explicitly disable\nautomatic termination.", + "required": false + }, + "aws_attributes": { + "name": "aws_attributes", + "type": "*AwsAttributes", + "description": "Attributes related to clusters running on Amazon Web Services. If not\nspecified at cluster creation, a set of default values will be used.", + "required": false + }, + "azure_attributes": { + "name": "azure_attributes", + "type": "*AzureAttributes", + "description": "Attributes related to clusters running on Microsoft Azure. If not\nspecified at cluster creation, a set of default values will be used.", + "required": false + }, + "cluster_log_conf": { + "name": "cluster_log_conf", + "type": "*ClusterLogConf", + "description": "The configuration for delivering spark logs to a long-term storage\ndestination. Three kinds of destinations (DBFS, S3 and Unity Catalog\nvolumes) are supported. Only one destination can be specified for one\ncluster. If the conf is given, the logs will be delivered to the\ndestination every `5 mins`. The destination of driver logs is\n`$destination/$clusterId/driver`, while the destination of executor logs\nis `$destination/$clusterId/executor`.", + "required": false + }, + "cluster_name": { + "name": "cluster_name", + "type": "string", + "description": "Cluster name requested by the user. This doesn't have to be unique. If\nnot specified at creation, the cluster name will be an empty string. 
For\njob clusters, the cluster name is automatically set based on the job and\njob run IDs.", + "required": false + }, + "custom_tags": { + "name": "custom_tags", + "type": "map[string]string", + "description": "Additional tags for cluster resources. Databricks will tag all cluster\nresources (e.g., AWS instances and EBS volumes) with these tags in\naddition to `default_tags`. Notes:\n\n- Currently, Databricks allows at most 45 custom tags\n\n- Clusters can only reuse cloud resources if the resources' tags are a\nsubset of the cluster tags", + "required": false + }, + "data_security_mode": { + "name": "data_security_mode", + "type": "DataSecurityMode", + "description": "", + "required": false + }, + "docker_image": { + "name": "docker_image", + "type": "*DockerImage", + "description": "Custom docker image BYOC", + "required": false + }, + "driver_instance_pool_id": { + "name": "driver_instance_pool_id", + "type": "string", + "description": "The optional ID of the instance pool for the driver of the cluster\nbelongs. The pool cluster uses the instance pool with id\n(instance_pool_id) if the driver pool is not assigned.", + "required": false + }, + "driver_node_type_id": { + "name": "driver_node_type_id", + "type": "string", + "description": "The node type of the Spark driver. Note that this field is optional; if\nunset, the driver node type will be set as the same value as\n`node_type_id` defined above.\n\nThis field, along with node_type_id, should not be set if\nvirtual_cluster_size is set. If both driver_node_type_id, node_type_id,\nand virtual_cluster_size are specified, driver_node_type_id and\nnode_type_id take precedence.", + "required": false + }, + "enable_elastic_disk": { + "name": "enable_elastic_disk", + "type": "bool", + "description": "Autoscaling Local Storage: when enabled, this cluster will dynamically\nacquire additional disk space when its Spark workers are running low on\ndisk space.", + "required": false + }, + "enable_local_disk_encryption": { + "name": "enable_local_disk_encryption", + "type": "bool", + "description": "Whether to enable LUKS on cluster VMs' local disks", + "required": false + }, + "gcp_attributes": { + "name": "gcp_attributes", + "type": "*GcpAttributes", + "description": "Attributes related to clusters running on Google Cloud Platform. If not\nspecified at cluster creation, a set of default values will be used.", + "required": false + }, + "init_scripts": { + "name": "init_scripts", + "type": "[]InitScriptInfo", + "description": "The configuration for storing init scripts. Any number of destinations\ncan be specified. The scripts are executed sequentially in the order\nprovided. 
If `cluster_log_conf` is specified, init script logs are sent\nto `\u003cdestination\u003e/\u003ccluster-ID\u003e/init_scripts`.", + "required": false + }, + "instance_pool_id": { + "name": "instance_pool_id", + "type": "string", + "description": "The optional ID of the instance pool to which the cluster belongs.", + "required": false + }, + "is_single_node": { + "name": "is_single_node", + "type": "bool", + "description": "This field can only be used when `kind = CLASSIC_PREVIEW`.\n\nWhen set to true, Databricks will automatically set single node related\n`custom_tags`, `spark_conf`, and `num_workers`", + "required": false + }, + "kind": { + "name": "kind", + "type": "Kind", + "description": "", + "required": false + }, + "node_type_id": { + "name": "node_type_id", + "type": "string", + "description": "This field encodes, through a single value, the resources available to\neach of the Spark nodes in this cluster. For example, the Spark nodes can\nbe provisioned and optimized for memory or compute intensive workloads. A\nlist of available node types can be retrieved by using the\n:method:clusters/listNodeTypes API call.", + "required": false + }, + "num_workers": { + "name": "num_workers", + "type": "int", + "description": "Number of worker nodes that this cluster should have. A cluster has one\nSpark Driver and `num_workers` Executors for a total of `num_workers` + 1\nSpark nodes.\n\nNote: When reading the properties of a cluster, this field reflects the\ndesired number of workers rather than the actual current number of\nworkers. For instance, if a cluster is resized from 5 to 10 workers, this\nfield will immediately be updated to reflect the target size of 10\nworkers, whereas the workers listed in `spark_info` will ...", + "required": false + }, + "policy_id": { + "name": "policy_id", + "type": "string", + "description": "The ID of the cluster policy used to create the cluster if applicable.", + "required": false + }, + "remote_disk_throughput": { + "name": "remote_disk_throughput", + "type": "int", + "description": "If set, what the configurable throughput (in Mb/s) for the remote disk\nis. Currently only supported for GCP HYPERDISK_BALANCED disks.", + "required": false + }, + "runtime_engine": { + "name": "runtime_engine", + "type": "RuntimeEngine", + "description": "Determines the cluster's runtime engine, either standard or Photon.\n\nThis field is not compatible with legacy `spark_version` values that\ncontain `-photon-`. Remove `-photon-` from the `spark_version` and set\n`runtime_engine` to `PHOTON`.\n\nIf left unspecified, the runtime engine defaults to standard unless the\nspark_version contains -photon-, in which case Photon will be used.", + "required": false + }, + "single_user_name": { + "name": "single_user_name", + "type": "string", + "description": "Single user name if data_security_mode is `SINGLE_USER`", + "required": false + }, + "spark_conf": { + "name": "spark_conf", + "type": "map[string]string", + "description": "An object containing a set of optional, user-specified Spark\nconfiguration key-value pairs. Users can also pass in a string of extra\nJVM options to the driver and the executors via\n`spark.driver.extraJavaOptions` and `spark.executor.extraJavaOptions`\nrespectively.", + "required": false + }, + "spark_env_vars": { + "name": "spark_env_vars", + "type": "map[string]string", + "description": "An object containing a set of optional, user-specified environment\nvariable key-value pairs. 
Please note that key-value pair of the form\n(X,Y) will be exported as is (i.e., `export X='Y'`) while launching the\ndriver and workers.\n\nIn order to specify an additional set of `SPARK_DAEMON_JAVA_OPTS`, we\nrecommend appending them to `$SPARK_DAEMON_JAVA_OPTS` as shown in the\nexample below. This ensures that all default databricks managed\nenvironmental variables are included as well.\n\nExample Spark en...", + "required": false + }, + "spark_version": { + "name": "spark_version", + "type": "string", + "description": "The Spark version of the cluster, e.g. `3.3.x-scala2.11`. A list of\navailable Spark versions can be retrieved by using the\n:method:clusters/sparkVersions API call.", + "required": false + }, + "ssh_public_keys": { + "name": "ssh_public_keys", + "type": "[]string", + "description": "SSH public key contents that will be added to each Spark node in this\ncluster. The corresponding private keys can be used to login with the\nuser name `ubuntu` on port `2200`. Up to 10 keys can be specified.", + "required": false + }, + "total_initial_remote_disk_size": { + "name": "total_initial_remote_disk_size", + "type": "int", + "description": "If set, what the total initial volume size (in GB) of the remote disks\nshould be. Currently only supported for GCP HYPERDISK_BALANCED disks.", + "required": false + }, + "use_ml_runtime": { + "name": "use_ml_runtime", + "type": "bool", + "description": "This field can only be used when `kind = CLASSIC_PREVIEW`.\n\n`effective_spark_version` is determined by `spark_version` (DBR release),\nthis field `use_ml_runtime`, and whether `node_type_id` is gpu node or\nnot.", + "required": false + }, + "workload_type": { + "name": "workload_type", + "type": "*WorkloadType", + "description": "", + "required": false + } + } + }, + "compute.VolumesStorageInfo": { + "name": "VolumesStorageInfo", + "package": "compute", + "description": "A storage location back by UC Volumes.", + "fields": { + "destination": { + "name": "destination", + "type": "string", + "description": "UC Volumes destination, e.g.\n`/Volumes/catalog/schema/vol1/init-scripts/setup-datadog.sh` or\n`dbfs:/Volumes/catalog/schema/vol1/init-scripts/setup-datadog.sh`", + "required": false + } + } + }, + "compute.Wait": { + "name": "Wait", + "package": "compute", + "description": "", + "fields": { + "ClusterID": { + "name": "ClusterID", + "type": "string", + "description": "", + "required": false + }, + "IsRefresh": { + "name": "IsRefresh", + "type": "bool", + "description": "", + "required": false + }, + "IsRunning": { + "name": "IsRunning", + "type": "bool", + "description": "", + "required": false + }, + "Libraries": { + "name": "Libraries", + "type": "[]Library", + "description": "", + "required": false + } + } + }, + "compute.WaitCommandStatusCommandExecutionCancelled": { + "name": "WaitCommandStatusCommandExecutionCancelled", + "package": "compute", + "description": "WaitCommandStatusCommandExecutionCancelled is a wrapper that calls [CommandExecutionAPI.WaitCommandStatusCommandExecutionCancelled] and waits to reach Cancelled state.", + "fields": { + "Poll": { + "name": "Poll", + "type": "func(time.Duration, func(*CommandStatusResponse)) (*CommandStatusResponse, error)", + "description": "", + "required": false + }, + "Response": { + "name": "Response", + "type": "*R", + "description": "", + "required": false + }, + "callback": { + "name": "callback", + "type": "func(*CommandStatusResponse)", + "description": "", + "required": false + }, + "clusterId": { + "name": "clusterId", + "type": 
"string", + "description": "", + "required": false + }, + "commandId": { + "name": "commandId", + "type": "string", + "description": "", + "required": false + }, + "contextId": { + "name": "contextId", + "type": "string", + "description": "", + "required": false + }, + "timeout": { + "name": "timeout", + "type": "time.Duration", + "description": "", + "required": false + } + } + }, + "compute.WaitCommandStatusCommandExecutionFinishedOrError": { + "name": "WaitCommandStatusCommandExecutionFinishedOrError", + "package": "compute", + "description": "WaitCommandStatusCommandExecutionFinishedOrError is a wrapper that calls [CommandExecutionAPI.WaitCommandStatusCommandExecutionFinishedOrError] and waits to reach Finished or Error state.", + "fields": { + "Poll": { + "name": "Poll", + "type": "func(time.Duration, func(*CommandStatusResponse)) (*CommandStatusResponse, error)", + "description": "", + "required": false + }, + "Response": { + "name": "Response", + "type": "*R", + "description": "", + "required": false + }, + "callback": { + "name": "callback", + "type": "func(*CommandStatusResponse)", + "description": "", + "required": false + }, + "clusterId": { + "name": "clusterId", + "type": "string", + "description": "", + "required": false + }, + "commandId": { + "name": "commandId", + "type": "string", + "description": "", + "required": false + }, + "contextId": { + "name": "contextId", + "type": "string", + "description": "", + "required": false + }, + "timeout": { + "name": "timeout", + "type": "time.Duration", + "description": "", + "required": false + } + } + }, + "compute.WaitContextStatusCommandExecutionRunning": { + "name": "WaitContextStatusCommandExecutionRunning", + "package": "compute", + "description": "WaitContextStatusCommandExecutionRunning is a wrapper that calls [CommandExecutionAPI.WaitContextStatusCommandExecutionRunning] and waits to reach Running state.", + "fields": { + "Poll": { + "name": "Poll", + "type": "func(time.Duration, func(*ContextStatusResponse)) (*ContextStatusResponse, error)", + "description": "", + "required": false + }, + "Response": { + "name": "Response", + "type": "*R", + "description": "", + "required": false + }, + "callback": { + "name": "callback", + "type": "func(*ContextStatusResponse)", + "description": "", + "required": false + }, + "clusterId": { + "name": "clusterId", + "type": "string", + "description": "", + "required": false + }, + "contextId": { + "name": "contextId", + "type": "string", + "description": "", + "required": false + }, + "timeout": { + "name": "timeout", + "type": "time.Duration", + "description": "", + "required": false + } + } + }, + "compute.WaitGetClusterRunning": { + "name": "WaitGetClusterRunning", + "package": "compute", + "description": "WaitGetClusterRunning is a wrapper that calls [ClustersAPI.WaitGetClusterRunning] and waits to reach RUNNING state.", + "fields": { + "Poll": { + "name": "Poll", + "type": "func(time.Duration, func(*ClusterDetails)) (*ClusterDetails, error)", + "description": "", + "required": false + }, + "Response": { + "name": "Response", + "type": "*R", + "description": "", + "required": false + }, + "callback": { + "name": "callback", + "type": "func(*ClusterDetails)", + "description": "", + "required": false + }, + "cluster_id": { + "name": "cluster_id", + "type": "string", + "description": "", + "required": false + }, + "timeout": { + "name": "timeout", + "type": "time.Duration", + "description": "", + "required": false + } + } + }, + "compute.WaitGetClusterTerminated": { + "name": 
"WaitGetClusterTerminated", + "package": "compute", + "description": "WaitGetClusterTerminated is a wrapper that calls [ClustersAPI.WaitGetClusterTerminated] and waits to reach TERMINATED state.", + "fields": { + "Poll": { + "name": "Poll", + "type": "func(time.Duration, func(*ClusterDetails)) (*ClusterDetails, error)", + "description": "", + "required": false + }, + "Response": { + "name": "Response", + "type": "*R", + "description": "", + "required": false + }, + "callback": { + "name": "callback", + "type": "func(*ClusterDetails)", + "description": "", + "required": false + }, + "cluster_id": { + "name": "cluster_id", + "type": "string", + "description": "", + "required": false + }, + "timeout": { + "name": "timeout", + "type": "time.Duration", + "description": "", + "required": false + } + } + }, + "compute.WorkloadType": { + "name": "WorkloadType", + "package": "compute", + "description": "Cluster Attributes showing for clusters workload types.", + "fields": { + "clients": { + "name": "clients", + "type": "ClientsTypes", + "description": "defined what type of clients can use the cluster. E.g. Notebooks, Jobs", + "required": false + } + } + }, + "compute.WorkspaceStorageInfo": { + "name": "WorkspaceStorageInfo", + "package": "compute", + "description": "A storage location in Workspace Filesystem (WSFS)", + "fields": { + "destination": { + "name": "destination", + "type": "string", + "description": "wsfs destination, e.g. `workspace:/cluster-init-scripts/setup-datadog.sh`", + "required": false + } + } + }, + "compute.clusterPoliciesImpl": { + "name": "clusterPoliciesImpl", + "package": "compute", + "description": "unexported type that holds implementations of just ClusterPolicies API methods", + "fields": { + "client": { + "name": "client", + "type": "*client.DatabricksClient", + "description": "", + "required": false + } + } + }, + "compute.clustersImpl": { + "name": "clustersImpl", + "package": "compute", + "description": "unexported type that holds implementations of just Clusters API methods", + "fields": { + "client": { + "name": "client", + "type": "*client.DatabricksClient", + "description": "", + "required": false + } + } + }, + "compute.commandExecutionImpl": { + "name": "commandExecutionImpl", + "package": "compute", + "description": "unexported type that holds implementations of just CommandExecution API methods", + "fields": { + "client": { + "name": "client", + "type": "*client.DatabricksClient", + "description": "", + "required": false + } + } + }, + "compute.globalInitScriptsImpl": { + "name": "globalInitScriptsImpl", + "package": "compute", + "description": "unexported type that holds implementations of just GlobalInitScripts API methods", + "fields": { + "client": { + "name": "client", + "type": "*client.DatabricksClient", + "description": "", + "required": false + } + } + }, + "compute.instancePoolsImpl": { + "name": "instancePoolsImpl", + "package": "compute", + "description": "unexported type that holds implementations of just InstancePools API methods", + "fields": { + "client": { + "name": "client", + "type": "*client.DatabricksClient", + "description": "", + "required": false + } + } + }, + "compute.instanceProfilesImpl": { + "name": "instanceProfilesImpl", + "package": "compute", + "description": "unexported type that holds implementations of just InstanceProfiles API methods", + "fields": { + "client": { + "name": "client", + "type": "*client.DatabricksClient", + "description": "", + "required": false + } + } + }, + "compute.librariesImpl": { + "name": 
"librariesImpl", + "package": "compute", + "description": "unexported type that holds implementations of just Libraries API methods", + "fields": { + "client": { + "name": "client", + "type": "*client.DatabricksClient", + "description": "", + "required": false + } + } + }, + "compute.policyComplianceForClustersImpl": { + "name": "policyComplianceForClustersImpl", + "package": "compute", + "description": "unexported type that holds implementations of just PolicyComplianceForClusters API methods", + "fields": { + "client": { + "name": "client", + "type": "*client.DatabricksClient", + "description": "", + "required": false + } + } + }, + "compute.policyFamiliesImpl": { + "name": "policyFamiliesImpl", + "package": "compute", + "description": "unexported type that holds implementations of just PolicyFamilies API methods", + "fields": { + "client": { + "name": "client", + "type": "*client.DatabricksClient", + "description": "", + "required": false + } + } + }, + "database.CustomTag": { + "name": "CustomTag", + "package": "database", + "description": "", + "fields": { + "key": { + "name": "key", + "type": "any", + "description": "The key of the custom tag.", + "required": false + }, + "value": { + "name": "value", + "type": "any", + "description": "The value of the custom tag.", + "required": false + } + } + }, + "database.DatabaseInstanceRef": { + "name": "DatabaseInstanceRef", + "package": "database", + "description": "DatabaseInstanceRef is a reference to a database instance. It is used in the\nDatabaseInstance object to refer to the parent instance of an instance and\nto refer the child instances of an instance.\nTo specify as a parent instance during creation of an instance,\nthe lsn and branch_time fields are optional. If not specified, the child\ninstance will be created from the latest lsn of the parent.\nIf both lsn and branch_time are specified, the lsn will be used to create\nthe child instance.", + "fields": { + "branch_time": { + "name": "branch_time", + "type": "any", + "description": "Branch time of the ref database instance.\nFor a parent ref instance, this is the point in time on the parent instance from which the\ninstance was created.\nFor a child ref instance, this is the point in time on the instance from which the child\ninstance was created.\nInput: For specifying the point in time to create a child instance. Optional.\nOutput: Only populated if provided as input to create a child instance.", + "required": false + }, + "effective_lsn": { + "name": "effective_lsn", + "type": "any", + "description": "For a parent ref instance, this is the LSN on the parent instance from which the\ninstance was created.\nFor a child ref instance, this is the LSN on the instance from which the child instance\nwas created.", + "required": false + }, + "lsn": { + "name": "lsn", + "type": "any", + "description": "User-specified WAL LSN of the ref database instance.\n\nInput: For specifying the WAL LSN to create a child instance. 
Optional.\nOutput: Only populated if provided as input to create a child instance.", + "required": false + }, + "name": { + "name": "name", + "type": "any", + "description": "Name of the ref database instance.", + "required": false + }, + "uid": { + "name": "uid", + "type": "any", + "description": "Id of the ref database instance.", + "required": false + } + } + }, + "database.DeltaTableSyncInfo": { + "name": "DeltaTableSyncInfo", + "package": "database", + "description": "", + "fields": { + "delta_commit_timestamp": { + "name": "delta_commit_timestamp", + "type": "any", + "description": "The timestamp when the above Delta version was committed in the source Delta table.\nNote: This is the Delta commit time, not the time the data was written to the synced table.", + "required": false + }, + "delta_commit_version": { + "name": "delta_commit_version", + "type": "any", + "description": "The Delta Lake commit version that was last successfully synced.", + "required": false + } + } + }, + "database.NewPipelineSpec": { + "name": "NewPipelineSpec", + "package": "database", + "description": "Custom fields that user can set for pipeline while creating SyncedDatabaseTable.\nNote that other fields of pipeline are still inferred by table def internally", + "fields": { + "budget_policy_id": { + "name": "budget_policy_id", + "type": "any", + "description": "Budget policy to set on the newly created pipeline.", + "required": false + }, + "storage_catalog": { + "name": "storage_catalog", + "type": "any", + "description": "This field needs to be specified if the destination catalog is a managed postgres catalog.\n\nUC catalog for the pipeline to store intermediate files (checkpoints, event logs etc).\nThis needs to be a standard catalog where the user has permissions to create Delta tables.", + "required": false + }, + "storage_schema": { + "name": "storage_schema", + "type": "any", + "description": "This field needs to be specified if the destination catalog is a managed postgres catalog.\n\nUC schema for the pipeline to store intermediate files (checkpoints, event logs etc).\nThis needs to be in the standard catalog where the user has permissions to create Delta tables.", + "required": false + } + } + }, + "database.SyncedTableContinuousUpdateStatus": { + "name": "SyncedTableContinuousUpdateStatus", + "package": "database", + "description": "Detailed status of a synced table. Shown if the synced table is in the SYNCED_CONTINUOUS_UPDATE\nor the SYNCED_UPDATING_PIPELINE_RESOURCES state.", + "fields": { + "initial_pipeline_sync_progress": { + "name": "initial_pipeline_sync_progress", + "type": "any", + "description": "Progress of the initial data synchronization.", + "required": false + }, + "last_processed_commit_version": { + "name": "last_processed_commit_version", + "type": "any", + "description": "The last source table Delta version that was successfully synced to the synced table.", + "required": false + }, + "timestamp": { + "name": "timestamp", + "type": "any", + "description": "The end timestamp of the last time any data was synchronized from the source table to the synced\ntable. This is when the data is available in the synced table.", + "required": false + } + } + }, + "database.SyncedTableFailedStatus": { + "name": "SyncedTableFailedStatus", + "package": "database", + "description": "Detailed status of a synced table. 
Shown if the synced table is in the OFFLINE_FAILED or the\nSYNCED_PIPELINE_FAILED state.", + "fields": { + "last_processed_commit_version": { + "name": "last_processed_commit_version", + "type": "any", + "description": "The last source table Delta version that was successfully synced to the synced table.\nThe last source table Delta version that was synced to the synced table.\nOnly populated if the table is still\nsynced and available for serving.", + "required": false + }, + "timestamp": { + "name": "timestamp", + "type": "any", + "description": "The end timestamp of the last time any data was synchronized from the source table to the synced\ntable. Only populated if the table is still synced and available for serving.", + "required": false + } + } + }, + "database.SyncedTablePipelineProgress": { + "name": "SyncedTablePipelineProgress", + "package": "database", + "description": "Progress information of the Synced Table data synchronization pipeline.", + "fields": { + "estimated_completion_time_seconds": { + "name": "estimated_completion_time_seconds", + "type": "any", + "description": "The estimated time remaining to complete this update in seconds.", + "required": false + }, + "latest_version_currently_processing": { + "name": "latest_version_currently_processing", + "type": "any", + "description": "The source table Delta version that was last processed by the pipeline. The pipeline may not\nhave completely processed this version yet.", + "required": false + }, + "provisioning_phase": { + "name": "provisioning_phase", + "type": "any", + "description": "The current phase of the data synchronization pipeline.", + "required": false + }, + "sync_progress_completion": { + "name": "sync_progress_completion", + "type": "any", + "description": "The completion ratio of this update. This is a number between 0 and 1.", + "required": false + }, + "synced_row_count": { + "name": "synced_row_count", + "type": "any", + "description": "The number of rows that have been synced in this update.", + "required": false + }, + "total_row_count": { + "name": "total_row_count", + "type": "any", + "description": "The total number of rows that need to be synced in this update. This number may be an estimate.", + "required": false + } + } + }, + "database.SyncedTablePosition": { + "name": "SyncedTablePosition", + "package": "database", + "description": "", + "fields": { + "delta_table_sync_info": { + "name": "delta_table_sync_info", + "type": "any", + "description": "", + "required": false + }, + "sync_end_timestamp": { + "name": "sync_end_timestamp", + "type": "any", + "description": "The end timestamp of the most recent successful synchronization.\nThis is the time when the data is available in the synced table.", + "required": false + }, + "sync_start_timestamp": { + "name": "sync_start_timestamp", + "type": "any", + "description": "The starting timestamp of the most recent successful synchronization from the source table\nto the destination (synced) table.\nNote this is the starting timestamp of the sync operation, not the end time.\nE.g., for a batch, this is the time when the sync operation started.", + "required": false + } + } + }, + "database.SyncedTableProvisioningStatus": { + "name": "SyncedTableProvisioningStatus", + "package": "database", + "description": "Detailed status of a synced table. 
Shown if the synced table is in the\nPROVISIONING_PIPELINE_RESOURCES or the PROVISIONING_INITIAL_SNAPSHOT state.", + "fields": { + "initial_pipeline_sync_progress": { + "name": "initial_pipeline_sync_progress", + "type": "any", + "description": "Details about initial data synchronization. Only populated when in the\nPROVISIONING_INITIAL_SNAPSHOT state.", + "required": false + } + } + }, + "database.SyncedTableSpec": { + "name": "SyncedTableSpec", + "package": "database", + "description": "Specification of a synced database table.", + "fields": { + "create_database_objects_if_missing": { + "name": "create_database_objects_if_missing", + "type": "any", + "description": "If true, the synced table's logical database and schema resources in PG\nwill be created if they do not already exist.", + "required": false + }, + "existing_pipeline_id": { + "name": "existing_pipeline_id", + "type": "any", + "description": "At most one of existing_pipeline_id and new_pipeline_spec should be defined.\n\nIf existing_pipeline_id is defined, the synced table will be bin packed into the existing pipeline\nreferenced. This avoids creating a new pipeline and allows sharing existing compute.\nIn this case, the scheduling_policy of this synced table must match the scheduling policy of the existing pipeline.", + "required": false + }, + "new_pipeline_spec": { + "name": "new_pipeline_spec", + "type": "any", + "description": "At most one of existing_pipeline_id and new_pipeline_spec should be defined.\n\nIf new_pipeline_spec is defined, a new pipeline is created for this synced table. The location pointed to is used\nto store intermediate files (checkpoints, event logs etc). The caller must have write permissions to create Delta\ntables in the specified catalog and schema. Again, note this requires write permissions, whereas the source table\nonly requires read permissions.", + "required": false + }, + "primary_key_columns": { + "name": "primary_key_columns", + "type": "any", + "description": "Primary Key columns to be used for data insert/update in the destination.", + "required": false + }, + "scheduling_policy": { + "name": "scheduling_policy", + "type": "any", + "description": "Scheduling policy of the underlying pipeline.", + "required": false + }, + "source_table_full_name": { + "name": "source_table_full_name", + "type": "any", + "description": "Three-part (catalog, schema, table) name of the source Delta table.", + "required": false + }, + "timeseries_key": { + "name": "timeseries_key", + "type": "any", + "description": "Time series key to deduplicate (tie-break) rows with the same primary key.", + "required": false + } + } + }, + "database.SyncedTableState": { + "name": "SyncedTableState", + "package": "database", + "description": "The state of a synced table.", + "fields": {} + }, + "database.SyncedTableStatus": { + "name": "SyncedTableStatus", + "package": "database", + "description": "Status of a synced table.", + "fields": { + "continuous_update_status": { + "name": "continuous_update_status", + "type": "any", + "description": "Detailed status of a synced table. Shown if the synced table is in the SYNCED_CONTINUOUS_UPDATE\nor the SYNCED_UPDATING_PIPELINE_RESOURCES state.", + "required": false + }, + "detailed_state": { + "name": "detailed_state", + "type": "any", + "description": "The state of the synced table.", + "required": false + }, + "failed_status": { + "name": "failed_status", + "type": "any", + "description": "Detailed status of a synced table. 
Shown if the synced table is in the OFFLINE_FAILED or the\nSYNCED_PIPELINE_FAILED state.", + "required": false + }, + "last_sync": { + "name": "last_sync", + "type": "any", + "description": "Summary of the last successful synchronization from source to destination.\n\nWill always be present if there has been a successful sync. Even if the most recent syncs have failed.\n\nLimitation:\nThe only exception is if the synced table is doing a FULL REFRESH, then the last sync information\nwill not be available until the full refresh is complete. This limitation will be addressed in a future version.\n\nThis top-level field is a convenience for consumers who want easy access to last sync information\nwithout having to traverse detailed_status.", + "required": false + }, + "message": { + "name": "message", + "type": "any", + "description": "A text description of the current state of the synced table.", + "required": false + }, + "pipeline_id": { + "name": "pipeline_id", + "type": "any", + "description": "ID of the associated pipeline. The pipeline ID may have been provided by the client\n(in the case of bin packing), or generated by the server (when creating a new pipeline).", + "required": false + }, + "provisioning_status": { + "name": "provisioning_status", + "type": "any", + "description": "Detailed status of a synced table. Shown if the synced table is in the\nPROVISIONING_PIPELINE_RESOURCES or the PROVISIONING_INITIAL_SNAPSHOT state.", + "required": false + }, + "triggered_update_status": { + "name": "triggered_update_status", + "type": "any", + "description": "Detailed status of a synced table. Shown if the synced table is in the SYNCED_TRIGGERED_UPDATE\nor the SYNCED_NO_PENDING_UPDATE state.", + "required": false + } + } + }, + "database.SyncedTableTriggeredUpdateStatus": { + "name": "SyncedTableTriggeredUpdateStatus", + "package": "database", + "description": "Detailed status of a synced table. Shown if the synced table is in the SYNCED_TRIGGERED_UPDATE\nor the SYNCED_NO_PENDING_UPDATE state.", + "fields": { + "last_processed_commit_version": { + "name": "last_processed_commit_version", + "type": "any", + "description": "The last source table Delta version that was successfully synced to the synced table.", + "required": false + }, + "timestamp": { + "name": "timestamp", + "type": "any", + "description": "The end timestamp of the last time any data was synchronized from the source table to the synced\ntable. This is when the data is available in the synced table.", + "required": false + }, + "triggered_update_progress": { + "name": "triggered_update_progress", + "type": "any", + "description": "Progress of the active data synchronization pipeline.", + "required": false + } + } + }, + "files.AddBlock": { + "name": "AddBlock", + "package": "files", + "description": "", + "fields": { + "data": { + "name": "data", + "type": "string", + "description": "The base64-encoded data to append to the stream. 
This has a limit of 1\nMB.", + "required": false + }, + "handle": { + "name": "handle", + "type": "int64", + "description": "The handle on an open stream.", + "required": false + } + } + }, + "files.Close": { + "name": "Close", + "package": "files", + "description": "", + "fields": { + "handle": { + "name": "handle", + "type": "int64", + "description": "The handle on an open stream.", + "required": false + } + } + }, + "files.Create": { + "name": "Create", + "package": "files", + "description": "", + "fields": { + "overwrite": { + "name": "overwrite", + "type": "bool", + "description": "The flag that specifies whether to overwrite existing file/files.", + "required": false + }, + "path": { + "name": "path", + "type": "string", + "description": "The path of the new file. The path should be the absolute DBFS path.", + "required": false + } + } + }, + "files.CreateResponse": { + "name": "CreateResponse", + "package": "files", + "description": "", + "fields": { + "handle": { + "name": "handle", + "type": "int64", + "description": "Handle which should subsequently be passed into the AddBlock and Close\ncalls when writing to a file through a stream.", + "required": false + } + } + }, + "files.Delete": { + "name": "Delete", + "package": "files", + "description": "", + "fields": { + "path": { + "name": "path", + "type": "string", + "description": "The path of the file or directory to delete. The path should be the\nabsolute DBFS path.", + "required": false + }, + "recursive": { + "name": "recursive", + "type": "bool", + "description": "Whether or not to recursively delete the directory's contents. Deleting\nempty directories can be done without providing the recursive flag.", + "required": false + } + } + }, + "files.DirectoryEntry": { + "name": "DirectoryEntry", + "package": "files", + "description": "", + "fields": { + "file_size": { + "name": "file_size", + "type": "int64", + "description": "The length of the file in bytes. This field is omitted for directories.", + "required": false + }, + "is_directory": { + "name": "is_directory", + "type": "bool", + "description": "True if the path is a directory.", + "required": false + }, + "last_modified": { + "name": "last_modified", + "type": "int64", + "description": "Last modification time of given file in milliseconds since unix epoch.", + "required": false + }, + "name": { + "name": "name", + "type": "string", + "description": "The name of the file or directory. This is the last component of the\npath.", + "required": false + }, + "path": { + "name": "path", + "type": "string", + "description": "The absolute path of the file or directory.", + "required": false + } + } + }, + "files.FileInfo": { + "name": "FileInfo", + "package": "files", + "description": "", + "fields": { + "file_size": { + "name": "file_size", + "type": "int64", + "description": "The length of the file in bytes. 
This field is omitted for directories.", + "required": false + }, + "is_dir": { + "name": "is_dir", + "type": "bool", + "description": "True if the path is a directory.", + "required": false + }, + "modification_time": { + "name": "modification_time", + "type": "int64", + "description": "Last modification time of given file in milliseconds since epoch.", + "required": false + }, + "path": { + "name": "path", + "type": "string", + "description": "The absolute path of the file or directory.", + "required": false + } + } + }, + "files.ListDirectoryResponse": { + "name": "ListDirectoryResponse", + "package": "files", + "description": "", + "fields": { + "contents": { + "name": "contents", + "type": "[]DirectoryEntry", + "description": "Array of DirectoryEntry.", + "required": false + }, + "next_page_token": { + "name": "next_page_token", + "type": "string", + "description": "A token, which can be sent as `page_token` to retrieve the next page.", + "required": false + } + } + }, + "files.ListStatusResponse": { + "name": "ListStatusResponse", + "package": "files", + "description": "", + "fields": { + "files": { + "name": "files", + "type": "[]FileInfo", + "description": "A list of FileInfo's that describe contents of directory or file. See\nexample above.", + "required": false + } + } + }, + "files.MkDirs": { + "name": "MkDirs", + "package": "files", + "description": "", + "fields": { + "path": { + "name": "path", + "type": "string", + "description": "The path of the new directory. The path should be the absolute DBFS path.", + "required": false + } + } + }, + "files.Move": { + "name": "Move", + "package": "files", + "description": "", + "fields": { + "destination_path": { + "name": "destination_path", + "type": "string", + "description": "The destination path of the file or directory. The path should be the\nabsolute DBFS path.", + "required": false + }, + "source_path": { + "name": "source_path", + "type": "string", + "description": "The source path of the file or directory. The path should be the absolute\nDBFS path.", + "required": false + } + } + }, + "files.Put": { + "name": "Put", + "package": "files", + "description": "", + "fields": { + "contents": { + "name": "contents", + "type": "string", + "description": "This parameter might be absent, and instead a posted file will be used.", + "required": false + }, + "overwrite": { + "name": "overwrite", + "type": "bool", + "description": "The flag that specifies whether to overwrite existing file/files.", + "required": false + }, + "path": { + "name": "path", + "type": "string", + "description": "The path of the new file. The path should be the absolute DBFS path.", + "required": false + } + } + }, + "files.ReadResponse": { + "name": "ReadResponse", + "package": "files", + "description": "", + "fields": { + "bytes_read": { + "name": "bytes_read", + "type": "int64", + "description": "The number of bytes read (could be less than ``length`` if we hit end of\nfile). 
This refers to number of bytes read in unencoded version (response\ndata is base64-encoded).", + "required": false + }, + "data": { + "name": "data", + "type": "string", + "description": "The base64-encoded contents of the file read.", + "required": false + } + } + }, + "files.dbfsImpl": { + "name": "dbfsImpl", + "package": "files", + "description": "unexported type that holds implementations of just Dbfs API methods", + "fields": { + "client": { + "name": "client", + "type": "*client.DatabricksClient", + "description": "", + "required": false + } + } + }, + "files.fileHandle": { + "name": "fileHandle", + "package": "files", + "description": "Internal only state for a DBFS file handle.", + "fields": { + "api": { + "name": "api", + "type": "*DbfsAPI", + "description": "", + "required": false + }, + "ctx": { + "name": "ctx", + "type": "context.Context", + "description": "", + "required": false + }, + "path": { + "name": "path", + "type": "string", + "description": "", + "required": false + }, + "reader": { + "name": "reader", + "type": "*fileHandleReader", + "description": "", + "required": false + }, + "writer": { + "name": "writer", + "type": "*fileHandleWriter", + "description": "", + "required": false + } + } + }, + "files.fileHandleReader": { + "name": "fileHandleReader", + "package": "files", + "description": "Internal only state for a read handle.", + "fields": { + "offset": { + "name": "offset", + "type": "int64", + "description": "", + "required": false + }, + "size": { + "name": "size", + "type": "int64", + "description": "", + "required": false + } + } + }, + "files.fileHandleWriter": { + "name": "fileHandleWriter", + "package": "files", + "description": "Internal only state for a write handle.", + "fields": { + "handle": { + "name": "handle", + "type": "int64", + "description": "", + "required": false + } + } + }, + "files.filesImpl": { + "name": "filesImpl", + "package": "files", + "description": "unexported type that holds implementations of just Files API methods", + "fields": { + "client": { + "name": "client", + "type": "*client.DatabricksClient", + "description": "", + "required": false + } + } + }, + "iam.AccessControlRequest": { + "name": "AccessControlRequest", + "package": "iam", + "description": "", + "fields": { + "group_name": { + "name": "group_name", + "type": "string", + "description": "name of the group", + "required": false + }, + "permission_level": { + "name": "permission_level", + "type": "PermissionLevel", + "description": "", + "required": false + }, + "service_principal_name": { + "name": "service_principal_name", + "type": "string", + "description": "application ID of a service principal", + "required": false + }, + "user_name": { + "name": "user_name", + "type": "string", + "description": "name of the user", + "required": false + } + } + }, + "iam.AccessControlResponse": { + "name": "AccessControlResponse", + "package": "iam", + "description": "", + "fields": { + "all_permissions": { + "name": "all_permissions", + "type": "[]Permission", + "description": "All permissions.", + "required": false + }, + "display_name": { + "name": "display_name", + "type": "string", + "description": "Display name of the user or service principal.", + "required": false + }, + "group_name": { + "name": "group_name", + "type": "string", + "description": "name of the group", + "required": false + }, + "service_principal_name": { + "name": "service_principal_name", + "type": "string", + "description": "Name of the service principal.", + "required": false + }, + "user_name": { + 
"name": "user_name", + "type": "string", + "description": "name of the user", + "required": false + } + } + }, + "iam.AccountGroup": { + "name": "AccountGroup", + "package": "iam", + "description": "", + "fields": { + "account_id": { + "name": "account_id", + "type": "string", + "description": "Databricks account ID", + "required": false + }, + "displayName": { + "name": "displayName", + "type": "string", + "description": "String that represents a human-readable group name", + "required": false + }, + "externalId": { + "name": "externalId", + "type": "string", + "description": "external_id should be unique for identifying groups", + "required": false + }, + "id": { + "name": "id", + "type": "string", + "description": "Databricks group ID", + "required": false + }, + "members": { + "name": "members", + "type": "[]ComplexValue", + "description": "", + "required": false + }, + "meta": { + "name": "meta", + "type": "*ResourceMeta", + "description": "Container for the group identifier. Workspace local versus account.", + "required": false + }, + "roles": { + "name": "roles", + "type": "[]ComplexValue", + "description": "Indicates if the group has the admin role.", + "required": false + } + } + }, + "iam.AccountServicePrincipal": { + "name": "AccountServicePrincipal", + "package": "iam", + "description": "", + "fields": { + "account_id": { + "name": "account_id", + "type": "string", + "description": "Databricks account ID", + "required": false + }, + "active": { + "name": "active", + "type": "bool", + "description": "If this user is active", + "required": false + }, + "applicationId": { + "name": "applicationId", + "type": "string", + "description": "UUID relating to the service principal", + "required": false + }, + "displayName": { + "name": "displayName", + "type": "string", + "description": "String that represents a concatenation of given and family names.", + "required": false + }, + "externalId": { + "name": "externalId", + "type": "string", + "description": "", + "required": false + }, + "id": { + "name": "id", + "type": "string", + "description": "Databricks service principal ID.", + "required": false + }, + "roles": { + "name": "roles", + "type": "[]ComplexValue", + "description": "Indicates if the group has the admin role.", + "required": false + } + } + }, + "iam.AccountUser": { + "name": "AccountUser", + "package": "iam", + "description": "", + "fields": { + "account_id": { + "name": "account_id", + "type": "string", + "description": "Databricks account ID", + "required": false + }, + "active": { + "name": "active", + "type": "bool", + "description": "If this user is active", + "required": false + }, + "displayName": { + "name": "displayName", + "type": "string", + "description": "String that represents a concatenation of given and family names. For\nexample `John Smith`.", + "required": false + }, + "emails": { + "name": "emails", + "type": "[]ComplexValue", + "description": "All the emails associated with the Databricks user.", + "required": false + }, + "externalId": { + "name": "externalId", + "type": "string", + "description": "External ID is not currently supported. 
It is reserved for future use.", + "required": false + }, + "id": { + "name": "id", + "type": "string", + "description": "Databricks user ID.", + "required": false + }, + "name": { + "name": "name", + "type": "*Name", + "description": "", + "required": false + }, + "roles": { + "name": "roles", + "type": "[]ComplexValue", + "description": "Indicates if the group has the admin role.", + "required": false + }, + "userName": { + "name": "userName", + "type": "string", + "description": "Email address of the Databricks user.", + "required": false + } + } + }, + "iam.Actor": { + "name": "Actor", + "package": "iam", + "description": "represents an identity trying to access a resource - user or a service\nprincipal group can be a principal of a permission set assignment but an\nactor is always a user or a service principal", + "fields": { + "actor_id": { + "name": "actor_id", + "type": "int64", + "description": "", + "required": false + } + } + }, + "iam.CheckPolicyResponse": { + "name": "CheckPolicyResponse", + "package": "iam", + "description": "", + "fields": { + "consistency_token": { + "name": "consistency_token", + "type": "ConsistencyToken", + "description": "", + "required": false + }, + "is_permitted": { + "name": "is_permitted", + "type": "bool", + "description": "", + "required": false + } + } + }, + "iam.ComplexValue": { + "name": "ComplexValue", + "package": "iam", + "description": "", + "fields": { + "$ref": { + "name": "$ref", + "type": "string", + "description": "", + "required": false + }, + "display": { + "name": "display", + "type": "string", + "description": "", + "required": false + }, + "primary": { + "name": "primary", + "type": "bool", + "description": "", + "required": false + }, + "type": { + "name": "type", + "type": "string", + "description": "", + "required": false + }, + "value": { + "name": "value", + "type": "string", + "description": "", + "required": false + } + } + }, + "iam.ConsistencyToken": { + "name": "ConsistencyToken", + "package": "iam", + "description": "", + "fields": { + "value": { + "name": "value", + "type": "string", + "description": "", + "required": false + } + } + }, + "iam.CreateAccountGroupRequest": { + "name": "CreateAccountGroupRequest", + "package": "iam", + "description": "", + "fields": { + "displayName": { + "name": "displayName", + "type": "string", + "description": "String that represents a human-readable group name", + "required": false + }, + "externalId": { + "name": "externalId", + "type": "string", + "description": "", + "required": false + }, + "id": { + "name": "id", + "type": "string", + "description": "Databricks group ID", + "required": false + }, + "members": { + "name": "members", + "type": "[]ComplexValue", + "description": "", + "required": false + }, + "meta": { + "name": "meta", + "type": "*ResourceMeta", + "description": "Container for the group identifier. 
Workspace local versus account.", + "required": false + }, + "roles": { + "name": "roles", + "type": "[]ComplexValue", + "description": "Indicates if the group has the admin role.", + "required": false + } + } + }, + "iam.CreateAccountServicePrincipalRequest": { + "name": "CreateAccountServicePrincipalRequest", + "package": "iam", + "description": "", + "fields": { + "active": { + "name": "active", + "type": "bool", + "description": "If this user is active", + "required": false + }, + "applicationId": { + "name": "applicationId", + "type": "string", + "description": "UUID relating to the service principal", + "required": false + }, + "displayName": { + "name": "displayName", + "type": "string", + "description": "String that represents a concatenation of given and family names.", + "required": false + }, + "externalId": { + "name": "externalId", + "type": "string", + "description": "", + "required": false + }, + "id": { + "name": "id", + "type": "string", + "description": "Databricks service principal ID.", + "required": false + }, + "roles": { + "name": "roles", + "type": "[]ComplexValue", + "description": "Indicates if the group has the admin role.", + "required": false + } + } + }, + "iam.CreateAccountUserRequest": { + "name": "CreateAccountUserRequest", + "package": "iam", + "description": "", + "fields": { + "active": { + "name": "active", + "type": "bool", + "description": "If this user is active", + "required": false + }, + "displayName": { + "name": "displayName", + "type": "string", + "description": "String that represents a concatenation of given and family names. For\nexample `John Smith`.", + "required": false + }, + "emails": { + "name": "emails", + "type": "[]ComplexValue", + "description": "All the emails associated with the Databricks user.", + "required": false + }, + "externalId": { + "name": "externalId", + "type": "string", + "description": "External ID is not currently supported. It is reserved for future use.", + "required": false + }, + "id": { + "name": "id", + "type": "string", + "description": "Databricks user ID.", + "required": false + }, + "name": { + "name": "name", + "type": "*Name", + "description": "", + "required": false + }, + "roles": { + "name": "roles", + "type": "[]ComplexValue", + "description": "Indicates if the group has the admin role.", + "required": false + }, + "userName": { + "name": "userName", + "type": "string", + "description": "Email address of the Databricks user.", + "required": false + } + } + }, + "iam.CreateGroupRequest": { + "name": "CreateGroupRequest", + "package": "iam", + "description": "", + "fields": { + "displayName": { + "name": "displayName", + "type": "string", + "description": "String that represents a human-readable group name", + "required": false + }, + "entitlements": { + "name": "entitlements", + "type": "[]ComplexValue", + "description": "Entitlements assigned to the group. 
See [assigning entitlements] for a\nfull list of supported values.\n\n[assigning entitlements]: https://docs.databricks.com/administration-guide/users-groups/index.html#assigning-entitlements", + "required": false + }, + "externalId": { + "name": "externalId", + "type": "string", + "description": "", + "required": false + }, + "groups": { + "name": "groups", + "type": "[]ComplexValue", + "description": "", + "required": false + }, + "id": { + "name": "id", + "type": "string", + "description": "Databricks group ID", + "required": false + }, + "members": { + "name": "members", + "type": "[]ComplexValue", + "description": "", + "required": false + }, + "meta": { + "name": "meta", + "type": "*ResourceMeta", + "description": "Container for the group identifier. Workspace local versus account.", + "required": false + }, + "roles": { + "name": "roles", + "type": "[]ComplexValue", + "description": "Corresponds to AWS instance profile/arn role.", + "required": false + }, + "schemas": { + "name": "schemas", + "type": "[]GroupSchema", + "description": "The schema of the group.", + "required": false + } + } + }, + "iam.CreateServicePrincipalRequest": { + "name": "CreateServicePrincipalRequest", + "package": "iam", + "description": "", + "fields": { + "active": { + "name": "active", + "type": "bool", + "description": "If this user is active", + "required": false + }, + "applicationId": { + "name": "applicationId", + "type": "string", + "description": "UUID relating to the service principal", + "required": false + }, + "displayName": { + "name": "displayName", + "type": "string", + "description": "String that represents a concatenation of given and family names.", + "required": false + }, + "entitlements": { + "name": "entitlements", + "type": "[]ComplexValue", + "description": "Entitlements assigned to the service principal. See [assigning\nentitlements] for a full list of supported values.\n\n[assigning entitlements]: https://docs.databricks.com/administration-guide/users-groups/index.html#assigning-entitlements", + "required": false + }, + "externalId": { + "name": "externalId", + "type": "string", + "description": "", + "required": false + }, + "groups": { + "name": "groups", + "type": "[]ComplexValue", + "description": "", + "required": false + }, + "id": { + "name": "id", + "type": "string", + "description": "Databricks service principal ID.", + "required": false + }, + "roles": { + "name": "roles", + "type": "[]ComplexValue", + "description": "Corresponds to AWS instance profile/arn role.", + "required": false + }, + "schemas": { + "name": "schemas", + "type": "[]ServicePrincipalSchema", + "description": "The schema of the List response.", + "required": false + } + } + }, + "iam.CreateUserRequest": { + "name": "CreateUserRequest", + "package": "iam", + "description": "", + "fields": { + "active": { + "name": "active", + "type": "bool", + "description": "If this user is active", + "required": false + }, + "displayName": { + "name": "displayName", + "type": "string", + "description": "String that represents a concatenation of given and family names. For\nexample `John Smith`. This field cannot be updated through the Workspace\nSCIM APIs when [identity federation is enabled]. 
Use Account SCIM APIs to\nupdate `displayName`.\n\n[identity federation is enabled]: https://docs.databricks.com/administration-guide/users-groups/best-practices.html#enable-identity-federation", + "required": false + }, + "emails": { + "name": "emails", + "type": "[]ComplexValue", + "description": "All the emails associated with the Databricks user.", + "required": false + }, + "entitlements": { + "name": "entitlements", + "type": "[]ComplexValue", + "description": "Entitlements assigned to the user. See [assigning entitlements] for a\nfull list of supported values.\n\n[assigning entitlements]: https://docs.databricks.com/administration-guide/users-groups/index.html#assigning-entitlements", + "required": false + }, + "externalId": { + "name": "externalId", + "type": "string", + "description": "External ID is not currently supported. It is reserved for future use.", + "required": false + }, + "groups": { + "name": "groups", + "type": "[]ComplexValue", + "description": "", + "required": false + }, + "id": { + "name": "id", + "type": "string", + "description": "Databricks user ID.", + "required": false + }, + "name": { + "name": "name", + "type": "*Name", + "description": "", + "required": false + }, + "roles": { + "name": "roles", + "type": "[]ComplexValue", + "description": "Corresponds to AWS instance profile/arn role.", + "required": false + }, + "schemas": { + "name": "schemas", + "type": "[]UserSchema", + "description": "The schema of the user.", + "required": false + }, + "userName": { + "name": "userName", + "type": "string", + "description": "Email address of the Databricks user.", + "required": false + } + } + }, + "iam.GetAssignableRolesForResourceResponse": { + "name": "GetAssignableRolesForResourceResponse", + "package": "iam", + "description": "", + "fields": { + "roles": { + "name": "roles", + "type": "[]Role", + "description": "", + "required": false + } + } + }, + "iam.GetPasswordPermissionLevelsResponse": { + "name": "GetPasswordPermissionLevelsResponse", + "package": "iam", + "description": "", + "fields": { + "permission_levels": { + "name": "permission_levels", + "type": "[]PasswordPermissionsDescription", + "description": "Specific permission levels", + "required": false + } + } + }, + "iam.GetPermissionLevelsResponse": { + "name": "GetPermissionLevelsResponse", + "package": "iam", + "description": "", + "fields": { + "permission_levels": { + "name": "permission_levels", + "type": "[]PermissionsDescription", + "description": "Specific permission levels", + "required": false + } + } + }, + "iam.GrantRule": { + "name": "GrantRule", + "package": "iam", + "description": "", + "fields": { + "principals": { + "name": "principals", + "type": "[]string", + "description": "Principals this grant rule applies to. A principal can be a user (for end\nusers), a service principal (for applications and compute workloads), or\nan account group. 
Each principal has its own identifier format: *\nusers/\u003cUSERNAME\u003e * groups/\u003cGROUP_NAME\u003e *\nservicePrincipals/\u003cSERVICE_PRINCIPAL_APPLICATION_ID\u003e", + "required": false + }, + "role": { + "name": "role", + "type": "string", + "description": "Role that is assigned to the list of principals.", + "required": false + } + } + }, + "iam.Group": { + "name": "Group", + "package": "iam", + "description": "", + "fields": { + "displayName": { + "name": "displayName", + "type": "string", + "description": "String that represents a human-readable group name", + "required": false + }, + "entitlements": { + "name": "entitlements", + "type": "[]ComplexValue", + "description": "Entitlements assigned to the group. See [assigning entitlements] for a\nfull list of supported values.\n\n[assigning entitlements]: https://docs.databricks.com/administration-guide/users-groups/index.html#assigning-entitlements", + "required": false + }, + "externalId": { + "name": "externalId", + "type": "string", + "description": "external_id should be unique for identifying groups", + "required": false + }, + "groups": { + "name": "groups", + "type": "[]ComplexValue", + "description": "", + "required": false + }, + "id": { + "name": "id", + "type": "string", + "description": "Databricks group ID", + "required": false + }, + "members": { + "name": "members", + "type": "[]ComplexValue", + "description": "", + "required": false + }, + "meta": { + "name": "meta", + "type": "*ResourceMeta", + "description": "Container for the group identifier. Workspace local versus account.", + "required": false + }, + "roles": { + "name": "roles", + "type": "[]ComplexValue", + "description": "Corresponds to AWS instance profile/arn role.", + "required": false + }, + "schemas": { + "name": "schemas", + "type": "[]GroupSchema", + "description": "The schema of the group.", + "required": false + } + } + }, + "iam.ListAccountGroupsResponse": { + "name": "ListAccountGroupsResponse", + "package": "iam", + "description": "", + "fields": { + "Resources": { + "name": "Resources", + "type": "[]AccountGroup", + "description": "User objects returned in the response.", + "required": false + }, + "itemsPerPage": { + "name": "itemsPerPage", + "type": "int64", + "description": "Total results returned in the response.", + "required": false + }, + "startIndex": { + "name": "startIndex", + "type": "int64", + "description": "Starting index of all the results that matched the request filters. First\nitem is number 1.", + "required": false + }, + "totalResults": { + "name": "totalResults", + "type": "int64", + "description": "Total results that match the request filters.", + "required": false + } + } + }, + "iam.ListAccountServicePrincipalsResponse": { + "name": "ListAccountServicePrincipalsResponse", + "package": "iam", + "description": "", + "fields": { + "Resources": { + "name": "Resources", + "type": "[]AccountServicePrincipal", + "description": "User objects returned in the response.", + "required": false + }, + "itemsPerPage": { + "name": "itemsPerPage", + "type": "int64", + "description": "Total results returned in the response.", + "required": false + }, + "startIndex": { + "name": "startIndex", + "type": "int64", + "description": "Starting index of all the results that matched the request filters. 
First\nitem is number 1.", + "required": false + }, + "totalResults": { + "name": "totalResults", + "type": "int64", + "description": "Total results that match the request filters.", + "required": false + } + } + }, + "iam.ListAccountUsersResponse": { + "name": "ListAccountUsersResponse", + "package": "iam", + "description": "", + "fields": { + "Resources": { + "name": "Resources", + "type": "[]AccountUser", + "description": "User objects returned in the response.", + "required": false + }, + "itemsPerPage": { + "name": "itemsPerPage", + "type": "int64", + "description": "Total results returned in the response.", + "required": false + }, + "startIndex": { + "name": "startIndex", + "type": "int64", + "description": "Starting index of all the results that matched the request filters. First\nitem is number 1.", + "required": false + }, + "totalResults": { + "name": "totalResults", + "type": "int64", + "description": "Total results that match the request filters.", + "required": false + } + } + }, + "iam.ListGroupsResponse": { + "name": "ListGroupsResponse", + "package": "iam", + "description": "", + "fields": { + "Resources": { + "name": "Resources", + "type": "[]Group", + "description": "User objects returned in the response.", + "required": false + }, + "itemsPerPage": { + "name": "itemsPerPage", + "type": "int64", + "description": "Total results returned in the response.", + "required": false + }, + "schemas": { + "name": "schemas", + "type": "[]ListResponseSchema", + "description": "The schema of the service principal.", + "required": false + }, + "startIndex": { + "name": "startIndex", + "type": "int64", + "description": "Starting index of all the results that matched the request filters. First\nitem is number 1.", + "required": false + }, + "totalResults": { + "name": "totalResults", + "type": "int64", + "description": "Total results that match the request filters.", + "required": false + } + } + }, + "iam.ListServicePrincipalResponse": { + "name": "ListServicePrincipalResponse", + "package": "iam", + "description": "", + "fields": { + "Resources": { + "name": "Resources", + "type": "[]ServicePrincipal", + "description": "User objects returned in the response.", + "required": false + }, + "itemsPerPage": { + "name": "itemsPerPage", + "type": "int64", + "description": "Total results returned in the response.", + "required": false + }, + "schemas": { + "name": "schemas", + "type": "[]ListResponseSchema", + "description": "The schema of the List response.", + "required": false + }, + "startIndex": { + "name": "startIndex", + "type": "int64", + "description": "Starting index of all the results that matched the request filters. 
First\nitem is number 1.", + "required": false + }, + "totalResults": { + "name": "totalResults", + "type": "int64", + "description": "Total results that match the request filters.", + "required": false + } + } + }, + "iam.ListUsersResponse": { + "name": "ListUsersResponse", + "package": "iam", + "description": "", + "fields": { + "Resources": { + "name": "Resources", + "type": "[]User", + "description": "User objects returned in the response.", + "required": false + }, + "itemsPerPage": { + "name": "itemsPerPage", + "type": "int64", + "description": "Total results returned in the response.", + "required": false + }, + "schemas": { + "name": "schemas", + "type": "[]ListResponseSchema", + "description": "The schema of the List response.", + "required": false + }, + "startIndex": { + "name": "startIndex", + "type": "int64", + "description": "Starting index of all the results that matched the request filters. First\nitem is number 1.", + "required": false + }, + "totalResults": { + "name": "totalResults", + "type": "int64", + "description": "Total results that match the request filters.", + "required": false + } + } + }, + "iam.MigratePermissionsRequest": { + "name": "MigratePermissionsRequest", + "package": "iam", + "description": "", + "fields": { + "from_workspace_group_name": { + "name": "from_workspace_group_name", + "type": "string", + "description": "The name of the workspace group that permissions will be migrated from.", + "required": false + }, + "size": { + "name": "size", + "type": "int", + "description": "The maximum number of permissions that will be migrated.", + "required": false + }, + "to_account_group_name": { + "name": "to_account_group_name", + "type": "string", + "description": "The name of the account group that permissions will be migrated to.", + "required": false + }, + "workspace_id": { + "name": "workspace_id", + "type": "int64", + "description": "WorkspaceId of the associated workspace where the permission migration\nwill occur.", + "required": false + } + } + }, + "iam.MigratePermissionsResponse": { + "name": "MigratePermissionsResponse", + "package": "iam", + "description": "", + "fields": { + "permissions_migrated": { + "name": "permissions_migrated", + "type": "int", + "description": "Number of permissions migrated.", + "required": false + } + } + }, + "iam.Name": { + "name": "Name", + "package": "iam", + "description": "", + "fields": { + "familyName": { + "name": "familyName", + "type": "string", + "description": "Family name of the Databricks user.", + "required": false + }, + "givenName": { + "name": "givenName", + "type": "string", + "description": "Given name of the Databricks user.", + "required": false + } + } + }, + "iam.ObjectPermissions": { + "name": "ObjectPermissions", + "package": "iam", + "description": "", + "fields": { + "access_control_list": { + "name": "access_control_list", + "type": "[]AccessControlResponse", + "description": "", + "required": false + }, + "object_id": { + "name": "object_id", + "type": "string", + "description": "", + "required": false + }, + "object_type": { + "name": "object_type", + "type": "string", + "description": "", + "required": false + } + } + }, + "iam.PartialUpdate": { + "name": "PartialUpdate", + "package": "iam", + "description": "", + "fields": { + "Operations": { + "name": "Operations", + "type": "[]Patch", + "description": "", + "required": false + }, + "schemas": { + "name": "schemas", + "type": "[]PatchSchema", + "description": "The schema of the patch request. 
Must be\n[\"urn:ietf:params:scim:api:messages:2.0:PatchOp\"].", + "required": false + } + } + }, + "iam.PasswordAccessControlRequest": { + "name": "PasswordAccessControlRequest", + "package": "iam", + "description": "", + "fields": { + "group_name": { + "name": "group_name", + "type": "string", + "description": "name of the group", + "required": false + }, + "permission_level": { + "name": "permission_level", + "type": "PasswordPermissionLevel", + "description": "Permission level", + "required": false + }, + "service_principal_name": { + "name": "service_principal_name", + "type": "string", + "description": "application ID of a service principal", + "required": false + }, + "user_name": { + "name": "user_name", + "type": "string", + "description": "name of the user", + "required": false + } + } + }, + "iam.PasswordAccessControlResponse": { + "name": "PasswordAccessControlResponse", + "package": "iam", + "description": "", + "fields": { + "all_permissions": { + "name": "all_permissions", + "type": "[]PasswordPermission", + "description": "All permissions.", + "required": false + }, + "display_name": { + "name": "display_name", + "type": "string", + "description": "Display name of the user or service principal.", + "required": false + }, + "group_name": { + "name": "group_name", + "type": "string", + "description": "name of the group", + "required": false + }, + "service_principal_name": { + "name": "service_principal_name", + "type": "string", + "description": "Name of the service principal.", + "required": false + }, + "user_name": { + "name": "user_name", + "type": "string", + "description": "name of the user", + "required": false + } + } + }, + "iam.PasswordPermission": { + "name": "PasswordPermission", + "package": "iam", + "description": "", + "fields": { + "inherited": { + "name": "inherited", + "type": "bool", + "description": "", + "required": false + }, + "inherited_from_object": { + "name": "inherited_from_object", + "type": "[]string", + "description": "", + "required": false + }, + "permission_level": { + "name": "permission_level", + "type": "PasswordPermissionLevel", + "description": "Permission level", + "required": false + } + } + }, + "iam.PasswordPermissions": { + "name": "PasswordPermissions", + "package": "iam", + "description": "", + "fields": { + "access_control_list": { + "name": "access_control_list", + "type": "[]PasswordAccessControlResponse", + "description": "", + "required": false + }, + "object_id": { + "name": "object_id", + "type": "string", + "description": "", + "required": false + }, + "object_type": { + "name": "object_type", + "type": "string", + "description": "", + "required": false + } + } + }, + "iam.PasswordPermissionsDescription": { + "name": "PasswordPermissionsDescription", + "package": "iam", + "description": "", + "fields": { + "description": { + "name": "description", + "type": "string", + "description": "", + "required": false + }, + "permission_level": { + "name": "permission_level", + "type": "PasswordPermissionLevel", + "description": "Permission level", + "required": false + } + } + }, + "iam.PasswordPermissionsRequest": { + "name": "PasswordPermissionsRequest", + "package": "iam", + "description": "", + "fields": { + "access_control_list": { + "name": "access_control_list", + "type": "[]PasswordAccessControlRequest", + "description": "", + "required": false + } + } + }, + "iam.Patch": { + "name": "Patch", + "package": "iam", + "description": "", + "fields": { + "op": { + "name": "op", + "type": "PatchOp", + "description": "Type of patch 
operation.", + "required": false + }, + "path": { + "name": "path", + "type": "string", + "description": "Selection of patch operation", + "required": false + }, + "value": { + "name": "value", + "type": "any", + "description": "Value to modify", + "required": false + } + } + }, + "iam.PatchAccountGroupRequest": { + "name": "PatchAccountGroupRequest", + "package": "iam", + "description": "", + "fields": { + "Operations": { + "name": "Operations", + "type": "[]Patch", + "description": "", + "required": false + }, + "schemas": { + "name": "schemas", + "type": "[]PatchSchema", + "description": "The schema of the patch request. Must be\n[\"urn:ietf:params:scim:api:messages:2.0:PatchOp\"].", + "required": false + } + } + }, + "iam.PatchAccountServicePrincipalRequest": { + "name": "PatchAccountServicePrincipalRequest", + "package": "iam", + "description": "", + "fields": { + "Operations": { + "name": "Operations", + "type": "[]Patch", + "description": "", + "required": false + }, + "schemas": { + "name": "schemas", + "type": "[]PatchSchema", + "description": "The schema of the patch request. Must be\n[\"urn:ietf:params:scim:api:messages:2.0:PatchOp\"].", + "required": false + } + } + }, + "iam.PatchAccountUserRequest": { + "name": "PatchAccountUserRequest", + "package": "iam", + "description": "", + "fields": { + "Operations": { + "name": "Operations", + "type": "[]Patch", + "description": "", + "required": false + }, + "schemas": { + "name": "schemas", + "type": "[]PatchSchema", + "description": "The schema of the patch request. Must be\n[\"urn:ietf:params:scim:api:messages:2.0:PatchOp\"].", + "required": false + } + } + }, + "iam.PatchGroupRequest": { + "name": "PatchGroupRequest", + "package": "iam", + "description": "", + "fields": { + "Operations": { + "name": "Operations", + "type": "[]Patch", + "description": "", + "required": false + }, + "schemas": { + "name": "schemas", + "type": "[]PatchSchema", + "description": "The schema of the patch request. Must be\n[\"urn:ietf:params:scim:api:messages:2.0:PatchOp\"].", + "required": false + } + } + }, + "iam.PatchServicePrincipalRequest": { + "name": "PatchServicePrincipalRequest", + "package": "iam", + "description": "", + "fields": { + "Operations": { + "name": "Operations", + "type": "[]Patch", + "description": "", + "required": false + }, + "schemas": { + "name": "schemas", + "type": "[]PatchSchema", + "description": "The schema of the patch request. Must be\n[\"urn:ietf:params:scim:api:messages:2.0:PatchOp\"].", + "required": false + } + } + }, + "iam.PatchUserRequest": { + "name": "PatchUserRequest", + "package": "iam", + "description": "", + "fields": { + "Operations": { + "name": "Operations", + "type": "[]Patch", + "description": "", + "required": false + }, + "schemas": { + "name": "schemas", + "type": "[]PatchSchema", + "description": "The schema of the patch request. 
Must be\n[\"urn:ietf:params:scim:api:messages:2.0:PatchOp\"].", + "required": false + } + } + }, + "iam.Permission": { + "name": "Permission", + "package": "iam", + "description": "", + "fields": { + "inherited": { + "name": "inherited", + "type": "bool", + "description": "", + "required": false + }, + "inherited_from_object": { + "name": "inherited_from_object", + "type": "[]string", + "description": "", + "required": false + }, + "permission_level": { + "name": "permission_level", + "type": "PermissionLevel", + "description": "", + "required": false + } + } + }, + "iam.PermissionAssignment": { + "name": "PermissionAssignment", + "package": "iam", + "description": "The output format for existing workspace PermissionAssignment records, which\ncontains some info for user consumption.", + "fields": { + "error": { + "name": "error", + "type": "string", + "description": "Error response associated with a workspace permission assignment, if any.", + "required": false + }, + "permissions": { + "name": "permissions", + "type": "[]WorkspacePermission", + "description": "The permissions level of the principal.", + "required": false + }, + "principal": { + "name": "principal", + "type": "*PrincipalOutput", + "description": "Information about the principal assigned to the workspace.", + "required": false + } + } + }, + "iam.PermissionAssignments": { + "name": "PermissionAssignments", + "package": "iam", + "description": "", + "fields": { + "permission_assignments": { + "name": "permission_assignments", + "type": "[]PermissionAssignment", + "description": "Array of permissions assignments defined for a workspace.", + "required": false + } + } + }, + "iam.PermissionOutput": { + "name": "PermissionOutput", + "package": "iam", + "description": "", + "fields": { + "description": { + "name": "description", + "type": "string", + "description": "The results of a permissions query.", + "required": false + }, + "permission_level": { + "name": "permission_level", + "type": "WorkspacePermission", + "description": "", + "required": false + } + } + }, + "iam.PermissionsDescription": { + "name": "PermissionsDescription", + "package": "iam", + "description": "", + "fields": { + "description": { + "name": "description", + "type": "string", + "description": "", + "required": false + }, + "permission_level": { + "name": "permission_level", + "type": "PermissionLevel", + "description": "", + "required": false + } + } + }, + "iam.PrincipalOutput": { + "name": "PrincipalOutput", + "package": "iam", + "description": "Information about the principal assigned to the workspace.", + "fields": { + "display_name": { + "name": "display_name", + "type": "string", + "description": "The display name of the principal.", + "required": false + }, + "group_name": { + "name": "group_name", + "type": "string", + "description": "The group name of the group. Present only if the principal is a group.", + "required": false + }, + "principal_id": { + "name": "principal_id", + "type": "int64", + "description": "The unique, opaque id of the principal.", + "required": false + }, + "service_principal_name": { + "name": "service_principal_name", + "type": "string", + "description": "The name of the service principal. Present only if the principal is a\nservice principal.", + "required": false + }, + "user_name": { + "name": "user_name", + "type": "string", + "description": "The username of the user. 
Present only if the principal is a user.", + "required": false + } + } + }, + "iam.ResourceInfo": { + "name": "ResourceInfo", + "package": "iam", + "description": "", + "fields": { + "id": { + "name": "id", + "type": "string", + "description": "Id of the current resource.", + "required": false + }, + "legacy_acl_path": { + "name": "legacy_acl_path", + "type": "string", + "description": "The legacy acl path of the current resource.", + "required": false + }, + "parent_resource_info": { + "name": "parent_resource_info", + "type": "*ResourceInfo", + "description": "Parent resource info for the current resource. The parent may have\nanother parent.", + "required": false + } + } + }, + "iam.ResourceMeta": { + "name": "ResourceMeta", + "package": "iam", + "description": "", + "fields": { + "resourceType": { + "name": "resourceType", + "type": "string", + "description": "Identifier for group type. Can be local workspace group\n(`WorkspaceGroup`) or account group (`Group`).", + "required": false + } + } + }, + "iam.Role": { + "name": "Role", + "package": "iam", + "description": "", + "fields": { + "name": { + "name": "name", + "type": "string", + "description": "Role to assign to a principal or a list of principals on a resource.", + "required": false + } + } + }, + "iam.RuleSetResponse": { + "name": "RuleSetResponse", + "package": "iam", + "description": "", + "fields": { + "etag": { + "name": "etag", + "type": "string", + "description": "Identifies the version of the rule set returned. Etag used for\nversioning. The response is at least as fresh as the eTag provided. Etag\nis used for optimistic concurrency control as a way to help prevent\nsimultaneous updates of a rule set from overwriting each other. It is\nstrongly suggested that systems make use of the etag in the read -\u003e\nmodify -\u003e write pattern to perform rule set updates in order to avoid\nrace conditions that is get an etag from a GET rule set request, and pass\nit with the...", + "required": false + }, + "grant_rules": { + "name": "grant_rules", + "type": "[]GrantRule", + "description": "", + "required": false + }, + "name": { + "name": "name", + "type": "string", + "description": "Name of the rule set.", + "required": false + } + } + }, + "iam.RuleSetUpdateRequest": { + "name": "RuleSetUpdateRequest", + "package": "iam", + "description": "", + "fields": { + "etag": { + "name": "etag", + "type": "string", + "description": "Identifies the version of the rule set returned. Etag used for\nversioning. The response is at least as fresh as the eTag provided. Etag\nis used for optimistic concurrency control as a way to help prevent\nsimultaneous updates of a rule set from overwriting each other. 
It is\nstrongly suggested that systems make use of the etag in the read -\u003e\nmodify -\u003e write pattern to perform rule set updates in order to avoid\nrace conditions that is get an etag from a GET rule set request, and pass\nit with the...", + "required": false + }, + "grant_rules": { + "name": "grant_rules", + "type": "[]GrantRule", + "description": "", + "required": false + }, + "name": { + "name": "name", + "type": "string", + "description": "Name of the rule set.", + "required": false + } + } + }, + "iam.ServicePrincipal": { + "name": "ServicePrincipal", + "package": "iam", + "description": "", + "fields": { + "active": { + "name": "active", + "type": "bool", + "description": "If this user is active", + "required": false + }, + "applicationId": { + "name": "applicationId", + "type": "string", + "description": "UUID relating to the service principal", + "required": false + }, + "displayName": { + "name": "displayName", + "type": "string", + "description": "String that represents a concatenation of given and family names.", + "required": false + }, + "entitlements": { + "name": "entitlements", + "type": "[]ComplexValue", + "description": "Entitlements assigned to the service principal. See [assigning\nentitlements] for a full list of supported values.\n\n[assigning entitlements]: https://docs.databricks.com/administration-guide/users-groups/index.html#assigning-entitlements", + "required": false + }, + "externalId": { + "name": "externalId", + "type": "string", + "description": "", + "required": false + }, + "groups": { + "name": "groups", + "type": "[]ComplexValue", + "description": "", + "required": false + }, + "id": { + "name": "id", + "type": "string", + "description": "Databricks service principal ID.", + "required": false + }, + "roles": { + "name": "roles", + "type": "[]ComplexValue", + "description": "Corresponds to AWS instance profile/arn role.", + "required": false + }, + "schemas": { + "name": "schemas", + "type": "[]ServicePrincipalSchema", + "description": "The schema of the List response.", + "required": false + } + } + }, + "iam.SetObjectPermissions": { + "name": "SetObjectPermissions", + "package": "iam", + "description": "", + "fields": { + "access_control_list": { + "name": "access_control_list", + "type": "[]AccessControlRequest", + "description": "", + "required": false + } + } + }, + "iam.UpdateAccountGroupRequest": { + "name": "UpdateAccountGroupRequest", + "package": "iam", + "description": "", + "fields": { + "displayName": { + "name": "displayName", + "type": "string", + "description": "String that represents a human-readable group name", + "required": false + }, + "externalId": { + "name": "externalId", + "type": "string", + "description": "", + "required": false + }, + "members": { + "name": "members", + "type": "[]ComplexValue", + "description": "", + "required": false + }, + "meta": { + "name": "meta", + "type": "*ResourceMeta", + "description": "Container for the group identifier. 
Workspace local versus account.", + "required": false + }, + "roles": { + "name": "roles", + "type": "[]ComplexValue", + "description": "Indicates if the group has the admin role.", + "required": false + } + } + }, + "iam.UpdateAccountServicePrincipalRequest": { + "name": "UpdateAccountServicePrincipalRequest", + "package": "iam", + "description": "", + "fields": { + "active": { + "name": "active", + "type": "bool", + "description": "If this user is active", + "required": false + }, + "applicationId": { + "name": "applicationId", + "type": "string", + "description": "UUID relating to the service principal", + "required": false + }, + "displayName": { + "name": "displayName", + "type": "string", + "description": "String that represents a concatenation of given and family names.", + "required": false + }, + "externalId": { + "name": "externalId", + "type": "string", + "description": "", + "required": false + }, + "roles": { + "name": "roles", + "type": "[]ComplexValue", + "description": "Indicates if the group has the admin role.", + "required": false + } + } + }, + "iam.UpdateAccountUserRequest": { + "name": "UpdateAccountUserRequest", + "package": "iam", + "description": "", + "fields": { + "active": { + "name": "active", + "type": "bool", + "description": "If this user is active", + "required": false + }, + "displayName": { + "name": "displayName", + "type": "string", + "description": "String that represents a concatenation of given and family names. For\nexample `John Smith`.", + "required": false + }, + "emails": { + "name": "emails", + "type": "[]ComplexValue", + "description": "All the emails associated with the Databricks user.", + "required": false + }, + "externalId": { + "name": "externalId", + "type": "string", + "description": "External ID is not currently supported. It is reserved for future use.", + "required": false + }, + "name": { + "name": "name", + "type": "*Name", + "description": "", + "required": false + }, + "roles": { + "name": "roles", + "type": "[]ComplexValue", + "description": "Indicates if the group has the admin role.", + "required": false + }, + "userName": { + "name": "userName", + "type": "string", + "description": "Email address of the Databricks user.", + "required": false + } + } + }, + "iam.UpdateGroupRequest": { + "name": "UpdateGroupRequest", + "package": "iam", + "description": "", + "fields": { + "displayName": { + "name": "displayName", + "type": "string", + "description": "String that represents a human-readable group name", + "required": false + }, + "entitlements": { + "name": "entitlements", + "type": "[]ComplexValue", + "description": "Entitlements assigned to the group. See [assigning entitlements] for a\nfull list of supported values.\n\n[assigning entitlements]: https://docs.databricks.com/administration-guide/users-groups/index.html#assigning-entitlements", + "required": false + }, + "externalId": { + "name": "externalId", + "type": "string", + "description": "", + "required": false + }, + "groups": { + "name": "groups", + "type": "[]ComplexValue", + "description": "", + "required": false + }, + "members": { + "name": "members", + "type": "[]ComplexValue", + "description": "", + "required": false + }, + "meta": { + "name": "meta", + "type": "*ResourceMeta", + "description": "Container for the group identifier. 
Workspace local versus account.", + "required": false + }, + "roles": { + "name": "roles", + "type": "[]ComplexValue", + "description": "Corresponds to AWS instance profile/arn role.", + "required": false + }, + "schemas": { + "name": "schemas", + "type": "[]GroupSchema", + "description": "The schema of the group.", + "required": false + } + } + }, + "iam.UpdateObjectPermissions": { + "name": "UpdateObjectPermissions", + "package": "iam", + "description": "", + "fields": { + "access_control_list": { + "name": "access_control_list", + "type": "[]AccessControlRequest", + "description": "", + "required": false + } + } + }, + "iam.UpdateRuleSetRequest": { + "name": "UpdateRuleSetRequest", + "package": "iam", + "description": "", + "fields": { + "name": { + "name": "name", + "type": "string", + "description": "Name of the rule set.", + "required": false + }, + "rule_set": { + "name": "rule_set", + "type": "RuleSetUpdateRequest", + "description": "", + "required": false + } + } + }, + "iam.UpdateServicePrincipalRequest": { + "name": "UpdateServicePrincipalRequest", + "package": "iam", + "description": "", + "fields": { + "active": { + "name": "active", + "type": "bool", + "description": "If this user is active", + "required": false + }, + "applicationId": { + "name": "applicationId", + "type": "string", + "description": "UUID relating to the service principal", + "required": false + }, + "displayName": { + "name": "displayName", + "type": "string", + "description": "String that represents a concatenation of given and family names.", + "required": false + }, + "entitlements": { + "name": "entitlements", + "type": "[]ComplexValue", + "description": "Entitlements assigned to the service principal. See [assigning\nentitlements] for a full list of supported values.\n\n[assigning entitlements]: https://docs.databricks.com/administration-guide/users-groups/index.html#assigning-entitlements", + "required": false + }, + "externalId": { + "name": "externalId", + "type": "string", + "description": "", + "required": false + }, + "groups": { + "name": "groups", + "type": "[]ComplexValue", + "description": "", + "required": false + }, + "roles": { + "name": "roles", + "type": "[]ComplexValue", + "description": "Corresponds to AWS instance profile/arn role.", + "required": false + }, + "schemas": { + "name": "schemas", + "type": "[]ServicePrincipalSchema", + "description": "The schema of the List response.", + "required": false + } + } + }, + "iam.UpdateUserRequest": { + "name": "UpdateUserRequest", + "package": "iam", + "description": "", + "fields": { + "active": { + "name": "active", + "type": "bool", + "description": "If this user is active", + "required": false + }, + "displayName": { + "name": "displayName", + "type": "string", + "description": "String that represents a concatenation of given and family names. For\nexample `John Smith`. This field cannot be updated through the Workspace\nSCIM APIs when [identity federation is enabled]. Use Account SCIM APIs to\nupdate `displayName`.\n\n[identity federation is enabled]: https://docs.databricks.com/administration-guide/users-groups/best-practices.html#enable-identity-federation", + "required": false + }, + "emails": { + "name": "emails", + "type": "[]ComplexValue", + "description": "All the emails associated with the Databricks user.", + "required": false + }, + "entitlements": { + "name": "entitlements", + "type": "[]ComplexValue", + "description": "Entitlements assigned to the user. 
See [assigning entitlements] for a\nfull list of supported values.\n\n[assigning entitlements]: https://docs.databricks.com/administration-guide/users-groups/index.html#assigning-entitlements", + "required": false + }, + "externalId": { + "name": "externalId", + "type": "string", + "description": "External ID is not currently supported. It is reserved for future use.", + "required": false + }, + "groups": { + "name": "groups", + "type": "[]ComplexValue", + "description": "", + "required": false + }, + "name": { + "name": "name", + "type": "*Name", + "description": "", + "required": false + }, + "roles": { + "name": "roles", + "type": "[]ComplexValue", + "description": "Corresponds to AWS instance profile/arn role.", + "required": false + }, + "schemas": { + "name": "schemas", + "type": "[]UserSchema", + "description": "The schema of the user.", + "required": false + }, + "userName": { + "name": "userName", + "type": "string", + "description": "Email address of the Databricks user.", + "required": false + } + } + }, + "iam.UpdateWorkspaceAssignments": { + "name": "UpdateWorkspaceAssignments", + "package": "iam", + "description": "", + "fields": { + "permissions": { + "name": "permissions", + "type": "[]WorkspacePermission", + "description": "Array of permissions assignments to update on the workspace. Valid values\nare \"USER\" and \"ADMIN\" (case-sensitive). If both \"USER\" and \"ADMIN\" are\nprovided, \"ADMIN\" takes precedence. Other values will be ignored. Note\nthat excluding this field, or providing unsupported values, will have the\nsame effect as providing an empty list, which will result in the deletion\nof all permissions for the principal.", + "required": false + } + } + }, + "iam.User": { + "name": "User", + "package": "iam", + "description": "", + "fields": { + "active": { + "name": "active", + "type": "bool", + "description": "If this user is active", + "required": false + }, + "displayName": { + "name": "displayName", + "type": "string", + "description": "String that represents a concatenation of given and family names. For\nexample `John Smith`. This field cannot be updated through the Workspace\nSCIM APIs when [identity federation is enabled]. Use Account SCIM APIs to\nupdate `displayName`.\n\n[identity federation is enabled]: https://docs.databricks.com/administration-guide/users-groups/best-practices.html#enable-identity-federation", + "required": false + }, + "emails": { + "name": "emails", + "type": "[]ComplexValue", + "description": "All the emails associated with the Databricks user.", + "required": false + }, + "entitlements": { + "name": "entitlements", + "type": "[]ComplexValue", + "description": "Entitlements assigned to the user. See [assigning entitlements] for a\nfull list of supported values.\n\n[assigning entitlements]: https://docs.databricks.com/administration-guide/users-groups/index.html#assigning-entitlements", + "required": false + }, + "externalId": { + "name": "externalId", + "type": "string", + "description": "External ID is not currently supported. 
It is reserved for future use.", + "required": false + }, + "groups": { + "name": "groups", + "type": "[]ComplexValue", + "description": "", + "required": false + }, + "id": { + "name": "id", + "type": "string", + "description": "Databricks user ID.", + "required": false + }, + "name": { + "name": "name", + "type": "*Name", + "description": "", + "required": false + }, + "roles": { + "name": "roles", + "type": "[]ComplexValue", + "description": "Corresponds to AWS instance profile/arn role.", + "required": false + }, + "schemas": { + "name": "schemas", + "type": "[]UserSchema", + "description": "The schema of the user.", + "required": false + }, + "userName": { + "name": "userName", + "type": "string", + "description": "Email address of the Databricks user.", + "required": false + } + } + }, + "iam.WorkspacePermissions": { + "name": "WorkspacePermissions", + "package": "iam", + "description": "", + "fields": { + "permissions": { + "name": "permissions", + "type": "[]PermissionOutput", + "description": "Array of permissions defined for a workspace.", + "required": false + } + } + }, + "iam.accessControlImpl": { + "name": "accessControlImpl", + "package": "iam", + "description": "unexported type that holds implementations of just AccessControl API methods", + "fields": { + "client": { + "name": "client", + "type": "*client.DatabricksClient", + "description": "", + "required": false + } + } + }, + "iam.accountAccessControlImpl": { + "name": "accountAccessControlImpl", + "package": "iam", + "description": "unexported type that holds implementations of just AccountAccessControl API methods", + "fields": { + "client": { + "name": "client", + "type": "*client.DatabricksClient", + "description": "", + "required": false + } + } + }, + "iam.accountAccessControlProxyImpl": { + "name": "accountAccessControlProxyImpl", + "package": "iam", + "description": "unexported type that holds implementations of just AccountAccessControlProxy API methods", + "fields": { + "client": { + "name": "client", + "type": "*client.DatabricksClient", + "description": "", + "required": false + } + } + }, + "iam.accountGroupsImpl": { + "name": "accountGroupsImpl", + "package": "iam", + "description": "unexported type that holds implementations of just AccountGroups API methods", + "fields": { + "client": { + "name": "client", + "type": "*client.DatabricksClient", + "description": "", + "required": false + } + } + }, + "iam.accountGroupsV2Impl": { + "name": "accountGroupsV2Impl", + "package": "iam", + "description": "unexported type that holds implementations of just AccountGroupsV2 API methods", + "fields": { + "client": { + "name": "client", + "type": "*client.DatabricksClient", + "description": "", + "required": false + } + } + }, + "iam.accountServicePrincipalsImpl": { + "name": "accountServicePrincipalsImpl", + "package": "iam", + "description": "unexported type that holds implementations of just AccountServicePrincipals API methods", + "fields": { + "client": { + "name": "client", + "type": "*client.DatabricksClient", + "description": "", + "required": false + } + } + }, + "iam.accountServicePrincipalsV2Impl": { + "name": "accountServicePrincipalsV2Impl", + "package": "iam", + "description": "unexported type that holds implementations of just AccountServicePrincipalsV2 API methods", + "fields": { + "client": { + "name": "client", + "type": "*client.DatabricksClient", + "description": "", + "required": false + } + } + }, + "iam.accountUsersImpl": { + "name": "accountUsersImpl", + "package": "iam", + "description": 
"unexported type that holds implementations of just AccountUsers API methods", + "fields": { + "client": { + "name": "client", + "type": "*client.DatabricksClient", + "description": "", + "required": false + } + } + }, + "iam.accountUsersV2Impl": { + "name": "accountUsersV2Impl", + "package": "iam", + "description": "unexported type that holds implementations of just AccountUsersV2 API methods", + "fields": { + "client": { + "name": "client", + "type": "*client.DatabricksClient", + "description": "", + "required": false + } + } + }, + "iam.currentUserImpl": { + "name": "currentUserImpl", + "package": "iam", + "description": "unexported type that holds implementations of just CurrentUser API methods", + "fields": { + "client": { + "name": "client", + "type": "*client.DatabricksClient", + "description": "", + "required": false + } + } + }, + "iam.groupsImpl": { + "name": "groupsImpl", + "package": "iam", + "description": "unexported type that holds implementations of just Groups API methods", + "fields": { + "client": { + "name": "client", + "type": "*client.DatabricksClient", + "description": "", + "required": false + } + } + }, + "iam.groupsV2Impl": { + "name": "groupsV2Impl", + "package": "iam", + "description": "unexported type that holds implementations of just GroupsV2 API methods", + "fields": { + "client": { + "name": "client", + "type": "*client.DatabricksClient", + "description": "", + "required": false + } + } + }, + "iam.permissionMigrationImpl": { + "name": "permissionMigrationImpl", + "package": "iam", + "description": "unexported type that holds implementations of just PermissionMigration API methods", + "fields": { + "client": { + "name": "client", + "type": "*client.DatabricksClient", + "description": "", + "required": false + } + } + }, + "iam.permissionsImpl": { + "name": "permissionsImpl", + "package": "iam", + "description": "unexported type that holds implementations of just Permissions API methods", + "fields": { + "client": { + "name": "client", + "type": "*client.DatabricksClient", + "description": "", + "required": false + } + } + }, + "iam.servicePrincipalsImpl": { + "name": "servicePrincipalsImpl", + "package": "iam", + "description": "unexported type that holds implementations of just ServicePrincipals API methods", + "fields": { + "client": { + "name": "client", + "type": "*client.DatabricksClient", + "description": "", + "required": false + } + } + }, + "iam.servicePrincipalsV2Impl": { + "name": "servicePrincipalsV2Impl", + "package": "iam", + "description": "unexported type that holds implementations of just ServicePrincipalsV2 API methods", + "fields": { + "client": { + "name": "client", + "type": "*client.DatabricksClient", + "description": "", + "required": false + } + } + }, + "iam.usersImpl": { + "name": "usersImpl", + "package": "iam", + "description": "unexported type that holds implementations of just Users API methods", + "fields": { + "client": { + "name": "client", + "type": "*client.DatabricksClient", + "description": "", + "required": false + } + } + }, + "iam.usersV2Impl": { + "name": "usersV2Impl", + "package": "iam", + "description": "unexported type that holds implementations of just UsersV2 API methods", + "fields": { + "client": { + "name": "client", + "type": "*client.DatabricksClient", + "description": "", + "required": false + } + } + }, + "iam.workspaceAssignmentImpl": { + "name": "workspaceAssignmentImpl", + "package": "iam", + "description": "unexported type that holds implementations of just WorkspaceAssignment API methods", + 
"fields": { + "client": { + "name": "client", + "type": "*client.DatabricksClient", + "description": "", + "required": false + } + } + }, + "jobs.BaseJob": { + "name": "BaseJob", + "package": "jobs", + "description": "", + "fields": { + "created_time": { + "name": "created_time", + "type": "int64", + "description": "The time at which this job was created in epoch milliseconds\n(milliseconds since 1/1/1970 UTC).", + "required": false + }, + "creator_user_name": { + "name": "creator_user_name", + "type": "string", + "description": "The creator user name. This field won’t be included in the response if\nthe user has already been deleted.", + "required": false + }, + "effective_budget_policy_id": { + "name": "effective_budget_policy_id", + "type": "string", + "description": "The id of the budget policy used by this job for cost attribution\npurposes. This may be set through (in order of precedence): 1. Budget\nadmins through the account or workspace console 2. Jobs UI in the job\ndetails page and Jobs API using `budget_policy_id` 3. Inferred default\nbased on accessible budget policies of the run_as identity on job\ncreation or modification.", + "required": false + }, + "effective_usage_policy_id": { + "name": "effective_usage_policy_id", + "type": "string", + "description": "The id of the usage policy used by this job for cost attribution\npurposes.", + "required": false + }, + "has_more": { + "name": "has_more", + "type": "bool", + "description": "Indicates if the job has more array properties (`tasks`, `job_clusters`)\nthat are not shown. They can be accessed via :method:jobs/get endpoint.\nIt is only relevant for API 2.2 :method:jobs/list requests with\n`expand_tasks=true`.", + "required": false + }, + "job_id": { + "name": "job_id", + "type": "int64", + "description": "The canonical identifier for this job.", + "required": false + }, + "settings": { + "name": "settings", + "type": "*JobSettings", + "description": "Settings for this job and all of its runs. These settings can be updated\nusing the `resetJob` method.", + "required": false + }, + "trigger_state": { + "name": "trigger_state", + "type": "*TriggerStateProto", + "description": "State of the trigger associated with the job.", + "required": false + } + } + }, + "jobs.BaseRun": { + "name": "BaseRun", + "package": "jobs", + "description": "", + "fields": { + "attempt_number": { + "name": "attempt_number", + "type": "int", + "description": "The sequence number of this run attempt for a triggered job run. The\ninitial attempt of a run has an attempt_number of 0. If the initial run\nattempt fails, and the job has a retry policy (`max_retries` \u003e 0),\nsubsequent runs are created with an `original_attempt_run_id` of the\noriginal attempt’s ID and an incrementing `attempt_number`. Runs are\nretried only until they succeed, and the maximum `attempt_number` is the\nsame as the `max_retries` value for the job.", + "required": false + }, + "cleanup_duration": { + "name": "cleanup_duration", + "type": "int64", + "description": "The time in milliseconds it took to terminate the cluster and clean up\nany associated artifacts. The duration of a task run is the sum of the\n`setup_duration`, `execution_duration`, and the `cleanup_duration`. The\n`cleanup_duration` field is set to 0 for multitask job runs. 
The total\nduration of a multitask job run is the value of the `run_duration` field.", + "required": false + }, + "cluster_instance": { + "name": "cluster_instance", + "type": "*ClusterInstance", + "description": "The cluster used for this run. If the run is specified to use a new\ncluster, this field is set once the Jobs service has requested a cluster\nfor the run.", + "required": false + }, + "cluster_spec": { + "name": "cluster_spec", + "type": "*ClusterSpec", + "description": "A snapshot of the job’s cluster specification when this run was\ncreated.", + "required": false + }, + "creator_user_name": { + "name": "creator_user_name", + "type": "string", + "description": "The creator user name. This field won’t be included in the response if\nthe user has already been deleted.", + "required": false + }, + "description": { + "name": "description", + "type": "string", + "description": "Description of the run", + "required": false + }, + "effective_performance_target": { + "name": "effective_performance_target", + "type": "PerformanceTarget", + "description": "The actual performance target used by the serverless run during\nexecution. This can differ from the client-set performance target on the\nrequest depending on whether the performance mode is supported by the job\ntype.\n\n* `STANDARD`: Enables cost-efficient execution of serverless workloads. *\n`PERFORMANCE_OPTIMIZED`: Prioritizes fast startup and execution times\nthrough rapid scaling and optimized cluster performance.", + "required": false + }, + "effective_usage_policy_id": { + "name": "effective_usage_policy_id", + "type": "string", + "description": "The id of the usage policy used by this run for cost attribution\npurposes.", + "required": false + }, + "end_time": { + "name": "end_time", + "type": "int64", + "description": "The time at which this run ended in epoch milliseconds (milliseconds\nsince 1/1/1970 UTC). This field is set to 0 if the job is still running.", + "required": false + }, + "execution_duration": { + "name": "execution_duration", + "type": "int64", + "description": "The time in milliseconds it took to execute the commands in the JAR or\nnotebook until they completed, failed, timed out, were cancelled, or\nencountered an unexpected error. The duration of a task run is the sum of\nthe `setup_duration`, `execution_duration`, and the `cleanup_duration`.\nThe `execution_duration` field is set to 0 for multitask job runs. The\ntotal duration of a multitask job run is the value of the `run_duration`\nfield.", + "required": false + }, + "git_source": { + "name": "git_source", + "type": "*GitSource", + "description": "An optional specification for a remote Git repository containing the\nsource code used by tasks. Version-controlled source code is supported by\nnotebook, dbt, Python script, and SQL File tasks.\n\nIf `git_source` is set, these tasks retrieve the file from the remote\nrepository by default. However, this behavior can be overridden by\nsetting `source` to `WORKSPACE` on the task.\n\nNote: dbt and SQL File tasks support only version-controlled sources. If\ndbt or SQL File tasks are used, `git_source` mu...", + "required": false + }, + "has_more": { + "name": "has_more", + "type": "bool", + "description": "Indicates if the run has more array properties (`tasks`, `job_clusters`)\nthat are not shown. They can be accessed via :method:jobs/getrun\nendpoint. 
It is only relevant for API 2.2 :method:jobs/listruns requests\nwith `expand_tasks=true`.", + "required": false + }, + "job_clusters": { + "name": "job_clusters", + "type": "[]JobCluster", + "description": "A list of job cluster specifications that can be shared and reused by\ntasks of this job. Libraries cannot be declared in a shared job cluster.\nYou must declare dependent libraries in task settings. If more than 100\njob clusters are available, you can paginate through them using\n:method:jobs/getrun.", + "required": false + }, + "job_id": { + "name": "job_id", + "type": "int64", + "description": "The canonical identifier of the job that contains this run.", + "required": false + }, + "job_parameters": { + "name": "job_parameters", + "type": "[]JobParameter", + "description": "Job-level parameters used in the run", + "required": false + }, + "job_run_id": { + "name": "job_run_id", + "type": "int64", + "description": "ID of the job run that this run belongs to. For legacy and single-task\njob runs the field is populated with the job run ID. For task runs, the\nfield is populated with the ID of the job run that the task run belongs\nto.", + "required": false + }, + "number_in_job": { + "name": "number_in_job", + "type": "int64", + "description": "A unique identifier for this job run. This is set to the same value as\n`run_id`.", + "required": false + }, + "original_attempt_run_id": { + "name": "original_attempt_run_id", + "type": "int64", + "description": "If this run is a retry of a prior run attempt, this field contains the\nrun_id of the original attempt; otherwise, it is the same as the run_id.", + "required": false + }, + "overriding_parameters": { + "name": "overriding_parameters", + "type": "*RunParameters", + "description": "The parameters used for this run.", + "required": false + }, + "queue_duration": { + "name": "queue_duration", + "type": "int64", + "description": "The time in milliseconds that the run has spent in the queue.", + "required": false + }, + "repair_history": { + "name": "repair_history", + "type": "[]RepairHistoryItem", + "description": "The repair history of the run.", + "required": false + }, + "run_duration": { + "name": "run_duration", + "type": "int64", + "description": "The time in milliseconds it took the job run and all of its repairs to\nfinish.", + "required": false + }, + "run_id": { + "name": "run_id", + "type": "int64", + "description": "The canonical identifier of the run. This ID is unique across all runs of\nall jobs.", + "required": false + }, + "run_name": { + "name": "run_name", + "type": "string", + "description": "An optional name for the run. The maximum length is 4096 bytes in UTF-8\nencoding.", + "required": false + }, + "run_page_url": { + "name": "run_page_url", + "type": "string", + "description": "The URL to the detail page of the run.", + "required": false + }, + "run_type": { + "name": "run_type", + "type": "RunType", + "description": "", + "required": false + }, + "schedule": { + "name": "schedule", + "type": "*CronSchedule", + "description": "The cron schedule that triggered this run if it was triggered by the\nperiodic scheduler.", + "required": false + }, + "setup_duration": { + "name": "setup_duration", + "type": "int64", + "description": "The time in milliseconds it took to set up the cluster. For runs that run\non new clusters this is the cluster creation time, for runs that run on\nexisting clusters this time should be very short. 
The duration of a task\nrun is the sum of the `setup_duration`, `execution_duration`, and the\n`cleanup_duration`. The `setup_duration` field is set to 0 for multitask\njob runs. The total duration of a multitask job run is the value of the\n`run_duration` field.", + "required": false + }, + "start_time": { + "name": "start_time", + "type": "int64", + "description": "The time at which this run was started in epoch milliseconds\n(milliseconds since 1/1/1970 UTC). This may not be the time when the job\ntask starts executing, for example, if the job is scheduled to run on a\nnew cluster, this is the time the cluster creation call is issued.", + "required": false + }, + "state": { + "name": "state", + "type": "*RunState", + "description": "Deprecated. Please use the `status` field instead.", + "required": false + }, + "status": { + "name": "status", + "type": "*RunStatus", + "description": "", + "required": false + }, + "tasks": { + "name": "tasks", + "type": "[]RunTask", + "description": "The list of tasks performed by the run. Each task has its own `run_id`\nwhich you can use to call `JobsGetOutput` to retrieve the run resutls. If\nmore than 100 tasks are available, you can paginate through them using\n:method:jobs/getrun. Use the `next_page_token` field at the object root\nto determine if more results are available.", + "required": false + }, + "trigger": { + "name": "trigger", + "type": "TriggerType", + "description": "", + "required": false + }, + "trigger_info": { + "name": "trigger_info", + "type": "*TriggerInfo", + "description": "", + "required": false + } + } + }, + "jobs.CancelAllRuns": { + "name": "CancelAllRuns", + "package": "jobs", + "description": "", + "fields": { + "all_queued_runs": { + "name": "all_queued_runs", + "type": "bool", + "description": "Optional boolean parameter to cancel all queued runs. If no job_id is\nprovided, all queued runs in the workspace are canceled.", + "required": false + }, + "job_id": { + "name": "job_id", + "type": "int64", + "description": "The canonical identifier of the job to cancel all runs of.", + "required": false + } + } + }, + "jobs.CancelRun": { + "name": "CancelRun", + "package": "jobs", + "description": "", + "fields": { + "run_id": { + "name": "run_id", + "type": "int64", + "description": "This field is required.", + "required": false + } + } + }, + "jobs.CleanRoomTaskRunState": { + "name": "CleanRoomTaskRunState", + "package": "jobs", + "description": "Stores the run state of the clean rooms notebook task.", + "fields": { + "life_cycle_state": { + "name": "life_cycle_state", + "type": "CleanRoomTaskRunLifeCycleState", + "description": "A value indicating the run's current lifecycle state. This field is\nalways available in the response. Note: Additional states might be\nintroduced in future releases.", + "required": false + }, + "result_state": { + "name": "result_state", + "type": "CleanRoomTaskRunResultState", + "description": "A value indicating the run's result. This field is only available for\nterminal lifecycle states. 
Note: Additional states might be introduced in\nfuture releases.", + "required": false + } + } + }, + "jobs.CleanRoomsNotebookTask": { + "name": "CleanRoomsNotebookTask", + "package": "jobs", + "description": "Clean Rooms notebook task for V1 Clean Room service (GA).\nReplaces the deprecated CleanRoomNotebookTask (defined above) which was for V0 service.", + "fields": { + "clean_room_name": { + "name": "clean_room_name", + "type": "string", + "description": "The clean room that the notebook belongs to.", + "required": false + }, + "etag": { + "name": "etag", + "type": "string", + "description": "Checksum to validate the freshness of the notebook resource (i.e. the\nnotebook being run is the latest version). It can be fetched by calling\nthe :method:cleanroomassets/get API.", + "required": false + }, + "notebook_base_parameters": { + "name": "notebook_base_parameters", + "type": "map[string]string", + "description": "Base parameters to be used for the clean room notebook job.", + "required": false + }, + "notebook_name": { + "name": "notebook_name", + "type": "string", + "description": "Name of the notebook being run.", + "required": false + } + } + }, + "jobs.CleanRoomsNotebookTaskCleanRoomsNotebookTaskOutput": { + "name": "CleanRoomsNotebookTaskCleanRoomsNotebookTaskOutput", + "package": "jobs", + "description": "", + "fields": { + "clean_room_job_run_state": { + "name": "clean_room_job_run_state", + "type": "*CleanRoomTaskRunState", + "description": "The run state of the clean rooms notebook task.", + "required": false + }, + "notebook_output": { + "name": "notebook_output", + "type": "*NotebookOutput", + "description": "The notebook output for the clean room run", + "required": false + }, + "output_schema_info": { + "name": "output_schema_info", + "type": "*OutputSchemaInfo", + "description": "Information on how to access the output schema for the clean room run", + "required": false + } + } + }, + "jobs.ClusterInstance": { + "name": "ClusterInstance", + "package": "jobs", + "description": "", + "fields": { + "cluster_id": { + "name": "cluster_id", + "type": "string", + "description": "The canonical identifier for the cluster used by a run. This field is\nalways available for runs on existing clusters. For runs on new clusters,\nit becomes available once the cluster is created. This value can be used\nto view logs by browsing to `/#setting/sparkui/$cluster_id/driver-logs`.\nThe logs continue to be available after the run completes.\n\nThe response won’t include this field if the identifier is not\navailable yet.", + "required": false + }, + "spark_context_id": { + "name": "spark_context_id", + "type": "string", + "description": "The canonical identifier for the Spark context used by a run. This field\nis filled in once the run begins execution. This value can be used to\nview the Spark UI by browsing to\n`/#setting/sparkui/$cluster_id/$spark_context_id`. The Spark UI continues\nto be available after the run has completed.\n\nThe response won’t include this field if the identifier is not\navailable yet.", + "required": false + } + } + }, + "jobs.ClusterSpec": { + "name": "ClusterSpec", + "package": "jobs", + "description": "", + "fields": { + "existing_cluster_id": { + "name": "existing_cluster_id", + "type": "string", + "description": "If existing_cluster_id, the ID of an existing cluster that is used for\nall runs. When running jobs or tasks on an existing cluster, you may need\nto manually restart the cluster if it stops responding. 
We suggest\nrunning jobs and tasks on new clusters for greater reliability", + "required": false + }, + "job_cluster_key": { + "name": "job_cluster_key", + "type": "string", + "description": "If job_cluster_key, this task is executed reusing the cluster specified\nin `job.settings.job_clusters`.", + "required": false + }, + "libraries": { + "name": "libraries", + "type": "[]compute.Library", + "description": "An optional list of libraries to be installed on the cluster. The default\nvalue is an empty list.", + "required": false + }, + "new_cluster": { + "name": "new_cluster", + "type": "*compute.ClusterSpec", + "description": "If new_cluster, a description of a new cluster that is created for each\nrun.", + "required": false + } + } + }, + "jobs.ComputeConfig": { + "name": "ComputeConfig", + "package": "jobs", + "description": "", + "fields": { + "gpu_node_pool_id": { + "name": "gpu_node_pool_id", + "type": "string", + "description": "IDof the GPU pool to use.", + "required": false + }, + "gpu_type": { + "name": "gpu_type", + "type": "string", + "description": "GPU type.", + "required": false + }, + "num_gpus": { + "name": "num_gpus", + "type": "int", + "description": "Number of GPUs.", + "required": false + } + } + }, + "jobs.ConditionTask": { + "name": "ConditionTask", + "package": "jobs", + "description": "", + "fields": { + "left": { + "name": "left", + "type": "string", + "description": "The left operand of the condition task. Can be either a string value or a\njob state or parameter reference.", + "required": false + }, + "op": { + "name": "op", + "type": "ConditionTaskOp", + "description": "* `EQUAL_TO`, `NOT_EQUAL` operators perform string comparison of their\noperands. This means that `“12.0” == “12”` will evaluate to\n`false`. * `GREATER_THAN`, `GREATER_THAN_OR_EQUAL`, `LESS_THAN`,\n`LESS_THAN_OR_EQUAL` operators perform numeric comparison of their\noperands. `“12.0” \u003e= “12”` will evaluate to `true`, `“10.0”\n\u003e= “12”` will evaluate to `false`.\n\nThe boolean comparison to task values can be implemented with operators\n`EQUAL_TO`, `NOT_EQUAL`. If a task value w...", + "required": false + }, + "right": { + "name": "right", + "type": "string", + "description": "The right operand of the condition task. Can be either a string value or\na job state or parameter reference.", + "required": false + } + } + }, + "jobs.ConditionTaskOp": { + "name": "ConditionTaskOp", + "package": "jobs", + "description": "* `EQUAL_TO`, `NOT_EQUAL` operators perform string comparison of their operands. This means that `“12.0” == “12”` will evaluate to `false`.\n* `GREATER_THAN`, `GREATER_THAN_OR_EQUAL`, `LESS_THAN`, `LESS_THAN_OR_EQUAL` operators perform numeric comparison of their operands. `“12.0” \u003e= “12”` will evaluate to `true`, `“10.0” \u003e= “12”` will evaluate to `false`.\n\nThe boolean comparison to task values can be implemented with operators `EQUAL_TO`, `NOT_EQUAL`. If a task value was set to a boolean value, it will be serialized to `“true”` or `“false”` for the comparison.", + "fields": {} + }, + "jobs.Continuous": { + "name": "Continuous", + "package": "jobs", + "description": "", + "fields": { + "pause_status": { + "name": "pause_status", + "type": "PauseStatus", + "description": "Indicate whether the continuous execution of the job is paused or not.\nDefaults to UNPAUSED.", + "required": false + }, + "task_retry_mode": { + "name": "task_retry_mode", + "type": "TaskRetryMode", + "description": "Indicate whether the continuous job is applying task level retries or\nnot. 
Defaults to NEVER.", + "required": false + } + } + }, + "jobs.CreateJob": { + "name": "CreateJob", + "package": "jobs", + "description": "", + "fields": { + "access_control_list": { + "name": "access_control_list", + "type": "[]JobAccessControlRequest", + "description": "List of permissions to set on the job.", + "required": false + }, + "budget_policy_id": { + "name": "budget_policy_id", + "type": "string", + "description": "The id of the user specified budget policy to use for this job. If not\nspecified, a default budget policy may be applied when creating or\nmodifying the job. See `effective_budget_policy_id` for the budget policy\nused by this workload.", + "required": false + }, + "continuous": { + "name": "continuous", + "type": "*Continuous", + "description": "An optional continuous property for this job. The continuous property\nwill ensure that there is always one run executing. Only one of\n`schedule` and `continuous` can be used.", + "required": false + }, + "deployment": { + "name": "deployment", + "type": "*JobDeployment", + "description": "Deployment information for jobs managed by external sources.", + "required": false + }, + "description": { + "name": "description", + "type": "string", + "description": "An optional description for the job. The maximum length is 27700\ncharacters in UTF-8 encoding.", + "required": false + }, + "edit_mode": { + "name": "edit_mode", + "type": "JobEditMode", + "description": "Edit mode of the job.\n\n* `UI_LOCKED`: The job is in a locked UI state and cannot be modified. *\n`EDITABLE`: The job is in an editable state and can be modified.", + "required": false + }, + "email_notifications": { + "name": "email_notifications", + "type": "*JobEmailNotifications", + "description": "An optional set of email addresses that is notified when runs of this job\nbegin or complete as well as when this job is deleted.", + "required": false + }, + "environments": { + "name": "environments", + "type": "[]JobEnvironment", + "description": "A list of task execution environment specifications that can be\nreferenced by serverless tasks of this job. For serverless notebook\ntasks, if the environment_key is not specified, the notebook environment\nwill be used if present. If a jobs environment is specified, it will\noverride the notebook environment. For other serverless tasks, the task\nenvironment is required to be specified using environment_key in the task\nsettings.", + "required": false + }, + "format": { + "name": "format", + "type": "Format", + "description": "Used to tell what is the format of the job. This field is ignored in\nCreate/Update/Reset calls. When using the Jobs API 2.1 this value is\nalways set to `\"MULTI_TASK\"`.", + "required": false + }, + "git_source": { + "name": "git_source", + "type": "*GitSource", + "description": "An optional specification for a remote Git repository containing the\nsource code used by tasks. Version-controlled source code is supported by\nnotebook, dbt, Python script, and SQL File tasks.\n\nIf `git_source` is set, these tasks retrieve the file from the remote\nrepository by default. However, this behavior can be overridden by\nsetting `source` to `WORKSPACE` on the task.\n\nNote: dbt and SQL File tasks support only version-controlled sources. 
If\ndbt or SQL File tasks are used, `git_source` mu...", + "required": false + }, + "health": { + "name": "health", + "type": "*JobsHealthRules", + "description": "", + "required": false + }, + "job_clusters": { + "name": "job_clusters", + "type": "[]JobCluster", + "description": "A list of job cluster specifications that can be shared and reused by\ntasks of this job. Libraries cannot be declared in a shared job cluster.\nYou must declare dependent libraries in task settings.", + "required": false + }, + "max_concurrent_runs": { + "name": "max_concurrent_runs", + "type": "int", + "description": "An optional maximum allowed number of concurrent runs of the job. Set\nthis value if you want to be able to execute multiple runs of the same\njob concurrently. This is useful for example if you trigger your job on a\nfrequent schedule and want to allow consecutive runs to overlap with each\nother, or if you want to trigger multiple runs which differ by their\ninput parameters. This setting affects only new runs. For example,\nsuppose the job’s concurrency is 4 and there are 4 concurrent active\nr...", + "required": false + }, + "name": { + "name": "name", + "type": "string", + "description": "An optional name for the job. The maximum length is 4096 bytes in UTF-8\nencoding.", + "required": false + }, + "notification_settings": { + "name": "notification_settings", + "type": "*JobNotificationSettings", + "description": "Optional notification settings that are used when sending notifications\nto each of the `email_notifications` and `webhook_notifications` for this\njob.", + "required": false + }, + "parameters": { + "name": "parameters", + "type": "[]JobParameterDefinition", + "description": "Job-level parameter definitions", + "required": false + }, + "performance_target": { + "name": "performance_target", + "type": "PerformanceTarget", + "description": "The performance mode on a serverless job. This field determines the level\nof compute performance or cost-efficiency for the run. The performance\ntarget does not apply to tasks that run on Serverless GPU compute.\n\n* `STANDARD`: Enables cost-efficient execution of serverless workloads. *\n`PERFORMANCE_OPTIMIZED`: Prioritizes fast startup and execution times\nthrough rapid scaling and optimized cluster performance.", + "required": false + }, + "queue": { + "name": "queue", + "type": "*QueueSettings", + "description": "The queue settings of the job.", + "required": false + }, + "run_as": { + "name": "run_as", + "type": "*JobRunAs", + "description": "The user or service principal that the job runs as, if specified in the\nrequest. This field indicates the explicit configuration of `run_as` for\nthe job. To find the value in all cases, explicit or implicit, use\n`run_as_user_name`.", + "required": false + }, + "schedule": { + "name": "schedule", + "type": "*CronSchedule", + "description": "An optional periodic schedule for this job. The default behavior is that\nthe job only runs when triggered by clicking “Run Now” in the Jobs UI\nor sending an API request to `runNow`.", + "required": false + }, + "tags": { + "name": "tags", + "type": "map[string]string", + "description": "A map of tags associated with the job. These are forwarded to the cluster\nas cluster tags for jobs clusters, and are subject to the same\nlimitations as cluster tags. A maximum of 25 tags can be added to the\njob.", + "required": false + }, + "tasks": { + "name": "tasks", + "type": "[]Task", + "description": "A list of task specifications to be executed by this job. 
It supports up\nto 1000 elements in write endpoints (:method:jobs/create,\n:method:jobs/reset, :method:jobs/update, :method:jobs/submit). Read\nendpoints return only 100 tasks. If more than 100 tasks are available,\nyou can paginate through them using :method:jobs/get. Use the\n`next_page_token` field at the object root to determine if more results\nare available.", + "required": false + }, + "timeout_seconds": { + "name": "timeout_seconds", + "type": "int", + "description": "An optional timeout applied to each run of this job. A value of `0` means\nno timeout.", + "required": false + }, + "trigger": { + "name": "trigger", + "type": "*TriggerSettings", + "description": "A configuration to trigger a run when certain conditions are met. The\ndefault behavior is that the job runs only when triggered by clicking\n“Run Now” in the Jobs UI or sending an API request to `runNow`.", + "required": false + }, + "usage_policy_id": { + "name": "usage_policy_id", + "type": "string", + "description": "The id of the user specified usage policy to use for this job. If not\nspecified, a default usage policy may be applied when creating or\nmodifying the job. See `effective_usage_policy_id` for the usage policy\nused by this workload.", + "required": false + }, + "webhook_notifications": { + "name": "webhook_notifications", + "type": "*WebhookNotifications", + "description": "A collection of system notification IDs to notify when runs of this job\nbegin or complete.", + "required": false + } + } + }, + "jobs.CreateResponse": { + "name": "CreateResponse", + "package": "jobs", + "description": "Job was created successfully", + "fields": { + "job_id": { + "name": "job_id", + "type": "int64", + "description": "The canonical identifier for the newly created job.", + "required": false + } + } + }, + "jobs.CronSchedule": { + "name": "CronSchedule", + "package": "jobs", + "description": "", + "fields": { + "pause_status": { + "name": "pause_status", + "type": "PauseStatus", + "description": "Indicate whether this schedule is paused or not.", + "required": false + }, + "quartz_cron_expression": { + "name": "quartz_cron_expression", + "type": "string", + "description": "A Cron expression using Quartz syntax that describes the schedule for a\njob. See [Cron Trigger] for details. This field is required.\n\n[Cron Trigger]: http://www.quartz-scheduler.org/documentation/quartz-2.3.0/tutorials/crontrigger.html", + "required": false + }, + "timezone_id": { + "name": "timezone_id", + "type": "string", + "description": "A Java timezone ID. The schedule for a job is resolved with respect to\nthis timezone. See [Java TimeZone] for details. 
This field is required.\n\n[Java TimeZone]: https://docs.oracle.com/javase/7/docs/api/java/util/TimeZone.html", + "required": false + } + } + }, + "jobs.DashboardPageSnapshot": { + "name": "DashboardPageSnapshot", + "package": "jobs", + "description": "", + "fields": { + "page_display_name": { + "name": "page_display_name", + "type": "string", + "description": "", + "required": false + }, + "widget_error_details": { + "name": "widget_error_details", + "type": "[]WidgetErrorDetail", + "description": "", + "required": false + } + } + }, + "jobs.DashboardTask": { + "name": "DashboardTask", + "package": "jobs", + "description": "Configures the Lakeview Dashboard job task type.", + "fields": { + "dashboard_id": { + "name": "dashboard_id", + "type": "string", + "description": "The identifier of the dashboard to refresh.", + "required": false + }, + "subscription": { + "name": "subscription", + "type": "*Subscription", + "description": "Optional: subscription configuration for sending the dashboard snapshot.", + "required": false + }, + "warehouse_id": { + "name": "warehouse_id", + "type": "string", + "description": "Optional: The warehouse id to execute the dashboard with for the\nschedule. If not specified, the default warehouse of the dashboard will\nbe used.", + "required": false + } + } + }, + "jobs.DashboardTaskOutput": { + "name": "DashboardTaskOutput", + "package": "jobs", + "description": "", + "fields": { + "page_snapshots": { + "name": "page_snapshots", + "type": "[]DashboardPageSnapshot", + "description": "Should only be populated for manual PDF download jobs.", + "required": false + } + } + }, + "jobs.DbtCloudJobRunStep": { + "name": "DbtCloudJobRunStep", + "package": "jobs", + "description": "Format of response retrieved from dbt Cloud, for inclusion in output\nDeprecated in favor of DbtPlatformJobRunStep", + "fields": { + "index": { + "name": "index", + "type": "int", + "description": "Orders the steps in the job", + "required": false + }, + "logs": { + "name": "logs", + "type": "string", + "description": "Output of the step", + "required": false + }, + "name": { + "name": "name", + "type": "string", + "description": "Name of the step in the job", + "required": false + }, + "status": { + "name": "status", + "type": "DbtPlatformRunStatus", + "description": "State of the step", + "required": false + } + } + }, + "jobs.DbtCloudTask": { + "name": "DbtCloudTask", + "package": "jobs", + "description": "Deprecated in favor of DbtPlatformTask", + "fields": { + "connection_resource_name": { + "name": "connection_resource_name", + "type": "string", + "description": "The resource name of the UC connection that authenticates the dbt Cloud\nfor this task", + "required": false + }, + "dbt_cloud_job_id": { + "name": "dbt_cloud_job_id", + "type": "int64", + "description": "Id of the dbt Cloud job to be triggered", + "required": false + } + } + }, + "jobs.DbtCloudTaskOutput": { + "name": "DbtCloudTaskOutput", + "package": "jobs", + "description": "Deprecated in favor of DbtPlatformTaskOutput", + "fields": { + "dbt_cloud_job_run_id": { + "name": "dbt_cloud_job_run_id", + "type": "int64", + "description": "Id of the job run in dbt Cloud", + "required": false + }, + "dbt_cloud_job_run_output": { + "name": "dbt_cloud_job_run_output", + "type": "[]DbtCloudJobRunStep", + "description": "Steps of the job run as received from dbt Cloud", + "required": false + }, + "dbt_cloud_job_run_url": { + "name": "dbt_cloud_job_run_url", + "type": "string", + "description": "Url where full run details can be viewed", + 
"required": false + } + } + }, + "jobs.DbtOutput": { + "name": "DbtOutput", + "package": "jobs", + "description": "", + "fields": { + "artifacts_headers": { + "name": "artifacts_headers", + "type": "map[string]string", + "description": "An optional map of headers to send when retrieving the artifact from the\n`artifacts_link`.", + "required": false + }, + "artifacts_link": { + "name": "artifacts_link", + "type": "string", + "description": "A pre-signed URL to download the (compressed) dbt artifacts. This link is\nvalid for a limited time (30 minutes). This information is only available\nafter the run has finished.", + "required": false + } + } + }, + "jobs.DbtPlatformJobRunStep": { + "name": "DbtPlatformJobRunStep", + "package": "jobs", + "description": "Format of response retrieved from dbt platform, for inclusion in output", + "fields": { + "index": { + "name": "index", + "type": "int", + "description": "Orders the steps in the job", + "required": false + }, + "logs": { + "name": "logs", + "type": "string", + "description": "Output of the step", + "required": false + }, + "logs_truncated": { + "name": "logs_truncated", + "type": "bool", + "description": "Whether the logs of this step have been truncated. If true, the logs has\nbeen truncated to 10000 characters.", + "required": false + }, + "name": { + "name": "name", + "type": "string", + "description": "Name of the step in the job", + "required": false + }, + "name_truncated": { + "name": "name_truncated", + "type": "bool", + "description": "Whether the name of the job has been truncated. If true, the name has\nbeen truncated to 100 characters.", + "required": false + }, + "status": { + "name": "status", + "type": "DbtPlatformRunStatus", + "description": "State of the step", + "required": false + } + } + }, + "jobs.DbtPlatformTask": { + "name": "DbtPlatformTask", + "package": "jobs", + "description": "", + "fields": { + "connection_resource_name": { + "name": "connection_resource_name", + "type": "string", + "description": "The resource name of the UC connection that authenticates the dbt\nplatform for this task", + "required": false + }, + "dbt_platform_job_id": { + "name": "dbt_platform_job_id", + "type": "string", + "description": "Id of the dbt platform job to be triggered. Specified as a string for\nmaximum compatibility with clients.", + "required": false + } + } + }, + "jobs.DbtPlatformTaskOutput": { + "name": "DbtPlatformTaskOutput", + "package": "jobs", + "description": "", + "fields": { + "dbt_platform_job_run_id": { + "name": "dbt_platform_job_run_id", + "type": "string", + "description": "Id of the job run in dbt platform. Specified as a string for maximum\ncompatibility with clients.", + "required": false + }, + "dbt_platform_job_run_output": { + "name": "dbt_platform_job_run_output", + "type": "[]DbtPlatformJobRunStep", + "description": "Steps of the job run as received from dbt platform", + "required": false + }, + "dbt_platform_job_run_url": { + "name": "dbt_platform_job_run_url", + "type": "string", + "description": "Url where full run details can be viewed", + "required": false + }, + "steps_truncated": { + "name": "steps_truncated", + "type": "bool", + "description": "Whether the number of steps in the output has been truncated. 
If true,\nthe output will contain the first 20 steps of the output.", + "required": false + } + } + }, + "jobs.DbtTask": { + "name": "DbtTask", + "package": "jobs", + "description": "", + "fields": { + "catalog": { + "name": "catalog", + "type": "string", + "description": "Optional name of the catalog to use. The value is the top level in the\n3-level namespace of Unity Catalog (catalog / schema / relation). The\ncatalog value can only be specified if a warehouse_id is specified.\nRequires dbt-databricks \u003e= 1.1.1.", + "required": false + }, + "commands": { + "name": "commands", + "type": "[]string", + "description": "A list of dbt commands to execute. All commands must start with `dbt`.\nThis parameter must not be empty. A maximum of up to 10 commands can be\nprovided.", + "required": false + }, + "profiles_directory": { + "name": "profiles_directory", + "type": "string", + "description": "Optional (relative) path to the profiles directory. Can only be specified\nif no warehouse_id is specified. If no warehouse_id is specified and this\nfolder is unset, the root directory is used.", + "required": false + }, + "project_directory": { + "name": "project_directory", + "type": "string", + "description": "Path to the project directory. Optional for Git sourced tasks, in which\ncase if no value is provided, the root of the Git repository is used.", + "required": false + }, + "schema": { + "name": "schema", + "type": "string", + "description": "Optional schema to write to. This parameter is only used when a\nwarehouse_id is also provided. If not provided, the `default` schema is\nused.", + "required": false + }, + "source": { + "name": "source", + "type": "Source", + "description": "Optional location type of the project directory. When set to `WORKSPACE`,\nthe project will be retrieved from the local Databricks workspace. When\nset to `GIT`, the project will be retrieved from a Git repository defined\nin `git_source`. If the value is empty, the task will use `GIT` if\n`git_source` is defined and `WORKSPACE` otherwise.\n\n* `WORKSPACE`: Project is located in Databricks workspace. * `GIT`:\nProject is located in cloud Git provider.", + "required": false + }, + "warehouse_id": { + "name": "warehouse_id", + "type": "string", + "description": "ID of the SQL warehouse to connect to. If provided, we automatically\ngenerate and provide the profile and connection details to dbt. It can be\noverridden on a per-command basis by using the `--profiles-dir` command\nline argument.", + "required": false + } + } + }, + "jobs.DeleteJob": { + "name": "DeleteJob", + "package": "jobs", + "description": "", + "fields": { + "job_id": { + "name": "job_id", + "type": "int64", + "description": "The canonical identifier of the job to delete. 
This field is required.", + "required": false + } + } + }, + "jobs.DeleteRun": { + "name": "DeleteRun", + "package": "jobs", + "description": "", + "fields": { + "run_id": { + "name": "run_id", + "type": "int64", + "description": "ID of the run to delete.", + "required": false + } + } + }, + "jobs.EnforcePolicyComplianceForJobResponseJobClusterSettingsChange": { + "name": "EnforcePolicyComplianceForJobResponseJobClusterSettingsChange", + "package": "jobs", + "description": "Represents a change to the job cluster's settings that would be required for\nthe job clusters to become compliant with their policies.", + "fields": { + "field": { + "name": "field", + "type": "string", + "description": "The field where this change would be made, prepended with the job cluster\nkey.", + "required": false + }, + "new_value": { + "name": "new_value", + "type": "string", + "description": "The new value of this field after enforcing policy compliance (either a\nnumber, a boolean, or a string) converted to a string. This is intended\nto be read by a human. The typed new value of this field can be retrieved\nby reading the settings field in the API response.", + "required": false + }, + "previous_value": { + "name": "previous_value", + "type": "string", + "description": "The previous value of this field before enforcing policy compliance\n(either a number, a boolean, or a string) converted to a string. This is\nintended to be read by a human. The type of the field can be retrieved by\nreading the settings field in the API response.", + "required": false + } + } + }, + "jobs.EnforcePolicyComplianceRequest": { + "name": "EnforcePolicyComplianceRequest", + "package": "jobs", + "description": "", + "fields": { + "job_id": { + "name": "job_id", + "type": "int64", + "description": "The ID of the job you want to enforce policy compliance on.", + "required": false + }, + "validate_only": { + "name": "validate_only", + "type": "bool", + "description": "If set, previews changes made to the job to comply with its policy, but\ndoes not update the job.", + "required": false + } + } + }, + "jobs.EnforcePolicyComplianceResponse": { + "name": "EnforcePolicyComplianceResponse", + "package": "jobs", + "description": "", + "fields": { + "has_changes": { + "name": "has_changes", + "type": "bool", + "description": "Whether any changes have been made to the job cluster settings for the\njob to become compliant with its policies.", + "required": false + }, + "job_cluster_changes": { + "name": "job_cluster_changes", + "type": "[]EnforcePolicyComplianceForJobResponseJobClusterSettingsChange", + "description": "A list of job cluster changes that have been made to the job’s cluster\nsettings in order for all job clusters to become compliant with their\npolicies.", + "required": false + }, + "settings": { + "name": "settings", + "type": "*JobSettings", + "description": "Updated job settings after policy enforcement. Policy enforcement only\napplies to job clusters that are created when running the job (which are\nspecified in new_cluster) and does not apply to existing all-purpose\nclusters. 
Updated job settings are derived by applying policy default\nvalues to the existing job clusters in order to satisfy policy\nrequirements.", + "required": false + } + } + }, + "jobs.ExportRunOutput": { + "name": "ExportRunOutput", + "package": "jobs", + "description": "Run was exported successfully.", + "fields": { + "views": { + "name": "views", + "type": "[]ViewItem", + "description": "The exported content in HTML format (one for every view item). To extract\nthe HTML notebook from the JSON response, download and run this [Python\nscript](/_static/examples/extract.py).", + "required": false + } + } + }, + "jobs.FileArrivalTriggerConfiguration": { + "name": "FileArrivalTriggerConfiguration", + "package": "jobs", + "description": "", + "fields": { + "min_time_between_triggers_seconds": { + "name": "min_time_between_triggers_seconds", + "type": "int", + "description": "If set, the trigger starts a run only after the specified amount of time\npassed since the last time the trigger fired. The minimum allowed value\nis 60 seconds", + "required": false + }, + "url": { + "name": "url", + "type": "string", + "description": "URL to be monitored for file arrivals. The path must point to the root or\na subpath of the external location.", + "required": false + }, + "wait_after_last_change_seconds": { + "name": "wait_after_last_change_seconds", + "type": "int", + "description": "If set, the trigger starts a run only after no file activity has occurred\nfor the specified amount of time. This makes it possible to wait for a\nbatch of incoming files to arrive before triggering a run. The minimum\nallowed value is 60 seconds.", + "required": false + } + } + }, + "jobs.FileArrivalTriggerState": { + "name": "FileArrivalTriggerState", + "package": "jobs", + "description": "", + "fields": { + "using_file_events": { + "name": "using_file_events", + "type": "bool", + "description": "Indicates whether the trigger leverages file events to detect file\narrivals.", + "required": false + } + } + }, + "jobs.ForEachStats": { + "name": "ForEachStats", + "package": "jobs", + "description": "", + "fields": { + "error_message_stats": { + "name": "error_message_stats", + "type": "[]ForEachTaskErrorMessageStats", + "description": "Sample of 3 most common error messages occurred during the iteration.", + "required": false + }, + "task_run_stats": { + "name": "task_run_stats", + "type": "*ForEachTaskTaskRunStats", + "description": "Describes stats of the iteration. Only latest retries are considered.", + "required": false + } + } + }, + "jobs.ForEachTask": { + "name": "ForEachTask", + "package": "jobs", + "description": "", + "fields": { + "concurrency": { + "name": "concurrency", + "type": "int", + "description": "An optional maximum allowed number of concurrent runs of the task. Set\nthis value if you want to be able to execute multiple runs of the task\nconcurrently.", + "required": false + }, + "inputs": { + "name": "inputs", + "type": "string", + "description": "Array for task to iterate on. 
This can be a JSON string or a reference to\nan array parameter.",
+        "required": false
+      },
+      "task": {
+        "name": "task",
+        "type": "Task",
+        "description": "Configuration for the task that will be run for each element in the array",
+        "required": false
+      }
+    }
+  },
+  "jobs.ForEachTaskErrorMessageStats": {
+    "name": "ForEachTaskErrorMessageStats",
+    "package": "jobs",
+    "description": "",
+    "fields": {
+      "count": {
+        "name": "count",
+        "type": "int",
+        "description": "Describes the count of such error message encountered during the\niterations.",
+        "required": false
+      },
+      "error_message": {
+        "name": "error_message",
+        "type": "string",
+        "description": "Describes the error message that occurred during the iterations.",
+        "required": false
+      },
+      "termination_category": {
+        "name": "termination_category",
+        "type": "string",
+        "description": "Describes the termination reason for the error message.",
+        "required": false
+      }
+    }
+  },
+  "jobs.ForEachTaskTaskRunStats": {
+    "name": "ForEachTaskTaskRunStats",
+    "package": "jobs",
+    "description": "",
+    "fields": {
+      "active_iterations": {
+        "name": "active_iterations",
+        "type": "int",
+        "description": "Describes the iteration runs having an active lifecycle state or an\nactive run sub state.",
+        "required": false
+      },
+      "completed_iterations": {
+        "name": "completed_iterations",
+        "type": "int",
+        "description": "Describes the number of failed and succeeded iteration runs.",
+        "required": false
+      },
+      "failed_iterations": {
+        "name": "failed_iterations",
+        "type": "int",
+        "description": "Describes the number of failed iteration runs.",
+        "required": false
+      },
+      "scheduled_iterations": {
+        "name": "scheduled_iterations",
+        "type": "int",
+        "description": "Describes the number of iteration runs that have been scheduled.",
+        "required": false
+      },
+      "succeeded_iterations": {
+        "name": "succeeded_iterations",
+        "type": "int",
+        "description": "Describes the number of succeeded iteration runs.",
+        "required": false
+      },
+      "total_iterations": {
+        "name": "total_iterations",
+        "type": "int",
+        "description": "Describes the length of the list of items to iterate over.",
+        "required": false
+      }
+    }
+  },
+  "jobs.GenAiComputeTask": {
+    "name": "GenAiComputeTask",
+    "package": "jobs",
+    "description": "",
+    "fields": {
+      "command": {
+        "name": "command",
+        "type": "string",
+        "description": "Command launcher to run the actual script, e.g. bash, python etc.",
+        "required": false
+      },
+      "compute": {
+        "name": "compute",
+        "type": "*ComputeConfig",
+        "description": "",
+        "required": false
+      },
+      "dl_runtime_image": {
+        "name": "dl_runtime_image",
+        "type": "string",
+        "description": "Runtime image",
+        "required": false
+      },
+      "mlflow_experiment_name": {
+        "name": "mlflow_experiment_name",
+        "type": "string",
+        "description": "Optional string containing the name of the MLflow experiment to log the\nrun to. If name is not found, backend will create the mlflow experiment\nusing the name.",
+        "required": false
+      },
+      "source": {
+        "name": "source",
+        "type": "Source",
+        "description": "Optional location type of the training script. When set to `WORKSPACE`,\nthe script will be retrieved from the local Databricks workspace. When\nset to `GIT`, the script will be retrieved from a Git repository defined\nin `git_source`. If the value is empty, the task will use `GIT` if\n`git_source` is defined and `WORKSPACE` otherwise. * `WORKSPACE`: Script\nis located in Databricks workspace. 
* `GIT`: Script is located in cloud\nGit provider.", + "required": false + }, + "training_script_path": { + "name": "training_script_path", + "type": "string", + "description": "The training script file path to be executed. Cloud file URIs (such as\ndbfs:/, s3:/, adls:/, gcs:/) and workspace paths are supported. For\npython files stored in the Databricks workspace, the path must be\nabsolute and begin with `/`. For files stored in a remote repository, the\npath must be relative. This field is required.", + "required": false + }, + "yaml_parameters": { + "name": "yaml_parameters", + "type": "string", + "description": "Optional string containing model parameters passed to the training script\nin yaml format. If present, then the content in yaml_parameters_file_path\nwill be ignored.", + "required": false + }, + "yaml_parameters_file_path": { + "name": "yaml_parameters_file_path", + "type": "string", + "description": "Optional path to a YAML file containing model parameters passed to the\ntraining script.", + "required": false + } + } + }, + "jobs.GetJobPermissionLevelsResponse": { + "name": "GetJobPermissionLevelsResponse", + "package": "jobs", + "description": "", + "fields": { + "permission_levels": { + "name": "permission_levels", + "type": "[]JobPermissionsDescription", + "description": "Specific permission levels", + "required": false + } + } + }, + "jobs.GetPolicyComplianceResponse": { + "name": "GetPolicyComplianceResponse", + "package": "jobs", + "description": "", + "fields": { + "is_compliant": { + "name": "is_compliant", + "type": "bool", + "description": "Whether the job is compliant with its policies or not. Jobs could be out\nof compliance if a policy they are using was updated after the job was\nlast edited and some of its job clusters no longer comply with their\nupdated policies.", + "required": false + }, + "violations": { + "name": "violations", + "type": "map[string]string", + "description": "An object containing key-value mappings representing the first 200 policy\nvalidation errors. The keys indicate the path where the policy validation\nerror is occurring. An identifier for the job cluster is prepended to the\npath. The values indicate an error message describing the policy\nvalidation error.", + "required": false + } + } + }, + "jobs.GitSnapshot": { + "name": "GitSnapshot", + "package": "jobs", + "description": "Read-only state of the remote repository at the time the job was run. This field is only included on job runs.", + "fields": { + "used_commit": { + "name": "used_commit", + "type": "string", + "description": "Commit that was used to execute the run. If git_branch was specified,\nthis points to the HEAD of the branch at the time of the run; if git_tag\nwas specified, this points to the commit the tag points to.", + "required": false + } + } + }, + "jobs.GitSource": { + "name": "GitSource", + "package": "jobs", + "description": "An optional specification for a remote Git repository containing the source code used by tasks. Version-controlled source code is supported by notebook, dbt, Python script, and SQL File tasks.\n\nIf `git_source` is set, these tasks retrieve the file from the remote repository by default. However, this behavior can be overridden by setting `source` to `WORKSPACE` on the task.\n\nNote: dbt and SQL File tasks support only version-controlled sources. 
If dbt or SQL File tasks are used, `git_source` must be defined on the job.", + "fields": { + "git_branch": { + "name": "git_branch", + "type": "string", + "description": "Name of the branch to be checked out and used by this job. This field\ncannot be specified in conjunction with git_tag or git_commit.", + "required": false + }, + "git_commit": { + "name": "git_commit", + "type": "string", + "description": "Commit to be checked out and used by this job. This field cannot be\nspecified in conjunction with git_branch or git_tag.", + "required": false + }, + "git_provider": { + "name": "git_provider", + "type": "GitProvider", + "description": "Unique identifier of the service used to host the Git repository. The\nvalue is case insensitive.", + "required": false + }, + "git_snapshot": { + "name": "git_snapshot", + "type": "*GitSnapshot", + "description": "Read-only state of the remote repository at the time the job was run. This field is only included on job runs.", + "required": false + }, + "git_tag": { + "name": "git_tag", + "type": "string", + "description": "Name of the tag to be checked out and used by this job. This field cannot\nbe specified in conjunction with git_branch or git_commit.", + "required": false + }, + "git_url": { + "name": "git_url", + "type": "string", + "description": "URL of the repository to be cloned by this job.", + "required": false + }, + "job_source": { + "name": "job_source", + "type": "*JobSource", + "description": "The source of the job specification in the remote repository when the job\nis source controlled.", + "required": false + } + } + }, + "jobs.Job": { + "name": "Job", + "package": "jobs", + "description": "Job was retrieved successfully.", + "fields": { + "created_time": { + "name": "created_time", + "type": "int64", + "description": "The time at which this job was created in epoch milliseconds\n(milliseconds since 1/1/1970 UTC).", + "required": false + }, + "creator_user_name": { + "name": "creator_user_name", + "type": "string", + "description": "The creator user name. This field won’t be included in the response if\nthe user has already been deleted.", + "required": false + }, + "effective_budget_policy_id": { + "name": "effective_budget_policy_id", + "type": "string", + "description": "The id of the budget policy used by this job for cost attribution\npurposes. This may be set through (in order of precedence): 1. Budget\nadmins through the account or workspace console 2. Jobs UI in the job\ndetails page and Jobs API using `budget_policy_id` 3. Inferred default\nbased on accessible budget policies of the run_as identity on job\ncreation or modification.", + "required": false + }, + "effective_usage_policy_id": { + "name": "effective_usage_policy_id", + "type": "string", + "description": "The id of the usage policy used by this job for cost attribution\npurposes.", + "required": false + }, + "has_more": { + "name": "has_more", + "type": "bool", + "description": "Indicates if the job has more array properties (`tasks`, `job_clusters`)\nthat are not shown. 
They can be accessed via :method:jobs/get endpoint.\nIt is only relevant for API 2.2 :method:jobs/list requests with\n`expand_tasks=true`.", + "required": false + }, + "job_id": { + "name": "job_id", + "type": "int64", + "description": "The canonical identifier for this job.", + "required": false + }, + "next_page_token": { + "name": "next_page_token", + "type": "string", + "description": "A token that can be used to list the next page of array properties.", + "required": false + }, + "run_as_user_name": { + "name": "run_as_user_name", + "type": "string", + "description": "The email of an active workspace user or the application ID of a service\nprincipal that the job runs as. This value can be changed by setting the\n`run_as` field when creating or updating a job.\n\nBy default, `run_as_user_name` is based on the current job settings and\nis set to the creator of the job if job access control is disabled or to\nthe user with the `is_owner` permission if job access control is enabled.", + "required": false + }, + "settings": { + "name": "settings", + "type": "*JobSettings", + "description": "Settings for this job and all of its runs. These settings can be updated\nusing the `resetJob` method.", + "required": false + }, + "trigger_state": { + "name": "trigger_state", + "type": "*TriggerStateProto", + "description": "State of the trigger associated with the job.", + "required": false + } + } + }, + "jobs.JobAccessControlRequest": { + "name": "JobAccessControlRequest", + "package": "jobs", + "description": "", + "fields": { + "group_name": { + "name": "group_name", + "type": "string", + "description": "name of the group", + "required": false + }, + "permission_level": { + "name": "permission_level", + "type": "JobPermissionLevel", + "description": "", + "required": false + }, + "service_principal_name": { + "name": "service_principal_name", + "type": "string", + "description": "application ID of a service principal", + "required": false + }, + "user_name": { + "name": "user_name", + "type": "string", + "description": "name of the user", + "required": false + } + } + }, + "jobs.JobAccessControlResponse": { + "name": "JobAccessControlResponse", + "package": "jobs", + "description": "", + "fields": { + "all_permissions": { + "name": "all_permissions", + "type": "[]JobPermission", + "description": "All permissions.", + "required": false + }, + "display_name": { + "name": "display_name", + "type": "string", + "description": "Display name of the user or service principal.", + "required": false + }, + "group_name": { + "name": "group_name", + "type": "string", + "description": "name of the group", + "required": false + }, + "service_principal_name": { + "name": "service_principal_name", + "type": "string", + "description": "Name of the service principal.", + "required": false + }, + "user_name": { + "name": "user_name", + "type": "string", + "description": "name of the user", + "required": false + } + } + }, + "jobs.JobCluster": { + "name": "JobCluster", + "package": "jobs", + "description": "", + "fields": { + "job_cluster_key": { + "name": "job_cluster_key", + "type": "string", + "description": "A unique name for the job cluster. This field is required and must be\nunique within the job. 
`JobTaskSettings` may refer to this field to\ndetermine which cluster to launch for the task execution.", + "required": false + }, + "new_cluster": { + "name": "new_cluster", + "type": "compute.ClusterSpec", + "description": "If new_cluster, a description of a cluster that is created for each task.", + "required": false + } + } + }, + "jobs.JobCompliance": { + "name": "JobCompliance", + "package": "jobs", + "description": "", + "fields": { + "is_compliant": { + "name": "is_compliant", + "type": "bool", + "description": "Whether this job is in compliance with the latest version of its policy.", + "required": false + }, + "job_id": { + "name": "job_id", + "type": "int64", + "description": "Canonical unique identifier for a job.", + "required": false + }, + "violations": { + "name": "violations", + "type": "map[string]string", + "description": "An object containing key-value mappings representing the first 200 policy\nvalidation errors. The keys indicate the path where the policy validation\nerror is occurring. An identifier for the job cluster is prepended to the\npath. The values indicate an error message describing the policy\nvalidation error.", + "required": false + } + } + }, + "jobs.JobDeployment": { + "name": "JobDeployment", + "package": "jobs", + "description": "", + "fields": { + "kind": { + "name": "kind", + "type": "JobDeploymentKind", + "description": "The kind of deployment that manages the job.\n\n* `BUNDLE`: The job is managed by Databricks Asset Bundle.", + "required": false + }, + "metadata_file_path": { + "name": "metadata_file_path", + "type": "string", + "description": "Path of the file that contains deployment metadata.", + "required": false + } + } + }, + "jobs.JobDeploymentKind": { + "name": "JobDeploymentKind", + "package": "jobs", + "description": "* `BUNDLE`: The job is managed by Databricks Asset Bundle.", + "fields": {} + }, + "jobs.JobEditMode": { + "name": "JobEditMode", + "package": "jobs", + "description": "Edit mode of the job.\n\n* `UI_LOCKED`: The job is in a locked UI state and cannot be modified.\n* `EDITABLE`: The job is in an editable state and can be modified.", + "fields": {} + }, + "jobs.JobEmailNotifications": { + "name": "JobEmailNotifications", + "package": "jobs", + "description": "", + "fields": { + "no_alert_for_skipped_runs": { + "name": "no_alert_for_skipped_runs", + "type": "bool", + "description": "If true, do not send email to recipients specified in `on_failure` if the\nrun is skipped. This field is `deprecated`. Please use the\n`notification_settings.no_alert_for_skipped_runs` field.", + "required": false + }, + "on_duration_warning_threshold_exceeded": { + "name": "on_duration_warning_threshold_exceeded", + "type": "[]string", + "description": "A list of email addresses to be notified when the duration of a run\nexceeds the threshold specified for the `RUN_DURATION_SECONDS` metric in\nthe `health` field. If no rule for the `RUN_DURATION_SECONDS` metric is\nspecified in the `health` field for the job, notifications are not sent.", + "required": false + }, + "on_failure": { + "name": "on_failure", + "type": "[]string", + "description": "A list of email addresses to be notified when a run unsuccessfully\ncompletes. A run is considered to have completed unsuccessfully if it\nends with an `INTERNAL_ERROR` `life_cycle_state` or a `FAILED`, or\n`TIMED_OUT` result_state. 
If this is not specified on job creation,\nreset, or update the list is empty, and notifications are not sent.", + "required": false + }, + "on_start": { + "name": "on_start", + "type": "[]string", + "description": "A list of email addresses to be notified when a run begins. If not\nspecified on job creation, reset, or update, the list is empty, and\nnotifications are not sent.", + "required": false + }, + "on_streaming_backlog_exceeded": { + "name": "on_streaming_backlog_exceeded", + "type": "[]string", + "description": "A list of email addresses to notify when any streaming backlog thresholds\nare exceeded for any stream. Streaming backlog thresholds can be set in\nthe `health` field using the following metrics:\n`STREAMING_BACKLOG_BYTES`, `STREAMING_BACKLOG_RECORDS`,\n`STREAMING_BACKLOG_SECONDS`, or `STREAMING_BACKLOG_FILES`. Alerting is\nbased on the 10-minute average of these metrics. If the issue persists,\nnotifications are resent every 30 minutes.", + "required": false + }, + "on_success": { + "name": "on_success", + "type": "[]string", + "description": "A list of email addresses to be notified when a run successfully\ncompletes. A run is considered to have completed successfully if it ends\nwith a `TERMINATED` `life_cycle_state` and a `SUCCESS` result_state. If\nnot specified on job creation, reset, or update, the list is empty, and\nnotifications are not sent.", + "required": false + } + } + }, + "jobs.JobEnvironment": { + "name": "JobEnvironment", + "package": "jobs", + "description": "", + "fields": { + "environment_key": { + "name": "environment_key", + "type": "string", + "description": "The key of an environment. It has to be unique within a job.", + "required": false + }, + "spec": { + "name": "spec", + "type": "*compute.Environment", + "description": "The environment entity used to preserve serverless environment side panel, jobs' environment for non-notebook task, and DLT's environment for classic and serverless pipelines.\nIn this minimal environment spec, only pip dependencies are supported.", + "required": false + } + } + }, + "jobs.JobNotificationSettings": { + "name": "JobNotificationSettings", + "package": "jobs", + "description": "", + "fields": { + "no_alert_for_canceled_runs": { + "name": "no_alert_for_canceled_runs", + "type": "bool", + "description": "If true, do not send notifications to recipients specified in\n`on_failure` if the run is canceled.", + "required": false + }, + "no_alert_for_skipped_runs": { + "name": "no_alert_for_skipped_runs", + "type": "bool", + "description": "If true, do not send notifications to recipients specified in\n`on_failure` if the run is skipped.", + "required": false + } + } + }, + "jobs.JobParameter": { + "name": "JobParameter", + "package": "jobs", + "description": "", + "fields": { + "default": { + "name": "default", + "type": "string", + "description": "The optional default value of the parameter", + "required": false + }, + "name": { + "name": "name", + "type": "string", + "description": "The name of the parameter", + "required": false + }, + "value": { + "name": "value", + "type": "string", + "description": "The value used in the run", + "required": false + } + } + }, + "jobs.JobParameterDefinition": { + "name": "JobParameterDefinition", + "package": "jobs", + "description": "", + "fields": { + "default": { + "name": "default", + "type": "string", + "description": "Default value of the parameter.", + "required": false + }, + "name": { + "name": "name", + "type": "string", + "description": "The name of the defined 
parameter. May only contain alphanumeric\ncharacters, `_`, `-`, and `.`", + "required": false + } + } + }, + "jobs.JobPermission": { + "name": "JobPermission", + "package": "jobs", + "description": "", + "fields": { + "inherited": { + "name": "inherited", + "type": "bool", + "description": "", + "required": false + }, + "inherited_from_object": { + "name": "inherited_from_object", + "type": "[]string", + "description": "", + "required": false + }, + "permission_level": { + "name": "permission_level", + "type": "JobPermissionLevel", + "description": "", + "required": false + } + } + }, + "jobs.JobPermissions": { + "name": "JobPermissions", + "package": "jobs", + "description": "", + "fields": { + "access_control_list": { + "name": "access_control_list", + "type": "[]JobAccessControlResponse", + "description": "", + "required": false + }, + "object_id": { + "name": "object_id", + "type": "string", + "description": "", + "required": false + }, + "object_type": { + "name": "object_type", + "type": "string", + "description": "", + "required": false + } + } + }, + "jobs.JobPermissionsDescription": { + "name": "JobPermissionsDescription", + "package": "jobs", + "description": "", + "fields": { + "description": { + "name": "description", + "type": "string", + "description": "", + "required": false + }, + "permission_level": { + "name": "permission_level", + "type": "JobPermissionLevel", + "description": "", + "required": false + } + } + }, + "jobs.JobPermissionsRequest": { + "name": "JobPermissionsRequest", + "package": "jobs", + "description": "", + "fields": { + "access_control_list": { + "name": "access_control_list", + "type": "[]JobAccessControlRequest", + "description": "", + "required": false + } + } + }, + "jobs.JobRunAs": { + "name": "JobRunAs", + "package": "jobs", + "description": "Write-only setting. Specifies the user or service principal that the job runs as. If not specified, the job runs as the user who created the job.\n\nEither `user_name` or `service_principal_name` should be specified. If not, an error is thrown.", + "fields": { + "group_name": { + "name": "group_name", + "type": "string", + "description": "Group name of an account group assigned to the workspace. Setting this\nfield requires being a member of the group.", + "required": false + }, + "service_principal_name": { + "name": "service_principal_name", + "type": "string", + "description": "Application ID of an active service principal. Setting this field\nrequires the `servicePrincipal/user` role.", + "required": false + }, + "user_name": { + "name": "user_name", + "type": "string", + "description": "The email of an active workspace user. Non-admin users can only set this\nfield to their own email.", + "required": false + } + } + }, + "jobs.JobSettings": { + "name": "JobSettings", + "package": "jobs", + "description": "", + "fields": { + "budget_policy_id": { + "name": "budget_policy_id", + "type": "string", + "description": "The id of the user specified budget policy to use for this job. If not\nspecified, a default budget policy may be applied when creating or\nmodifying the job. See `effective_budget_policy_id` for the budget policy\nused by this workload.", + "required": false + }, + "continuous": { + "name": "continuous", + "type": "*Continuous", + "description": "An optional continuous property for this job. The continuous property\nwill ensure that there is always one run executing. 
Only one of\n`schedule` and `continuous` can be used.", + "required": false + }, + "deployment": { + "name": "deployment", + "type": "*JobDeployment", + "description": "Deployment information for jobs managed by external sources.", + "required": false + }, + "description": { + "name": "description", + "type": "string", + "description": "An optional description for the job. The maximum length is 27700\ncharacters in UTF-8 encoding.", + "required": false + }, + "edit_mode": { + "name": "edit_mode", + "type": "JobEditMode", + "description": "Edit mode of the job.\n\n* `UI_LOCKED`: The job is in a locked UI state and cannot be modified. *\n`EDITABLE`: The job is in an editable state and can be modified.", + "required": false + }, + "email_notifications": { + "name": "email_notifications", + "type": "*JobEmailNotifications", + "description": "An optional set of email addresses that is notified when runs of this job\nbegin or complete as well as when this job is deleted.", + "required": false + }, + "environments": { + "name": "environments", + "type": "[]JobEnvironment", + "description": "A list of task execution environment specifications that can be\nreferenced by serverless tasks of this job. For serverless notebook\ntasks, if the environment_key is not specified, the notebook environment\nwill be used if present. If a jobs environment is specified, it will\noverride the notebook environment. For other serverless tasks, the task\nenvironment is required to be specified using environment_key in the task\nsettings.", + "required": false + }, + "format": { + "name": "format", + "type": "Format", + "description": "Used to tell what is the format of the job. This field is ignored in\nCreate/Update/Reset calls. When using the Jobs API 2.1 this value is\nalways set to `\"MULTI_TASK\"`.", + "required": false + }, + "git_source": { + "name": "git_source", + "type": "*GitSource", + "description": "An optional specification for a remote Git repository containing the\nsource code used by tasks. Version-controlled source code is supported by\nnotebook, dbt, Python script, and SQL File tasks.\n\nIf `git_source` is set, these tasks retrieve the file from the remote\nrepository by default. However, this behavior can be overridden by\nsetting `source` to `WORKSPACE` on the task.\n\nNote: dbt and SQL File tasks support only version-controlled sources. If\ndbt or SQL File tasks are used, `git_source` mu...", + "required": false + }, + "health": { + "name": "health", + "type": "*JobsHealthRules", + "description": "", + "required": false + }, + "job_clusters": { + "name": "job_clusters", + "type": "[]JobCluster", + "description": "A list of job cluster specifications that can be shared and reused by\ntasks of this job. Libraries cannot be declared in a shared job cluster.\nYou must declare dependent libraries in task settings.", + "required": false + }, + "max_concurrent_runs": { + "name": "max_concurrent_runs", + "type": "int", + "description": "An optional maximum allowed number of concurrent runs of the job. Set\nthis value if you want to be able to execute multiple runs of the same\njob concurrently. This is useful for example if you trigger your job on a\nfrequent schedule and want to allow consecutive runs to overlap with each\nother, or if you want to trigger multiple runs which differ by their\ninput parameters. This setting affects only new runs. 
For example,\nsuppose the job’s concurrency is 4 and there are 4 concurrent active\nr...", + "required": false + }, + "name": { + "name": "name", + "type": "string", + "description": "An optional name for the job. The maximum length is 4096 bytes in UTF-8\nencoding.", + "required": false + }, + "notification_settings": { + "name": "notification_settings", + "type": "*JobNotificationSettings", + "description": "Optional notification settings that are used when sending notifications\nto each of the `email_notifications` and `webhook_notifications` for this\njob.", + "required": false + }, + "parameters": { + "name": "parameters", + "type": "[]JobParameterDefinition", + "description": "Job-level parameter definitions", + "required": false + }, + "performance_target": { + "name": "performance_target", + "type": "PerformanceTarget", + "description": "The performance mode on a serverless job. This field determines the level\nof compute performance or cost-efficiency for the run. The performance\ntarget does not apply to tasks that run on Serverless GPU compute.\n\n* `STANDARD`: Enables cost-efficient execution of serverless workloads. *\n`PERFORMANCE_OPTIMIZED`: Prioritizes fast startup and execution times\nthrough rapid scaling and optimized cluster performance.", + "required": false + }, + "queue": { + "name": "queue", + "type": "*QueueSettings", + "description": "The queue settings of the job.", + "required": false + }, + "run_as": { + "name": "run_as", + "type": "*JobRunAs", + "description": "The user or service principal that the job runs as, if specified in the\nrequest. This field indicates the explicit configuration of `run_as` for\nthe job. To find the value in all cases, explicit or implicit, use\n`run_as_user_name`.", + "required": false + }, + "schedule": { + "name": "schedule", + "type": "*CronSchedule", + "description": "An optional periodic schedule for this job. The default behavior is that\nthe job only runs when triggered by clicking “Run Now” in the Jobs UI\nor sending an API request to `runNow`.", + "required": false + }, + "tags": { + "name": "tags", + "type": "map[string]string", + "description": "A map of tags associated with the job. These are forwarded to the cluster\nas cluster tags for jobs clusters, and are subject to the same\nlimitations as cluster tags. A maximum of 25 tags can be added to the\njob.", + "required": false + }, + "tasks": { + "name": "tasks", + "type": "[]Task", + "description": "A list of task specifications to be executed by this job. It supports up\nto 1000 elements in write endpoints (:method:jobs/create,\n:method:jobs/reset, :method:jobs/update, :method:jobs/submit). Read\nendpoints return only 100 tasks. If more than 100 tasks are available,\nyou can paginate through them using :method:jobs/get. Use the\n`next_page_token` field at the object root to determine if more results\nare available.", + "required": false + }, + "timeout_seconds": { + "name": "timeout_seconds", + "type": "int", + "description": "An optional timeout applied to each run of this job. A value of `0` means\nno timeout.", + "required": false + }, + "trigger": { + "name": "trigger", + "type": "*TriggerSettings", + "description": "A configuration to trigger a run when certain conditions are met. 
The\ndefault behavior is that the job runs only when triggered by clicking\n“Run Now” in the Jobs UI or sending an API request to `runNow`.", + "required": false + }, + "usage_policy_id": { + "name": "usage_policy_id", + "type": "string", + "description": "The id of the user specified usage policy to use for this job. If not\nspecified, a default usage policy may be applied when creating or\nmodifying the job. See `effective_usage_policy_id` for the usage policy\nused by this workload.", + "required": false + }, + "webhook_notifications": { + "name": "webhook_notifications", + "type": "*WebhookNotifications", + "description": "A collection of system notification IDs to notify when runs of this job\nbegin or complete.", + "required": false + } + } + }, + "jobs.JobSource": { + "name": "JobSource", + "package": "jobs", + "description": "The source of the job specification in the remote repository when the job is source controlled.", + "fields": { + "dirty_state": { + "name": "dirty_state", + "type": "JobSourceDirtyState", + "description": "Dirty state indicates the job is not fully synced with the job\nspecification in the remote repository.\n\nPossible values are: * `NOT_SYNCED`: The job is not yet synced with the\nremote job specification. Import the remote job specification from UI to\nmake the job fully synced. * `DISCONNECTED`: The job is temporary\ndisconnected from the remote job specification and is allowed for live\nedit. Import the remote job specification again from UI to make the job\nfully synced.", + "required": false + }, + "import_from_git_branch": { + "name": "import_from_git_branch", + "type": "string", + "description": "Name of the branch which the job is imported from.", + "required": false + }, + "job_config_path": { + "name": "job_config_path", + "type": "string", + "description": "Path of the job YAML file that contains the job specification.", + "required": false + } + } + }, + "jobs.JobSourceDirtyState": { + "name": "JobSourceDirtyState", + "package": "jobs", + "description": "Dirty state indicates the job is not fully synced with the job specification\nin the remote repository.\n\nPossible values are:\n* `NOT_SYNCED`: The job is not yet synced with the remote job specification. Import the remote job specification from UI to make the job fully synced.\n* `DISCONNECTED`: The job is temporary disconnected from the remote job specification and is allowed for live edit. Import the remote job specification again from UI to make the job fully synced.", + "fields": {} + }, + "jobs.JobsHealthMetric": { + "name": "JobsHealthMetric", + "package": "jobs", + "description": "Specifies the health metric that is being evaluated for a particular health rule.\n\n* `RUN_DURATION_SECONDS`: Expected total time for a run in seconds.\n* `STREAMING_BACKLOG_BYTES`: An estimate of the maximum bytes of data waiting to be consumed across all streams. This metric is in Public Preview.\n* `STREAMING_BACKLOG_RECORDS`: An estimate of the maximum offset lag across all streams. This metric is in Public Preview.\n* `STREAMING_BACKLOG_SECONDS`: An estimate of the maximum consumer delay across all streams. This metric is in Public Preview.\n* `STREAMING_BACKLOG_FILES`: An estimate of the maximum number of outstanding files across all streams. 
This metric is in Public Preview.", + "fields": {} + }, + "jobs.JobsHealthOperator": { + "name": "JobsHealthOperator", + "package": "jobs", + "description": "Specifies the operator used to compare the health metric value with the specified threshold.", + "fields": {} + }, + "jobs.JobsHealthRule": { + "name": "JobsHealthRule", + "package": "jobs", + "description": "", + "fields": { + "metric": { + "name": "metric", + "type": "JobsHealthMetric", + "description": "Specifies the health metric that is being evaluated for a particular health rule.\n\n* `RUN_DURATION_SECONDS`: Expected total time for a run in seconds.\n* `STREAMING_BACKLOG_BYTES`: An estimate of the maximum bytes of data waiting to be consumed across all streams. This metric is in Public Preview.\n* `STREAMING_BACKLOG_RECORDS`: An estimate of the maximum offset lag across all streams. This metric is in Public Preview.\n* `STREAMING_BACKLOG_SECONDS`: An estimate of the maximum consumer delay across all streams. This metric is in Public Preview.\n* `STREAMING_BACKLOG_FILES`: An estimate of the maximum number of outstanding files across all streams. This metric is in Public Preview.", + "required": false + }, + "op": { + "name": "op", + "type": "JobsHealthOperator", + "description": "Specifies the operator used to compare the health metric value with the specified threshold.", + "required": false + }, + "value": { + "name": "value", + "type": "int64", + "description": "Specifies the threshold value that the health metric should obey to\nsatisfy the health rule.", + "required": false + } + } + }, + "jobs.JobsHealthRules": { + "name": "JobsHealthRules", + "package": "jobs", + "description": "An optional set of health rules that can be defined for this job.", + "fields": { + "rules": { + "name": "rules", + "type": "[]JobsHealthRule", + "description": "", + "required": false + } + } + }, + "jobs.ListJobComplianceForPolicyResponse": { + "name": "ListJobComplianceForPolicyResponse", + "package": "jobs", + "description": "", + "fields": { + "jobs": { + "name": "jobs", + "type": "[]JobCompliance", + "description": "A list of jobs and their policy compliance statuses.", + "required": false + }, + "next_page_token": { + "name": "next_page_token", + "type": "string", + "description": "This field represents the pagination token to retrieve the next page of\nresults. If this field is not in the response, it means no further\nresults for the request.", + "required": false + }, + "prev_page_token": { + "name": "prev_page_token", + "type": "string", + "description": "This field represents the pagination token to retrieve the previous page\nof results. If this field is not in the response, it means no further\nresults for the request.", + "required": false + } + } + }, + "jobs.ListJobsResponse": { + "name": "ListJobsResponse", + "package": "jobs", + "description": "List of jobs was retrieved successfully.", + "fields": { + "has_more": { + "name": "has_more", + "type": "bool", + "description": "If true, additional jobs matching the provided filter are available for\nlisting.", + "required": false + }, + "jobs": { + "name": "jobs", + "type": "[]BaseJob", + "description": "The list of jobs. 
Only included in the response if there are jobs to\nlist.", + "required": false + }, + "next_page_token": { + "name": "next_page_token", + "type": "string", + "description": "A token that can be used to list the next page of jobs (if applicable).", + "required": false + }, + "prev_page_token": { + "name": "prev_page_token", + "type": "string", + "description": "A token that can be used to list the previous page of jobs (if\napplicable).", + "required": false + } + } + }, + "jobs.ListRunsResponse": { + "name": "ListRunsResponse", + "package": "jobs", + "description": "List of runs was retrieved successfully.", + "fields": { + "has_more": { + "name": "has_more", + "type": "bool", + "description": "If true, additional runs matching the provided filter are available for\nlisting.", + "required": false + }, + "next_page_token": { + "name": "next_page_token", + "type": "string", + "description": "A token that can be used to list the next page of runs (if applicable).", + "required": false + }, + "prev_page_token": { + "name": "prev_page_token", + "type": "string", + "description": "A token that can be used to list the previous page of runs (if\napplicable).", + "required": false + }, + "runs": { + "name": "runs", + "type": "[]BaseRun", + "description": "A list of runs, from most recently started to least. Only included in the\nresponse if there are runs to list.", + "required": false + } + } + }, + "jobs.ModelTriggerConfiguration": { + "name": "ModelTriggerConfiguration", + "package": "jobs", + "description": "", + "fields": { + "aliases": { + "name": "aliases", + "type": "[]string", + "description": "Aliases of the model versions to monitor. Can only be used in conjunction\nwith condition MODEL_ALIAS_SET.", + "required": false + }, + "condition": { + "name": "condition", + "type": "ModelTriggerConfigurationCondition", + "description": "The condition based on which to trigger a job run.", + "required": false + }, + "min_time_between_triggers_seconds": { + "name": "min_time_between_triggers_seconds", + "type": "int", + "description": "If set, the trigger starts a run only after the specified amount of time\nhas passed since the last time the trigger fired. The minimum allowed\nvalue is 60 seconds.", + "required": false + }, + "securable_name": { + "name": "securable_name", + "type": "string", + "description": "Name of the securable to monitor (\"mycatalog.myschema.mymodel\" in the\ncase of model-level triggers, \"mycatalog.myschema\" in the case of\nschema-level triggers) or empty in the case of metastore-level triggers.", + "required": false + }, + "wait_after_last_change_seconds": { + "name": "wait_after_last_change_seconds", + "type": "int", + "description": "If set, the trigger starts a run only after no model updates have\noccurred for the specified time and can be used to wait for a series of\nmodel updates before triggering a run. The minimum allowed value is 60\nseconds.", + "required": false + } + } + }, + "jobs.NotebookOutput": { + "name": "NotebookOutput", + "package": "jobs", + "description": "", + "fields": { + "result": { + "name": "result", + "type": "string", + "description": "The value passed to\n[dbutils.notebook.exit()](/notebooks/notebook-workflows.html#notebook-workflows-exit).\nDatabricks restricts this API to return the first 5 MB of the value. For\na larger result, your job can store the results in a cloud storage\nservice. 
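The jobs.ListJobsResponse and jobs.ListRunsResponse entries above carry the pagination contract (has_more, next_page_token, prev_page_token) that an agent needs when listing jobs or runs. As a minimal sketch, not a definitive implementation, of how these generated entries are meant to be consumed through the provider's Go API rather than by reading the raw JSON; the import path github.com/databricks/cli/experimental/aitools/lib/providers/sdkdocs is an assumption, since the module path is not visible in this hunk:

package main

import (
	"fmt"
	"log"

	// Assumed import path; adjust to this repository's module path.
	"github.com/databricks/cli/experimental/aitools/lib/providers/sdkdocs"
)

func main() {
	idx, err := sdkdocs.LoadIndex()
	if err != nil {
		log.Fatalf("loading embedded SDK docs index: %v", err)
	}

	// "jobs.ListJobsResponse" is one of the type entries embedded in this file.
	t := idx.GetType("jobs.ListJobsResponse")
	if t == nil {
		log.Fatal("jobs.ListJobsResponse not found in index")
	}

	// Surfaces the pagination fields documented above (has_more,
	// next_page_token, prev_page_token) together with their Go types.
	for name, f := range t.Fields {
		fmt.Printf("%-16s %-8s required=%v\n", name, f.Type, f.Required)
	}
}

The lookup key follows the "package.Type" convention used throughout this index, so the sketch relies only on LoadIndex and GetType from the provider.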
This field is absent if `dbutils.notebook.exit()` was never\ncalled.", + "required": false + }, + "truncated": { + "name": "truncated", + "type": "bool", + "description": "Whether or not the result was truncated.", + "required": false + } + } + }, + "jobs.NotebookTask": { + "name": "NotebookTask", + "package": "jobs", + "description": "", + "fields": { + "base_parameters": { + "name": "base_parameters", + "type": "map[string]string", + "description": "Base parameters to be used for each run of this job. If the run is\ninitiated by a call to :method:jobs/run Now with parameters specified,\nthe two parameters maps are merged. If the same key is specified in\n`base_parameters` and in `run-now`, the value from `run-now` is used. Use\n[Task parameter variables] to set parameters containing information about\njob runs.\n\nIf the notebook takes a parameter that is not specified in the job’s\n`base_parameters` or the `run-now` override parameters, the d...", + "required": false + }, + "notebook_path": { + "name": "notebook_path", + "type": "string", + "description": "The path of the notebook to be run in the Databricks workspace or remote\nrepository. For notebooks stored in the Databricks workspace, the path\nmust be absolute and begin with a slash. For notebooks stored in a remote\nrepository, the path must be relative. This field is required.", + "required": false + }, + "source": { + "name": "source", + "type": "Source", + "description": "Optional location type of the notebook. When set to `WORKSPACE`, the\nnotebook will be retrieved from the local Databricks workspace. When set\nto `GIT`, the notebook will be retrieved from a Git repository defined in\n`git_source`. If the value is empty, the task will use `GIT` if\n`git_source` is defined and `WORKSPACE` otherwise. * `WORKSPACE`:\nNotebook is located in Databricks workspace. * `GIT`: Notebook is located\nin cloud Git provider.", + "required": false + }, + "warehouse_id": { + "name": "warehouse_id", + "type": "string", + "description": "Optional `warehouse_id` to run the notebook on a SQL warehouse. 
Classic\nSQL warehouses are NOT supported, please use serverless or pro SQL\nwarehouses.\n\nNote that SQL warehouses only support SQL cells; if the notebook contains\nnon-SQL cells, the run will fail.", + "required": false + } + } + }, + "jobs.OutputSchemaInfo": { + "name": "OutputSchemaInfo", + "package": "jobs", + "description": "Stores the catalog name, schema name, and the output schema expiration time\nfor the clean room run.", + "fields": { + "catalog_name": { + "name": "catalog_name", + "type": "string", + "description": "", + "required": false + }, + "expiration_time": { + "name": "expiration_time", + "type": "int64", + "description": "The expiration time for the output schema as a Unix timestamp in\nmilliseconds.", + "required": false + }, + "schema_name": { + "name": "schema_name", + "type": "string", + "description": "", + "required": false + } + } + }, + "jobs.PerformanceTarget": { + "name": "PerformanceTarget", + "package": "jobs", + "description": "PerformanceTarget defines how performant (lower latency) or cost efficient the execution of run on serverless compute should be.\nThe performance mode on the job or pipeline should map to a performance setting that is passed to Cluster Manager\n(see cluster-common PerformanceTarget).", + "fields": {} + }, + "jobs.PeriodicTriggerConfiguration": { + "name": "PeriodicTriggerConfiguration", + "package": "jobs", + "description": "", + "fields": { + "interval": { + "name": "interval", + "type": "int", + "description": "The interval at which the trigger should run.", + "required": false + }, + "unit": { + "name": "unit", + "type": "PeriodicTriggerConfigurationTimeUnit", + "description": "The unit of time for the interval.", + "required": false + } + } + }, + "jobs.PipelineParams": { + "name": "PipelineParams", + "package": "jobs", + "description": "", + "fields": { + "full_refresh": { + "name": "full_refresh", + "type": "bool", + "description": "If true, triggers a full refresh on the delta live table.", + "required": false + } + } + }, + "jobs.PipelineTask": { + "name": "PipelineTask", + "package": "jobs", + "description": "", + "fields": { + "full_refresh": { + "name": "full_refresh", + "type": "bool", + "description": "If true, triggers a full refresh on the delta live table.", + "required": false + }, + "pipeline_id": { + "name": "pipeline_id", + "type": "string", + "description": "The full name of the pipeline task to execute.", + "required": false + } + } + }, + "jobs.PowerBiModel": { + "name": "PowerBiModel", + "package": "jobs", + "description": "", + "fields": { + "authentication_method": { + "name": "authentication_method", + "type": "AuthenticationMethod", + "description": "How the published Power BI model authenticates to Databricks", + "required": false + }, + "model_name": { + "name": "model_name", + "type": "string", + "description": "The name of the Power BI model", + "required": false + }, + "overwrite_existing": { + "name": "overwrite_existing", + "type": "bool", + "description": "Whether to overwrite existing Power BI models", + "required": false + }, + "storage_mode": { + "name": "storage_mode", + "type": "StorageMode", + "description": "The default storage mode of the Power BI model", + "required": false + }, + "workspace_name": { + "name": "workspace_name", + "type": "string", + "description": "The name of the Power BI workspace of the model", + "required": false + } + } + }, + "jobs.PowerBiTable": { + "name": "PowerBiTable", + "package": "jobs", + "description": "", + "fields": { + "catalog": { + "name": "catalog", 
+ "type": "string", + "description": "The catalog name in Databricks", + "required": false + }, + "name": { + "name": "name", + "type": "string", + "description": "The table name in Databricks", + "required": false + }, + "schema": { + "name": "schema", + "type": "string", + "description": "The schema name in Databricks", + "required": false + }, + "storage_mode": { + "name": "storage_mode", + "type": "StorageMode", + "description": "The Power BI storage mode of the table", + "required": false + } + } + }, + "jobs.PowerBiTask": { + "name": "PowerBiTask", + "package": "jobs", + "description": "", + "fields": { + "connection_resource_name": { + "name": "connection_resource_name", + "type": "string", + "description": "The resource name of the UC connection to authenticate from Databricks to\nPower BI", + "required": false + }, + "power_bi_model": { + "name": "power_bi_model", + "type": "*PowerBiModel", + "description": "The semantic model to update", + "required": false + }, + "refresh_after_update": { + "name": "refresh_after_update", + "type": "bool", + "description": "Whether the model should be refreshed after the update", + "required": false + }, + "tables": { + "name": "tables", + "type": "[]PowerBiTable", + "description": "The tables to be exported to Power BI", + "required": false + }, + "warehouse_id": { + "name": "warehouse_id", + "type": "string", + "description": "The SQL warehouse ID to use as the Power BI data source", + "required": false + } + } + }, + "jobs.PythonWheelTask": { + "name": "PythonWheelTask", + "package": "jobs", + "description": "", + "fields": { + "entry_point": { + "name": "entry_point", + "type": "string", + "description": "Named entry point to use, if it does not exist in the metadata of the\npackage it executes the function from the package directly using\n`$packageName.$entryPoint()`", + "required": false + }, + "named_parameters": { + "name": "named_parameters", + "type": "map[string]string", + "description": "Command-line parameters passed to Python wheel task in the form of\n`[\"--name=task\", \"--data=dbfs:/path/to/data.json\"]`. Leave it empty if\n`parameters` is not null.", + "required": false + }, + "package_name": { + "name": "package_name", + "type": "string", + "description": "Name of the package to execute", + "required": false + }, + "parameters": { + "name": "parameters", + "type": "[]string", + "description": "Command-line parameters passed to Python wheel task. Leave it empty if\n`named_parameters` is not null.", + "required": false + } + } + }, + "jobs.QueueDetails": { + "name": "QueueDetails", + "package": "jobs", + "description": "", + "fields": { + "code": { + "name": "code", + "type": "QueueDetailsCodeCode", + "description": "", + "required": false + }, + "message": { + "name": "message", + "type": "string", + "description": "A descriptive message with the queuing details. This field is\nunstructured, and its exact format is subject to change.", + "required": false + } + } + }, + "jobs.QueueSettings": { + "name": "QueueSettings", + "package": "jobs", + "description": "", + "fields": { + "enabled": { + "name": "enabled", + "type": "bool", + "description": "If true, enable queueing for the job. 
This is a required field.", + "required": false + } + } + }, + "jobs.RepairHistoryItem": { + "name": "RepairHistoryItem", + "package": "jobs", + "description": "", + "fields": { + "effective_performance_target": { + "name": "effective_performance_target", + "type": "PerformanceTarget", + "description": "The actual performance target used by the serverless run during\nexecution. This can differ from the client-set performance target on the\nrequest depending on whether the performance mode is supported by the job\ntype.\n\n* `STANDARD`: Enables cost-efficient execution of serverless workloads. *\n`PERFORMANCE_OPTIMIZED`: Prioritizes fast startup and execution times\nthrough rapid scaling and optimized cluster performance.", + "required": false + }, + "end_time": { + "name": "end_time", + "type": "int64", + "description": "The end time of the (repaired) run.", + "required": false + }, + "id": { + "name": "id", + "type": "int64", + "description": "The ID of the repair. Only returned for the items that represent a repair\nin `repair_history`.", + "required": false + }, + "start_time": { + "name": "start_time", + "type": "int64", + "description": "The start time of the (repaired) run.", + "required": false + }, + "state": { + "name": "state", + "type": "*RunState", + "description": "Deprecated. Please use the `status` field instead.", + "required": false + }, + "status": { + "name": "status", + "type": "*RunStatus", + "description": "", + "required": false + }, + "task_run_ids": { + "name": "task_run_ids", + "type": "[]int64", + "description": "The run IDs of the task runs that ran as part of this repair history\nitem.", + "required": false + }, + "type": { + "name": "type", + "type": "RepairHistoryItemType", + "description": "The repair history item type. Indicates whether a run is the original run\nor a repair run.", + "required": false + } + } + }, + "jobs.RepairRun": { + "name": "RepairRun", + "package": "jobs", + "description": "", + "fields": { + "dbt_commands": { + "name": "dbt_commands", + "type": "[]string", + "description": "An array of commands to execute for jobs with the dbt task, for example\n`\"dbt_commands\": [\"dbt deps\", \"dbt seed\", \"dbt deps\", \"dbt seed\", \"dbt\nrun\"]`\n\n⚠ **Deprecation note** Use [job parameters] to pass information down to\ntasks.\n\n[job parameters]: https://docs.databricks.com/jobs/job-parameters.html#job-parameter-pushdown", + "required": false + }, + "jar_params": { + "name": "jar_params", + "type": "[]string", + "description": "A list of parameters for jobs with Spark JAR tasks, for example\n`\"jar_params\": [\"john doe\", \"35\"]`. The parameters are used to invoke the\nmain function of the main class specified in the Spark JAR task. If not\nspecified upon `run-now`, it defaults to an empty list. jar_params cannot\nbe specified in conjunction with notebook_params. The JSON representation\nof this field (for example `{\"jar_params\":[\"john doe\",\"35\"]}`) cannot\nexceed 10,000 bytes.\n\n⚠ **Deprecation note** Use [job parameters] t...", + "required": false + }, + "job_parameters": { + "name": "job_parameters", + "type": "map[string]string", + "description": "Job-level parameters used in the run. for example `\"param\":\n\"overriding_val\"`", + "required": false + }, + "latest_repair_id": { + "name": "latest_repair_id", + "type": "int64", + "description": "The ID of the latest repair. 
This parameter is not required when\nrepairing a run for the first time, but must be provided on subsequent\nrequests to repair the same run.", + "required": false + }, + "notebook_params": { + "name": "notebook_params", + "type": "map[string]string", + "description": "A map from keys to values for jobs with notebook task, for example\n`\"notebook_params\": {\"name\": \"john doe\", \"age\": \"35\"}`. The map is passed\nto the notebook and is accessible through the [dbutils.widgets.get]\nfunction.\n\nIf not specified upon `run-now`, the triggered run uses the job’s base\nparameters.\n\nnotebook_params cannot be specified in conjunction with jar_params.\n\n⚠ **Deprecation note** Use [job parameters] to pass information down to\ntasks.\n\nThe JSON representation of this field (f...", + "required": false + }, + "performance_target": { + "name": "performance_target", + "type": "PerformanceTarget", + "description": "The performance mode on a serverless job. The performance target\ndetermines the level of compute performance or cost-efficiency for the\nrun. This field overrides the performance target defined on the job\nlevel.\n\n* `STANDARD`: Enables cost-efficient execution of serverless workloads. *\n`PERFORMANCE_OPTIMIZED`: Prioritizes fast startup and execution times\nthrough rapid scaling and optimized cluster performance.", + "required": false + }, + "pipeline_params": { + "name": "pipeline_params", + "type": "*PipelineParams", + "description": "Controls whether the pipeline should perform a full refresh", + "required": false + }, + "python_named_params": { + "name": "python_named_params", + "type": "map[string]string", + "description": "", + "required": false + }, + "python_params": { + "name": "python_params", + "type": "[]string", + "description": "A list of parameters for jobs with Python tasks, for example\n`\"python_params\": [\"john doe\", \"35\"]`. The parameters are passed to\nPython file as command-line parameters. If specified upon `run-now`, it\nwould overwrite the parameters specified in job setting. The JSON\nrepresentation of this field (for example `{\"python_params\":[\"john\ndoe\",\"35\"]}`) cannot exceed 10,000 bytes.\n\n⚠ **Deprecation note** Use [job parameters] to pass information down to\ntasks.\n\nImportant\n\nThese parameters accept onl...", + "required": false + }, + "rerun_all_failed_tasks": { + "name": "rerun_all_failed_tasks", + "type": "bool", + "description": "If true, repair all failed tasks. Only one of `rerun_tasks` or\n`rerun_all_failed_tasks` can be used.", + "required": false + }, + "rerun_dependent_tasks": { + "name": "rerun_dependent_tasks", + "type": "bool", + "description": "If true, repair all tasks that depend on the tasks in `rerun_tasks`, even\nif they were previously successful. Can be also used in combination with\n`rerun_all_failed_tasks`.", + "required": false + }, + "rerun_tasks": { + "name": "rerun_tasks", + "type": "[]string", + "description": "The task keys of the task runs to repair.", + "required": false + }, + "run_id": { + "name": "run_id", + "type": "int64", + "description": "The job run ID of the run to repair. The run must not be in progress.", + "required": false + }, + "spark_submit_params": { + "name": "spark_submit_params", + "type": "[]string", + "description": "A list of parameters for jobs with spark submit task, for example\n`\"spark_submit_params\": [\"--class\",\n\"org.apache.spark.examples.SparkPi\"]`. The parameters are passed to\nspark-submit script as command-line parameters. 
If specified upon\n`run-now`, it would overwrite the parameters specified in job setting.\nThe JSON representation of this field (for example\n`{\"python_params\":[\"john doe\",\"35\"]}`) cannot exceed 10,000 bytes.\n\n⚠ **Deprecation note** Use [job parameters] to pass information down ...", + "required": false + }, + "sql_params": { + "name": "sql_params", + "type": "map[string]string", + "description": "A map from keys to values for jobs with SQL task, for example\n`\"sql_params\": {\"name\": \"john doe\", \"age\": \"35\"}`. The SQL alert task\ndoes not support custom parameters.\n\n⚠ **Deprecation note** Use [job parameters] to pass information down to\ntasks.\n\n[job parameters]: https://docs.databricks.com/jobs/job-parameters.html#job-parameter-pushdown", + "required": false + } + } + }, + "jobs.RepairRunResponse": { + "name": "RepairRunResponse", + "package": "jobs", + "description": "Run repair was initiated.", + "fields": { + "repair_id": { + "name": "repair_id", + "type": "int64", + "description": "The ID of the repair. Must be provided in subsequent repairs using the\n`latest_repair_id` field to ensure sequential repairs.", + "required": false + } + } + }, + "jobs.ResetJob": { + "name": "ResetJob", + "package": "jobs", + "description": "", + "fields": { + "job_id": { + "name": "job_id", + "type": "int64", + "description": "The canonical identifier of the job to reset. This field is required.", + "required": false + }, + "new_settings": { + "name": "new_settings", + "type": "JobSettings", + "description": "The new settings of the job. These settings completely replace the old\nsettings.\n\nChanges to the field `JobBaseSettings.timeout_seconds` are applied to\nactive runs. Changes to other fields are applied to future runs only.", + "required": false + } + } + }, + "jobs.ResolvedConditionTaskValues": { + "name": "ResolvedConditionTaskValues", + "package": "jobs", + "description": "", + "fields": { + "left": { + "name": "left", + "type": "string", + "description": "", + "required": false + }, + "right": { + "name": "right", + "type": "string", + "description": "", + "required": false + } + } + }, + "jobs.ResolvedDbtTaskValues": { + "name": "ResolvedDbtTaskValues", + "package": "jobs", + "description": "", + "fields": { + "commands": { + "name": "commands", + "type": "[]string", + "description": "", + "required": false + } + } + }, + "jobs.ResolvedNotebookTaskValues": { + "name": "ResolvedNotebookTaskValues", + "package": "jobs", + "description": "", + "fields": { + "base_parameters": { + "name": "base_parameters", + "type": "map[string]string", + "description": "", + "required": false + } + } + }, + "jobs.ResolvedParamPairValues": { + "name": "ResolvedParamPairValues", + "package": "jobs", + "description": "", + "fields": { + "parameters": { + "name": "parameters", + "type": "map[string]string", + "description": "", + "required": false + } + } + }, + "jobs.ResolvedPythonWheelTaskValues": { + "name": "ResolvedPythonWheelTaskValues", + "package": "jobs", + "description": "", + "fields": { + "named_parameters": { + "name": "named_parameters", + "type": "map[string]string", + "description": "", + "required": false + }, + "parameters": { + "name": "parameters", + "type": "[]string", + "description": "", + "required": false + } + } + }, + "jobs.ResolvedRunJobTaskValues": { + "name": "ResolvedRunJobTaskValues", + "package": "jobs", + "description": "", + "fields": { + "job_parameters": { + "name": "job_parameters", + "type": "map[string]string", + "description": "", + "required": false + 
}, + "parameters": { + "name": "parameters", + "type": "map[string]string", + "description": "", + "required": false + } + } + }, + "jobs.ResolvedStringParamsValues": { + "name": "ResolvedStringParamsValues", + "package": "jobs", + "description": "", + "fields": { + "parameters": { + "name": "parameters", + "type": "[]string", + "description": "", + "required": false + } + } + }, + "jobs.ResolvedValues": { + "name": "ResolvedValues", + "package": "jobs", + "description": "", + "fields": { + "condition_task": { + "name": "condition_task", + "type": "*ResolvedConditionTaskValues", + "description": "", + "required": false + }, + "dbt_task": { + "name": "dbt_task", + "type": "*ResolvedDbtTaskValues", + "description": "", + "required": false + }, + "notebook_task": { + "name": "notebook_task", + "type": "*ResolvedNotebookTaskValues", + "description": "", + "required": false + }, + "python_wheel_task": { + "name": "python_wheel_task", + "type": "*ResolvedPythonWheelTaskValues", + "description": "", + "required": false + }, + "run_job_task": { + "name": "run_job_task", + "type": "*ResolvedRunJobTaskValues", + "description": "", + "required": false + }, + "simulation_task": { + "name": "simulation_task", + "type": "*ResolvedParamPairValues", + "description": "", + "required": false + }, + "spark_jar_task": { + "name": "spark_jar_task", + "type": "*ResolvedStringParamsValues", + "description": "", + "required": false + }, + "spark_python_task": { + "name": "spark_python_task", + "type": "*ResolvedStringParamsValues", + "description": "", + "required": false + }, + "spark_submit_task": { + "name": "spark_submit_task", + "type": "*ResolvedStringParamsValues", + "description": "", + "required": false + }, + "sql_task": { + "name": "sql_task", + "type": "*ResolvedParamPairValues", + "description": "", + "required": false + } + } + }, + "jobs.Run": { + "name": "Run", + "package": "jobs", + "description": "Run was retrieved successfully", + "fields": { + "attempt_number": { + "name": "attempt_number", + "type": "int", + "description": "The sequence number of this run attempt for a triggered job run. The\ninitial attempt of a run has an attempt_number of 0. If the initial run\nattempt fails, and the job has a retry policy (`max_retries` \u003e 0),\nsubsequent runs are created with an `original_attempt_run_id` of the\noriginal attempt’s ID and an incrementing `attempt_number`. Runs are\nretried only until they succeed, and the maximum `attempt_number` is the\nsame as the `max_retries` value for the job.", + "required": false + }, + "cleanup_duration": { + "name": "cleanup_duration", + "type": "int64", + "description": "The time in milliseconds it took to terminate the cluster and clean up\nany associated artifacts. The duration of a task run is the sum of the\n`setup_duration`, `execution_duration`, and the `cleanup_duration`. The\n`cleanup_duration` field is set to 0 for multitask job runs. The total\nduration of a multitask job run is the value of the `run_duration` field.", + "required": false + }, + "cluster_instance": { + "name": "cluster_instance", + "type": "*ClusterInstance", + "description": "The cluster used for this run. 
If the run is specified to use a new\ncluster, this field is set once the Jobs service has requested a cluster\nfor the run.", + "required": false + }, + "cluster_spec": { + "name": "cluster_spec", + "type": "*ClusterSpec", + "description": "A snapshot of the job’s cluster specification when this run was\ncreated.", + "required": false + }, + "creator_user_name": { + "name": "creator_user_name", + "type": "string", + "description": "The creator user name. This field won’t be included in the response if\nthe user has already been deleted.", + "required": false + }, + "description": { + "name": "description", + "type": "string", + "description": "Description of the run", + "required": false + }, + "effective_performance_target": { + "name": "effective_performance_target", + "type": "PerformanceTarget", + "description": "The actual performance target used by the serverless run during\nexecution. This can differ from the client-set performance target on the\nrequest depending on whether the performance mode is supported by the job\ntype.\n\n* `STANDARD`: Enables cost-efficient execution of serverless workloads. *\n`PERFORMANCE_OPTIMIZED`: Prioritizes fast startup and execution times\nthrough rapid scaling and optimized cluster performance.", + "required": false + }, + "effective_usage_policy_id": { + "name": "effective_usage_policy_id", + "type": "string", + "description": "The id of the usage policy used by this run for cost attribution\npurposes.", + "required": false + }, + "end_time": { + "name": "end_time", + "type": "int64", + "description": "The time at which this run ended in epoch milliseconds (milliseconds\nsince 1/1/1970 UTC). This field is set to 0 if the job is still running.", + "required": false + }, + "execution_duration": { + "name": "execution_duration", + "type": "int64", + "description": "The time in milliseconds it took to execute the commands in the JAR or\nnotebook until they completed, failed, timed out, were cancelled, or\nencountered an unexpected error. The duration of a task run is the sum of\nthe `setup_duration`, `execution_duration`, and the `cleanup_duration`.\nThe `execution_duration` field is set to 0 for multitask job runs. The\ntotal duration of a multitask job run is the value of the `run_duration`\nfield.", + "required": false + }, + "git_source": { + "name": "git_source", + "type": "*GitSource", + "description": "An optional specification for a remote Git repository containing the\nsource code used by tasks. Version-controlled source code is supported by\nnotebook, dbt, Python script, and SQL File tasks.\n\nIf `git_source` is set, these tasks retrieve the file from the remote\nrepository by default. However, this behavior can be overridden by\nsetting `source` to `WORKSPACE` on the task.\n\nNote: dbt and SQL File tasks support only version-controlled sources. If\ndbt or SQL File tasks are used, `git_source` mu...", + "required": false + }, + "has_more": { + "name": "has_more", + "type": "bool", + "description": "Indicates if the run has more array properties (`tasks`, `job_clusters`)\nthat are not shown. They can be accessed via :method:jobs/getrun\nendpoint. It is only relevant for API 2.2 :method:jobs/listruns requests\nwith `expand_tasks=true`.", + "required": false + }, + "iterations": { + "name": "iterations", + "type": "[]RunTask", + "description": "Only populated by for-each iterations. 
The parent for-each task is\nlocated in tasks array.", + "required": false + }, + "job_clusters": { + "name": "job_clusters", + "type": "[]JobCluster", + "description": "A list of job cluster specifications that can be shared and reused by\ntasks of this job. Libraries cannot be declared in a shared job cluster.\nYou must declare dependent libraries in task settings. If more than 100\njob clusters are available, you can paginate through them using\n:method:jobs/getrun.", + "required": false + }, + "job_id": { + "name": "job_id", + "type": "int64", + "description": "The canonical identifier of the job that contains this run.", + "required": false + }, + "job_parameters": { + "name": "job_parameters", + "type": "[]JobParameter", + "description": "Job-level parameters used in the run", + "required": false + }, + "job_run_id": { + "name": "job_run_id", + "type": "int64", + "description": "ID of the job run that this run belongs to. For legacy and single-task\njob runs the field is populated with the job run ID. For task runs, the\nfield is populated with the ID of the job run that the task run belongs\nto.", + "required": false + }, + "next_page_token": { + "name": "next_page_token", + "type": "string", + "description": "A token that can be used to list the next page of array properties.", + "required": false + }, + "number_in_job": { + "name": "number_in_job", + "type": "int64", + "description": "A unique identifier for this job run. This is set to the same value as\n`run_id`.", + "required": false + }, + "original_attempt_run_id": { + "name": "original_attempt_run_id", + "type": "int64", + "description": "If this run is a retry of a prior run attempt, this field contains the\nrun_id of the original attempt; otherwise, it is the same as the run_id.", + "required": false + }, + "overriding_parameters": { + "name": "overriding_parameters", + "type": "*RunParameters", + "description": "The parameters used for this run.", + "required": false + }, + "queue_duration": { + "name": "queue_duration", + "type": "int64", + "description": "The time in milliseconds that the run has spent in the queue.", + "required": false + }, + "repair_history": { + "name": "repair_history", + "type": "[]RepairHistoryItem", + "description": "The repair history of the run.", + "required": false + }, + "run_duration": { + "name": "run_duration", + "type": "int64", + "description": "The time in milliseconds it took the job run and all of its repairs to\nfinish.", + "required": false + }, + "run_id": { + "name": "run_id", + "type": "int64", + "description": "The canonical identifier of the run. This ID is unique across all runs of\nall jobs.", + "required": false + }, + "run_name": { + "name": "run_name", + "type": "string", + "description": "An optional name for the run. The maximum length is 4096 bytes in UTF-8\nencoding.", + "required": false + }, + "run_page_url": { + "name": "run_page_url", + "type": "string", + "description": "The URL to the detail page of the run.", + "required": false + }, + "run_type": { + "name": "run_type", + "type": "RunType", + "description": "", + "required": false + }, + "schedule": { + "name": "schedule", + "type": "*CronSchedule", + "description": "The cron schedule that triggered this run if it was triggered by the\nperiodic scheduler.", + "required": false + }, + "setup_duration": { + "name": "setup_duration", + "type": "int64", + "description": "The time in milliseconds it took to set up the cluster. 
For runs that run\non new clusters this is the cluster creation time, for runs that run on\nexisting clusters this time should be very short. The duration of a task\nrun is the sum of the `setup_duration`, `execution_duration`, and the\n`cleanup_duration`. The `setup_duration` field is set to 0 for multitask\njob runs. The total duration of a multitask job run is the value of the\n`run_duration` field.", + "required": false + }, + "start_time": { + "name": "start_time", + "type": "int64", + "description": "The time at which this run was started in epoch milliseconds\n(milliseconds since 1/1/1970 UTC). This may not be the time when the job\ntask starts executing, for example, if the job is scheduled to run on a\nnew cluster, this is the time the cluster creation call is issued.", + "required": false + }, + "state": { + "name": "state", + "type": "*RunState", + "description": "Deprecated. Please use the `status` field instead.", + "required": false + }, + "status": { + "name": "status", + "type": "*RunStatus", + "description": "", + "required": false + }, + "tasks": { + "name": "tasks", + "type": "[]RunTask", + "description": "The list of tasks performed by the run. Each task has its own `run_id`\nwhich you can use to call `JobsGetOutput` to retrieve the run resutls. If\nmore than 100 tasks are available, you can paginate through them using\n:method:jobs/getrun. Use the `next_page_token` field at the object root\nto determine if more results are available.", + "required": false + }, + "trigger": { + "name": "trigger", + "type": "TriggerType", + "description": "", + "required": false + }, + "trigger_info": { + "name": "trigger_info", + "type": "*TriggerInfo", + "description": "", + "required": false + } + } + }, + "jobs.RunConditionTask": { + "name": "RunConditionTask", + "package": "jobs", + "description": "", + "fields": { + "left": { + "name": "left", + "type": "string", + "description": "The left operand of the condition task. Can be either a string value or a\njob state or parameter reference.", + "required": false + }, + "op": { + "name": "op", + "type": "ConditionTaskOp", + "description": "* `EQUAL_TO`, `NOT_EQUAL` operators perform string comparison of their\noperands. This means that `“12.0” == “12”` will evaluate to\n`false`. * `GREATER_THAN`, `GREATER_THAN_OR_EQUAL`, `LESS_THAN`,\n`LESS_THAN_OR_EQUAL` operators perform numeric comparison of their\noperands. `“12.0” \u003e= “12”` will evaluate to `true`, `“10.0”\n\u003e= “12”` will evaluate to `false`.\n\nThe boolean comparison to task values can be implemented with operators\n`EQUAL_TO`, `NOT_EQUAL`. If a task value w...", + "required": false + }, + "outcome": { + "name": "outcome", + "type": "string", + "description": "The condition expression evaluation result. Filled in if the task was\nsuccessfully completed. Can be `\"true\"` or `\"false\"`", + "required": false + }, + "right": { + "name": "right", + "type": "string", + "description": "The right operand of the condition task. Can be either a string value or\na job state or parameter reference.", + "required": false + } + } + }, + "jobs.RunForEachTask": { + "name": "RunForEachTask", + "package": "jobs", + "description": "", + "fields": { + "concurrency": { + "name": "concurrency", + "type": "int", + "description": "An optional maximum allowed number of concurrent runs of the task. 
Set\nthis value if you want to be able to execute multiple runs of the task\nconcurrently.", + "required": false + }, + "inputs": { + "name": "inputs", + "type": "string", + "description": "Array for task to iterate on. This can be a JSON string or a reference to\nan array parameter.", + "required": false + }, + "stats": { + "name": "stats", + "type": "*ForEachStats", + "description": "Read only field. Populated for GetRun and ListRuns RPC calls and stores\nthe execution stats of an For each task", + "required": false + }, + "task": { + "name": "task", + "type": "Task", + "description": "Configuration for the task that will be run for each element in the array", + "required": false + } + } + }, + "jobs.RunIf": { + "name": "RunIf", + "package": "jobs", + "description": "An optional value indicating the condition that determines whether the task should be run once its dependencies have been completed. When omitted, defaults to `ALL_SUCCESS`.\n\nPossible values are:\n* `ALL_SUCCESS`: All dependencies have executed and succeeded\n* `AT_LEAST_ONE_SUCCESS`: At least one dependency has succeeded\n* `NONE_FAILED`: None of the dependencies have failed and at least one was executed\n* `ALL_DONE`: All dependencies have been completed\n* `AT_LEAST_ONE_FAILED`: At least one dependency failed\n* `ALL_FAILED`: ALl dependencies have failed", + "fields": {} + }, + "jobs.RunJobOutput": { + "name": "RunJobOutput", + "package": "jobs", + "description": "", + "fields": { + "run_id": { + "name": "run_id", + "type": "int64", + "description": "The run id of the triggered job run", + "required": false + } + } + }, + "jobs.RunJobTask": { + "name": "RunJobTask", + "package": "jobs", + "description": "", + "fields": { + "dbt_commands": { + "name": "dbt_commands", + "type": "[]string", + "description": "An array of commands to execute for jobs with the dbt task, for example\n`\"dbt_commands\": [\"dbt deps\", \"dbt seed\", \"dbt deps\", \"dbt seed\", \"dbt\nrun\"]`\n\n⚠ **Deprecation note** Use [job parameters] to pass information down to\ntasks.\n\n[job parameters]: https://docs.databricks.com/jobs/job-parameters.html#job-parameter-pushdown", + "required": false + }, + "jar_params": { + "name": "jar_params", + "type": "[]string", + "description": "A list of parameters for jobs with Spark JAR tasks, for example\n`\"jar_params\": [\"john doe\", \"35\"]`. The parameters are used to invoke the\nmain function of the main class specified in the Spark JAR task. If not\nspecified upon `run-now`, it defaults to an empty list. jar_params cannot\nbe specified in conjunction with notebook_params. The JSON representation\nof this field (for example `{\"jar_params\":[\"john doe\",\"35\"]}`) cannot\nexceed 10,000 bytes.\n\n⚠ **Deprecation note** Use [job parameters] t...", + "required": false + }, + "job_id": { + "name": "job_id", + "type": "int64", + "description": "ID of the job to trigger.", + "required": false + }, + "job_parameters": { + "name": "job_parameters", + "type": "map[string]string", + "description": "Job-level parameters used to trigger the job.", + "required": false + }, + "notebook_params": { + "name": "notebook_params", + "type": "map[string]string", + "description": "A map from keys to values for jobs with notebook task, for example\n`\"notebook_params\": {\"name\": \"john doe\", \"age\": \"35\"}`. 
The map is passed\nto the notebook and is accessible through the [dbutils.widgets.get]\nfunction.\n\nIf not specified upon `run-now`, the triggered run uses the job’s base\nparameters.\n\nnotebook_params cannot be specified in conjunction with jar_params.\n\n⚠ **Deprecation note** Use [job parameters] to pass information down to\ntasks.\n\nThe JSON representation of this field (f...", + "required": false + }, + "pipeline_params": { + "name": "pipeline_params", + "type": "*PipelineParams", + "description": "Controls whether the pipeline should perform a full refresh", + "required": false + }, + "python_named_params": { + "name": "python_named_params", + "type": "map[string]string", + "description": "", + "required": false + }, + "python_params": { + "name": "python_params", + "type": "[]string", + "description": "A list of parameters for jobs with Python tasks, for example\n`\"python_params\": [\"john doe\", \"35\"]`. The parameters are passed to\nPython file as command-line parameters. If specified upon `run-now`, it\nwould overwrite the parameters specified in job setting. The JSON\nrepresentation of this field (for example `{\"python_params\":[\"john\ndoe\",\"35\"]}`) cannot exceed 10,000 bytes.\n\n⚠ **Deprecation note** Use [job parameters] to pass information down to\ntasks.\n\nImportant\n\nThese parameters accept onl...", + "required": false + }, + "spark_submit_params": { + "name": "spark_submit_params", + "type": "[]string", + "description": "A list of parameters for jobs with spark submit task, for example\n`\"spark_submit_params\": [\"--class\",\n\"org.apache.spark.examples.SparkPi\"]`. The parameters are passed to\nspark-submit script as command-line parameters. If specified upon\n`run-now`, it would overwrite the parameters specified in job setting.\nThe JSON representation of this field (for example\n`{\"python_params\":[\"john doe\",\"35\"]}`) cannot exceed 10,000 bytes.\n\n⚠ **Deprecation note** Use [job parameters] to pass information down ...", + "required": false + }, + "sql_params": { + "name": "sql_params", + "type": "map[string]string", + "description": "A map from keys to values for jobs with SQL task, for example\n`\"sql_params\": {\"name\": \"john doe\", \"age\": \"35\"}`. The SQL alert task\ndoes not support custom parameters.\n\n⚠ **Deprecation note** Use [job parameters] to pass information down to\ntasks.\n\n[job parameters]: https://docs.databricks.com/jobs/job-parameters.html#job-parameter-pushdown", + "required": false + } + } + }, + "jobs.RunNow": { + "name": "RunNow", + "package": "jobs", + "description": "", + "fields": { + "dbt_commands": { + "name": "dbt_commands", + "type": "[]string", + "description": "An array of commands to execute for jobs with the dbt task, for example\n`\"dbt_commands\": [\"dbt deps\", \"dbt seed\", \"dbt deps\", \"dbt seed\", \"dbt\nrun\"]`\n\n⚠ **Deprecation note** Use [job parameters] to pass information down to\ntasks.\n\n[job parameters]: https://docs.databricks.com/jobs/job-parameters.html#job-parameter-pushdown", + "required": false + }, + "idempotency_token": { + "name": "idempotency_token", + "type": "string", + "description": "An optional token to guarantee the idempotency of job run requests. If a\nrun with the provided token already exists, the request does not create a\nnew run but returns the ID of the existing run instead. If a run with the\nprovided token is deleted, an error is returned.\n\nIf you specify the idempotency token, upon failure you can retry until\nthe request succeeds. 
Databricks guarantees that exactly one run is\nlaunched with that idempotency token.\n\nThis token must have at most 64 characters.\n\nFor...", + "required": false + }, + "jar_params": { + "name": "jar_params", + "type": "[]string", + "description": "A list of parameters for jobs with Spark JAR tasks, for example\n`\"jar_params\": [\"john doe\", \"35\"]`. The parameters are used to invoke the\nmain function of the main class specified in the Spark JAR task. If not\nspecified upon `run-now`, it defaults to an empty list. jar_params cannot\nbe specified in conjunction with notebook_params. The JSON representation\nof this field (for example `{\"jar_params\":[\"john doe\",\"35\"]}`) cannot\nexceed 10,000 bytes.\n\n⚠ **Deprecation note** Use [job parameters] t...", + "required": false + }, + "job_id": { + "name": "job_id", + "type": "int64", + "description": "The ID of the job to be executed", + "required": false + }, + "job_parameters": { + "name": "job_parameters", + "type": "map[string]string", + "description": "Job-level parameters used in the run. for example `\"param\":\n\"overriding_val\"`", + "required": false + }, + "notebook_params": { + "name": "notebook_params", + "type": "map[string]string", + "description": "A map from keys to values for jobs with notebook task, for example\n`\"notebook_params\": {\"name\": \"john doe\", \"age\": \"35\"}`. The map is passed\nto the notebook and is accessible through the [dbutils.widgets.get]\nfunction.\n\nIf not specified upon `run-now`, the triggered run uses the job’s base\nparameters.\n\nnotebook_params cannot be specified in conjunction with jar_params.\n\n⚠ **Deprecation note** Use [job parameters] to pass information down to\ntasks.\n\nThe JSON representation of this field (f...", + "required": false + }, + "only": { + "name": "only", + "type": "[]string", + "description": "A list of task keys to run inside of the job. If this field is not\nprovided, all tasks in the job will be run.", + "required": false + }, + "performance_target": { + "name": "performance_target", + "type": "PerformanceTarget", + "description": "The performance mode on a serverless job. The performance target\ndetermines the level of compute performance or cost-efficiency for the\nrun. This field overrides the performance target defined on the job\nlevel.\n\n* `STANDARD`: Enables cost-efficient execution of serverless workloads. *\n`PERFORMANCE_OPTIMIZED`: Prioritizes fast startup and execution times\nthrough rapid scaling and optimized cluster performance.", + "required": false + }, + "pipeline_params": { + "name": "pipeline_params", + "type": "*PipelineParams", + "description": "Controls whether the pipeline should perform a full refresh", + "required": false + }, + "python_named_params": { + "name": "python_named_params", + "type": "map[string]string", + "description": "", + "required": false + }, + "python_params": { + "name": "python_params", + "type": "[]string", + "description": "A list of parameters for jobs with Python tasks, for example\n`\"python_params\": [\"john doe\", \"35\"]`. The parameters are passed to\nPython file as command-line parameters. If specified upon `run-now`, it\nwould overwrite the parameters specified in job setting. 
The JSON\nrepresentation of this field (for example `{\"python_params\":[\"john\ndoe\",\"35\"]}`) cannot exceed 10,000 bytes.\n\n⚠ **Deprecation note** Use [job parameters] to pass information down to\ntasks.\n\nImportant\n\nThese parameters accept onl...", + "required": false + }, + "queue": { + "name": "queue", + "type": "*QueueSettings", + "description": "The queue settings of the run.", + "required": false + }, + "spark_submit_params": { + "name": "spark_submit_params", + "type": "[]string", + "description": "A list of parameters for jobs with spark submit task, for example\n`\"spark_submit_params\": [\"--class\",\n\"org.apache.spark.examples.SparkPi\"]`. The parameters are passed to\nspark-submit script as command-line parameters. If specified upon\n`run-now`, it would overwrite the parameters specified in job setting.\nThe JSON representation of this field (for example\n`{\"python_params\":[\"john doe\",\"35\"]}`) cannot exceed 10,000 bytes.\n\n⚠ **Deprecation note** Use [job parameters] to pass information down ...", + "required": false + }, + "sql_params": { + "name": "sql_params", + "type": "map[string]string", + "description": "A map from keys to values for jobs with SQL task, for example\n`\"sql_params\": {\"name\": \"john doe\", \"age\": \"35\"}`. The SQL alert task\ndoes not support custom parameters.\n\n⚠ **Deprecation note** Use [job parameters] to pass information down to\ntasks.\n\n[job parameters]: https://docs.databricks.com/jobs/job-parameters.html#job-parameter-pushdown", + "required": false + } + } + }, + "jobs.RunNowResponse": { + "name": "RunNowResponse", + "package": "jobs", + "description": "Run was started successfully.", + "fields": { + "number_in_job": { + "name": "number_in_job", + "type": "int64", + "description": "A unique identifier for this job run. This is set to the same value as\n`run_id`.", + "required": false + }, + "run_id": { + "name": "run_id", + "type": "int64", + "description": "The globally unique ID of the newly triggered run.", + "required": false + } + } + }, + "jobs.RunOutput": { + "name": "RunOutput", + "package": "jobs", + "description": "Run output was retrieved successfully.", + "fields": { + "clean_rooms_notebook_output": { + "name": "clean_rooms_notebook_output", + "type": "*CleanRoomsNotebookTaskCleanRoomsNotebookTaskOutput", + "description": "The output of a clean rooms notebook task, if available", + "required": false + }, + "dashboard_output": { + "name": "dashboard_output", + "type": "*DashboardTaskOutput", + "description": "The output of a dashboard task, if available", + "required": false + }, + "dbt_cloud_output": { + "name": "dbt_cloud_output", + "type": "*DbtCloudTaskOutput", + "description": "Deprecated in favor of the new dbt_platform_output", + "required": false + }, + "dbt_output": { + "name": "dbt_output", + "type": "*DbtOutput", + "description": "The output of a dbt task, if available.", + "required": false + }, + "dbt_platform_output": { + "name": "dbt_platform_output", + "type": "*DbtPlatformTaskOutput", + "description": "", + "required": false + }, + "error": { + "name": "error", + "type": "string", + "description": "An error message indicating why a task failed or why output is not\navailable. 
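The jobs.RunNow entry above enumerates every parameter map an agent may attach to a run-now request, along with the mutual-exclusion rules spelled out in the descriptions (for example, notebook_params cannot be combined with jar_params). A hedged sketch, under the same assumed import path, of validating one parameter name against that entry before a request is emitted; describeRunNowParam is a hypothetical helper introduced here only for illustration:

package main

import (
	"fmt"
	"log"

	// Assumed import path; adjust to this repository's module path.
	"github.com/databricks/cli/experimental/aitools/lib/providers/sdkdocs"
)

// describeRunNowParam is a hypothetical helper: it reports whether the
// jobs.RunNow entry documents a field with the given name and, if so,
// returns that field's Go type.
func describeRunNowParam(idx *sdkdocs.SDKDocsIndex, param string) (string, bool) {
	t := idx.GetType("jobs.RunNow")
	if t == nil {
		return "", false
	}
	f, ok := t.Fields[param]
	if !ok {
		return "", false
	}
	return f.Type, true
}

func main() {
	idx, err := sdkdocs.LoadIndex()
	if err != nil {
		log.Fatalf("loading embedded SDK docs index: %v", err)
	}
	if typ, ok := describeRunNowParam(idx, "notebook_params"); ok {
		// Per the entry above, this prints map[string]string.
		fmt.Println("notebook_params:", typ)
	}
}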
The message is unstructured, and its exact format is subject\nto change.", + "required": false + }, + "error_trace": { + "name": "error_trace", + "type": "string", + "description": "If there was an error executing the run, this field contains any\navailable stack traces.", + "required": false + }, + "info": { + "name": "info", + "type": "string", + "description": "", + "required": false + }, + "logs": { + "name": "logs", + "type": "string", + "description": "The output from tasks that write to standard streams (stdout/stderr) such\nas spark_jar_task, spark_python_task, python_wheel_task.\n\nIt's not supported for the notebook_task, pipeline_task or\nspark_submit_task.\n\nDatabricks restricts this API to return the last 5 MB of these logs.", + "required": false + }, + "logs_truncated": { + "name": "logs_truncated", + "type": "bool", + "description": "Whether the logs are truncated.", + "required": false + }, + "metadata": { + "name": "metadata", + "type": "*Run", + "description": "All details of the run except for its output.", + "required": false + }, + "notebook_output": { + "name": "notebook_output", + "type": "*NotebookOutput", + "description": "The output of a notebook task, if available. A notebook task that\nterminates (either successfully or with a failure) without calling\n`dbutils.notebook.exit()` is considered to have an empty output. This\nfield is set but its result value is empty. Databricks restricts this API\nto return the first 5 MB of the output. To return a larger result, use\nthe [ClusterLogConf] field to configure log storage for the job cluster.\n\n[ClusterLogConf]: https://docs.databricks.com/dev-tools/api/latest/clusters...", + "required": false + }, + "run_job_output": { + "name": "run_job_output", + "type": "*RunJobOutput", + "description": "The output of a run job task, if available", + "required": false + }, + "sql_output": { + "name": "sql_output", + "type": "*SqlOutput", + "description": "The output of a SQL task, if available.", + "required": false + } + } + }, + "jobs.RunParameters": { + "name": "RunParameters", + "package": "jobs", + "description": "", + "fields": { + "dbt_commands": { + "name": "dbt_commands", + "type": "[]string", + "description": "An array of commands to execute for jobs with the dbt task, for example\n`\"dbt_commands\": [\"dbt deps\", \"dbt seed\", \"dbt deps\", \"dbt seed\", \"dbt\nrun\"]`\n\n⚠ **Deprecation note** Use [job parameters] to pass information down to\ntasks.\n\n[job parameters]: https://docs.databricks.com/jobs/job-parameters.html#job-parameter-pushdown", + "required": false + }, + "jar_params": { + "name": "jar_params", + "type": "[]string", + "description": "A list of parameters for jobs with Spark JAR tasks, for example\n`\"jar_params\": [\"john doe\", \"35\"]`. The parameters are used to invoke the\nmain function of the main class specified in the Spark JAR task. If not\nspecified upon `run-now`, it defaults to an empty list. jar_params cannot\nbe specified in conjunction with notebook_params. The JSON representation\nof this field (for example `{\"jar_params\":[\"john doe\",\"35\"]}`) cannot\nexceed 10,000 bytes.\n\n⚠ **Deprecation note** Use [job parameters] t...", + "required": false + }, + "notebook_params": { + "name": "notebook_params", + "type": "map[string]string", + "description": "A map from keys to values for jobs with notebook task, for example\n`\"notebook_params\": {\"name\": \"john doe\", \"age\": \"35\"}`. 
The map is passed\nto the notebook and is accessible through the [dbutils.widgets.get]\nfunction.\n\nIf not specified upon `run-now`, the triggered run uses the job’s base\nparameters.\n\nnotebook_params cannot be specified in conjunction with jar_params.\n\n⚠ **Deprecation note** Use [job parameters] to pass information down to\ntasks.\n\nThe JSON representation of this field (f...", + "required": false + }, + "pipeline_params": { + "name": "pipeline_params", + "type": "*PipelineParams", + "description": "Controls whether the pipeline should perform a full refresh", + "required": false + }, + "python_named_params": { + "name": "python_named_params", + "type": "map[string]string", + "description": "", + "required": false + }, + "python_params": { + "name": "python_params", + "type": "[]string", + "description": "A list of parameters for jobs with Python tasks, for example\n`\"python_params\": [\"john doe\", \"35\"]`. The parameters are passed to\nPython file as command-line parameters. If specified upon `run-now`, it\nwould overwrite the parameters specified in job setting. The JSON\nrepresentation of this field (for example `{\"python_params\":[\"john\ndoe\",\"35\"]}`) cannot exceed 10,000 bytes.\n\n⚠ **Deprecation note** Use [job parameters] to pass information down to\ntasks.\n\nImportant\n\nThese parameters accept onl...", + "required": false + }, + "spark_submit_params": { + "name": "spark_submit_params", + "type": "[]string", + "description": "A list of parameters for jobs with spark submit task, for example\n`\"spark_submit_params\": [\"--class\",\n\"org.apache.spark.examples.SparkPi\"]`. The parameters are passed to\nspark-submit script as command-line parameters. If specified upon\n`run-now`, it would overwrite the parameters specified in job setting.\nThe JSON representation of this field (for example\n`{\"python_params\":[\"john doe\",\"35\"]}`) cannot exceed 10,000 bytes.\n\n⚠ **Deprecation note** Use [job parameters] to pass information down ...", + "required": false + }, + "sql_params": { + "name": "sql_params", + "type": "map[string]string", + "description": "A map from keys to values for jobs with SQL task, for example\n`\"sql_params\": {\"name\": \"john doe\", \"age\": \"35\"}`. The SQL alert task\ndoes not support custom parameters.\n\n⚠ **Deprecation note** Use [job parameters] to pass information down to\ntasks.\n\n[job parameters]: https://docs.databricks.com/jobs/job-parameters.html#job-parameter-pushdown", + "required": false + } + } + }, + "jobs.RunState": { + "name": "RunState", + "package": "jobs", + "description": "The current state of the run.", + "fields": { + "life_cycle_state": { + "name": "life_cycle_state", + "type": "RunLifeCycleState", + "description": "A value indicating the run's current lifecycle state. This field is\nalways available in the response. Note: Additional states might be\nintroduced in future releases.", + "required": false + }, + "queue_reason": { + "name": "queue_reason", + "type": "string", + "description": "The reason indicating why the run was queued.", + "required": false + }, + "result_state": { + "name": "result_state", + "type": "RunResultState", + "description": "A value indicating the run's result. This field is only available for\nterminal lifecycle states. Note: Additional states might be introduced in\nfuture releases.", + "required": false + }, + "state_message": { + "name": "state_message", + "type": "string", + "description": "A descriptive message for the current state. 
This field is unstructured,\nand its exact format is subject to change.", + "required": false + }, + "user_cancelled_or_timedout": { + "name": "user_cancelled_or_timedout", + "type": "bool", + "description": "A value indicating whether a run was canceled manually by a user or by\nthe scheduler because the run timed out.", + "required": false + } + } + }, + "jobs.RunStatus": { + "name": "RunStatus", + "package": "jobs", + "description": "The current status of the run", + "fields": { + "queue_details": { + "name": "queue_details", + "type": "*QueueDetails", + "description": "If the run was queued, details about the reason for queuing the run.", + "required": false + }, + "state": { + "name": "state", + "type": "RunLifecycleStateV2State", + "description": "", + "required": false + }, + "termination_details": { + "name": "termination_details", + "type": "*TerminationDetails", + "description": "If the run is in a TERMINATING or TERMINATED state, details about the\nreason for terminating the run.", + "required": false + } + } + }, + "jobs.RunTask": { + "name": "RunTask", + "package": "jobs", + "description": "Used when outputting a child run, in GetRun or ListRuns.", + "fields": { + "attempt_number": { + "name": "attempt_number", + "type": "int", + "description": "The sequence number of this run attempt for a triggered job run. The\ninitial attempt of a run has an attempt_number of 0. If the initial run\nattempt fails, and the job has a retry policy (`max_retries` \u003e 0),\nsubsequent runs are created with an `original_attempt_run_id` of the\noriginal attempt’s ID and an incrementing `attempt_number`. Runs are\nretried only until they succeed, and the maximum `attempt_number` is the\nsame as the `max_retries` value for the job.", + "required": false + }, + "clean_rooms_notebook_task": { + "name": "clean_rooms_notebook_task", + "type": "*CleanRoomsNotebookTask", + "description": "The task runs a [clean rooms] notebook when the\n`clean_rooms_notebook_task` field is present.\n\n[clean rooms]: https://docs.databricks.com/clean-rooms/index.html", + "required": false + }, + "cleanup_duration": { + "name": "cleanup_duration", + "type": "int64", + "description": "The time in milliseconds it took to terminate the cluster and clean up\nany associated artifacts. The duration of a task run is the sum of the\n`setup_duration`, `execution_duration`, and the `cleanup_duration`. The\n`cleanup_duration` field is set to 0 for multitask job runs. The total\nduration of a multitask job run is the value of the `run_duration` field.", + "required": false + }, + "cluster_instance": { + "name": "cluster_instance", + "type": "*ClusterInstance", + "description": "The cluster used for this run. If the run is specified to use a new\ncluster, this field is set once the Jobs service has requested a cluster\nfor the run.", + "required": false + }, + "condition_task": { + "name": "condition_task", + "type": "*RunConditionTask", + "description": "The task evaluates a condition that can be used to control the execution\nof other tasks when the `condition_task` field is present. 
The condition\ntask does not require a cluster to execute and does not support retries\nor notifications.", + "required": false + }, + "dashboard_task": { + "name": "dashboard_task", + "type": "*DashboardTask", + "description": "The task refreshes a dashboard and sends a snapshot to subscribers.", + "required": false + }, + "dbt_cloud_task": { + "name": "dbt_cloud_task", + "type": "*DbtCloudTask", + "description": "Task type for dbt cloud, deprecated in favor of the new name\ndbt_platform_task", + "required": false + }, + "dbt_platform_task": { + "name": "dbt_platform_task", + "type": "*DbtPlatformTask", + "description": "", + "required": false + }, + "dbt_task": { + "name": "dbt_task", + "type": "*DbtTask", + "description": "The task runs one or more dbt commands when the `dbt_task` field is\npresent. The dbt task requires both Databricks SQL and the ability to use\na serverless or a pro SQL warehouse.", + "required": false + }, + "depends_on": { + "name": "depends_on", + "type": "[]TaskDependency", + "description": "An optional array of objects specifying the dependency graph of the task.\nAll tasks specified in this field must complete successfully before\nexecuting this task. The key is `task_key`, and the value is the name\nassigned to the dependent task.", + "required": false + }, + "description": { + "name": "description", + "type": "string", + "description": "An optional description for this task.", + "required": false + }, + "effective_performance_target": { + "name": "effective_performance_target", + "type": "PerformanceTarget", + "description": "The actual performance target used by the serverless run during\nexecution. This can differ from the client-set performance target on the\nrequest depending on whether the performance mode is supported by the job\ntype.\n\n* `STANDARD`: Enables cost-efficient execution of serverless workloads. *\n`PERFORMANCE_OPTIMIZED`: Prioritizes fast startup and execution times\nthrough rapid scaling and optimized cluster performance.", + "required": false + }, + "email_notifications": { + "name": "email_notifications", + "type": "*JobEmailNotifications", + "description": "An optional set of email addresses notified when the task run begins or\ncompletes. The default behavior is to not send any emails.", + "required": false + }, + "end_time": { + "name": "end_time", + "type": "int64", + "description": "The time at which this run ended in epoch milliseconds (milliseconds\nsince 1/1/1970 UTC). This field is set to 0 if the job is still running.", + "required": false + }, + "environment_key": { + "name": "environment_key", + "type": "string", + "description": "The key that references an environment spec in a job. This field is\nrequired for Python script, Python wheel and dbt tasks when using\nserverless compute.", + "required": false + }, + "execution_duration": { + "name": "execution_duration", + "type": "int64", + "description": "The time in milliseconds it took to execute the commands in the JAR or\nnotebook until they completed, failed, timed out, were cancelled, or\nencountered an unexpected error. The duration of a task run is the sum of\nthe `setup_duration`, `execution_duration`, and the `cleanup_duration`.\nThe `execution_duration` field is set to 0 for multitask job runs. 
The\ntotal duration of a multitask job run is the value of the `run_duration`\nfield.", + "required": false + }, + "existing_cluster_id": { + "name": "existing_cluster_id", + "type": "string", + "description": "If existing_cluster_id, the ID of an existing cluster that is used for\nall runs. When running jobs or tasks on an existing cluster, you may need\nto manually restart the cluster if it stops responding. We suggest\nrunning jobs and tasks on new clusters for greater reliability", + "required": false + }, + "for_each_task": { + "name": "for_each_task", + "type": "*RunForEachTask", + "description": "The task executes a nested task for every input provided when the\n`for_each_task` field is present.", + "required": false + }, + "gen_ai_compute_task": { + "name": "gen_ai_compute_task", + "type": "*GenAiComputeTask", + "description": "", + "required": false + }, + "git_source": { + "name": "git_source", + "type": "*GitSource", + "description": "An optional specification for a remote Git repository containing the\nsource code used by tasks. Version-controlled source code is supported by\nnotebook, dbt, Python script, and SQL File tasks. If `git_source` is set,\nthese tasks retrieve the file from the remote repository by default.\nHowever, this behavior can be overridden by setting `source` to\n`WORKSPACE` on the task. Note: dbt and SQL File tasks support only\nversion-controlled sources. If dbt or SQL File tasks are used,\n`git_source` must...", + "required": false + }, + "job_cluster_key": { + "name": "job_cluster_key", + "type": "string", + "description": "If job_cluster_key, this task is executed reusing the cluster specified\nin `job.settings.job_clusters`.", + "required": false + }, + "libraries": { + "name": "libraries", + "type": "[]compute.Library", + "description": "An optional list of libraries to be installed on the cluster. The default\nvalue is an empty list.", + "required": false + }, + "new_cluster": { + "name": "new_cluster", + "type": "*compute.ClusterSpec", + "description": "If new_cluster, a description of a new cluster that is created for each\nrun.", + "required": false + }, + "notebook_task": { + "name": "notebook_task", + "type": "*NotebookTask", + "description": "The task runs a notebook when the `notebook_task` field is present.", + "required": false + }, + "notification_settings": { + "name": "notification_settings", + "type": "*TaskNotificationSettings", + "description": "Optional notification settings that are used when sending notifications\nto each of the `email_notifications` and `webhook_notifications` for this\ntask run.", + "required": false + }, + "pipeline_task": { + "name": "pipeline_task", + "type": "*PipelineTask", + "description": "The task triggers a pipeline update when the `pipeline_task` field is\npresent. 
Only pipelines configured to use triggered mode are supported.",
        "required": false
      },
      "power_bi_task": {
        "name": "power_bi_task",
        "type": "*PowerBiTask",
        "description": "The task triggers a Power BI semantic model update when the\n`power_bi_task` field is present.",
        "required": false
      },
      "python_wheel_task": {
        "name": "python_wheel_task",
        "type": "*PythonWheelTask",
        "description": "The task runs a Python wheel when the `python_wheel_task` field is\npresent.",
        "required": false
      },
      "queue_duration": {
        "name": "queue_duration",
        "type": "int64",
        "description": "The time in milliseconds that the run has spent in the queue.",
        "required": false
      },
      "resolved_values": {
        "name": "resolved_values",
        "type": "*ResolvedValues",
        "description": "Parameter values including resolved references",
        "required": false
      },
      "run_duration": {
        "name": "run_duration",
        "type": "int64",
        "description": "The time in milliseconds it took the job run and all of its repairs to\nfinish.",
        "required": false
      },
      "run_id": {
        "name": "run_id",
        "type": "int64",
        "description": "The ID of the task run.",
        "required": false
      },
      "run_if": {
        "name": "run_if",
        "type": "RunIf",
        "description": "An optional value indicating the condition that determines whether the\ntask should be run once its dependencies have been completed. When\nomitted, defaults to `ALL_SUCCESS`. See :method:jobs/create for a list of\npossible values.",
        "required": false
      },
      "run_job_task": {
        "name": "run_job_task",
        "type": "*RunJobTask",
        "description": "The task triggers another job when the `run_job_task` field is present.",
        "required": false
      },
      "run_page_url": {
        "name": "run_page_url",
        "type": "string",
        "description": "",
        "required": false
      },
      "setup_duration": {
        "name": "setup_duration",
        "type": "int64",
        "description": "The time in milliseconds it took to set up the cluster. For runs that run\non new clusters this is the cluster creation time, for runs that run on\nexisting clusters this time should be very short. The duration of a task\nrun is the sum of the `setup_duration`, `execution_duration`, and the\n`cleanup_duration`. The `setup_duration` field is set to 0 for multitask\njob runs. The total duration of a multitask job run is the value of the\n`run_duration` field.",
        "required": false
      },
      "spark_jar_task": {
        "name": "spark_jar_task",
        "type": "*SparkJarTask",
        "description": "The task runs a JAR when the `spark_jar_task` field is present.",
        "required": false
      },
      "spark_python_task": {
        "name": "spark_python_task",
        "type": "*SparkPythonTask",
        "description": "The task runs a Python file when the `spark_python_task` field is\npresent.",
        "required": false
      },
      "spark_submit_task": {
        "name": "spark_submit_task",
        "type": "*SparkSubmitTask",
        "description": "(Legacy) The task runs the spark-submit script when the spark_submit_task\nfield is present. Databricks recommends using the spark_jar_task instead;\nsee [Spark Submit task for jobs](/jobs/spark-submit).",
        "required": false
      },
      "sql_task": {
        "name": "sql_task",
        "type": "*SqlTask",
        "description": "The task runs a SQL query or file, or it refreshes a SQL alert or a\nlegacy SQL dashboard when the `sql_task` field is present.",
        "required": false
      },
      "start_time": {
        "name": "start_time",
        "type": "int64",
        "description": "The time at which this run was started in epoch milliseconds\n(milliseconds since 1/1/1970 UTC). 
This may not be the time when the job\ntask starts executing, for example, if the job is scheduled to run on a\nnew cluster, this is the time the cluster creation call is issued.",
        "required": false
      },
      "state": {
        "name": "state",
        "type": "*RunState",
        "description": "Deprecated. Please use the `status` field instead.",
        "required": false
      },
      "status": {
        "name": "status",
        "type": "*RunStatus",
        "description": "",
        "required": false
      },
      "task_key": {
        "name": "task_key",
        "type": "string",
        "description": "A unique name for the task. This field is used to refer to this task from\nother tasks. This field is required and must be unique within its parent\njob. On Update or Reset, this field is used to reference the tasks to be\nupdated or reset.",
        "required": false
      },
      "timeout_seconds": {
        "name": "timeout_seconds",
        "type": "int",
        "description": "An optional timeout applied to each run of this job task. A value of `0`\nmeans no timeout.",
        "required": false
      },
      "webhook_notifications": {
        "name": "webhook_notifications",
        "type": "*WebhookNotifications",
        "description": "A collection of system notification IDs to notify when the run begins or\ncompletes. The default behavior is to not send any system notifications.\nTask webhooks respect the task notification settings.",
        "required": false
      }
    }
  },
  "jobs.Source": {
    "name": "Source",
    "package": "jobs",
    "description": "Optional location type of the SQL file. When set to `WORKSPACE`, the SQL file will be retrieved\\\nfrom the local Databricks workspace. When set to `GIT`, the SQL file will be retrieved from a Git repository\ndefined in `git_source`. If the value is empty, the task will use `GIT` if `git_source` is defined and `WORKSPACE` otherwise.\n\n* `WORKSPACE`: SQL file is located in Databricks workspace.\n* `GIT`: SQL file is located in cloud Git provider.",
    "fields": {}
  },
  "jobs.SparkJarTask": {
    "name": "SparkJarTask",
    "package": "jobs",
    "description": "",
    "fields": {
      "jar_uri": {
        "name": "jar_uri",
        "type": "string",
        "description": "Deprecated since 04/2016. For classic compute, provide a `jar` through\nthe `libraries` field instead. For serverless compute, provide a `jar`\nthrough the `java_dependencies` field inside the `environments` list.\n\nSee the examples of classic and serverless compute usage at the top of\nthe page.",
        "required": false
      },
      "main_class_name": {
        "name": "main_class_name",
        "type": "string",
        "description": "The full name of the class containing the main method to be executed.\nThis class must be contained in a JAR provided as a library.\n\nThe code must use `SparkContext.getOrCreate` to obtain a Spark context;\notherwise, runs of the job fail.",
        "required": false
      },
      "parameters": {
        "name": "parameters",
        "type": "[]string",
        "description": "Parameters passed to the main method.\n\nUse [Task parameter variables] to set parameters containing information\nabout job runs.\n\n[Task parameter variables]: https://docs.databricks.com/jobs.html#parameter-variables",
        "required": false
      },
      "run_as_repl": {
        "name": "run_as_repl",
        "type": "bool",
        "description": "Deprecated. 
A value of `false` is no longer supported.", + "required": false + } + } + }, + "jobs.SparkPythonTask": { + "name": "SparkPythonTask", + "package": "jobs", + "description": "", + "fields": { + "parameters": { + "name": "parameters", + "type": "[]string", + "description": "Command line parameters passed to the Python file.\n\nUse [Task parameter variables] to set parameters containing information\nabout job runs.\n\n[Task parameter variables]: https://docs.databricks.com/jobs.html#parameter-variables", + "required": false + }, + "python_file": { + "name": "python_file", + "type": "string", + "description": "The Python file to be executed. Cloud file URIs (such as dbfs:/, s3:/,\nadls:/, gcs:/) and workspace paths are supported. For python files stored\nin the Databricks workspace, the path must be absolute and begin with\n`/`. For files stored in a remote repository, the path must be relative.\nThis field is required.", + "required": false + }, + "source": { + "name": "source", + "type": "Source", + "description": "Optional location type of the Python file. When set to `WORKSPACE` or not\nspecified, the file will be retrieved from the local Databricks workspace\nor cloud location (if the `python_file` has a URI format). When set to\n`GIT`, the Python file will be retrieved from a Git repository defined in\n`git_source`.\n\n* `WORKSPACE`: The Python file is located in a Databricks workspace or at\na cloud filesystem URI. * `GIT`: The Python file is located in a remote\nGit repository.", + "required": false + } + } + }, + "jobs.SparkSubmitTask": { + "name": "SparkSubmitTask", + "package": "jobs", + "description": "", + "fields": { + "parameters": { + "name": "parameters", + "type": "[]string", + "description": "Command-line parameters passed to spark submit.\n\nUse [Task parameter variables] to set parameters containing information\nabout job runs.\n\n[Task parameter variables]: https://docs.databricks.com/jobs.html#parameter-variables", + "required": false + } + } + }, + "jobs.SqlAlertOutput": { + "name": "SqlAlertOutput", + "package": "jobs", + "description": "", + "fields": { + "alert_state": { + "name": "alert_state", + "type": "SqlAlertState", + "description": "", + "required": false + }, + "output_link": { + "name": "output_link", + "type": "string", + "description": "The link to find the output results.", + "required": false + }, + "query_text": { + "name": "query_text", + "type": "string", + "description": "The text of the SQL query. Can Run permission of the SQL query associated\nwith the SQL alert is required to view this field.", + "required": false + }, + "sql_statements": { + "name": "sql_statements", + "type": "[]SqlStatementOutput", + "description": "Information about SQL statements executed in the run.", + "required": false + }, + "warehouse_id": { + "name": "warehouse_id", + "type": "string", + "description": "The canonical identifier of the SQL warehouse.", + "required": false + } + } + }, + "jobs.SqlDashboardOutput": { + "name": "SqlDashboardOutput", + "package": "jobs", + "description": "", + "fields": { + "warehouse_id": { + "name": "warehouse_id", + "type": "string", + "description": "The canonical identifier of the SQL warehouse.", + "required": false + }, + "widgets": { + "name": "widgets", + "type": "[]SqlDashboardWidgetOutput", + "description": "Widgets executed in the run. 
Only SQL query based widgets are listed.", + "required": false + } + } + }, + "jobs.SqlDashboardWidgetOutput": { + "name": "SqlDashboardWidgetOutput", + "package": "jobs", + "description": "", + "fields": { + "end_time": { + "name": "end_time", + "type": "int64", + "description": "Time (in epoch milliseconds) when execution of the SQL widget ends.", + "required": false + }, + "error": { + "name": "error", + "type": "*SqlOutputError", + "description": "The information about the error when execution fails.", + "required": false + }, + "output_link": { + "name": "output_link", + "type": "string", + "description": "The link to find the output results.", + "required": false + }, + "start_time": { + "name": "start_time", + "type": "int64", + "description": "Time (in epoch milliseconds) when execution of the SQL widget starts.", + "required": false + }, + "status": { + "name": "status", + "type": "SqlDashboardWidgetOutputStatus", + "description": "The execution status of the SQL widget.", + "required": false + }, + "widget_id": { + "name": "widget_id", + "type": "string", + "description": "The canonical identifier of the SQL widget.", + "required": false + }, + "widget_title": { + "name": "widget_title", + "type": "string", + "description": "The title of the SQL widget.", + "required": false + } + } + }, + "jobs.SqlOutput": { + "name": "SqlOutput", + "package": "jobs", + "description": "", + "fields": { + "alert_output": { + "name": "alert_output", + "type": "*SqlAlertOutput", + "description": "The output of a SQL alert task, if available.", + "required": false + }, + "dashboard_output": { + "name": "dashboard_output", + "type": "*SqlDashboardOutput", + "description": "The output of a SQL dashboard task, if available.", + "required": false + }, + "query_output": { + "name": "query_output", + "type": "*SqlQueryOutput", + "description": "The output of a SQL query task, if available.", + "required": false + } + } + }, + "jobs.SqlOutputError": { + "name": "SqlOutputError", + "package": "jobs", + "description": "", + "fields": { + "message": { + "name": "message", + "type": "string", + "description": "The error message when execution fails.", + "required": false + } + } + }, + "jobs.SqlQueryOutput": { + "name": "SqlQueryOutput", + "package": "jobs", + "description": "", + "fields": { + "endpoint_id": { + "name": "endpoint_id", + "type": "string", + "description": "", + "required": false + }, + "output_link": { + "name": "output_link", + "type": "string", + "description": "The link to find the output results.", + "required": false + }, + "query_text": { + "name": "query_text", + "type": "string", + "description": "The text of the SQL query. 
Can Run permission of the SQL query is\nrequired to view this field.", + "required": false + }, + "sql_statements": { + "name": "sql_statements", + "type": "[]SqlStatementOutput", + "description": "Information about SQL statements executed in the run.", + "required": false + }, + "warehouse_id": { + "name": "warehouse_id", + "type": "string", + "description": "The canonical identifier of the SQL warehouse.", + "required": false + } + } + }, + "jobs.SqlStatementOutput": { + "name": "SqlStatementOutput", + "package": "jobs", + "description": "", + "fields": { + "lookup_key": { + "name": "lookup_key", + "type": "string", + "description": "A key that can be used to look up query details.", + "required": false + } + } + }, + "jobs.SqlTask": { + "name": "SqlTask", + "package": "jobs", + "description": "", + "fields": { + "alert": { + "name": "alert", + "type": "*SqlTaskAlert", + "description": "If alert, indicates that this job must refresh a SQL alert.", + "required": false + }, + "dashboard": { + "name": "dashboard", + "type": "*SqlTaskDashboard", + "description": "If dashboard, indicates that this job must refresh a SQL dashboard.", + "required": false + }, + "file": { + "name": "file", + "type": "*SqlTaskFile", + "description": "If file, indicates that this job runs a SQL file in a remote Git\nrepository.", + "required": false + }, + "parameters": { + "name": "parameters", + "type": "map[string]string", + "description": "Parameters to be used for each run of this job. The SQL alert task does\nnot support custom parameters.", + "required": false + }, + "query": { + "name": "query", + "type": "*SqlTaskQuery", + "description": "If query, indicates that this job must execute a SQL query.", + "required": false + }, + "warehouse_id": { + "name": "warehouse_id", + "type": "string", + "description": "The canonical identifier of the SQL warehouse. Recommended to use with\nserverless or pro SQL warehouses. 
Classic SQL warehouses are only\nsupported for SQL alert, dashboard and query tasks and are limited to\nscheduled single-task jobs.", + "required": false + } + } + }, + "jobs.SqlTaskAlert": { + "name": "SqlTaskAlert", + "package": "jobs", + "description": "", + "fields": { + "alert_id": { + "name": "alert_id", + "type": "string", + "description": "The canonical identifier of the SQL alert.", + "required": false + }, + "pause_subscriptions": { + "name": "pause_subscriptions", + "type": "bool", + "description": "If true, the alert notifications are not sent to subscribers.", + "required": false + }, + "subscriptions": { + "name": "subscriptions", + "type": "[]SqlTaskSubscription", + "description": "If specified, alert notifications are sent to subscribers.", + "required": false + } + } + }, + "jobs.SqlTaskDashboard": { + "name": "SqlTaskDashboard", + "package": "jobs", + "description": "", + "fields": { + "custom_subject": { + "name": "custom_subject", + "type": "string", + "description": "Subject of the email sent to subscribers of this task.", + "required": false + }, + "dashboard_id": { + "name": "dashboard_id", + "type": "string", + "description": "The canonical identifier of the SQL dashboard.", + "required": false + }, + "pause_subscriptions": { + "name": "pause_subscriptions", + "type": "bool", + "description": "If true, the dashboard snapshot is not taken, and emails are not sent to\nsubscribers.", + "required": false + }, + "subscriptions": { + "name": "subscriptions", + "type": "[]SqlTaskSubscription", + "description": "If specified, dashboard snapshots are sent to subscriptions.", + "required": false + } + } + }, + "jobs.SqlTaskFile": { + "name": "SqlTaskFile", + "package": "jobs", + "description": "", + "fields": { + "path": { + "name": "path", + "type": "string", + "description": "Path of the SQL file. Must be relative if the source is a remote Git\nrepository and absolute for workspace paths.", + "required": false + }, + "source": { + "name": "source", + "type": "Source", + "description": "Optional location type of the SQL file. When set to `WORKSPACE`, the SQL\nfile will be retrieved from the local Databricks workspace. When set to\n`GIT`, the SQL file will be retrieved from a Git repository defined in\n`git_source`. If the value is empty, the task will use `GIT` if\n`git_source` is defined and `WORKSPACE` otherwise.\n\n* `WORKSPACE`: SQL file is located in Databricks workspace. * `GIT`: SQL\nfile is located in cloud Git provider.", + "required": false + } + } + }, + "jobs.SqlTaskQuery": { + "name": "SqlTaskQuery", + "package": "jobs", + "description": "", + "fields": { + "query_id": { + "name": "query_id", + "type": "string", + "description": "The canonical identifier of the SQL query.", + "required": false + } + } + }, + "jobs.SqlTaskSubscription": { + "name": "SqlTaskSubscription", + "package": "jobs", + "description": "", + "fields": { + "destination_id": { + "name": "destination_id", + "type": "string", + "description": "The canonical identifier of the destination to receive email\nnotification. This parameter is mutually exclusive with user_name. You\ncannot set both destination_id and user_name for subscription\nnotifications.", + "required": false + }, + "user_name": { + "name": "user_name", + "type": "string", + "description": "The user name to receive the subscription email. This parameter is\nmutually exclusive with destination_id. 
You cannot set both\ndestination_id and user_name for subscription notifications.",
        "required": false
      }
    }
  },
  "jobs.SubmitRun": {
    "name": "SubmitRun",
    "package": "jobs",
    "description": "",
    "fields": {
      "access_control_list": {
        "name": "access_control_list",
        "type": "[]JobAccessControlRequest",
        "description": "List of permissions to set on the job.",
        "required": false
      },
      "budget_policy_id": {
        "name": "budget_policy_id",
        "type": "string",
        "description": "The user specified id of the budget policy to use for this one-time run.\nIf not specified, the run will not be attributed to any budget policy.",
        "required": false
      },
      "email_notifications": {
        "name": "email_notifications",
        "type": "*JobEmailNotifications",
        "description": "An optional set of email addresses notified when the run begins or\ncompletes.",
        "required": false
      },
      "environments": {
        "name": "environments",
        "type": "[]JobEnvironment",
        "description": "A list of task execution environment specifications that can be\nreferenced by tasks of this run.",
        "required": false
      },
      "git_source": {
        "name": "git_source",
        "type": "*GitSource",
        "description": "An optional specification for a remote Git repository containing the\nsource code used by tasks. Version-controlled source code is supported by\nnotebook, dbt, Python script, and SQL File tasks.\n\nIf `git_source` is set, these tasks retrieve the file from the remote\nrepository by default. However, this behavior can be overridden by\nsetting `source` to `WORKSPACE` on the task.\n\nNote: dbt and SQL File tasks support only version-controlled sources. If\ndbt or SQL File tasks are used, `git_source` mu...",
        "required": false
      },
      "health": {
        "name": "health",
        "type": "*JobsHealthRules",
        "description": "",
        "required": false
      },
      "idempotency_token": {
        "name": "idempotency_token",
        "type": "string",
        "description": "An optional token that can be used to guarantee the idempotency of job\nrun requests. If a run with the provided token already exists, the\nrequest does not create a new run but returns the ID of the existing run\ninstead. If a run with the provided token is deleted, an error is\nreturned.\n\nIf you specify the idempotency token, upon failure you can retry until\nthe request succeeds. Databricks guarantees that exactly one run is\nlaunched with that idempotency token.\n\nThis token must have at most 64...",
        "required": false
      },
      "notification_settings": {
        "name": "notification_settings",
        "type": "*JobNotificationSettings",
        "description": "Optional notification settings that are used when sending notifications\nto each of the `email_notifications` and `webhook_notifications` for this\nrun.",
        "required": false
      },
      "queue": {
        "name": "queue",
        "type": "*QueueSettings",
        "description": "The queue settings of the one-time run.",
        "required": false
      },
      "run_as": {
        "name": "run_as",
        "type": "*JobRunAs",
        "description": "Specifies the user or service principal that the job runs as. If not\nspecified, the job runs as the user who submits the request.",
        "required": false
      },
      "run_name": {
        "name": "run_name",
        "type": "string",
        "description": "An optional name for the run. 
The default value is `Untitled`.", + "required": false + }, + "tasks": { + "name": "tasks", + "type": "[]SubmitTask", + "description": "", + "required": false + }, + "timeout_seconds": { + "name": "timeout_seconds", + "type": "int", + "description": "An optional timeout applied to each run of this job. A value of `0` means\nno timeout.", + "required": false + }, + "usage_policy_id": { + "name": "usage_policy_id", + "type": "string", + "description": "The user specified id of the usage policy to use for this one-time run.\nIf not specified, a default usage policy may be applied when creating or\nmodifying the job.", + "required": false + }, + "webhook_notifications": { + "name": "webhook_notifications", + "type": "*WebhookNotifications", + "description": "A collection of system notification IDs to notify when the run begins or\ncompletes.", + "required": false + } + } + }, + "jobs.SubmitRunResponse": { + "name": "SubmitRunResponse", + "package": "jobs", + "description": "Run was created and started successfully.", + "fields": { + "run_id": { + "name": "run_id", + "type": "int64", + "description": "The canonical identifier for the newly submitted run.", + "required": false + } + } + }, + "jobs.SubmitTask": { + "name": "SubmitTask", + "package": "jobs", + "description": "", + "fields": { + "clean_rooms_notebook_task": { + "name": "clean_rooms_notebook_task", + "type": "*CleanRoomsNotebookTask", + "description": "The task runs a [clean rooms] notebook when the\n`clean_rooms_notebook_task` field is present.\n\n[clean rooms]: https://docs.databricks.com/clean-rooms/index.html", + "required": false + }, + "condition_task": { + "name": "condition_task", + "type": "*ConditionTask", + "description": "The task evaluates a condition that can be used to control the execution\nof other tasks when the `condition_task` field is present. The condition\ntask does not require a cluster to execute and does not support retries\nor notifications.", + "required": false + }, + "dashboard_task": { + "name": "dashboard_task", + "type": "*DashboardTask", + "description": "The task refreshes a dashboard and sends a snapshot to subscribers.", + "required": false + }, + "dbt_cloud_task": { + "name": "dbt_cloud_task", + "type": "*DbtCloudTask", + "description": "Task type for dbt cloud, deprecated in favor of the new name\ndbt_platform_task", + "required": false + }, + "dbt_platform_task": { + "name": "dbt_platform_task", + "type": "*DbtPlatformTask", + "description": "", + "required": false + }, + "dbt_task": { + "name": "dbt_task", + "type": "*DbtTask", + "description": "The task runs one or more dbt commands when the `dbt_task` field is\npresent. The dbt task requires both Databricks SQL and the ability to use\na serverless or a pro SQL warehouse.", + "required": false + }, + "depends_on": { + "name": "depends_on", + "type": "[]TaskDependency", + "description": "An optional array of objects specifying the dependency graph of the task.\nAll tasks specified in this field must complete successfully before\nexecuting this task. The key is `task_key`, and the value is the name\nassigned to the dependent task.", + "required": false + }, + "description": { + "name": "description", + "type": "string", + "description": "An optional description for this task.", + "required": false + }, + "email_notifications": { + "name": "email_notifications", + "type": "*JobEmailNotifications", + "description": "An optional set of email addresses notified when the task run begins or\ncompletes. 
The default behavior is to not send any emails.",
        "required": false
      },
      "environment_key": {
        "name": "environment_key",
        "type": "string",
        "description": "The key that references an environment spec in a job. This field is\nrequired for Python script, Python wheel and dbt tasks when using\nserverless compute.",
        "required": false
      },
      "existing_cluster_id": {
        "name": "existing_cluster_id",
        "type": "string",
        "description": "If existing_cluster_id, the ID of an existing cluster that is used for\nall runs. When running jobs or tasks on an existing cluster, you may need\nto manually restart the cluster if it stops responding. We suggest\nrunning jobs and tasks on new clusters for greater reliability",
        "required": false
      },
      "for_each_task": {
        "name": "for_each_task",
        "type": "*ForEachTask",
        "description": "The task executes a nested task for every input provided when the\n`for_each_task` field is present.",
        "required": false
      },
      "gen_ai_compute_task": {
        "name": "gen_ai_compute_task",
        "type": "*GenAiComputeTask",
        "description": "",
        "required": false
      },
      "health": {
        "name": "health",
        "type": "*JobsHealthRules",
        "description": "",
        "required": false
      },
      "libraries": {
        "name": "libraries",
        "type": "[]compute.Library",
        "description": "An optional list of libraries to be installed on the cluster. The default\nvalue is an empty list.",
        "required": false
      },
      "new_cluster": {
        "name": "new_cluster",
        "type": "*compute.ClusterSpec",
        "description": "If new_cluster, a description of a new cluster that is created for each\nrun.",
        "required": false
      },
      "notebook_task": {
        "name": "notebook_task",
        "type": "*NotebookTask",
        "description": "The task runs a notebook when the `notebook_task` field is present.",
        "required": false
      },
      "notification_settings": {
        "name": "notification_settings",
        "type": "*TaskNotificationSettings",
        "description": "Optional notification settings that are used when sending notifications\nto each of the `email_notifications` and `webhook_notifications` for this\ntask run.",
        "required": false
      },
      "pipeline_task": {
        "name": "pipeline_task",
        "type": "*PipelineTask",
        "description": "The task triggers a pipeline update when the `pipeline_task` field is\npresent. Only pipelines configured to use triggered mode are supported.",
        "required": false
      },
      "power_bi_task": {
        "name": "power_bi_task",
        "type": "*PowerBiTask",
        "description": "The task triggers a Power BI semantic model update when the\n`power_bi_task` field is present.",
        "required": false
      },
      "python_wheel_task": {
        "name": "python_wheel_task",
        "type": "*PythonWheelTask",
        "description": "The task runs a Python wheel when the `python_wheel_task` field is\npresent.",
        "required": false
      },
      "run_if": {
        "name": "run_if",
        "type": "RunIf",
        "description": "An optional value indicating the condition that determines whether the\ntask should be run once its dependencies have been completed. When\nomitted, defaults to `ALL_SUCCESS`. 
See :method:jobs/create for a list of\npossible values.", + "required": false + }, + "run_job_task": { + "name": "run_job_task", + "type": "*RunJobTask", + "description": "The task triggers another job when the `run_job_task` field is present.", + "required": false + }, + "spark_jar_task": { + "name": "spark_jar_task", + "type": "*SparkJarTask", + "description": "The task runs a JAR when the `spark_jar_task` field is present.", + "required": false + }, + "spark_python_task": { + "name": "spark_python_task", + "type": "*SparkPythonTask", + "description": "The task runs a Python file when the `spark_python_task` field is\npresent.", + "required": false + }, + "spark_submit_task": { + "name": "spark_submit_task", + "type": "*SparkSubmitTask", + "description": "(Legacy) The task runs the spark-submit script when the spark_submit_task\nfield is present. Databricks recommends using the spark_jar_task instead;\nsee [Spark Submit task for jobs](/jobs/spark-submit).", + "required": false + }, + "sql_task": { + "name": "sql_task", + "type": "*SqlTask", + "description": "The task runs a SQL query or file, or it refreshes a SQL alert or a\nlegacy SQL dashboard when the `sql_task` field is present.", + "required": false + }, + "task_key": { + "name": "task_key", + "type": "string", + "description": "A unique name for the task. This field is used to refer to this task from\nother tasks. This field is required and must be unique within its parent\njob. On Update or Reset, this field is used to reference the tasks to be\nupdated or reset.", + "required": false + }, + "timeout_seconds": { + "name": "timeout_seconds", + "type": "int", + "description": "An optional timeout applied to each run of this job task. A value of `0`\nmeans no timeout.", + "required": false + }, + "webhook_notifications": { + "name": "webhook_notifications", + "type": "*WebhookNotifications", + "description": "A collection of system notification IDs to notify when the run begins or\ncompletes. 
The default behavior is to not send any system notifications.\nTask webhooks respect the task notification settings.", + "required": false + } + } + }, + "jobs.Subscription": { + "name": "Subscription", + "package": "jobs", + "description": "", + "fields": { + "custom_subject": { + "name": "custom_subject", + "type": "string", + "description": "Optional: Allows users to specify a custom subject line on the email sent\nto subscribers.", + "required": false + }, + "paused": { + "name": "paused", + "type": "bool", + "description": "When true, the subscription will not send emails.", + "required": false + }, + "subscribers": { + "name": "subscribers", + "type": "[]SubscriptionSubscriber", + "description": "The list of subscribers to send the snapshot of the dashboard to.", + "required": false + } + } + }, + "jobs.SubscriptionSubscriber": { + "name": "SubscriptionSubscriber", + "package": "jobs", + "description": "", + "fields": { + "destination_id": { + "name": "destination_id", + "type": "string", + "description": "A snapshot of the dashboard will be sent to the destination when the\n`destination_id` field is present.", + "required": false + }, + "user_name": { + "name": "user_name", + "type": "string", + "description": "A snapshot of the dashboard will be sent to the user's email when the\n`user_name` field is present.", + "required": false + } + } + }, + "jobs.TableState": { + "name": "TableState", + "package": "jobs", + "description": "", + "fields": { + "has_seen_updates": { + "name": "has_seen_updates", + "type": "bool", + "description": "Whether or not the table has seen updates since either the creation of\nthe trigger or the last successful evaluation of the trigger", + "required": false + }, + "table_name": { + "name": "table_name", + "type": "string", + "description": "Full table name of the table to monitor, e.g.\n`mycatalog.myschema.mytable`", + "required": false + } + } + }, + "jobs.TableTriggerState": { + "name": "TableTriggerState", + "package": "jobs", + "description": "", + "fields": { + "last_seen_table_states": { + "name": "last_seen_table_states", + "type": "[]TableState", + "description": "", + "required": false + }, + "using_scalable_monitoring": { + "name": "using_scalable_monitoring", + "type": "bool", + "description": "Indicates whether the trigger is using scalable monitoring.", + "required": false + } + } + }, + "jobs.TableUpdateTriggerConfiguration": { + "name": "TableUpdateTriggerConfiguration", + "package": "jobs", + "description": "", + "fields": { + "condition": { + "name": "condition", + "type": "Condition", + "description": "The table(s) condition based on which to trigger a job run.", + "required": false + }, + "min_time_between_triggers_seconds": { + "name": "min_time_between_triggers_seconds", + "type": "int", + "description": "If set, the trigger starts a run only after the specified amount of time\nhas passed since the last time the trigger fired. The minimum allowed\nvalue is 60 seconds.", + "required": false + }, + "table_names": { + "name": "table_names", + "type": "[]string", + "description": "A list of tables to monitor for changes. The table name must be in the\nformat `catalog_name.schema_name.table_name`.", + "required": false + }, + "wait_after_last_change_seconds": { + "name": "wait_after_last_change_seconds", + "type": "int", + "description": "If set, the trigger starts a run only after no table updates have\noccurred for the specified time and can be used to wait for a series of\ntable updates before triggering a run. 
The minimum allowed value is 60\nseconds.", + "required": false + } + } + }, + "jobs.Task": { + "name": "Task", + "package": "jobs", + "description": "", + "fields": { + "clean_rooms_notebook_task": { + "name": "clean_rooms_notebook_task", + "type": "*CleanRoomsNotebookTask", + "description": "The task runs a [clean rooms] notebook when the\n`clean_rooms_notebook_task` field is present.\n\n[clean rooms]: https://docs.databricks.com/clean-rooms/index.html", + "required": false + }, + "condition_task": { + "name": "condition_task", + "type": "*ConditionTask", + "description": "The task evaluates a condition that can be used to control the execution\nof other tasks when the `condition_task` field is present. The condition\ntask does not require a cluster to execute and does not support retries\nor notifications.", + "required": false + }, + "dashboard_task": { + "name": "dashboard_task", + "type": "*DashboardTask", + "description": "The task refreshes a dashboard and sends a snapshot to subscribers.", + "required": false + }, + "dbt_cloud_task": { + "name": "dbt_cloud_task", + "type": "*DbtCloudTask", + "description": "Task type for dbt cloud, deprecated in favor of the new name\ndbt_platform_task", + "required": false + }, + "dbt_platform_task": { + "name": "dbt_platform_task", + "type": "*DbtPlatformTask", + "description": "", + "required": false + }, + "dbt_task": { + "name": "dbt_task", + "type": "*DbtTask", + "description": "The task runs one or more dbt commands when the `dbt_task` field is\npresent. The dbt task requires both Databricks SQL and the ability to use\na serverless or a pro SQL warehouse.", + "required": false + }, + "depends_on": { + "name": "depends_on", + "type": "[]TaskDependency", + "description": "An optional array of objects specifying the dependency graph of the task.\nAll tasks specified in this field must complete before executing this\ntask. The task will run only if the `run_if` condition is true. The key\nis `task_key`, and the value is the name assigned to the dependent task.", + "required": false + }, + "description": { + "name": "description", + "type": "string", + "description": "An optional description for this task.", + "required": false + }, + "disable_auto_optimization": { + "name": "disable_auto_optimization", + "type": "bool", + "description": "An option to disable auto optimization in serverless", + "required": false + }, + "disabled": { + "name": "disabled", + "type": "bool", + "description": "An optional flag to disable the task. If set to true, the task will not\nrun even if it is part of a job.", + "required": false + }, + "email_notifications": { + "name": "email_notifications", + "type": "*TaskEmailNotifications", + "description": "An optional set of email addresses that is notified when runs of this\ntask begin or complete as well as when this task is deleted. The default\nbehavior is to not send any emails.", + "required": false + }, + "environment_key": { + "name": "environment_key", + "type": "string", + "description": "The key that references an environment spec in a job. This field is\nrequired for Python script, Python wheel and dbt tasks when using\nserverless compute.", + "required": false + }, + "existing_cluster_id": { + "name": "existing_cluster_id", + "type": "string", + "description": "If existing_cluster_id, the ID of an existing cluster that is used for\nall runs. When running jobs or tasks on an existing cluster, you may need\nto manually restart the cluster if it stops responding. 
We suggest\nrunning jobs and tasks on new clusters for greater reliability", + "required": false + }, + "for_each_task": { + "name": "for_each_task", + "type": "*ForEachTask", + "description": "The task executes a nested task for every input provided when the\n`for_each_task` field is present.", + "required": false + }, + "gen_ai_compute_task": { + "name": "gen_ai_compute_task", + "type": "*GenAiComputeTask", + "description": "", + "required": false + }, + "health": { + "name": "health", + "type": "*JobsHealthRules", + "description": "An optional set of health rules that can be defined for this job.", + "required": false + }, + "job_cluster_key": { + "name": "job_cluster_key", + "type": "string", + "description": "If job_cluster_key, this task is executed reusing the cluster specified\nin `job.settings.job_clusters`.", + "required": false + }, + "libraries": { + "name": "libraries", + "type": "[]compute.Library", + "description": "An optional list of libraries to be installed on the cluster. The default\nvalue is an empty list.", + "required": false + }, + "max_retries": { + "name": "max_retries", + "type": "int", + "description": "An optional maximum number of times to retry an unsuccessful run. A run\nis considered to be unsuccessful if it completes with the `FAILED`\nresult_state or `INTERNAL_ERROR` `life_cycle_state`. The value `-1` means\nto retry indefinitely and the value `0` means to never retry.", + "required": false + }, + "min_retry_interval_millis": { + "name": "min_retry_interval_millis", + "type": "int", + "description": "An optional minimal interval in milliseconds between the start of the\nfailed run and the subsequent retry run. The default behavior is that\nunsuccessful runs are immediately retried.", + "required": false + }, + "new_cluster": { + "name": "new_cluster", + "type": "*compute.ClusterSpec", + "description": "If new_cluster, a description of a new cluster that is created for each\nrun.", + "required": false + }, + "notebook_task": { + "name": "notebook_task", + "type": "*NotebookTask", + "description": "The task runs a notebook when the `notebook_task` field is present.", + "required": false + }, + "notification_settings": { + "name": "notification_settings", + "type": "*TaskNotificationSettings", + "description": "Optional notification settings that are used when sending notifications\nto each of the `email_notifications` and `webhook_notifications` for this\ntask.", + "required": false + }, + "pipeline_task": { + "name": "pipeline_task", + "type": "*PipelineTask", + "description": "The task triggers a pipeline update when the `pipeline_task` field is\npresent. 
Only pipelines configured to use triggered mode are supported.",
        "required": false
      },
      "power_bi_task": {
        "name": "power_bi_task",
        "type": "*PowerBiTask",
        "description": "The task triggers a Power BI semantic model update when the\n`power_bi_task` field is present.",
        "required": false
      },
      "python_wheel_task": {
        "name": "python_wheel_task",
        "type": "*PythonWheelTask",
        "description": "The task runs a Python wheel when the `python_wheel_task` field is\npresent.",
        "required": false
      },
      "retry_on_timeout": {
        "name": "retry_on_timeout",
        "type": "bool",
        "description": "An optional policy to specify whether to retry a job when it times out.\nThe default behavior is to not retry on timeout.",
        "required": false
      },
      "run_if": {
        "name": "run_if",
        "type": "RunIf",
        "description": "An optional value specifying the condition determining whether the task\nis run once its dependencies have been completed.\n\n* `ALL_SUCCESS`: All dependencies have executed and succeeded *\n`AT_LEAST_ONE_SUCCESS`: At least one dependency has succeeded *\n`NONE_FAILED`: None of the dependencies have failed and at least one was\nexecuted * `ALL_DONE`: All dependencies have been completed *\n`AT_LEAST_ONE_FAILED`: At least one dependency failed * `ALL_FAILED`: All\ndependencies have failed",
        "required": false
      },
      "run_job_task": {
        "name": "run_job_task",
        "type": "*RunJobTask",
        "description": "The task triggers another job when the `run_job_task` field is present.",
        "required": false
      },
      "spark_jar_task": {
        "name": "spark_jar_task",
        "type": "*SparkJarTask",
        "description": "The task runs a JAR when the `spark_jar_task` field is present.",
        "required": false
      },
      "spark_python_task": {
        "name": "spark_python_task",
        "type": "*SparkPythonTask",
        "description": "The task runs a Python file when the `spark_python_task` field is\npresent.",
        "required": false
      },
      "spark_submit_task": {
        "name": "spark_submit_task",
        "type": "*SparkSubmitTask",
        "description": "(Legacy) The task runs the spark-submit script when the spark_submit_task\nfield is present. Databricks recommends using the spark_jar_task instead;\nsee [Spark Submit task for jobs](/jobs/spark-submit).",
        "required": false
      },
      "sql_task": {
        "name": "sql_task",
        "type": "*SqlTask",
        "description": "The task runs a SQL query or file, or it refreshes a SQL alert or a\nlegacy SQL dashboard when the `sql_task` field is present.",
        "required": false
      },
      "task_key": {
        "name": "task_key",
        "type": "string",
        "description": "A unique name for the task. This field is used to refer to this task from\nother tasks. This field is required and must be unique within its parent\njob. On Update or Reset, this field is used to reference the tasks to be\nupdated or reset.",
        "required": false
      },
      "timeout_seconds": {
        "name": "timeout_seconds",
        "type": "int",
        "description": "An optional timeout applied to each run of this job task. A value of `0`\nmeans no timeout.",
        "required": false
      },
      "webhook_notifications": {
        "name": "webhook_notifications",
        "type": "*WebhookNotifications",
        "description": "A collection of system notification IDs to notify when runs of this task\nbegin or complete. 
The default behavior is to not send any system\nnotifications.", + "required": false + } + } + }, + "jobs.TaskDependency": { + "name": "TaskDependency", + "package": "jobs", + "description": "", + "fields": { + "outcome": { + "name": "outcome", + "type": "string", + "description": "Can only be specified on condition task dependencies. The outcome of the\ndependent task that must be met for this task to run.", + "required": false + }, + "task_key": { + "name": "task_key", + "type": "string", + "description": "The name of the task this task depends on.", + "required": false + } + } + }, + "jobs.TaskEmailNotifications": { + "name": "TaskEmailNotifications", + "package": "jobs", + "description": "", + "fields": { + "no_alert_for_skipped_runs": { + "name": "no_alert_for_skipped_runs", + "type": "bool", + "description": "If true, do not send email to recipients specified in `on_failure` if the\nrun is skipped. This field is `deprecated`. Please use the\n`notification_settings.no_alert_for_skipped_runs` field.", + "required": false + }, + "on_duration_warning_threshold_exceeded": { + "name": "on_duration_warning_threshold_exceeded", + "type": "[]string", + "description": "A list of email addresses to be notified when the duration of a run\nexceeds the threshold specified for the `RUN_DURATION_SECONDS` metric in\nthe `health` field. If no rule for the `RUN_DURATION_SECONDS` metric is\nspecified in the `health` field for the job, notifications are not sent.", + "required": false + }, + "on_failure": { + "name": "on_failure", + "type": "[]string", + "description": "A list of email addresses to be notified when a run unsuccessfully\ncompletes. A run is considered to have completed unsuccessfully if it\nends with an `INTERNAL_ERROR` `life_cycle_state` or a `FAILED`, or\n`TIMED_OUT` result_state. If this is not specified on job creation,\nreset, or update the list is empty, and notifications are not sent.", + "required": false + }, + "on_start": { + "name": "on_start", + "type": "[]string", + "description": "A list of email addresses to be notified when a run begins. If not\nspecified on job creation, reset, or update, the list is empty, and\nnotifications are not sent.", + "required": false + }, + "on_streaming_backlog_exceeded": { + "name": "on_streaming_backlog_exceeded", + "type": "[]string", + "description": "A list of email addresses to notify when any streaming backlog thresholds\nare exceeded for any stream. Streaming backlog thresholds can be set in\nthe `health` field using the following metrics:\n`STREAMING_BACKLOG_BYTES`, `STREAMING_BACKLOG_RECORDS`,\n`STREAMING_BACKLOG_SECONDS`, or `STREAMING_BACKLOG_FILES`. Alerting is\nbased on the 10-minute average of these metrics. If the issue persists,\nnotifications are resent every 30 minutes.", + "required": false + }, + "on_success": { + "name": "on_success", + "type": "[]string", + "description": "A list of email addresses to be notified when a run successfully\ncompletes. A run is considered to have completed successfully if it ends\nwith a `TERMINATED` `life_cycle_state` and a `SUCCESS` result_state. 
If\nnot specified on job creation, reset, or update, the list is empty, and\nnotifications are not sent.", + "required": false + } + } + }, + "jobs.TaskNotificationSettings": { + "name": "TaskNotificationSettings", + "package": "jobs", + "description": "", + "fields": { + "alert_on_last_attempt": { + "name": "alert_on_last_attempt", + "type": "bool", + "description": "If true, do not send notifications to recipients specified in `on_start`\nfor the retried runs and do not send notifications to recipients\nspecified in `on_failure` until the last retry of the run.", + "required": false + }, + "no_alert_for_canceled_runs": { + "name": "no_alert_for_canceled_runs", + "type": "bool", + "description": "If true, do not send notifications to recipients specified in\n`on_failure` if the run is canceled.", + "required": false + }, + "no_alert_for_skipped_runs": { + "name": "no_alert_for_skipped_runs", + "type": "bool", + "description": "If true, do not send notifications to recipients specified in\n`on_failure` if the run is skipped.", + "required": false + } + } + }, + "jobs.TaskRetryMode": { + "name": "TaskRetryMode", + "package": "jobs", + "description": "task retry mode of the continuous job\n* NEVER: The failed task will not be retried.\n* ON_FAILURE: Retry a failed task if at least one other task in the job is still running its first attempt.\nWhen this condition is no longer met or the retry limit is reached, the job run is cancelled and a new run is started.", + "fields": {} + }, + "jobs.TerminationDetails": { + "name": "TerminationDetails", + "package": "jobs", + "description": "", + "fields": { + "code": { + "name": "code", + "type": "TerminationCodeCode", + "description": "", + "required": false + }, + "message": { + "name": "message", + "type": "string", + "description": "A descriptive message with the termination details. 
This field is\nunstructured and the format might change.", + "required": false + }, + "type": { + "name": "type", + "type": "TerminationTypeType", + "description": "", + "required": false + } + } + }, + "jobs.TriggerInfo": { + "name": "TriggerInfo", + "package": "jobs", + "description": "Additional details about what triggered the run", + "fields": { + "run_id": { + "name": "run_id", + "type": "int64", + "description": "The run id of the Run Job task run", + "required": false + } + } + }, + "jobs.TriggerSettings": { + "name": "TriggerSettings", + "package": "jobs", + "description": "", + "fields": { + "file_arrival": { + "name": "file_arrival", + "type": "*FileArrivalTriggerConfiguration", + "description": "File arrival trigger settings.", + "required": false + }, + "model": { + "name": "model", + "type": "*ModelTriggerConfiguration", + "description": "", + "required": false + }, + "pause_status": { + "name": "pause_status", + "type": "PauseStatus", + "description": "Whether this trigger is paused or not.", + "required": false + }, + "periodic": { + "name": "periodic", + "type": "*PeriodicTriggerConfiguration", + "description": "Periodic trigger settings.", + "required": false + }, + "table_update": { + "name": "table_update", + "type": "*TableUpdateTriggerConfiguration", + "description": "", + "required": false + } + } + }, + "jobs.TriggerStateProto": { + "name": "TriggerStateProto", + "package": "jobs", + "description": "", + "fields": { + "file_arrival": { + "name": "file_arrival", + "type": "*FileArrivalTriggerState", + "description": "", + "required": false + }, + "table": { + "name": "table", + "type": "*TableTriggerState", + "description": "", + "required": false + } + } + }, + "jobs.UpdateJob": { + "name": "UpdateJob", + "package": "jobs", + "description": "", + "fields": { + "fields_to_remove": { + "name": "fields_to_remove", + "type": "[]string", + "description": "Remove top-level fields in the job settings. Removing nested fields is\nnot supported, except for tasks and job clusters (`tasks/task_1`). This\nfield is optional.", + "required": false + }, + "job_id": { + "name": "job_id", + "type": "int64", + "description": "The canonical identifier of the job to update. This field is required.", + "required": false + }, + "new_settings": { + "name": "new_settings", + "type": "*JobSettings", + "description": "The new settings for the job.\n\nTop-level fields specified in `new_settings` are completely replaced,\nexcept for arrays which are merged. That is, new and existing entries are\ncompletely replaced based on the respective key fields, i.e. `task_key`\nor `job_cluster_key`, while previous entries are kept.\n\nPartially updating nested fields is not supported.\n\nChanges to the field `JobSettings.timeout_seconds` are applied to active\nruns. Changes to other fields are applied to future runs only.", + "required": false + } + } + }, + "jobs.ViewItem": { + "name": "ViewItem", + "package": "jobs", + "description": "", + "fields": { + "content": { + "name": "content", + "type": "string", + "description": "Content of the view.", + "required": false + }, + "name": { + "name": "name", + "type": "string", + "description": "Name of the view item. In the case of code view, it would be the\nnotebook’s name. 
In the case of dashboard view, it would be the\ndashboard’s name.", + "required": false + }, + "type": { + "name": "type", + "type": "ViewType", + "description": "Type of the view item.", + "required": false + } + } + }, + "jobs.WaitGetRunJobTerminatedOrSkipped": { + "name": "WaitGetRunJobTerminatedOrSkipped", + "package": "jobs", + "description": "WaitGetRunJobTerminatedOrSkipped is a wrapper that calls [JobsAPI.WaitGetRunJobTerminatedOrSkipped] and waits to reach TERMINATED or SKIPPED state.", + "fields": { + "Poll": { + "name": "Poll", + "type": "func(time.Duration, func(*Run)) (*Run, error)", + "description": "", + "required": false + }, + "Response": { + "name": "Response", + "type": "*R", + "description": "", + "required": false + }, + "callback": { + "name": "callback", + "type": "func(*Run)", + "description": "", + "required": false + }, + "run_id": { + "name": "run_id", + "type": "int64", + "description": "", + "required": false + }, + "timeout": { + "name": "timeout", + "type": "time.Duration", + "description": "", + "required": false + } + } + }, + "jobs.Webhook": { + "name": "Webhook", + "package": "jobs", + "description": "", + "fields": { + "id": { + "name": "id", + "type": "string", + "description": "", + "required": false + } + } + }, + "jobs.WebhookNotifications": { + "name": "WebhookNotifications", + "package": "jobs", + "description": "", + "fields": { + "on_duration_warning_threshold_exceeded": { + "name": "on_duration_warning_threshold_exceeded", + "type": "[]Webhook", + "description": "An optional list of system notification IDs to call when the duration of\na run exceeds the threshold specified for the `RUN_DURATION_SECONDS`\nmetric in the `health` field. A maximum of 3 destinations can be\nspecified for the `on_duration_warning_threshold_exceeded` property.", + "required": false + }, + "on_failure": { + "name": "on_failure", + "type": "[]Webhook", + "description": "An optional list of system notification IDs to call when the run fails. A\nmaximum of 3 destinations can be specified for the `on_failure` property.", + "required": false + }, + "on_start": { + "name": "on_start", + "type": "[]Webhook", + "description": "An optional list of system notification IDs to call when the run starts.\nA maximum of 3 destinations can be specified for the `on_start` property.", + "required": false + }, + "on_streaming_backlog_exceeded": { + "name": "on_streaming_backlog_exceeded", + "type": "[]Webhook", + "description": "An optional list of system notification IDs to call when any streaming\nbacklog thresholds are exceeded for any stream. Streaming backlog\nthresholds can be set in the `health` field using the following metrics:\n`STREAMING_BACKLOG_BYTES`, `STREAMING_BACKLOG_RECORDS`,\n`STREAMING_BACKLOG_SECONDS`, or `STREAMING_BACKLOG_FILES`. Alerting is\nbased on the 10-minute average of these metrics. If the issue persists,\nnotifications are resent every 30 minutes. A maximum of 3 destinations\ncan be specified ...", + "required": false + }, + "on_success": { + "name": "on_success", + "type": "[]Webhook", + "description": "An optional list of system notification IDs to call when the run\ncompletes successfully. 
A maximum of 3 destinations can be specified for\nthe `on_success` property.", + "required": false + } + } + }, + "jobs.WidgetErrorDetail": { + "name": "WidgetErrorDetail", + "package": "jobs", + "description": "", + "fields": { + "message": { + "name": "message", + "type": "string", + "description": "", + "required": false + } + } + }, + "jobs.expandedJobsIterator": { + "name": "expandedJobsIterator", + "package": "jobs", + "description": "expandedJobsIterator is a custom iterator that for each job calls job/get in order to fetch full list of tasks and job_clusters.", + "fields": { + "originalIterator": { + "name": "originalIterator", + "type": "listing.Iterator[BaseJob]", + "description": "", + "required": false + }, + "service": { + "name": "service", + "type": "*JobsAPI", + "description": "", + "required": false + } + } + }, + "jobs.expandedRunsIterator": { + "name": "expandedRunsIterator", + "package": "jobs", + "description": "expandedRunsIterator is a custom iterator that for each run calls runs/get in order to fetch full list of tasks and job_clusters.", + "fields": { + "originalIterator": { + "name": "originalIterator", + "type": "listing.Iterator[BaseRun]", + "description": "", + "required": false + }, + "service": { + "name": "service", + "type": "*JobsAPI", + "description": "", + "required": false + } + } + }, + "jobs.jobsImpl": { + "name": "jobsImpl", + "package": "jobs", + "description": "unexported type that holds implementations of just Jobs API methods", + "fields": { + "client": { + "name": "client", + "type": "*client.DatabricksClient", + "description": "", + "required": false + } + } + }, + "jobs.policyComplianceForJobsImpl": { + "name": "policyComplianceForJobsImpl", + "package": "jobs", + "description": "unexported type that holds implementations of just PolicyComplianceForJobs API methods", + "fields": { + "client": { + "name": "client", + "type": "*client.DatabricksClient", + "description": "", + "required": false + } + } + }, + "ml.Activity": { + "name": "Activity", + "package": "ml", + "description": "For activities, this contains the activity recorded for the action. For\ncomments, this contains the comment details. For transition requests, this\ncontains the transition request details.", + "fields": { + "activity_type": { + "name": "activity_type", + "type": "ActivityType", + "description": "", + "required": false + }, + "comment": { + "name": "comment", + "type": "string", + "description": "User-provided comment associated with the activity, comment, or\ntransition request.", + "required": false + }, + "creation_timestamp": { + "name": "creation_timestamp", + "type": "int64", + "description": "Creation time of the object, as a Unix timestamp in milliseconds.", + "required": false + }, + "from_stage": { + "name": "from_stage", + "type": "string", + "description": "Source stage of the transition (if the activity is stage transition\nrelated). 
Valid values are:\n\n* `None`: The initial stage of a model version.\n\n* `Staging`: Staging or pre-production stage.\n\n* `Production`: Production stage.\n\n* `Archived`: Archived stage.", + "required": false + }, + "id": { + "name": "id", + "type": "string", + "description": "Unique identifier for the object.", + "required": false + }, + "last_updated_timestamp": { + "name": "last_updated_timestamp", + "type": "int64", + "description": "Time of the object at last update, as a Unix timestamp in milliseconds.", + "required": false + }, + "system_comment": { + "name": "system_comment", + "type": "string", + "description": "Comment made by system, for example explaining an activity of type\n`SYSTEM_TRANSITION`. It usually describes a side effect, such as a\nversion being archived as part of another version's stage transition, and\nmay not be returned for some activity types.", + "required": false + }, + "to_stage": { + "name": "to_stage", + "type": "string", + "description": "Target stage of the transition (if the activity is stage transition\nrelated). Valid values are:\n\n* `None`: The initial stage of a model version.\n\n* `Staging`: Staging or pre-production stage.\n\n* `Production`: Production stage.\n\n* `Archived`: Archived stage.", + "required": false + }, + "user_id": { + "name": "user_id", + "type": "string", + "description": "The username of the user that created the object.", + "required": false + } + } + }, + "ml.ApproveTransitionRequest": { + "name": "ApproveTransitionRequest", + "package": "ml", + "description": "Details required to identify and approve a model version stage transition\nrequest.", + "fields": { + "archive_existing_versions": { + "name": "archive_existing_versions", + "type": "bool", + "description": "Specifies whether to archive all current model versions in the target\nstage.", + "required": false + }, + "comment": { + "name": "comment", + "type": "string", + "description": "User-provided comment on the action.", + "required": false + }, + "name": { + "name": "name", + "type": "string", + "description": "Name of the model.", + "required": false + }, + "stage": { + "name": "stage", + "type": "string", + "description": "Target stage of the transition. Valid values are:\n\n* `None`: The initial stage of a model version.\n\n* `Staging`: Staging or pre-production stage.\n\n* `Production`: Production stage.\n\n* `Archived`: Archived stage.", + "required": false + }, + "version": { + "name": "version", + "type": "string", + "description": "Version of the model.", + "required": false + } + } + }, + "ml.ApproveTransitionRequestResponse": { + "name": "ApproveTransitionRequestResponse", + "package": "ml", + "description": "", + "fields": { + "activity": { + "name": "activity", + "type": "*Activity", + "description": "New activity generated as a result of this operation.", + "required": false + } + } + }, + "ml.AuthConfig": { + "name": "AuthConfig", + "package": "ml", + "description": "", + "fields": { + "uc_service_credential_name": { + "name": "uc_service_credential_name", + "type": "string", + "description": "Name of the Unity Catalog service credential. 
This value will be set\nunder the option databricks.serviceCredential", + "required": false + } + } + }, + "ml.BatchCreateMaterializedFeaturesRequest": { + "name": "BatchCreateMaterializedFeaturesRequest", + "package": "ml", + "description": "", + "fields": { + "requests": { + "name": "requests", + "type": "[]CreateMaterializedFeatureRequest", + "description": "The requests to create materialized features.", + "required": false + } + } + }, + "ml.BatchCreateMaterializedFeaturesResponse": { + "name": "BatchCreateMaterializedFeaturesResponse", + "package": "ml", + "description": "", + "fields": { + "materialized_features": { + "name": "materialized_features", + "type": "[]MaterializedFeature", + "description": "The created materialized features with assigned IDs.", + "required": false + } + } + }, + "ml.ColumnIdentifier": { + "name": "ColumnIdentifier", + "package": "ml", + "description": "", + "fields": { + "variant_expr_path": { + "name": "variant_expr_path", + "type": "string", + "description": "String representation of the column name or variant expression path. For\nnested fields, the leaf value is what will be present in materialized\ntables and expected to match at query time. For example, the leaf node of\nvalue:trip_details.location_details.pickup_zip is pickup_zip.", + "required": false + } + } + }, + "ml.CommentObject": { + "name": "CommentObject", + "package": "ml", + "description": "For activities, this contains the activity recorded for the action. For\ncomments, this contains the comment details. For transition requests, this\ncontains the transition request details.", + "fields": { + "available_actions": { + "name": "available_actions", + "type": "[]CommentActivityAction", + "description": "Array of actions on the activity allowed for the current viewer.", + "required": false + }, + "comment": { + "name": "comment", + "type": "string", + "description": "User-provided comment associated with the activity, comment, or\ntransition request.", + "required": false + }, + "creation_timestamp": { + "name": "creation_timestamp", + "type": "int64", + "description": "Creation time of the object, as a Unix timestamp in milliseconds.", + "required": false + }, + "id": { + "name": "id", + "type": "string", + "description": "Unique identifier for the object.", + "required": false + }, + "last_updated_timestamp": { + "name": "last_updated_timestamp", + "type": "int64", + "description": "Time of the object at last update, as a Unix timestamp in milliseconds.", + "required": false + }, + "user_id": { + "name": "user_id", + "type": "string", + "description": "The username of the user that created the object.", + "required": false + } + } + }, + "ml.ContinuousWindow": { + "name": "ContinuousWindow", + "package": "ml", + "description": "", + "fields": { + "offset": { + "name": "offset", + "type": "string", + "description": "The offset of the continuous window (must be non-positive).", + "required": false + }, + "window_duration": { + "name": "window_duration", + "type": "string", + "description": "The duration of the continuous window (must be positive).", + "required": false + } + } + }, + "ml.CreateComment": { + "name": "CreateComment", + "package": "ml", + "description": "Details required to create a comment on a model version.", + "fields": { + "comment": { + "name": "comment", + "type": "string", + "description": "User-provided comment on the action.", + "required": false + }, + "name": { + "name": "name", + "type": "string", + "description": "Name of the model.", + "required": false + }, + 
"version": { + "name": "version", + "type": "string", + "description": "Version of the model.", + "required": false + } + } + }, + "ml.CreateCommentResponse": { + "name": "CreateCommentResponse", + "package": "ml", + "description": "", + "fields": { + "comment": { + "name": "comment", + "type": "*CommentObject", + "description": "New comment object", + "required": false + } + } + }, + "ml.CreateExperiment": { + "name": "CreateExperiment", + "package": "ml", + "description": "", + "fields": { + "artifact_location": { + "name": "artifact_location", + "type": "string", + "description": "Location where all artifacts for the experiment are stored. If not\nprovided, the remote server will select an appropriate default.", + "required": false + }, + "name": { + "name": "name", + "type": "string", + "description": "Experiment name.", + "required": false + }, + "tags": { + "name": "tags", + "type": "[]ExperimentTag", + "description": "A collection of tags to set on the experiment. Maximum tag size and\nnumber of tags per request depends on the storage backend. All storage\nbackends are guaranteed to support tag keys up to 250 bytes in size and\ntag values up to 5000 bytes in size. All storage backends are also\nguaranteed to support up to 20 tags per request.", + "required": false + } + } + }, + "ml.CreateExperimentResponse": { + "name": "CreateExperimentResponse", + "package": "ml", + "description": "", + "fields": { + "experiment_id": { + "name": "experiment_id", + "type": "string", + "description": "Unique identifier for the experiment.", + "required": false + } + } + }, + "ml.CreateFeatureRequest": { + "name": "CreateFeatureRequest", + "package": "ml", + "description": "", + "fields": { + "feature": { + "name": "feature", + "type": "Feature", + "description": "Feature to create.", + "required": false + } + } + }, + "ml.CreateFeatureTagRequest": { + "name": "CreateFeatureTagRequest", + "package": "ml", + "description": "", + "fields": { + "feature_tag": { + "name": "feature_tag", + "type": "FeatureTag", + "description": "", + "required": false + } + } + }, + "ml.CreateForecastingExperimentRequest": { + "name": "CreateForecastingExperimentRequest", + "package": "ml", + "description": "", + "fields": { + "custom_weights_column": { + "name": "custom_weights_column", + "type": "string", + "description": "The column in the training table used to customize weights for each time\nseries.", + "required": false + }, + "experiment_path": { + "name": "experiment_path", + "type": "string", + "description": "The path in the workspace to store the created experiment.", + "required": false + }, + "forecast_granularity": { + "name": "forecast_granularity", + "type": "string", + "description": "The time interval between consecutive rows in the time series data.\nPossible values include: '1 second', '1 minute', '5 minutes', '10\nminutes', '15 minutes', '30 minutes', 'Hourly', 'Daily', 'Weekly',\n'Monthly', 'Quarterly', 'Yearly'.", + "required": false + }, + "forecast_horizon": { + "name": "forecast_horizon", + "type": "int64", + "description": "The number of time steps into the future to make predictions, calculated\nas a multiple of forecast_granularity. 
This value represents how far\nahead the model should forecast.", +     "required": false +    }, +    "future_feature_data_path": { +     "name": "future_feature_data_path", +     "type": "string", +     "description": "The fully qualified path of a Unity Catalog table, formatted as\ncatalog_name.schema_name.table_name, used to store future feature data\nfor predictions.", +     "required": false +    }, +    "holiday_regions": { +     "name": "holiday_regions", +     "type": "[]string", +     "description": "The region code(s) to automatically add holiday features. Currently\nsupports only one region.", +     "required": false +    }, +    "include_features": { +     "name": "include_features", +     "type": "[]string", +     "description": "Specifies the list of feature columns to include in model training. These\ncolumns must exist in the training data and be of type string, numerical,\nor boolean. If not specified, no additional features will be included.\nNote: Certain columns are automatically handled: - Automatically\nexcluded: split_column, target_column, custom_weights_column. -\nAutomatically included: time_column.", +     "required": false +    }, +    "max_runtime": { +     "name": "max_runtime", +     "type": "int64", +     "description": "The maximum duration for the experiment in minutes. The experiment stops\nautomatically if it exceeds this limit.", +     "required": false +    }, +    "prediction_data_path": { +     "name": "prediction_data_path", +     "type": "string", +     "description": "The fully qualified path of a Unity Catalog table, formatted as\ncatalog_name.schema_name.table_name, used to store predictions.", +     "required": false +    }, +    "primary_metric": { +     "name": "primary_metric", +     "type": "string", +     "description": "The evaluation metric used to optimize the forecasting model.", +     "required": false +    }, +    "register_to": { +     "name": "register_to", +     "type": "string", +     "description": "The fully qualified path of a Unity Catalog model, formatted as\ncatalog_name.schema_name.model_name, used to store the best model.", +     "required": false +    }, +    "split_column": { +     "name": "split_column", +     "type": "string", +     "description": "The column in the training table used for custom data splits. Values\nmust be 'train', 'validate', or 'test'.", +     "required": false +    }, +    "target_column": { +     "name": "target_column", +     "type": "string", +     "description": "The column in the input training table used as the prediction target for\nmodel training. The values in this column are used as the ground truth\nfor model training.", +     "required": false +    }, +    "time_column": { +     "name": "time_column", +     "type": "string", +     "description": "The column in the input training table that represents each row's\ntimestamp.", +     "required": false +    }, +    "timeseries_identifier_columns": { +     "name": "timeseries_identifier_columns", +     "type": "[]string", +     "description": "The column in the training table used to group the dataset for predicting\nindividual time series.", +     "required": false +    }, +    "train_data_path": { +     "name": "train_data_path", +     "type": "string", +     "description": "The fully qualified path of a Unity Catalog table, formatted as\ncatalog_name.schema_name.table_name, used as training data for the\nforecasting model.", +     "required": false +    }, +    "training_frameworks": { +     "name": "training_frameworks", +     "type": "[]string", +     "description": "List of frameworks to include for model tuning. Possible values are\n'Prophet', 'ARIMA', 'DeepAR'. 
An empty list includes all supported\nframeworks.", + "required": false + } + } + }, + "ml.CreateForecastingExperimentResponse": { + "name": "CreateForecastingExperimentResponse", + "package": "ml", + "description": "", + "fields": { + "experiment_id": { + "name": "experiment_id", + "type": "string", + "description": "The unique ID of the created forecasting experiment", + "required": false + } + } + }, + "ml.CreateKafkaConfigRequest": { + "name": "CreateKafkaConfigRequest", + "package": "ml", + "description": "", + "fields": { + "kafka_config": { + "name": "kafka_config", + "type": "KafkaConfig", + "description": "", + "required": false + } + } + }, + "ml.CreateLoggedModelRequest": { + "name": "CreateLoggedModelRequest", + "package": "ml", + "description": "", + "fields": { + "experiment_id": { + "name": "experiment_id", + "type": "string", + "description": "The ID of the experiment that owns the model.", + "required": false + }, + "model_type": { + "name": "model_type", + "type": "string", + "description": "The type of the model, such as ``\"Agent\"``, ``\"Classifier\"``, ``\"LLM\"``.", + "required": false + }, + "name": { + "name": "name", + "type": "string", + "description": "The name of the model (optional). If not specified one will be generated.", + "required": false + }, + "params": { + "name": "params", + "type": "[]LoggedModelParameter", + "description": "Parameters attached to the model.", + "required": false + }, + "source_run_id": { + "name": "source_run_id", + "type": "string", + "description": "The ID of the run that created the model.", + "required": false + }, + "tags": { + "name": "tags", + "type": "[]LoggedModelTag", + "description": "Tags attached to the model.", + "required": false + } + } + }, + "ml.CreateLoggedModelResponse": { + "name": "CreateLoggedModelResponse", + "package": "ml", + "description": "", + "fields": { + "model": { + "name": "model", + "type": "*LoggedModel", + "description": "The newly created logged model.", + "required": false + } + } + }, + "ml.CreateMaterializedFeatureRequest": { + "name": "CreateMaterializedFeatureRequest", + "package": "ml", + "description": "", + "fields": { + "materialized_feature": { + "name": "materialized_feature", + "type": "MaterializedFeature", + "description": "The materialized feature to create.", + "required": false + } + } + }, + "ml.CreateModelRequest": { + "name": "CreateModelRequest", + "package": "ml", + "description": "", + "fields": { + "description": { + "name": "description", + "type": "string", + "description": "Optional description for registered model.", + "required": false + }, + "name": { + "name": "name", + "type": "string", + "description": "Register models under this name", + "required": false + }, + "tags": { + "name": "tags", + "type": "[]ModelTag", + "description": "Additional metadata for registered model.", + "required": false + } + } + }, + "ml.CreateModelResponse": { + "name": "CreateModelResponse", + "package": "ml", + "description": "", + "fields": { + "registered_model": { + "name": "registered_model", + "type": "*Model", + "description": "", + "required": false + } + } + }, + "ml.CreateModelVersionRequest": { + "name": "CreateModelVersionRequest", + "package": "ml", + "description": "", + "fields": { + "description": { + "name": "description", + "type": "string", + "description": "Optional description for model version.", + "required": false + }, + "name": { + "name": "name", + "type": "string", + "description": "Register model under this name", + "required": false + }, + "run_id": { + 
"name": "run_id", + "type": "string", + "description": "MLflow run ID for correlation, if `source` was generated by an experiment\nrun in MLflow tracking server", + "required": false + }, + "run_link": { + "name": "run_link", + "type": "string", + "description": "MLflow run link - this is the exact link of the run that generated this\nmodel version, potentially hosted at another instance of MLflow.", + "required": false + }, + "source": { + "name": "source", + "type": "string", + "description": "URI indicating the location of the model artifacts.", + "required": false + }, + "tags": { + "name": "tags", + "type": "[]ModelVersionTag", + "description": "Additional metadata for model version.", + "required": false + } + } + }, + "ml.CreateModelVersionResponse": { + "name": "CreateModelVersionResponse", + "package": "ml", + "description": "", + "fields": { + "model_version": { + "name": "model_version", + "type": "*ModelVersion", + "description": "Return new version number generated for this model in registry.", + "required": false + } + } + }, + "ml.CreateOnlineStoreRequest": { + "name": "CreateOnlineStoreRequest", + "package": "ml", + "description": "", + "fields": { + "online_store": { + "name": "online_store", + "type": "OnlineStore", + "description": "Online store to create.", + "required": false + } + } + }, + "ml.CreateRegistryWebhook": { + "name": "CreateRegistryWebhook", + "package": "ml", + "description": "Details required to create a registry webhook.", + "fields": { + "description": { + "name": "description", + "type": "string", + "description": "User-specified description for the webhook.", + "required": false + }, + "events": { + "name": "events", + "type": "[]RegistryWebhookEvent", + "description": "Events that can trigger a registry webhook: * `MODEL_VERSION_CREATED`: A\nnew model version was created for the associated model.\n\n* `MODEL_VERSION_TRANSITIONED_STAGE`: A model version’s stage was\nchanged.\n\n* `TRANSITION_REQUEST_CREATED`: A user requested a model version’s\nstage be transitioned.\n\n* `COMMENT_CREATED`: A user wrote a comment on a registered model.\n\n* `REGISTERED_MODEL_CREATED`: A new registered model was created. This\nevent type can only be specified for a registry-wide webh...", + "required": false + }, + "http_url_spec": { + "name": "http_url_spec", + "type": "*HttpUrlSpec", + "description": "External HTTPS URL called on event trigger (by using a POST request).", + "required": false + }, + "job_spec": { + "name": "job_spec", + "type": "*JobSpec", + "description": "ID of the job that the webhook runs.", + "required": false + }, + "model_name": { + "name": "model_name", + "type": "string", + "description": "If model name is not specified, a registry-wide webhook is created that\nlistens for the specified events across all versions of all registered\nmodels.", + "required": false + }, + "status": { + "name": "status", + "type": "RegistryWebhookStatus", + "description": "Enable or disable triggering the webhook, or put the webhook into test\nmode. 
The default is `ACTIVE`: * `ACTIVE`: Webhook is triggered when an\nassociated event happens.\n\n* `DISABLED`: Webhook is not triggered.\n\n* `TEST_MODE`: Webhook can be triggered through the test endpoint, but is\nnot triggered on a real event.", + "required": false + } + } + }, + "ml.CreateRun": { + "name": "CreateRun", + "package": "ml", + "description": "", + "fields": { + "experiment_id": { + "name": "experiment_id", + "type": "string", + "description": "ID of the associated experiment.", + "required": false + }, + "run_name": { + "name": "run_name", + "type": "string", + "description": "The name of the run.", + "required": false + }, + "start_time": { + "name": "start_time", + "type": "int64", + "description": "Unix timestamp in milliseconds of when the run started.", + "required": false + }, + "tags": { + "name": "tags", + "type": "[]RunTag", + "description": "Additional metadata for run.", + "required": false + }, + "user_id": { + "name": "user_id", + "type": "string", + "description": "ID of the user executing the run. This field is deprecated as of MLflow\n1.0, and will be removed in a future MLflow release. Use 'mlflow.user'\ntag instead.", + "required": false + } + } + }, + "ml.CreateRunResponse": { + "name": "CreateRunResponse", + "package": "ml", + "description": "", + "fields": { + "run": { + "name": "run", + "type": "*Run", + "description": "The newly created run.", + "required": false + } + } + }, + "ml.CreateTransitionRequest": { + "name": "CreateTransitionRequest", + "package": "ml", + "description": "Details required to create a model version stage transition request.", + "fields": { + "comment": { + "name": "comment", + "type": "string", + "description": "User-provided comment on the action.", + "required": false + }, + "name": { + "name": "name", + "type": "string", + "description": "Name of the model.", + "required": false + }, + "stage": { + "name": "stage", + "type": "string", + "description": "Target stage of the transition. Valid values are:\n\n* `None`: The initial stage of a model version.\n\n* `Staging`: Staging or pre-production stage.\n\n* `Production`: Production stage.\n\n* `Archived`: Archived stage.", + "required": false + }, + "version": { + "name": "version", + "type": "string", + "description": "Version of the model.", + "required": false + } + } + }, + "ml.CreateTransitionRequestResponse": { + "name": "CreateTransitionRequestResponse", + "package": "ml", + "description": "", + "fields": { + "request": { + "name": "request", + "type": "*TransitionRequest", + "description": "New activity generated for stage transition request.", + "required": false + } + } + }, + "ml.CreateWebhookResponse": { + "name": "CreateWebhookResponse", + "package": "ml", + "description": "", + "fields": { + "webhook": { + "name": "webhook", + "type": "*RegistryWebhook", + "description": "", + "required": false + } + } + }, + "ml.DataSource": { + "name": "DataSource", + "package": "ml", + "description": "", + "fields": { + "delta_table_source": { + "name": "delta_table_source", + "type": "*DeltaTableSource", + "description": "", + "required": false + }, + "kafka_source": { + "name": "kafka_source", + "type": "*KafkaSource", + "description": "", + "required": false + } + } + }, + "ml.Dataset": { + "name": "Dataset", + "package": "ml", + "description": "Dataset. Represents a reference to data used for training, testing, or\nevaluation during the model development process.", + "fields": { + "digest": { + "name": "digest", + "type": "string", + "description": "Dataset digest, e.g. 
an md5 hash of the dataset that uniquely identifies\nit within datasets of the same name.", + "required": false + }, + "name": { + "name": "name", + "type": "string", + "description": "The name of the dataset. E.g. “my.uc.table@2” “nyc-taxi-dataset”,\n“fantastic-elk-3”", + "required": false + }, + "profile": { + "name": "profile", + "type": "string", + "description": "The profile of the dataset. Summary statistics for the dataset, such as\nthe number of rows in a table, the mean / std / mode of each column in a\ntable, or the number of elements in an array.", + "required": false + }, + "schema": { + "name": "schema", + "type": "string", + "description": "The schema of the dataset. E.g., MLflow ColSpec JSON for a dataframe,\nMLflow TensorSpec JSON for an ndarray, or another schema format.", + "required": false + }, + "source": { + "name": "source", + "type": "string", + "description": "Source information for the dataset. Note that the source may not exactly\nreproduce the dataset if it was transformed / modified before use with\nMLflow.", + "required": false + }, + "source_type": { + "name": "source_type", + "type": "string", + "description": "The type of the dataset source, e.g. ‘databricks-uc-table’,\n‘DBFS’, ‘S3’, ...", + "required": false + } + } + }, + "ml.DatasetInput": { + "name": "DatasetInput", + "package": "ml", + "description": "DatasetInput. Represents a dataset and input tags.", + "fields": { + "dataset": { + "name": "dataset", + "type": "Dataset", + "description": "The dataset being used as a Run input.", + "required": false + }, + "tags": { + "name": "tags", + "type": "[]InputTag", + "description": "A list of tags for the dataset input, e.g. a “context” tag with value\n“training”", + "required": false + } + } + }, + "ml.DeleteExperiment": { + "name": "DeleteExperiment", + "package": "ml", + "description": "", + "fields": { + "experiment_id": { + "name": "experiment_id", + "type": "string", + "description": "ID of the associated experiment.", + "required": false + } + } + }, + "ml.DeleteRun": { + "name": "DeleteRun", + "package": "ml", + "description": "", + "fields": { + "run_id": { + "name": "run_id", + "type": "string", + "description": "ID of the run to delete.", + "required": false + } + } + }, + "ml.DeleteRuns": { + "name": "DeleteRuns", + "package": "ml", + "description": "", + "fields": { + "experiment_id": { + "name": "experiment_id", + "type": "string", + "description": "The ID of the experiment containing the runs to delete.", + "required": false + }, + "max_runs": { + "name": "max_runs", + "type": "int", + "description": "An optional positive integer indicating the maximum number of runs to\ndelete. The maximum allowed value for max_runs is 10000.", + "required": false + }, + "max_timestamp_millis": { + "name": "max_timestamp_millis", + "type": "int64", + "description": "The maximum creation timestamp in milliseconds since the UNIX epoch for\ndeleting runs. Only runs created prior to or at this timestamp are\ndeleted.", + "required": false + } + } + }, + "ml.DeleteRunsResponse": { + "name": "DeleteRunsResponse", + "package": "ml", + "description": "", + "fields": { + "runs_deleted": { + "name": "runs_deleted", + "type": "int", + "description": "The number of runs deleted.", + "required": false + } + } + }, + "ml.DeleteTag": { + "name": "DeleteTag", + "package": "ml", + "description": "", + "fields": { + "key": { + "name": "key", + "type": "string", + "description": "Name of the tag. Maximum size is 255 bytes. 
Must be provided.", + "required": false + }, + "run_id": { + "name": "run_id", + "type": "string", + "description": "ID of the run that the tag was logged under. Must be provided.", + "required": false + } + } + }, + "ml.DeleteTransitionRequestResponse": { + "name": "DeleteTransitionRequestResponse", + "package": "ml", + "description": "", + "fields": { + "activity": { + "name": "activity", + "type": "*Activity", + "description": "New activity generated as a result of this operation.", + "required": false + } + } + }, + "ml.DeltaTableSource": { + "name": "DeltaTableSource", + "package": "ml", + "description": "", + "fields": { + "entity_columns": { + "name": "entity_columns", + "type": "[]string", + "description": "The entity columns of the Delta table.", + "required": false + }, + "full_name": { + "name": "full_name", + "type": "string", + "description": "The full three-part (catalog, schema, table) name of the Delta table.", + "required": false + }, + "timeseries_column": { + "name": "timeseries_column", + "type": "string", + "description": "The timeseries column of the Delta table.", + "required": false + } + } + }, + "ml.Experiment": { + "name": "Experiment", + "package": "ml", + "description": "An experiment and its metadata.", + "fields": { + "artifact_location": { + "name": "artifact_location", + "type": "string", + "description": "Location where artifacts for the experiment are stored.", + "required": false + }, + "creation_time": { + "name": "creation_time", + "type": "int64", + "description": "Creation time", + "required": false + }, + "experiment_id": { + "name": "experiment_id", + "type": "string", + "description": "Unique identifier for the experiment.", + "required": false + }, + "last_update_time": { + "name": "last_update_time", + "type": "int64", + "description": "Last update time", + "required": false + }, + "lifecycle_stage": { + "name": "lifecycle_stage", + "type": "string", + "description": "Current life cycle stage of the experiment: \"active\" or \"deleted\".\nDeleted experiments are not returned by APIs.", + "required": false + }, + "name": { + "name": "name", + "type": "string", + "description": "Human readable name that identifies the experiment.", + "required": false + }, + "tags": { + "name": "tags", + "type": "[]ExperimentTag", + "description": "Tags: Additional metadata key-value pairs.", + "required": false + } + } + }, + "ml.ExperimentAccessControlRequest": { + "name": "ExperimentAccessControlRequest", + "package": "ml", + "description": "", + "fields": { + "group_name": { + "name": "group_name", + "type": "string", + "description": "name of the group", + "required": false + }, + "permission_level": { + "name": "permission_level", + "type": "ExperimentPermissionLevel", + "description": "", + "required": false + }, + "service_principal_name": { + "name": "service_principal_name", + "type": "string", + "description": "application ID of a service principal", + "required": false + }, + "user_name": { + "name": "user_name", + "type": "string", + "description": "name of the user", + "required": false + } + } + }, + "ml.ExperimentAccessControlResponse": { + "name": "ExperimentAccessControlResponse", + "package": "ml", + "description": "", + "fields": { + "all_permissions": { + "name": "all_permissions", + "type": "[]ExperimentPermission", + "description": "All permissions.", + "required": false + }, + "display_name": { + "name": "display_name", + "type": "string", + "description": "Display name of the user or service principal.", + "required": false + }, + 
"group_name": { + "name": "group_name", + "type": "string", + "description": "name of the group", + "required": false + }, + "service_principal_name": { + "name": "service_principal_name", + "type": "string", + "description": "Name of the service principal.", + "required": false + }, + "user_name": { + "name": "user_name", + "type": "string", + "description": "name of the user", + "required": false + } + } + }, + "ml.ExperimentPermission": { + "name": "ExperimentPermission", + "package": "ml", + "description": "", + "fields": { + "inherited": { + "name": "inherited", + "type": "bool", + "description": "", + "required": false + }, + "inherited_from_object": { + "name": "inherited_from_object", + "type": "[]string", + "description": "", + "required": false + }, + "permission_level": { + "name": "permission_level", + "type": "ExperimentPermissionLevel", + "description": "", + "required": false + } + } + }, + "ml.ExperimentPermissions": { + "name": "ExperimentPermissions", + "package": "ml", + "description": "", + "fields": { + "access_control_list": { + "name": "access_control_list", + "type": "[]ExperimentAccessControlResponse", + "description": "", + "required": false + }, + "object_id": { + "name": "object_id", + "type": "string", + "description": "", + "required": false + }, + "object_type": { + "name": "object_type", + "type": "string", + "description": "", + "required": false + } + } + }, + "ml.ExperimentPermissionsDescription": { + "name": "ExperimentPermissionsDescription", + "package": "ml", + "description": "", + "fields": { + "description": { + "name": "description", + "type": "string", + "description": "", + "required": false + }, + "permission_level": { + "name": "permission_level", + "type": "ExperimentPermissionLevel", + "description": "", + "required": false + } + } + }, + "ml.ExperimentPermissionsRequest": { + "name": "ExperimentPermissionsRequest", + "package": "ml", + "description": "", + "fields": { + "access_control_list": { + "name": "access_control_list", + "type": "[]ExperimentAccessControlRequest", + "description": "", + "required": false + } + } + }, + "ml.ExperimentTag": { + "name": "ExperimentTag", + "package": "ml", + "description": "A tag for an experiment.", + "fields": { + "key": { + "name": "key", + "type": "string", + "description": "The tag key.", + "required": false + }, + "value": { + "name": "value", + "type": "string", + "description": "The tag value.", + "required": false + } + } + }, + "ml.Feature": { + "name": "Feature", + "package": "ml", + "description": "", + "fields": { + "description": { + "name": "description", + "type": "string", + "description": "The description of the feature.", + "required": false + }, + "filter_condition": { + "name": "filter_condition", + "type": "string", + "description": "The filter condition applied to the source data before aggregation.", + "required": false + }, + "full_name": { + "name": "full_name", + "type": "string", + "description": "The full three-part name (catalog, schema, name) of the feature.", + "required": false + }, + "function": { + "name": "function", + "type": "Function", + "description": "The function by which the feature is computed.", + "required": false + }, + "inputs": { + "name": "inputs", + "type": "[]string", + "description": "The input columns from which the feature is computed.", + "required": false + }, + "lineage_context": { + "name": "lineage_context", + "type": "*LineageContext", + "description": "WARNING: This field is primarily intended for internal use by Databricks\nsystems and is 
automatically populated when features are created through\nDatabricks notebooks or jobs. Users should not manually set this field as\nincorrect values may lead to inaccurate lineage tracking or unexpected\nbehavior. This field will be set by feature-engineering client and should\nbe left unset by SDK and terraform users.", + "required": false + }, + "source": { + "name": "source", + "type": "DataSource", + "description": "The data source of the feature.", + "required": false + }, + "time_window": { + "name": "time_window", + "type": "*TimeWindow", + "description": "The time window in which the feature is computed.", + "required": false + } + } + }, + "ml.FeatureLineage": { + "name": "FeatureLineage", + "package": "ml", + "description": "", + "fields": { + "feature_specs": { + "name": "feature_specs", + "type": "[]FeatureLineageFeatureSpec", + "description": "List of feature specs that contain this feature.", + "required": false + }, + "models": { + "name": "models", + "type": "[]FeatureLineageModel", + "description": "List of Unity Catalog models that were trained on this feature.", + "required": false + }, + "online_features": { + "name": "online_features", + "type": "[]FeatureLineageOnlineFeature", + "description": "List of online features that use this feature as source.", + "required": false + } + } + }, + "ml.FeatureLineageFeatureSpec": { + "name": "FeatureLineageFeatureSpec", + "package": "ml", + "description": "", + "fields": { + "name": { + "name": "name", + "type": "string", + "description": "The full name of the feature spec in Unity Catalog.", + "required": false + } + } + }, + "ml.FeatureLineageModel": { + "name": "FeatureLineageModel", + "package": "ml", + "description": "", + "fields": { + "name": { + "name": "name", + "type": "string", + "description": "The full name of the model in Unity Catalog.", + "required": false + }, + "version": { + "name": "version", + "type": "int64", + "description": "The version of the model.", + "required": false + } + } + }, + "ml.FeatureLineageOnlineFeature": { + "name": "FeatureLineageOnlineFeature", + "package": "ml", + "description": "", + "fields": { + "feature_name": { + "name": "feature_name", + "type": "string", + "description": "The name of the online feature (column name).", + "required": false + }, + "table_name": { + "name": "table_name", + "type": "string", + "description": "The full name of the online table in Unity Catalog.", + "required": false + } + } + }, + "ml.FeatureList": { + "name": "FeatureList", + "package": "ml", + "description": "Feature list wrap all the features for a model version", + "fields": { + "features": { + "name": "features", + "type": "[]LinkedFeature", + "description": "", + "required": false + } + } + }, + "ml.FeatureTag": { + "name": "FeatureTag", + "package": "ml", + "description": "Represents a tag on a feature in a feature table.", + "fields": { + "key": { + "name": "key", + "type": "string", + "description": "", + "required": false + }, + "value": { + "name": "value", + "type": "string", + "description": "", + "required": false + } + } + }, + "ml.FileInfo": { + "name": "FileInfo", + "package": "ml", + "description": "Metadata of a single artifact file or directory.", + "fields": { + "file_size": { + "name": "file_size", + "type": "int64", + "description": "The size in bytes of the file. 
Unset for directories.", + "required": false + }, + "is_dir": { + "name": "is_dir", + "type": "bool", + "description": "Whether the path is a directory.", + "required": false + }, + "path": { + "name": "path", + "type": "string", + "description": "The path relative to the root artifact directory run.", + "required": false + } + } + }, + "ml.FinalizeLoggedModelRequest": { + "name": "FinalizeLoggedModelRequest", + "package": "ml", + "description": "", + "fields": { + "status": { + "name": "status", + "type": "LoggedModelStatus", + "description": "Whether or not the model is ready for use.\n``\"LOGGED_MODEL_UPLOAD_FAILED\"`` indicates that something went wrong when\nlogging the model weights / agent code.", + "required": false + } + } + }, + "ml.FinalizeLoggedModelResponse": { + "name": "FinalizeLoggedModelResponse", + "package": "ml", + "description": "", + "fields": { + "model": { + "name": "model", + "type": "*LoggedModel", + "description": "The updated logged model.", + "required": false + } + } + }, + "ml.ForecastingExperiment": { + "name": "ForecastingExperiment", + "package": "ml", + "description": "Represents a forecasting experiment with its unique identifier, URL, and\nstate.", + "fields": { + "experiment_id": { + "name": "experiment_id", + "type": "string", + "description": "The unique ID for the forecasting experiment.", + "required": false + }, + "experiment_page_url": { + "name": "experiment_page_url", + "type": "string", + "description": "The URL to the forecasting experiment page.", + "required": false + }, + "state": { + "name": "state", + "type": "ForecastingExperimentState", + "description": "The current state of the forecasting experiment.", + "required": false + } + } + }, + "ml.Function": { + "name": "Function", + "package": "ml", + "description": "", + "fields": { + "extra_parameters": { + "name": "extra_parameters", + "type": "[]FunctionExtraParameter", + "description": "Extra parameters for parameterized functions.", + "required": false + }, + "function_type": { + "name": "function_type", + "type": "FunctionFunctionType", + "description": "The type of the function.", + "required": false + } + } + }, + "ml.FunctionExtraParameter": { + "name": "FunctionExtraParameter", + "package": "ml", + "description": "", + "fields": { + "key": { + "name": "key", + "type": "string", + "description": "The name of the parameter.", + "required": false + }, + "value": { + "name": "value", + "type": "string", + "description": "The value of the parameter.", + "required": false + } + } + }, + "ml.GetExperimentByNameResponse": { + "name": "GetExperimentByNameResponse", + "package": "ml", + "description": "", + "fields": { + "experiment": { + "name": "experiment", + "type": "*Experiment", + "description": "Experiment details.", + "required": false + } + } + }, + "ml.GetExperimentPermissionLevelsResponse": { + "name": "GetExperimentPermissionLevelsResponse", + "package": "ml", + "description": "", + "fields": { + "permission_levels": { + "name": "permission_levels", + "type": "[]ExperimentPermissionsDescription", + "description": "Specific permission levels", + "required": false + } + } + }, + "ml.GetExperimentResponse": { + "name": "GetExperimentResponse", + "package": "ml", + "description": "", + "fields": { + "experiment": { + "name": "experiment", + "type": "*Experiment", + "description": "Experiment details.", + "required": false + } + } + }, + "ml.GetLatestVersionsRequest": { + "name": "GetLatestVersionsRequest", + "package": "ml", + "description": "", + "fields": { + "name": { + "name": 
"name", + "type": "string", + "description": "Registered model unique name identifier.", + "required": false + }, + "stages": { + "name": "stages", + "type": "[]string", + "description": "List of stages.", + "required": false + } + } + }, + "ml.GetLatestVersionsResponse": { + "name": "GetLatestVersionsResponse", + "package": "ml", + "description": "", + "fields": { + "model_versions": { + "name": "model_versions", + "type": "[]ModelVersion", + "description": "Latest version models for each requests stage. Only return models with\ncurrent `READY` status. If no `stages` provided, returns the latest\nversion for each stage, including `\"None\"`.", + "required": false + } + } + }, + "ml.GetLoggedModelResponse": { + "name": "GetLoggedModelResponse", + "package": "ml", + "description": "", + "fields": { + "model": { + "name": "model", + "type": "*LoggedModel", + "description": "The retrieved logged model.", + "required": false + } + } + }, + "ml.GetMetricHistoryResponse": { + "name": "GetMetricHistoryResponse", + "package": "ml", + "description": "", + "fields": { + "metrics": { + "name": "metrics", + "type": "[]Metric", + "description": "All logged values for this metric if `max_results` is not specified in\nthe request or if the total count of metrics returned is less than the\nservice level pagination threshold. Otherwise, this is one page of\nresults.", + "required": false + }, + "next_page_token": { + "name": "next_page_token", + "type": "string", + "description": "A token that can be used to issue a query for the next page of metric\nhistory values. A missing token indicates that no additional metrics are\navailable to fetch.", + "required": false + } + } + }, + "ml.GetModelResponse": { + "name": "GetModelResponse", + "package": "ml", + "description": "", + "fields": { + "registered_model_databricks": { + "name": "registered_model_databricks", + "type": "*ModelDatabricks", + "description": "", + "required": false + } + } + }, + "ml.GetModelVersionDownloadUriResponse": { + "name": "GetModelVersionDownloadUriResponse", + "package": "ml", + "description": "", + "fields": { + "artifact_uri": { + "name": "artifact_uri", + "type": "string", + "description": "URI corresponding to where artifacts for this model version are stored.", + "required": false + } + } + }, + "ml.GetModelVersionResponse": { + "name": "GetModelVersionResponse", + "package": "ml", + "description": "", + "fields": { + "model_version": { + "name": "model_version", + "type": "*ModelVersion", + "description": "", + "required": false + } + } + }, + "ml.GetRegisteredModelPermissionLevelsResponse": { + "name": "GetRegisteredModelPermissionLevelsResponse", + "package": "ml", + "description": "", + "fields": { + "permission_levels": { + "name": "permission_levels", + "type": "[]RegisteredModelPermissionsDescription", + "description": "Specific permission levels", + "required": false + } + } + }, + "ml.GetRunResponse": { + "name": "GetRunResponse", + "package": "ml", + "description": "", + "fields": { + "run": { + "name": "run", + "type": "*Run", + "description": "Run metadata (name, start time, etc) and data (metrics, params, and\ntags).", + "required": false + } + } + }, + "ml.HttpUrlSpec": { + "name": "HttpUrlSpec", + "package": "ml", + "description": "", + "fields": { + "authorization": { + "name": "authorization", + "type": "string", + "description": "Value of the authorization header that should be sent in the request sent\nby the wehbook. 
It should be of the form `\"\u003cauth type\u003e \u003ccredentials\u003e\"`.\nIf set to an empty string, no authorization header will be included in\nthe request.", +     "required": false +    }, +    "enable_ssl_verification": { +     "name": "enable_ssl_verification", +     "type": "bool", +     "description": "Enable/disable SSL certificate validation. Default is true. For\nself-signed certificates, this field must be false AND the destination\nserver must disable certificate validation as well. For security\npurposes, it is encouraged to perform secret validation with the\nHMAC-encoded portion of the payload and acknowledge the risk associated\nwith disabling hostname validation whereby it becomes more likely that\nrequests can be maliciously routed to an unintended host.", +     "required": false +    }, +    "secret": { +     "name": "secret", +     "type": "string", +     "description": "Shared secret required for HMAC encoding payload. The HMAC-encoded\npayload will be sent in the header as: { \"X-Databricks-Signature\":\n$encoded_payload }.", +     "required": false +    }, +    "url": { +     "name": "url", +     "type": "string", +     "description": "External HTTPS URL called on event trigger (by using a POST request).", +     "required": false +    } +   } +  }, +  "ml.HttpUrlSpecWithoutSecret": { +   "name": "HttpUrlSpecWithoutSecret", +   "package": "ml", +   "description": "", +   "fields": { +    "enable_ssl_verification": { +     "name": "enable_ssl_verification", +     "type": "bool", +     "description": "Enable/disable SSL certificate validation. Default is true. For\nself-signed certificates, this field must be false AND the destination\nserver must disable certificate validation as well. For security\npurposes, it is encouraged to perform secret validation with the\nHMAC-encoded portion of the payload and acknowledge the risk associated\nwith disabling hostname validation whereby it becomes more likely that\nrequests can be maliciously routed to an unintended host.", +     "required": false +    }, +    "url": { +     "name": "url", +     "type": "string", +     "description": "External HTTPS URL called on event trigger (by using a POST request).", +     "required": false +    } +   } +  }, +  "ml.InputTag": { +   "name": "InputTag", +   "package": "ml", +   "description": "Tag for a dataset input.", +   "fields": { +    "key": { +     "name": "key", +     "type": "string", +     "description": "The tag key.", +     "required": false +    }, +    "value": { +     "name": "value", +     "type": "string", +     "description": "The tag value.", +     "required": false +    } +   } +  }, +  "ml.JobContext": { +   "name": "JobContext", +   "package": "ml", +   "description": "", +   "fields": { +    "job_id": { +     "name": "job_id", +     "type": "int64", +     "description": "The job ID where this API was invoked.", +     "required": false +    }, +    "job_run_id": { +     "name": "job_run_id", +     "type": "int64", +     "description": "The job run ID where this API was invoked.", +     "required": false +    } +   } +  }, +  "ml.JobSpec": { +   "name": "JobSpec", +   "package": "ml", +   "description": "", +   "fields": { +    "access_token": { +     "name": "access_token", +     "type": "string", +     "description": "The personal access token used to authorize webhook's job runs.", +     "required": false +    }, +    "job_id": { +     "name": "job_id", +     "type": "string", +     "description": "ID of the job that the webhook runs.", +     "required": false +    }, +    "workspace_url": { +     "name": "workspace_url", +     "type": "string", +     "description": "URL of the workspace containing the job that this webhook runs. 
If not\nspecified, the job’s workspace URL is assumed to be the same as the\nworkspace where the webhook is created.", + "required": false + } + } + }, + "ml.JobSpecWithoutSecret": { + "name": "JobSpecWithoutSecret", + "package": "ml", + "description": "", + "fields": { + "job_id": { + "name": "job_id", + "type": "string", + "description": "ID of the job that the webhook runs.", + "required": false + }, + "workspace_url": { + "name": "workspace_url", + "type": "string", + "description": "URL of the workspace containing the job that this webhook runs. If not\nspecified, the job’s workspace URL is assumed to be the same as the\nworkspace where the webhook is created.", + "required": false + } + } + }, + "ml.KafkaConfig": { + "name": "KafkaConfig", + "package": "ml", + "description": "", + "fields": { + "auth_config": { + "name": "auth_config", + "type": "AuthConfig", + "description": "Authentication configuration for connection to topics.", + "required": false + }, + "bootstrap_servers": { + "name": "bootstrap_servers", + "type": "string", + "description": "A comma-separated list of host/port pairs pointing to Kafka cluster.", + "required": false + }, + "extra_options": { + "name": "extra_options", + "type": "map[string]string", + "description": "Catch-all for miscellaneous options. Keys should be source options or\nKafka consumer options (kafka.*)", + "required": false + }, + "key_schema": { + "name": "key_schema", + "type": "*SchemaConfig", + "description": "Schema configuration for extracting message keys from topics. At least\none of key_schema and value_schema must be provided.", + "required": false + }, + "name": { + "name": "name", + "type": "string", + "description": "Name that uniquely identifies this Kafka config within the metastore.\nThis will be the identifier used from the Feature object to reference\nthese configs for a feature. Can be distinct from topic name.", + "required": false + }, + "subscription_mode": { + "name": "subscription_mode", + "type": "SubscriptionMode", + "description": "Options to configure which Kafka topics to pull data from.", + "required": false + }, + "value_schema": { + "name": "value_schema", + "type": "*SchemaConfig", + "description": "Schema configuration for extracting message values from topics. At least\none of key_schema and value_schema must be provided.", + "required": false + } + } + }, + "ml.KafkaSource": { + "name": "KafkaSource", + "package": "ml", + "description": "", + "fields": { + "entity_column_identifiers": { + "name": "entity_column_identifiers", + "type": "[]ColumnIdentifier", + "description": "The entity column identifiers of the Kafka source.", + "required": false + }, + "name": { + "name": "name", + "type": "string", + "description": "Name of the Kafka source, used to identify it. This is used to look up\nthe corresponding KafkaConfig object. Can be distinct from topic name.", + "required": false + }, + "timeseries_column_identifier": { + "name": "timeseries_column_identifier", + "type": "ColumnIdentifier", + "description": "The timeseries column identifier of the Kafka source.", + "required": false + } + } + }, + "ml.LineageContext": { + "name": "LineageContext", + "package": "ml", + "description": "Lineage context information for tracking where an API was invoked. 
This will\nallow us to track lineage, which currently uses caller entity information for\nuse across the Lineage Client and Observability in Lumberjack.", + "fields": { + "job_context": { + "name": "job_context", + "type": "*JobContext", + "description": "Job context information including job ID and run ID.", + "required": false + }, + "notebook_id": { + "name": "notebook_id", + "type": "int64", + "description": "The notebook ID where this API was invoked.", + "required": false + } + } + }, + "ml.LinkedFeature": { + "name": "LinkedFeature", + "package": "ml", + "description": "Feature for model version. ([ML-57150] Renamed from Feature to LinkedFeature)", + "fields": { + "feature_name": { + "name": "feature_name", + "type": "string", + "description": "Feature name", + "required": false + }, + "feature_table_id": { + "name": "feature_table_id", + "type": "string", + "description": "Feature table id", + "required": false + }, + "feature_table_name": { + "name": "feature_table_name", + "type": "string", + "description": "Feature table name", + "required": false + } + } + }, + "ml.ListArtifactsResponse": { + "name": "ListArtifactsResponse", + "package": "ml", + "description": "", + "fields": { + "files": { + "name": "files", + "type": "[]FileInfo", + "description": "The file location and metadata for artifacts.", + "required": false + }, + "next_page_token": { + "name": "next_page_token", + "type": "string", + "description": "The token that can be used to retrieve the next page of artifact results.", + "required": false + }, + "root_uri": { + "name": "root_uri", + "type": "string", + "description": "The root artifact directory for the run.", + "required": false + } + } + }, + "ml.ListExperimentsResponse": { + "name": "ListExperimentsResponse", + "package": "ml", + "description": "", + "fields": { + "experiments": { + "name": "experiments", + "type": "[]Experiment", + "description": "Paginated Experiments beginning with the first item on the requested\npage.", + "required": false + }, + "next_page_token": { + "name": "next_page_token", + "type": "string", + "description": "Token that can be used to retrieve the next page of experiments. Empty\ntoken means no more experiment is available for retrieval.", + "required": false + } + } + }, + "ml.ListFeatureTagsResponse": { + "name": "ListFeatureTagsResponse", + "package": "ml", + "description": "Response message for ListFeatureTag.", + "fields": { + "feature_tags": { + "name": "feature_tags", + "type": "[]FeatureTag", + "description": "", + "required": false + }, + "next_page_token": { + "name": "next_page_token", + "type": "string", + "description": "Pagination token to request the next page of results for this query.", + "required": false + } + } + }, + "ml.ListFeaturesResponse": { + "name": "ListFeaturesResponse", + "package": "ml", + "description": "", + "fields": { + "features": { + "name": "features", + "type": "[]Feature", + "description": "List of features.", + "required": false + }, + "next_page_token": { + "name": "next_page_token", + "type": "string", + "description": "Pagination token to request the next page of results for this query.", + "required": false + } + } + }, + "ml.ListKafkaConfigsResponse": { + "name": "ListKafkaConfigsResponse", + "package": "ml", + "description": "", + "fields": { + "kafka_configs": { + "name": "kafka_configs", + "type": "[]KafkaConfig", + "description": "List of Kafka configs. 
Schemas are not included in the response.", + "required": false + }, + "next_page_token": { + "name": "next_page_token", + "type": "string", + "description": "Pagination token to request the next page of results for this query.", + "required": false + } + } + }, + "ml.ListMaterializedFeaturesResponse": { + "name": "ListMaterializedFeaturesResponse", + "package": "ml", + "description": "", + "fields": { + "materialized_features": { + "name": "materialized_features", + "type": "[]MaterializedFeature", + "description": "List of materialized features.", + "required": false + }, + "next_page_token": { + "name": "next_page_token", + "type": "string", + "description": "Pagination token to request the next page of results for this query.", + "required": false + } + } + }, + "ml.ListModelsResponse": { + "name": "ListModelsResponse", + "package": "ml", + "description": "", + "fields": { + "next_page_token": { + "name": "next_page_token", + "type": "string", + "description": "Pagination token to request next page of models for the same query.", + "required": false + }, + "registered_models": { + "name": "registered_models", + "type": "[]Model", + "description": "", + "required": false + } + } + }, + "ml.ListOnlineStoresResponse": { + "name": "ListOnlineStoresResponse", + "package": "ml", + "description": "", + "fields": { + "next_page_token": { + "name": "next_page_token", + "type": "string", + "description": "Pagination token to request the next page of results for this query.", + "required": false + }, + "online_stores": { + "name": "online_stores", + "type": "[]OnlineStore", + "description": "List of online stores.", + "required": false + } + } + }, + "ml.ListRegistryWebhooks": { + "name": "ListRegistryWebhooks", + "package": "ml", + "description": "", + "fields": { + "next_page_token": { + "name": "next_page_token", + "type": "string", + "description": "Token that can be used to retrieve the next page of artifact results", + "required": false + }, + "webhooks": { + "name": "webhooks", + "type": "[]RegistryWebhook", + "description": "Array of registry webhooks.", + "required": false + } + } + }, + "ml.ListTransitionRequestsResponse": { + "name": "ListTransitionRequestsResponse", + "package": "ml", + "description": "", + "fields": { + "requests": { + "name": "requests", + "type": "[]Activity", + "description": "Array of open transition requests.", + "required": false + } + } + }, + "ml.LogBatch": { + "name": "LogBatch", + "package": "ml", + "description": "", + "fields": { + "metrics": { + "name": "metrics", + "type": "[]Metric", + "description": "Metrics to log. A single request can contain up to 1000 metrics, and up\nto 1000 metrics, params, and tags in total.", + "required": false + }, + "params": { + "name": "params", + "type": "[]Param", + "description": "Params to log. A single request can contain up to 100 params, and up to\n1000 metrics, params, and tags in total.", + "required": false + }, + "run_id": { + "name": "run_id", + "type": "string", + "description": "ID of the run to log under", + "required": false + }, + "tags": { + "name": "tags", + "type": "[]RunTag", + "description": "Tags to log. 
A single request can contain up to 100 tags, and up to 1000\nmetrics, params, and tags in total.", + "required": false + } + } + }, + "ml.LogInputs": { + "name": "LogInputs", + "package": "ml", + "description": "", + "fields": { + "datasets": { + "name": "datasets", + "type": "[]DatasetInput", + "description": "Dataset inputs", + "required": false + }, + "models": { + "name": "models", + "type": "[]ModelInput", + "description": "Model inputs", + "required": false + }, + "run_id": { + "name": "run_id", + "type": "string", + "description": "ID of the run to log under", + "required": false + } + } + }, + "ml.LogLoggedModelParamsRequest": { + "name": "LogLoggedModelParamsRequest", + "package": "ml", + "description": "", + "fields": { + "params": { + "name": "params", + "type": "[]LoggedModelParameter", + "description": "Parameters to attach to the model.", + "required": false + } + } + }, + "ml.LogMetric": { + "name": "LogMetric", + "package": "ml", + "description": "", + "fields": { + "dataset_digest": { + "name": "dataset_digest", + "type": "string", + "description": "Dataset digest of the dataset associated with the metric, e.g. an md5\nhash of the dataset that uniquely identifies it within datasets of the\nsame name.", + "required": false + }, + "dataset_name": { + "name": "dataset_name", + "type": "string", + "description": "The name of the dataset associated with the metric. E.g.\n“my.uc.table@2” “nyc-taxi-dataset”, “fantastic-elk-3”", + "required": false + }, + "key": { + "name": "key", + "type": "string", + "description": "Name of the metric.", + "required": false + }, + "model_id": { + "name": "model_id", + "type": "string", + "description": "ID of the logged model associated with the metric, if applicable", + "required": false + }, + "run_id": { + "name": "run_id", + "type": "string", + "description": "ID of the run under which to log the metric. Must be provided.", + "required": false + }, + "run_uuid": { + "name": "run_uuid", + "type": "string", + "description": "[Deprecated, use `run_id` instead] ID of the run under which to log the\nmetric. 
This field will be removed in a future MLflow version.", + "required": false + }, + "step": { + "name": "step", + "type": "int64", + "description": "Step at which to log the metric", + "required": false + }, + "timestamp": { + "name": "timestamp", + "type": "int64", + "description": "Unix timestamp in milliseconds at the time metric was logged.", + "required": false + }, + "value": { + "name": "value", + "type": "float64", + "description": "Double value of the metric being logged.", + "required": false + } + } + }, + "ml.LogModel": { + "name": "LogModel", + "package": "ml", + "description": "", + "fields": { + "model_json": { + "name": "model_json", + "type": "string", + "description": "MLmodel file in json format.", + "required": false + }, + "run_id": { + "name": "run_id", + "type": "string", + "description": "ID of the run to log under", + "required": false + } + } + }, + "ml.LogOutputsRequest": { + "name": "LogOutputsRequest", + "package": "ml", + "description": "", + "fields": { + "models": { + "name": "models", + "type": "[]ModelOutput", + "description": "The model outputs from the Run.", + "required": false + }, + "run_id": { + "name": "run_id", + "type": "string", + "description": "The ID of the Run from which to log outputs.", + "required": false + } + } + }, + "ml.LogParam": { + "name": "LogParam", + "package": "ml", + "description": "", + "fields": { + "key": { + "name": "key", + "type": "string", + "description": "Name of the param. Maximum size is 255 bytes.", + "required": false + }, + "run_id": { + "name": "run_id", + "type": "string", + "description": "ID of the run under which to log the param. Must be provided.", + "required": false + }, + "run_uuid": { + "name": "run_uuid", + "type": "string", + "description": "[Deprecated, use `run_id` instead] ID of the run under which to log the\nparam. This field will be removed in a future MLflow version.", + "required": false + }, + "value": { + "name": "value", + "type": "string", + "description": "String value of the param being logged. 
Maximum size is 500 bytes.", + "required": false + } + } + }, + "ml.LoggedModel": { + "name": "LoggedModel", + "package": "ml", + "description": "A logged model message includes logged model attributes, tags, registration\ninfo, params, and linked run metrics.", + "fields": { + "data": { + "name": "data", + "type": "*LoggedModelData", + "description": "The params and metrics attached to the logged model.", + "required": false + }, + "info": { + "name": "info", + "type": "*LoggedModelInfo", + "description": "The logged model attributes such as model ID, status, tags, etc.", + "required": false + } + } + }, + "ml.LoggedModelData": { + "name": "LoggedModelData", + "package": "ml", + "description": "A LoggedModelData message includes logged model params and linked metrics.", + "fields": { + "metrics": { + "name": "metrics", + "type": "[]Metric", + "description": "Performance metrics linked to the model.", + "required": false + }, + "params": { + "name": "params", + "type": "[]LoggedModelParameter", + "description": "Immutable string key-value pairs of the model.", + "required": false + } + } + }, + "ml.LoggedModelInfo": { + "name": "LoggedModelInfo", + "package": "ml", + "description": "A LoggedModelInfo includes logged model attributes, tags, and registration\ninfo.", + "fields": { + "artifact_uri": { + "name": "artifact_uri", + "type": "string", + "description": "The URI of the directory where model artifacts are stored.", + "required": false + }, + "creation_timestamp_ms": { + "name": "creation_timestamp_ms", + "type": "int64", + "description": "The timestamp when the model was created in milliseconds since the UNIX\nepoch.", + "required": false + }, + "creator_id": { + "name": "creator_id", + "type": "int64", + "description": "The ID of the user or principal that created the model.", + "required": false + }, + "experiment_id": { + "name": "experiment_id", + "type": "string", + "description": "The ID of the experiment that owns the model.", + "required": false + }, + "last_updated_timestamp_ms": { + "name": "last_updated_timestamp_ms", + "type": "int64", + "description": "The timestamp when the model was last updated in milliseconds since the\nUNIX epoch.", + "required": false + }, + "model_id": { + "name": "model_id", + "type": "string", + "description": "The unique identifier for the logged model.", + "required": false + }, + "model_type": { + "name": "model_type", + "type": "string", + "description": "The type of model, such as ``\"Agent\"``, ``\"Classifier\"``, ``\"LLM\"``.", + "required": false + }, + "name": { + "name": "name", + "type": "string", + "description": "The name of the model.", + "required": false + }, + "source_run_id": { + "name": "source_run_id", + "type": "string", + "description": "The ID of the run that created the model.", + "required": false + }, + "status": { + "name": "status", + "type": "LoggedModelStatus", + "description": "The status of whether or not the model is ready for use.", + "required": false + }, + "status_message": { + "name": "status_message", + "type": "string", + "description": "Details on the current model status.", + "required": false + }, + "tags": { + "name": "tags", + "type": "[]LoggedModelTag", + "description": "Mutable string key-value pairs set on the model.", + "required": false + } + } + }, + "ml.LoggedModelParameter": { + "name": "LoggedModelParameter", + "package": "ml", + "description": "Parameter associated with a LoggedModel.", + "fields": { + "key": { + "name": "key", + "type": "string", + "description": "The key identifying this 
param.", + "required": false + }, + "value": { + "name": "value", + "type": "string", + "description": "The value of this param.", + "required": false + } + } + }, + "ml.LoggedModelTag": { + "name": "LoggedModelTag", + "package": "ml", + "description": "Tag for a LoggedModel.", + "fields": { + "key": { + "name": "key", + "type": "string", + "description": "The tag key.", + "required": false + }, + "value": { + "name": "value", + "type": "string", + "description": "The tag value.", + "required": false + } + } + }, + "ml.MaterializedFeature": { + "name": "MaterializedFeature", + "package": "ml", + "description": "A materialized feature represents a feature that is continuously computed and\nstored.", + "fields": { + "cron_schedule": { + "name": "cron_schedule", + "type": "string", + "description": "The quartz cron expression that defines the schedule of the\nmaterialization pipeline. The schedule is evaluated in the UTC timezone.", + "required": false + }, + "feature_name": { + "name": "feature_name", + "type": "string", + "description": "The full name of the feature in Unity Catalog.", + "required": false + }, + "last_materialization_time": { + "name": "last_materialization_time", + "type": "string", + "description": "The timestamp when the pipeline last ran and updated the materialized\nfeature values. If the pipeline has not run yet, this field will be null.", + "required": false + }, + "materialized_feature_id": { + "name": "materialized_feature_id", + "type": "string", + "description": "Unique identifier for the materialized feature.", + "required": false + }, + "offline_store_config": { + "name": "offline_store_config", + "type": "*OfflineStoreConfig", + "description": "", + "required": false + }, + "online_store_config": { + "name": "online_store_config", + "type": "*OnlineStoreConfig", + "description": "", + "required": false + }, + "pipeline_schedule_state": { + "name": "pipeline_schedule_state", + "type": "MaterializedFeaturePipelineScheduleState", + "description": "The schedule state of the materialization pipeline.", + "required": false + }, + "table_name": { + "name": "table_name", + "type": "string", + "description": "The fully qualified Unity Catalog path to the table containing the\nmaterialized feature (Delta table or Lakebase table). Output only.", + "required": false + } + } + }, + "ml.Metric": { + "name": "Metric", + "package": "ml", + "description": "Metric associated with a run, represented as a key-value pair.", + "fields": { + "dataset_digest": { + "name": "dataset_digest", + "type": "string", + "description": "The dataset digest of the dataset associated with the metric, e.g. an md5\nhash of the dataset that uniquely identifies it within datasets of the\nsame name.", + "required": false + }, + "dataset_name": { + "name": "dataset_name", + "type": "string", + "description": "The name of the dataset associated with the metric. 
E.g.\n“my.uc.table@2” “nyc-taxi-dataset”, “fantastic-elk-3”", + "required": false + }, + "key": { + "name": "key", + "type": "string", + "description": "The key identifying the metric.", + "required": false + }, + "model_id": { + "name": "model_id", + "type": "string", + "description": "The ID of the logged model or registered model version associated with\nthe metric, if applicable.", + "required": false + }, + "run_id": { + "name": "run_id", + "type": "string", + "description": "The ID of the run containing the metric.", + "required": false + }, + "step": { + "name": "step", + "type": "int64", + "description": "The step at which the metric was logged.", + "required": false + }, + "timestamp": { + "name": "timestamp", + "type": "int64", + "description": "The timestamp at which the metric was recorded.", + "required": false + }, + "value": { + "name": "value", + "type": "float64", + "description": "The value of the metric.", + "required": false + } + } + }, + "ml.Model": { + "name": "Model", + "package": "ml", + "description": "", + "fields": { + "creation_timestamp": { + "name": "creation_timestamp", + "type": "int64", + "description": "Timestamp recorded when this `registered_model` was created.", + "required": false + }, + "description": { + "name": "description", + "type": "string", + "description": "Description of this `registered_model`.", + "required": false + }, + "last_updated_timestamp": { + "name": "last_updated_timestamp", + "type": "int64", + "description": "Timestamp recorded when metadata for this `registered_model` was last\nupdated.", + "required": false + }, + "latest_versions": { + "name": "latest_versions", + "type": "[]ModelVersion", + "description": "Collection of latest model versions for each stage. Only contains models\nwith current `READY` status.", + "required": false + }, + "name": { + "name": "name", + "type": "string", + "description": "Unique name for the model.", + "required": false + }, + "tags": { + "name": "tags", + "type": "[]ModelTag", + "description": "Tags: Additional metadata key-value pairs for this `registered_model`.", + "required": false + }, + "user_id": { + "name": "user_id", + "type": "string", + "description": "User that created this `registered_model`", + "required": false + } + } + }, + "ml.ModelDatabricks": { + "name": "ModelDatabricks", + "package": "ml", + "description": "", + "fields": { + "creation_timestamp": { + "name": "creation_timestamp", + "type": "int64", + "description": "Creation time of the object, as a Unix timestamp in milliseconds.", + "required": false + }, + "description": { + "name": "description", + "type": "string", + "description": "User-specified description for the object.", + "required": false + }, + "id": { + "name": "id", + "type": "string", + "description": "Unique identifier for the object.", + "required": false + }, + "last_updated_timestamp": { + "name": "last_updated_timestamp", + "type": "int64", + "description": "Last update time of the object, as a Unix timestamp in milliseconds.", + "required": false + }, + "latest_versions": { + "name": "latest_versions", + "type": "[]ModelVersion", + "description": "Array of model versions, each the latest version for its stage.", + "required": false + }, + "name": { + "name": "name", + "type": "string", + "description": "Name of the model.", + "required": false + }, + "permission_level": { + "name": "permission_level", + "type": "PermissionLevel", + "description": "Permission level granted for the requesting user on this registered model", + "required": false + }, + 
"tags": { + "name": "tags", + "type": "[]ModelTag", + "description": "Array of tags associated with the model.", + "required": false + }, + "user_id": { + "name": "user_id", + "type": "string", + "description": "The username of the user that created the object.", + "required": false + } + } + }, + "ml.ModelInput": { + "name": "ModelInput", + "package": "ml", + "description": "Represents a LoggedModel or Registered Model Version input to a Run.", + "fields": { + "model_id": { + "name": "model_id", + "type": "string", + "description": "The unique identifier of the model.", + "required": false + } + } + }, + "ml.ModelOutput": { + "name": "ModelOutput", + "package": "ml", + "description": "Represents a LoggedModel output of a Run.", + "fields": { + "model_id": { + "name": "model_id", + "type": "string", + "description": "The unique identifier of the model.", + "required": false + }, + "step": { + "name": "step", + "type": "int64", + "description": "The step at which the model was produced.", + "required": false + } + } + }, + "ml.ModelTag": { + "name": "ModelTag", + "package": "ml", + "description": "Tag for a registered model", + "fields": { + "key": { + "name": "key", + "type": "string", + "description": "The tag key.", + "required": false + }, + "value": { + "name": "value", + "type": "string", + "description": "The tag value.", + "required": false + } + } + }, + "ml.ModelVersion": { + "name": "ModelVersion", + "package": "ml", + "description": "", + "fields": { + "creation_timestamp": { + "name": "creation_timestamp", + "type": "int64", + "description": "Timestamp recorded when this `model_version` was created.", + "required": false + }, + "current_stage": { + "name": "current_stage", + "type": "string", + "description": "Current stage for this `model_version`.", + "required": false + }, + "description": { + "name": "description", + "type": "string", + "description": "Description of this `model_version`.", + "required": false + }, + "last_updated_timestamp": { + "name": "last_updated_timestamp", + "type": "int64", + "description": "Timestamp recorded when metadata for this `model_version` was last\nupdated.", + "required": false + }, + "name": { + "name": "name", + "type": "string", + "description": "Unique name of the model", + "required": false + }, + "run_id": { + "name": "run_id", + "type": "string", + "description": "MLflow run ID used when creating `model_version`, if `source` was\ngenerated by an experiment run stored in MLflow tracking server.", + "required": false + }, + "run_link": { + "name": "run_link", + "type": "string", + "description": "Run Link: Direct link to the run that generated this version", + "required": false + }, + "source": { + "name": "source", + "type": "string", + "description": "URI indicating the location of the source model artifacts, used when\ncreating `model_version`", + "required": false + }, + "status": { + "name": "status", + "type": "ModelVersionStatus", + "description": "Current status of `model_version`", + "required": false + }, + "status_message": { + "name": "status_message", + "type": "string", + "description": "Details on current `status`, if it is pending or failed.", + "required": false + }, + "tags": { + "name": "tags", + "type": "[]ModelVersionTag", + "description": "Tags: Additional metadata key-value pairs for this `model_version`.", + "required": false + }, + "user_id": { + "name": "user_id", + "type": "string", + "description": "User that created this `model_version`.", + "required": false + }, + "version": { + "name": "version", + 
"type": "string", + "description": "Model's version number.", + "required": false + } + } + }, + "ml.ModelVersionDatabricks": { + "name": "ModelVersionDatabricks", + "package": "ml", + "description": "", + "fields": { + "creation_timestamp": { + "name": "creation_timestamp", + "type": "int64", + "description": "Creation time of the object, as a Unix timestamp in milliseconds.", + "required": false + }, + "current_stage": { + "name": "current_stage", + "type": "string", + "description": "", + "required": false + }, + "description": { + "name": "description", + "type": "string", + "description": "User-specified description for the object.", + "required": false + }, + "email_subscription_status": { + "name": "email_subscription_status", + "type": "RegistryEmailSubscriptionType", + "description": "Email Subscription Status: This is the subscription status of the user to\nthe model version Users get subscribed by interacting with the model\nversion.", + "required": false + }, + "feature_list": { + "name": "feature_list", + "type": "*FeatureList", + "description": "Feature lineage of `model_version`.", + "required": false + }, + "last_updated_timestamp": { + "name": "last_updated_timestamp", + "type": "int64", + "description": "Time of the object at last update, as a Unix timestamp in milliseconds.", + "required": false + }, + "name": { + "name": "name", + "type": "string", + "description": "Name of the model.", + "required": false + }, + "open_requests": { + "name": "open_requests", + "type": "[]Activity", + "description": "Open requests for this `model_versions`. Gap in sequence number is\nintentional and is done in order to match field sequence numbers of\n`ModelVersion` proto message", + "required": false + }, + "permission_level": { + "name": "permission_level", + "type": "PermissionLevel", + "description": "", + "required": false + }, + "run_id": { + "name": "run_id", + "type": "string", + "description": "Unique identifier for the MLflow tracking run associated with the source\nmodel artifacts.", + "required": false + }, + "run_link": { + "name": "run_link", + "type": "string", + "description": "URL of the run associated with the model artifacts. This field is set at\nmodel version creation time only for model versions whose source run is\nfrom a tracking server that is different from the registry server.", + "required": false + }, + "source": { + "name": "source", + "type": "string", + "description": "URI that indicates the location of the source model artifacts. 
This is\nused when creating the model version.", + "required": false + }, + "status": { + "name": "status", + "type": "Status", + "description": "", + "required": false + }, + "status_message": { + "name": "status_message", + "type": "string", + "description": "Details on the current status, for example why registration failed.", + "required": false + }, + "tags": { + "name": "tags", + "type": "[]ModelVersionTag", + "description": "Array of tags that are associated with the model version.", + "required": false + }, + "user_id": { + "name": "user_id", + "type": "string", + "description": "The username of the user that created the object.", + "required": false + }, + "version": { + "name": "version", + "type": "string", + "description": "Version of the model.", + "required": false + } + } + }, + "ml.ModelVersionTag": { + "name": "ModelVersionTag", + "package": "ml", + "description": "", + "fields": { + "key": { + "name": "key", + "type": "string", + "description": "The tag key.", + "required": false + }, + "value": { + "name": "value", + "type": "string", + "description": "The tag value.", + "required": false + } + } + }, + "ml.OfflineStoreConfig": { + "name": "OfflineStoreConfig", + "package": "ml", + "description": "Configuration for offline store destination.", + "fields": { + "catalog_name": { + "name": "catalog_name", + "type": "string", + "description": "The Unity Catalog catalog name.", + "required": false + }, + "schema_name": { + "name": "schema_name", + "type": "string", + "description": "The Unity Catalog schema name.", + "required": false + }, + "table_name_prefix": { + "name": "table_name_prefix", + "type": "string", + "description": "Prefix for Unity Catalog table name. The materialized feature will be\nstored in a table with this prefix and a generated postfix.", + "required": false + } + } + }, + "ml.OnlineStore": { + "name": "OnlineStore", + "package": "ml", + "description": "An OnlineStore is a logical database instance that stores and serves features\nonline.", + "fields": { + "capacity": { + "name": "capacity", + "type": "string", + "description": "The capacity of the online store. Valid values are \"CU_1\", \"CU_2\",\n\"CU_4\", \"CU_8\".", + "required": false + }, + "creation_time": { + "name": "creation_time", + "type": "string", + "description": "The timestamp when the online store was created.", + "required": false + }, + "creator": { + "name": "creator", + "type": "string", + "description": "The email of the creator of the online store.", + "required": false + }, + "name": { + "name": "name", + "type": "string", + "description": "The name of the online store. This is the unique identifier for the\nonline store.", + "required": false + }, + "read_replica_count": { + "name": "read_replica_count", + "type": "int", + "description": "The number of read replicas for the online store. Defaults to 0.", + "required": false + }, + "state": { + "name": "state", + "type": "OnlineStoreState", + "description": "The current state of the online store.", + "required": false + }, + "usage_policy_id": { + "name": "usage_policy_id", + "type": "string", + "description": "The usage policy applied to the online store to track billing.", + "required": false + } + } + }, + "ml.OnlineStoreConfig": { + "name": "OnlineStoreConfig", + "package": "ml", + "description": "Configuration for online store destination.", + "fields": { + "catalog_name": { + "name": "catalog_name", + "type": "string", + "description": "The Unity Catalog catalog name. 
This name is also used as the Lakebase\nlogical database name.", + "required": false + }, + "online_store_name": { + "name": "online_store_name", + "type": "string", + "description": "The name of the target online store.", + "required": false + }, + "schema_name": { + "name": "schema_name", + "type": "string", + "description": "The Unity Catalog schema name.", + "required": false + }, + "table_name_prefix": { + "name": "table_name_prefix", + "type": "string", + "description": "Prefix for Unity Catalog table name. The materialized feature will be\nstored in a Lakebase table with this prefix and a generated postfix.", + "required": false + } + } + }, + "ml.Param": { + "name": "Param", + "package": "ml", + "description": "Param associated with a run.", + "fields": { + "key": { + "name": "key", + "type": "string", + "description": "Key identifying this param.", + "required": false + }, + "value": { + "name": "value", + "type": "string", + "description": "Value associated with this param.", + "required": false + } + } + }, + "ml.PublishSpec": { + "name": "PublishSpec", + "package": "ml", + "description": "", + "fields": { + "online_store": { + "name": "online_store", + "type": "string", + "description": "The name of the target online store.", + "required": false + }, + "online_table_name": { + "name": "online_table_name", + "type": "string", + "description": "The full three-part (catalog, schema, table) name of the online table.", + "required": false + }, + "publish_mode": { + "name": "publish_mode", + "type": "PublishSpecPublishMode", + "description": "The publish mode of the pipeline that syncs the online table with the\nsource table.", + "required": false + } + } + }, + "ml.PublishTableRequest": { + "name": "PublishTableRequest", + "package": "ml", + "description": "", + "fields": { + "publish_spec": { + "name": "publish_spec", + "type": "PublishSpec", + "description": "The specification for publishing the online table from the source table.", + "required": false + } + } + }, + "ml.PublishTableResponse": { + "name": "PublishTableResponse", + "package": "ml", + "description": "", + "fields": { + "online_table_name": { + "name": "online_table_name", + "type": "string", + "description": "The full three-part (catalog, schema, table) name of the online table.", + "required": false + }, + "pipeline_id": { + "name": "pipeline_id", + "type": "string", + "description": "The ID of the pipeline that syncs the online table with the source table.", + "required": false + } + } + }, + "ml.RegisteredModelAccessControlRequest": { + "name": "RegisteredModelAccessControlRequest", + "package": "ml", + "description": "", + "fields": { + "group_name": { + "name": "group_name", + "type": "string", + "description": "name of the group", + "required": false + }, + "permission_level": { + "name": "permission_level", + "type": "RegisteredModelPermissionLevel", + "description": "", + "required": false + }, + "service_principal_name": { + "name": "service_principal_name", + "type": "string", + "description": "application ID of a service principal", + "required": false + }, + "user_name": { + "name": "user_name", + "type": "string", + "description": "name of the user", + "required": false + } + } + }, + "ml.RegisteredModelAccessControlResponse": { + "name": "RegisteredModelAccessControlResponse", + "package": "ml", + "description": "", + "fields": { + "all_permissions": { + "name": "all_permissions", + "type": "[]RegisteredModelPermission", + "description": "All permissions.", + "required": false + }, + "display_name": { 
+ "name": "display_name", + "type": "string", + "description": "Display name of the user or service principal.", + "required": false + }, + "group_name": { + "name": "group_name", + "type": "string", + "description": "name of the group", + "required": false + }, + "service_principal_name": { + "name": "service_principal_name", + "type": "string", + "description": "Name of the service principal.", + "required": false + }, + "user_name": { + "name": "user_name", + "type": "string", + "description": "name of the user", + "required": false + } + } + }, + "ml.RegisteredModelPermission": { + "name": "RegisteredModelPermission", + "package": "ml", + "description": "", + "fields": { + "inherited": { + "name": "inherited", + "type": "bool", + "description": "", + "required": false + }, + "inherited_from_object": { + "name": "inherited_from_object", + "type": "[]string", + "description": "", + "required": false + }, + "permission_level": { + "name": "permission_level", + "type": "RegisteredModelPermissionLevel", + "description": "", + "required": false + } + } + }, + "ml.RegisteredModelPermissions": { + "name": "RegisteredModelPermissions", + "package": "ml", + "description": "", + "fields": { + "access_control_list": { + "name": "access_control_list", + "type": "[]RegisteredModelAccessControlResponse", + "description": "", + "required": false + }, + "object_id": { + "name": "object_id", + "type": "string", + "description": "", + "required": false + }, + "object_type": { + "name": "object_type", + "type": "string", + "description": "", + "required": false + } + } + }, + "ml.RegisteredModelPermissionsDescription": { + "name": "RegisteredModelPermissionsDescription", + "package": "ml", + "description": "", + "fields": { + "description": { + "name": "description", + "type": "string", + "description": "", + "required": false + }, + "permission_level": { + "name": "permission_level", + "type": "RegisteredModelPermissionLevel", + "description": "", + "required": false + } + } + }, + "ml.RegisteredModelPermissionsRequest": { + "name": "RegisteredModelPermissionsRequest", + "package": "ml", + "description": "", + "fields": { + "access_control_list": { + "name": "access_control_list", + "type": "[]RegisteredModelAccessControlRequest", + "description": "", + "required": false + } + } + }, + "ml.RegistryWebhook": { + "name": "RegistryWebhook", + "package": "ml", + "description": "", + "fields": { + "creation_timestamp": { + "name": "creation_timestamp", + "type": "int64", + "description": "Creation time of the object, as a Unix timestamp in milliseconds.", + "required": false + }, + "description": { + "name": "description", + "type": "string", + "description": "User-specified description for the webhook.", + "required": false + }, + "events": { + "name": "events", + "type": "[]RegistryWebhookEvent", + "description": "Events that can trigger a registry webhook: * `MODEL_VERSION_CREATED`: A\nnew model version was created for the associated model.\n\n* `MODEL_VERSION_TRANSITIONED_STAGE`: A model version’s stage was\nchanged.\n\n* `TRANSITION_REQUEST_CREATED`: A user requested a model version’s\nstage be transitioned.\n\n* `COMMENT_CREATED`: A user wrote a comment on a registered model.\n\n* `REGISTERED_MODEL_CREATED`: A new registered model was created. 
This\nevent type can only be specified for a registry-wide webh...", + "required": false + }, + "http_url_spec": { + "name": "http_url_spec", + "type": "*HttpUrlSpecWithoutSecret", + "description": "", + "required": false + }, + "id": { + "name": "id", + "type": "string", + "description": "Webhook ID", + "required": false + }, + "job_spec": { + "name": "job_spec", + "type": "*JobSpecWithoutSecret", + "description": "", + "required": false + }, + "last_updated_timestamp": { + "name": "last_updated_timestamp", + "type": "int64", + "description": "Time of the object at last update, as a Unix timestamp in milliseconds.", + "required": false + }, + "model_name": { + "name": "model_name", + "type": "string", + "description": "Name of the model whose events would trigger this webhook.", + "required": false + }, + "status": { + "name": "status", + "type": "RegistryWebhookStatus", + "description": "", + "required": false + } + } + }, + "ml.RejectTransitionRequest": { + "name": "RejectTransitionRequest", + "package": "ml", + "description": "Details required to identify and reject a model version stage transition\nrequest.", + "fields": { + "comment": { + "name": "comment", + "type": "string", + "description": "User-provided comment on the action.", + "required": false + }, + "name": { + "name": "name", + "type": "string", + "description": "Name of the model.", + "required": false + }, + "stage": { + "name": "stage", + "type": "string", + "description": "Target stage of the transition. Valid values are:\n\n* `None`: The initial stage of a model version.\n\n* `Staging`: Staging or pre-production stage.\n\n* `Production`: Production stage.\n\n* `Archived`: Archived stage.", + "required": false + }, + "version": { + "name": "version", + "type": "string", + "description": "Version of the model.", + "required": false + } + } + }, + "ml.RejectTransitionRequestResponse": { + "name": "RejectTransitionRequestResponse", + "package": "ml", + "description": "", + "fields": { + "activity": { + "name": "activity", + "type": "*Activity", + "description": "New activity generated as a result of this operation.", + "required": false + } + } + }, + "ml.RenameModelRequest": { + "name": "RenameModelRequest", + "package": "ml", + "description": "", + "fields": { + "name": { + "name": "name", + "type": "string", + "description": "Registered model unique name identifier.", + "required": false + }, + "new_name": { + "name": "new_name", + "type": "string", + "description": "If provided, updates the name for this `registered_model`.", + "required": false + } + } + }, + "ml.RenameModelResponse": { + "name": "RenameModelResponse", + "package": "ml", + "description": "", + "fields": { + "registered_model": { + "name": "registered_model", + "type": "*Model", + "description": "", + "required": false + } + } + }, + "ml.RestoreExperiment": { + "name": "RestoreExperiment", + "package": "ml", + "description": "", + "fields": { + "experiment_id": { + "name": "experiment_id", + "type": "string", + "description": "ID of the associated experiment.", + "required": false + } + } + }, + "ml.RestoreRun": { + "name": "RestoreRun", + "package": "ml", + "description": "", + "fields": { + "run_id": { + "name": "run_id", + "type": "string", + "description": "ID of the run to restore.", + "required": false + } + } + }, + "ml.RestoreRuns": { + "name": "RestoreRuns", + "package": "ml", + "description": "", + "fields": { + "experiment_id": { + "name": "experiment_id", + "type": "string", + "description": "The ID of the experiment containing the runs to 
restore.", + "required": false + }, + "max_runs": { + "name": "max_runs", + "type": "int", + "description": "An optional positive integer indicating the maximum number of runs to\nrestore. The maximum allowed value for max_runs is 10000.", + "required": false + }, + "min_timestamp_millis": { + "name": "min_timestamp_millis", + "type": "int64", + "description": "The minimum deletion timestamp in milliseconds since the UNIX epoch for\nrestoring runs. Only runs deleted no earlier than this timestamp are\nrestored.", + "required": false + } + } + }, + "ml.RestoreRunsResponse": { + "name": "RestoreRunsResponse", + "package": "ml", + "description": "", + "fields": { + "runs_restored": { + "name": "runs_restored", + "type": "int", + "description": "The number of runs restored.", + "required": false + } + } + }, + "ml.Run": { + "name": "Run", + "package": "ml", + "description": "A single run.", + "fields": { + "data": { + "name": "data", + "type": "*RunData", + "description": "Run data.", + "required": false + }, + "info": { + "name": "info", + "type": "*RunInfo", + "description": "Run metadata.", + "required": false + }, + "inputs": { + "name": "inputs", + "type": "*RunInputs", + "description": "Run inputs.", + "required": false + } + } + }, + "ml.RunData": { + "name": "RunData", + "package": "ml", + "description": "Run data (metrics, params, and tags).", + "fields": { + "metrics": { + "name": "metrics", + "type": "[]Metric", + "description": "Run metrics.", + "required": false + }, + "params": { + "name": "params", + "type": "[]Param", + "description": "Run parameters.", + "required": false + }, + "tags": { + "name": "tags", + "type": "[]RunTag", + "description": "Additional metadata key-value pairs.", + "required": false + } + } + }, + "ml.RunInfo": { + "name": "RunInfo", + "package": "ml", + "description": "Metadata of a single run.", + "fields": { + "artifact_uri": { + "name": "artifact_uri", + "type": "string", + "description": "URI of the directory where artifacts should be uploaded. This can be a\nlocal path (starting with \"/\"), or a distributed file system (DFS) path,\nlike ``s3://bucket/directory`` or ``dbfs:/my/directory``. If not set, the\nlocal ``./mlruns`` directory is chosen.", + "required": false + }, + "end_time": { + "name": "end_time", + "type": "int64", + "description": "Unix timestamp of when the run ended in milliseconds.", + "required": false + }, + "experiment_id": { + "name": "experiment_id", + "type": "string", + "description": "The experiment ID.", + "required": false + }, + "lifecycle_stage": { + "name": "lifecycle_stage", + "type": "string", + "description": "Current life cycle stage of the experiment : OneOf(\"active\", \"deleted\")", + "required": false + }, + "run_id": { + "name": "run_id", + "type": "string", + "description": "Unique identifier for the run.", + "required": false + }, + "run_name": { + "name": "run_name", + "type": "string", + "description": "The name of the run.", + "required": false + }, + "run_uuid": { + "name": "run_uuid", + "type": "string", + "description": "[Deprecated, use run_id instead] Unique identifier for the run. 
This\nfield will be removed in a future MLflow version.", + "required": false + }, + "start_time": { + "name": "start_time", + "type": "int64", + "description": "Unix timestamp of when the run started in milliseconds.", + "required": false + }, + "status": { + "name": "status", + "type": "RunInfoStatus", + "description": "Current status of the run.", + "required": false + }, + "user_id": { + "name": "user_id", + "type": "string", + "description": "User who initiated the run. This field is deprecated as of MLflow 1.0,\nand will be removed in a future MLflow release. Use 'mlflow.user' tag\ninstead.", + "required": false + } + } + }, + "ml.RunInputs": { + "name": "RunInputs", + "package": "ml", + "description": "Run inputs.", + "fields": { + "dataset_inputs": { + "name": "dataset_inputs", + "type": "[]DatasetInput", + "description": "Run metrics.", + "required": false + }, + "model_inputs": { + "name": "model_inputs", + "type": "[]ModelInput", + "description": "Model inputs to the Run.", + "required": false + } + } + }, + "ml.RunTag": { + "name": "RunTag", + "package": "ml", + "description": "Tag for a run.", + "fields": { + "key": { + "name": "key", + "type": "string", + "description": "The tag key.", + "required": false + }, + "value": { + "name": "value", + "type": "string", + "description": "The tag value.", + "required": false + } + } + }, + "ml.SchemaConfig": { + "name": "SchemaConfig", + "package": "ml", + "description": "", + "fields": { + "json_schema": { + "name": "json_schema", + "type": "string", + "description": "Schema of the JSON object in standard IETF JSON schema format\n(https://json-schema.org/)", + "required": false + } + } + }, + "ml.SearchExperiments": { + "name": "SearchExperiments", + "package": "ml", + "description": "", + "fields": { + "filter": { + "name": "filter", + "type": "string", + "description": "String representing a SQL filter condition (e.g. \"name ILIKE\n'my-experiment%'\")", + "required": false + }, + "max_results": { + "name": "max_results", + "type": "int64", + "description": "Maximum number of experiments desired. Max threshold is 3000.", + "required": false + }, + "order_by": { + "name": "order_by", + "type": "[]string", + "description": "List of columns for ordering search results, which can include experiment\nname and last updated timestamp with an optional \"DESC\" or \"ASC\"\nannotation, where \"ASC\" is the default. Tiebreaks are done by experiment\nid DESC.", + "required": false + }, + "page_token": { + "name": "page_token", + "type": "string", + "description": "Token indicating the page of experiments to fetch", + "required": false + }, + "view_type": { + "name": "view_type", + "type": "ViewType", + "description": "Qualifier for type of experiments to be returned. If unspecified, return\nonly active experiments.", + "required": false + } + } + }, + "ml.SearchExperimentsResponse": { + "name": "SearchExperimentsResponse", + "package": "ml", + "description": "", + "fields": { + "experiments": { + "name": "experiments", + "type": "[]Experiment", + "description": "Experiments that match the search criteria", + "required": false + }, + "next_page_token": { + "name": "next_page_token", + "type": "string", + "description": "Token that can be used to retrieve the next page of experiments. 
An empty\ntoken means that no more experiments are available for retrieval.", + "required": false + } + } + }, + "ml.SearchLoggedModelsDataset": { + "name": "SearchLoggedModelsDataset", + "package": "ml", + "description": "", + "fields": { + "dataset_digest": { + "name": "dataset_digest", + "type": "string", + "description": "The digest of the dataset.", + "required": false + }, + "dataset_name": { + "name": "dataset_name", + "type": "string", + "description": "The name of the dataset.", + "required": false + } + } + }, + "ml.SearchLoggedModelsOrderBy": { + "name": "SearchLoggedModelsOrderBy", + "package": "ml", + "description": "", + "fields": { + "ascending": { + "name": "ascending", + "type": "bool", + "description": "Whether the search results order is ascending or not.", + "required": false + }, + "dataset_digest": { + "name": "dataset_digest", + "type": "string", + "description": "If ``field_name`` refers to a metric, this field specifies the digest of\nthe dataset associated with the metric. Only metrics associated with the\nspecified dataset name and digest will be considered for ordering. This\nfield may only be set if ``dataset_name`` is also set.", + "required": false + }, + "dataset_name": { + "name": "dataset_name", + "type": "string", + "description": "If ``field_name`` refers to a metric, this field specifies the name of\nthe dataset associated with the metric. Only metrics associated with the\nspecified dataset name will be considered for ordering. This field may\nonly be set if ``field_name`` refers to a metric.", + "required": false + }, + "field_name": { + "name": "field_name", + "type": "string", + "description": "The name of the field to order by, e.g. \"metrics.accuracy\".", + "required": false + } + } + }, + "ml.SearchLoggedModelsRequest": { + "name": "SearchLoggedModelsRequest", + "package": "ml", + "description": "", + "fields": { + "datasets": { + "name": "datasets", + "type": "[]SearchLoggedModelsDataset", + "description": "List of datasets on which to apply the metrics filter clauses. For\nexample, a filter with `metrics.accuracy \u003e 0.9` and dataset info with\nname \"test_dataset\" means we will return all logged models with accuracy\n\u003e 0.9 on the test_dataset. Metric values from ANY dataset matching the\ncriteria are considered. If no datasets are specified, then metrics\nacross all datasets are considered in the filter.", + "required": false + }, + "experiment_ids": { + "name": "experiment_ids", + "type": "[]string", + "description": "The IDs of the experiments in which to search for logged models.", + "required": false + }, + "filter": { + "name": "filter", + "type": "string", + "description": "A filter expression over logged model info and data that allows returning\na subset of logged models. The syntax is a subset of SQL that supports\nAND'ing together binary operations.\n\nExample: ``params.alpha \u003c 0.3 AND metrics.accuracy \u003e 0.9``.", + "required": false + }, + "max_results": { + "name": "max_results", + "type": "int", + "description": "The maximum number of Logged Models to return. 
The maximum limit is 50.", + "required": false + }, + "order_by": { + "name": "order_by", + "type": "[]SearchLoggedModelsOrderBy", + "description": "The list of columns for ordering the results, with additional fields for\nsorting criteria.", + "required": false + }, + "page_token": { + "name": "page_token", + "type": "string", + "description": "The token indicating the page of logged models to fetch.", + "required": false + } + } + }, + "ml.SearchLoggedModelsResponse": { + "name": "SearchLoggedModelsResponse", + "package": "ml", + "description": "", + "fields": { + "models": { + "name": "models", + "type": "[]LoggedModel", + "description": "Logged models that match the search criteria.", + "required": false + }, + "next_page_token": { + "name": "next_page_token", + "type": "string", + "description": "The token that can be used to retrieve the next page of logged models.", + "required": false + } + } + }, + "ml.SearchModelVersionsResponse": { + "name": "SearchModelVersionsResponse", + "package": "ml", + "description": "", + "fields": { + "model_versions": { + "name": "model_versions", + "type": "[]ModelVersion", + "description": "Models that match the search criteria", + "required": false + }, + "next_page_token": { + "name": "next_page_token", + "type": "string", + "description": "Pagination token to request next page of models for the same search\nquery.", + "required": false + } + } + }, + "ml.SearchModelsResponse": { + "name": "SearchModelsResponse", + "package": "ml", + "description": "", + "fields": { + "next_page_token": { + "name": "next_page_token", + "type": "string", + "description": "Pagination token to request the next page of models.", + "required": false + }, + "registered_models": { + "name": "registered_models", + "type": "[]Model", + "description": "Registered Models that match the search criteria.", + "required": false + } + } + }, + "ml.SearchRuns": { + "name": "SearchRuns", + "package": "ml", + "description": "", + "fields": { + "experiment_ids": { + "name": "experiment_ids", + "type": "[]string", + "description": "List of experiment IDs to search over.", + "required": false + }, + "filter": { + "name": "filter", + "type": "string", + "description": "A filter expression over params, metrics, and tags, that allows returning\na subset of runs. The syntax is a subset of SQL that supports ANDing\ntogether binary operations between a param, metric, or tag and a\nconstant.\n\nExample: `metrics.rmse \u003c 1 and params.model_class = 'LogisticRegression'`\n\nYou can select columns with special characters (hyphen, space, period,\netc.) by using double quotes: `metrics.\"model class\" = 'LinearRegression'\nand tags.\"user-name\" = 'Tomas'`\n\nSupported operators are `...", + "required": false + }, + "max_results": { + "name": "max_results", + "type": "int", + "description": "Maximum number of runs desired. Max threshold is 50000", + "required": false + }, + "order_by": { + "name": "order_by", + "type": "[]string", + "description": "List of columns to be ordered by, including attributes, params, metrics,\nand tags with an optional `\"DESC\"` or `\"ASC\"` annotation, where `\"ASC\"`\nis the default. Example: `[\"params.input DESC\", \"metrics.alpha ASC\",\n\"metrics.rmse\"]`. 
Tiebreaks are done by start_time `DESC` followed by\n`run_id` for runs with the same start time (and this is the default\nordering criterion if order_by is not provided).", + "required": false + }, + "page_token": { + "name": "page_token", + "type": "string", + "description": "Token for the current page of runs.", + "required": false + }, + "run_view_type": { + "name": "run_view_type", + "type": "ViewType", + "description": "Whether to display only active, only deleted, or all runs. Defaults to\nonly active runs.", + "required": false + } + } + }, + "ml.SearchRunsResponse": { + "name": "SearchRunsResponse", + "package": "ml", + "description": "", + "fields": { + "next_page_token": { + "name": "next_page_token", + "type": "string", + "description": "Token for the next page of runs.", + "required": false + }, + "runs": { + "name": "runs", + "type": "[]Run", + "description": "Runs that match the search criteria.", + "required": false + } + } + }, + "ml.SetExperimentTag": { + "name": "SetExperimentTag", + "package": "ml", + "description": "", + "fields": { + "experiment_id": { + "name": "experiment_id", + "type": "string", + "description": "ID of the experiment under which to log the tag. Must be provided.", + "required": false + }, + "key": { + "name": "key", + "type": "string", + "description": "Name of the tag. Keys up to 250 bytes in size are supported.", + "required": false + }, + "value": { + "name": "value", + "type": "string", + "description": "String value of the tag being logged. Values up to 64KB in size are\nsupported.", + "required": false + } + } + }, + "ml.SetLoggedModelTagsRequest": { + "name": "SetLoggedModelTagsRequest", + "package": "ml", + "description": "", + "fields": { + "tags": { + "name": "tags", + "type": "[]LoggedModelTag", + "description": "The tags to set on the logged model.", + "required": false + } + } + }, + "ml.SetModelTagRequest": { + "name": "SetModelTagRequest", + "package": "ml", + "description": "", + "fields": { + "key": { + "name": "key", + "type": "string", + "description": "Name of the tag. Maximum size depends on storage backend. If a tag with\nthis name already exists, its preexisting value will be replaced by the\nspecified `value`. All storage backends are guaranteed to support key\nvalues up to 250 bytes in size.", + "required": false + }, + "name": { + "name": "name", + "type": "string", + "description": "Unique name of the model.", + "required": false + }, + "value": { + "name": "value", + "type": "string", + "description": "String value of the tag being logged. Maximum size depends on storage\nbackend. All storage backends are guaranteed to support key values up to\n5000 bytes in size.", + "required": false + } + } + }, + "ml.SetModelVersionTagRequest": { + "name": "SetModelVersionTagRequest", + "package": "ml", + "description": "", + "fields": { + "key": { + "name": "key", + "type": "string", + "description": "Name of the tag. Maximum size depends on storage backend. If a tag with\nthis name already exists, its preexisting value will be replaced by the\nspecified `value`. All storage backends are guaranteed to support key\nvalues up to 250 bytes in size.", + "required": false + }, + "name": { + "name": "name", + "type": "string", + "description": "Unique name of the model.", + "required": false + }, + "value": { + "name": "value", + "type": "string", + "description": "String value of the tag being logged. Maximum size depends on storage\nbackend. 
All storage backends are guaranteed to support key values up to\n5000 bytes in size.", + "required": false + }, + "version": { + "name": "version", + "type": "string", + "description": "Model version number.", + "required": false + } + } + }, + "ml.SetTag": { + "name": "SetTag", + "package": "ml", + "description": "", + "fields": { + "key": { + "name": "key", + "type": "string", + "description": "Name of the tag. Keys up to 250 bytes in size are supported.", + "required": false + }, + "run_id": { + "name": "run_id", + "type": "string", + "description": "ID of the run under which to log the tag. Must be provided.", + "required": false + }, + "run_uuid": { + "name": "run_uuid", + "type": "string", + "description": "[Deprecated, use `run_id` instead] ID of the run under which to log the\ntag. This field will be removed in a future MLflow version.", + "required": false + }, + "value": { + "name": "value", + "type": "string", + "description": "String value of the tag being logged. Values up to 64KB in size are\nsupported.", + "required": false + } + } + }, + "ml.SlidingWindow": { + "name": "SlidingWindow", + "package": "ml", + "description": "", + "fields": { + "slide_duration": { + "name": "slide_duration", + "type": "string", + "description": "The slide duration (interval by which windows advance, must be positive\nand less than duration).", + "required": false + }, + "window_duration": { + "name": "window_duration", + "type": "string", + "description": "The duration of the sliding window.", + "required": false + } + } + }, + "ml.SubscriptionMode": { + "name": "SubscriptionMode", + "package": "ml", + "description": "", + "fields": { + "assign": { + "name": "assign", + "type": "string", + "description": "A JSON string that contains the specific topic-partitions to consume\nfrom. For example, for '{\"topicA\":[0,1],\"topicB\":[2,4]}', topicA's 0'th\nand 1st partitions will be consumed from.", + "required": false + }, + "subscribe": { + "name": "subscribe", + "type": "string", + "description": "A comma-separated list of Kafka topics to read from. For example,\n'topicA,topicB,topicC'.", + "required": false + }, + "subscribe_pattern": { + "name": "subscribe_pattern", + "type": "string", + "description": "A regular expression matching topics to subscribe to. For example,\n'topic.*' will subscribe to all topics starting with 'topic'.", + "required": false + } + } + }, + "ml.TestRegistryWebhookRequest": { + "name": "TestRegistryWebhookRequest", + "package": "ml", + "description": "Details required to test a registry webhook.", + "fields": { + "event": { + "name": "event", + "type": "RegistryWebhookEvent", + "description": "If `event` is specified, the test trigger uses the specified event. 
If\n`event` is not specified, the test trigger uses a randomly chosen event\nassociated with the webhook.", + "required": false + }, + "id": { + "name": "id", + "type": "string", + "description": "Webhook ID", + "required": false + } + } + }, + "ml.TestRegistryWebhookResponse": { + "name": "TestRegistryWebhookResponse", + "package": "ml", + "description": "", + "fields": { + "body": { + "name": "body", + "type": "string", + "description": "Body of the response from the webhook URL", + "required": false + }, + "status_code": { + "name": "status_code", + "type": "int", + "description": "Status code returned by the webhook URL", + "required": false + } + } + }, + "ml.TimeWindow": { + "name": "TimeWindow", + "package": "ml", + "description": "", + "fields": { + "continuous": { + "name": "continuous", + "type": "*ContinuousWindow", + "description": "", + "required": false + }, + "sliding": { + "name": "sliding", + "type": "*SlidingWindow", + "description": "", + "required": false + }, + "tumbling": { + "name": "tumbling", + "type": "*TumblingWindow", + "description": "", + "required": false + } + } + }, + "ml.TransitionModelVersionStageDatabricks": { + "name": "TransitionModelVersionStageDatabricks", + "package": "ml", + "description": "Details required to transition a model version's stage.", + "fields": { + "archive_existing_versions": { + "name": "archive_existing_versions", + "type": "bool", + "description": "Specifies whether to archive all current model versions in the target\nstage.", + "required": false + }, + "comment": { + "name": "comment", + "type": "string", + "description": "User-provided comment on the action.", + "required": false + }, + "name": { + "name": "name", + "type": "string", + "description": "Name of the model.", + "required": false + }, + "stage": { + "name": "stage", + "type": "string", + "description": "Target stage of the transition. Valid values are:\n\n* `None`: The initial stage of a model version.\n\n* `Staging`: Staging or pre-production stage.\n\n* `Production`: Production stage.\n\n* `Archived`: Archived stage.", + "required": false + }, + "version": { + "name": "version", + "type": "string", + "description": "Version of the model.", + "required": false + } + } + }, + "ml.TransitionRequest": { + "name": "TransitionRequest", + "package": "ml", + "description": "For activities, this contains the activity recorded for the action. For\ncomments, this contains the comment details. For transition requests, this\ncontains the transition request details.", + "fields": { + "available_actions": { + "name": "available_actions", + "type": "[]ActivityAction", + "description": "Array of actions on the activity allowed for the current viewer.", + "required": false + }, + "comment": { + "name": "comment", + "type": "string", + "description": "User-provided comment associated with the activity, comment, or\ntransition request.", + "required": false + }, + "creation_timestamp": { + "name": "creation_timestamp", + "type": "int64", + "description": "Creation time of the object, as a Unix timestamp in milliseconds.", + "required": false + }, + "to_stage": { + "name": "to_stage", + "type": "string", + "description": "Target stage of the transition (if the activity is stage transition\nrelated). 
Valid values are:\n\n* `None`: The initial stage of a model version.\n\n* `Staging`: Staging or pre-production stage.\n\n* `Production`: Production stage.\n\n* `Archived`: Archived stage.", + "required": false + }, + "user_id": { + "name": "user_id", + "type": "string", + "description": "The username of the user that created the object.", + "required": false + } + } + }, + "ml.TransitionStageResponse": { + "name": "TransitionStageResponse", + "package": "ml", + "description": "", + "fields": { + "model_version_databricks": { + "name": "model_version_databricks", + "type": "*ModelVersionDatabricks", + "description": "Updated model version", + "required": false + } + } + }, + "ml.TumblingWindow": { + "name": "TumblingWindow", + "package": "ml", + "description": "", + "fields": { + "window_duration": { + "name": "window_duration", + "type": "string", + "description": "The duration of each tumbling window (non-overlapping, fixed-duration\nwindows).", + "required": false + } + } + }, + "ml.UpdateComment": { + "name": "UpdateComment", + "package": "ml", + "description": "Details required to edit a comment on a model version.", + "fields": { + "comment": { + "name": "comment", + "type": "string", + "description": "User-provided comment on the action.", + "required": false + }, + "id": { + "name": "id", + "type": "string", + "description": "Unique identifier of an activity", + "required": false + } + } + }, + "ml.UpdateCommentResponse": { + "name": "UpdateCommentResponse", + "package": "ml", + "description": "", + "fields": { + "comment": { + "name": "comment", + "type": "*CommentObject", + "description": "Updated comment object", + "required": false + } + } + }, + "ml.UpdateExperiment": { + "name": "UpdateExperiment", + "package": "ml", + "description": "", + "fields": { + "experiment_id": { + "name": "experiment_id", + "type": "string", + "description": "ID of the associated experiment.", + "required": false + }, + "new_name": { + "name": "new_name", + "type": "string", + "description": "If provided, the experiment's name is changed to the new name. 
The new\nname must be unique.", + "required": false + } + } + }, + "ml.UpdateFeatureRequest": { + "name": "UpdateFeatureRequest", + "package": "ml", + "description": "", + "fields": { + "feature": { + "name": "feature", + "type": "Feature", + "description": "Feature to update.", + "required": false + } + } + }, + "ml.UpdateFeatureTagRequest": { + "name": "UpdateFeatureTagRequest", + "package": "ml", + "description": "", + "fields": { + "feature_tag": { + "name": "feature_tag", + "type": "FeatureTag", + "description": "", + "required": false + } + } + }, + "ml.UpdateKafkaConfigRequest": { + "name": "UpdateKafkaConfigRequest", + "package": "ml", + "description": "", + "fields": { + "kafka_config": { + "name": "kafka_config", + "type": "KafkaConfig", + "description": "The Kafka config to update.", + "required": false + } + } + }, + "ml.UpdateMaterializedFeatureRequest": { + "name": "UpdateMaterializedFeatureRequest", + "package": "ml", + "description": "", + "fields": { + "materialized_feature": { + "name": "materialized_feature", + "type": "MaterializedFeature", + "description": "The materialized feature to update.", + "required": false + } + } + }, + "ml.UpdateModelRequest": { + "name": "UpdateModelRequest", + "package": "ml", + "description": "", + "fields": { + "description": { + "name": "description", + "type": "string", + "description": "If provided, updates the description for this `registered_model`.", + "required": false + }, + "name": { + "name": "name", + "type": "string", + "description": "Registered model unique name identifier.", + "required": false + } + } + }, + "ml.UpdateModelResponse": { + "name": "UpdateModelResponse", + "package": "ml", + "description": "", + "fields": { + "registered_model": { + "name": "registered_model", + "type": "*Model", + "description": "", + "required": false + } + } + }, + "ml.UpdateModelVersionRequest": { + "name": "UpdateModelVersionRequest", + "package": "ml", + "description": "", + "fields": { + "description": { + "name": "description", + "type": "string", + "description": "If provided, updates the description for this `registered_model`.", + "required": false + }, + "name": { + "name": "name", + "type": "string", + "description": "Name of the registered model", + "required": false + }, + "version": { + "name": "version", + "type": "string", + "description": "Model version number", + "required": false + } + } + }, + "ml.UpdateModelVersionResponse": { + "name": "UpdateModelVersionResponse", + "package": "ml", + "description": "", + "fields": { + "model_version": { + "name": "model_version", + "type": "*ModelVersion", + "description": "Return new version number generated for this model in registry.", + "required": false + } + } + }, + "ml.UpdateOnlineStoreRequest": { + "name": "UpdateOnlineStoreRequest", + "package": "ml", + "description": "", + "fields": { + "online_store": { + "name": "online_store", + "type": "OnlineStore", + "description": "Online store to update.", + "required": false + } + } + }, + "ml.UpdateRegistryWebhook": { + "name": "UpdateRegistryWebhook", + "package": "ml", + "description": "Details required to update a registry webhook. 
Only the fields that need to\nbe updated should be specified, and both `http_url_spec` and `job_spec`\nshould not be specified in the same request.", + "fields": { + "description": { + "name": "description", + "type": "string", + "description": "User-specified description for the webhook.", + "required": false + }, + "events": { + "name": "events", + "type": "[]RegistryWebhookEvent", + "description": "Events that can trigger a registry webhook: * `MODEL_VERSION_CREATED`: A\nnew model version was created for the associated model.\n\n* `MODEL_VERSION_TRANSITIONED_STAGE`: A model version’s stage was\nchanged.\n\n* `TRANSITION_REQUEST_CREATED`: A user requested a model version’s\nstage be transitioned.\n\n* `COMMENT_CREATED`: A user wrote a comment on a registered model.\n\n* `REGISTERED_MODEL_CREATED`: A new registered model was created. This\nevent type can only be specified for a registry-wide webh...", + "required": false + }, + "http_url_spec": { + "name": "http_url_spec", + "type": "*HttpUrlSpec", + "description": "", + "required": false + }, + "id": { + "name": "id", + "type": "string", + "description": "Webhook ID", + "required": false + }, + "job_spec": { + "name": "job_spec", + "type": "*JobSpec", + "description": "", + "required": false + }, + "status": { + "name": "status", + "type": "RegistryWebhookStatus", + "description": "", + "required": false + } + } + }, + "ml.UpdateRun": { + "name": "UpdateRun", + "package": "ml", + "description": "", + "fields": { + "end_time": { + "name": "end_time", + "type": "int64", + "description": "Unix timestamp in milliseconds of when the run ended.", + "required": false + }, + "run_id": { + "name": "run_id", + "type": "string", + "description": "ID of the run to update. Must be provided.", + "required": false + }, + "run_name": { + "name": "run_name", + "type": "string", + "description": "Updated name of the run.", + "required": false + }, + "run_uuid": { + "name": "run_uuid", + "type": "string", + "description": "[Deprecated, use `run_id` instead] ID of the run to update. 
This field\nwill be removed in a future MLflow version.", + "required": false + }, + "status": { + "name": "status", + "type": "UpdateRunStatus", + "description": "Updated status of the run.", + "required": false + } + } + }, + "ml.UpdateRunResponse": { + "name": "UpdateRunResponse", + "package": "ml", + "description": "", + "fields": { + "run_info": { + "name": "run_info", + "type": "*RunInfo", + "description": "Updated metadata of the run.", + "required": false + } + } + }, + "ml.UpdateWebhookResponse": { + "name": "UpdateWebhookResponse", + "package": "ml", + "description": "", + "fields": { + "webhook": { + "name": "webhook", + "type": "*RegistryWebhook", + "description": "", + "required": false + } + } + }, + "ml.WaitGetExperimentForecastingSucceeded": { + "name": "WaitGetExperimentForecastingSucceeded", + "package": "ml", + "description": "WaitGetExperimentForecastingSucceeded is a wrapper that calls [ForecastingAPI.WaitGetExperimentForecastingSucceeded] and waits to reach SUCCEEDED state.", + "fields": { + "Poll": { + "name": "Poll", + "type": "func(time.Duration, func(*ForecastingExperiment)) (*ForecastingExperiment, error)", + "description": "", + "required": false + }, + "Response": { + "name": "Response", + "type": "*R", + "description": "", + "required": false + }, + "callback": { + "name": "callback", + "type": "func(*ForecastingExperiment)", + "description": "", + "required": false + }, + "experiment_id": { + "name": "experiment_id", + "type": "string", + "description": "", + "required": false + }, + "timeout": { + "name": "timeout", + "type": "time.Duration", + "description": "", + "required": false + } + } + }, + "ml.experimentsImpl": { + "name": "experimentsImpl", + "package": "ml", + "description": "unexported type that holds implementations of just Experiments API methods", + "fields": { + "client": { + "name": "client", + "type": "*client.DatabricksClient", + "description": "", + "required": false + } + } + }, + "ml.featureEngineeringImpl": { + "name": "featureEngineeringImpl", + "package": "ml", + "description": "unexported type that holds implementations of just FeatureEngineering API methods", + "fields": { + "client": { + "name": "client", + "type": "*client.DatabricksClient", + "description": "", + "required": false + } + } + }, + "ml.featureStoreImpl": { + "name": "featureStoreImpl", + "package": "ml", + "description": "unexported type that holds implementations of just FeatureStore API methods", + "fields": { + "client": { + "name": "client", + "type": "*client.DatabricksClient", + "description": "", + "required": false + } + } + }, + "ml.forecastingImpl": { + "name": "forecastingImpl", + "package": "ml", + "description": "unexported type that holds implementations of just Forecasting API methods", + "fields": { + "client": { + "name": "client", + "type": "*client.DatabricksClient", + "description": "", + "required": false + } + } + }, + "ml.materializedFeaturesImpl": { + "name": "materializedFeaturesImpl", + "package": "ml", + "description": "unexported type that holds implementations of just MaterializedFeatures API methods", + "fields": { + "client": { + "name": "client", + "type": "*client.DatabricksClient", + "description": "", + "required": false + } + } + }, + "ml.modelRegistryImpl": { + "name": "modelRegistryImpl", + "package": "ml", + "description": "unexported type that holds implementations of just ModelRegistry API methods", + "fields": { + "client": { + "name": "client", + "type": "*client.DatabricksClient", + "description": "", + "required": false + 
} + } + }, + "pipelines.ClonePipelineRequest": { + "name": "ClonePipelineRequest", + "package": "pipelines", + "description": "", + "fields": { + "allow_duplicate_names": { + "name": "allow_duplicate_names", + "type": "bool", + "description": "If false, deployment will fail if name conflicts with that of another\npipeline.", + "required": false + }, + "budget_policy_id": { + "name": "budget_policy_id", + "type": "string", + "description": "Budget policy of this pipeline.", + "required": false + }, + "catalog": { + "name": "catalog", + "type": "string", + "description": "A catalog in Unity Catalog to publish data from this pipeline to. If\n`target` is specified, tables in this pipeline are published to a\n`target` schema inside `catalog` (for example,\n`catalog`.`target`.`table`). If `target` is not specified, no data is\npublished to Unity Catalog.", + "required": false + }, + "channel": { + "name": "channel", + "type": "string", + "description": "DLT Release Channel that specifies which version to use.", + "required": false + }, + "clusters": { + "name": "clusters", + "type": "[]PipelineCluster", + "description": "Cluster settings for this pipeline deployment.", + "required": false + }, + "configuration": { + "name": "configuration", + "type": "map[string]string", + "description": "String-String configuration for this pipeline execution.", + "required": false + }, + "continuous": { + "name": "continuous", + "type": "bool", + "description": "Whether the pipeline is continuous or triggered. This replaces `trigger`.", + "required": false + }, + "deployment": { + "name": "deployment", + "type": "*PipelineDeployment", + "description": "Deployment type of this pipeline.", + "required": false + }, + "development": { + "name": "development", + "type": "bool", + "description": "Whether the pipeline is in Development mode. Defaults to false.", + "required": false + }, + "edition": { + "name": "edition", + "type": "string", + "description": "Pipeline product edition.", + "required": false + }, + "environment": { + "name": "environment", + "type": "*PipelinesEnvironment", + "description": "Environment specification for this pipeline used to install dependencies.", + "required": false + }, + "event_log": { + "name": "event_log", + "type": "*EventLogSpec", + "description": "Event log configuration for this pipeline", + "required": false + }, + "expected_last_modified": { + "name": "expected_last_modified", + "type": "int64", + "description": "If present, the last-modified time of the pipeline settings before the\nclone. If the settings were modified after that time, then the request\nwill fail with a conflict.", + "required": false + }, + "filters": { + "name": "filters", + "type": "*Filters", + "description": "Filters on which Pipeline packages to include in the deployed graph.", + "required": false + }, + "gateway_definition": { + "name": "gateway_definition", + "type": "*IngestionGatewayPipelineDefinition", + "description": "The definition of a gateway pipeline to support change data capture.", + "required": false + }, + "id": { + "name": "id", + "type": "string", + "description": "Unique identifier for this pipeline.", + "required": false + }, + "ingestion_definition": { + "name": "ingestion_definition", + "type": "*IngestionPipelineDefinition", + "description": "The configuration for a managed ingestion pipeline. 
These settings cannot\nbe used with the 'libraries', 'schema', 'target', or 'catalog' settings.", + "required": false + }, + "libraries": { + "name": "libraries", + "type": "[]PipelineLibrary", + "description": "Libraries or code needed by this deployment.", + "required": false + }, + "name": { + "name": "name", + "type": "string", + "description": "Friendly identifier for this pipeline.", + "required": false + }, + "notifications": { + "name": "notifications", + "type": "[]Notifications", + "description": "List of notification settings for this pipeline.", + "required": false + }, + "photon": { + "name": "photon", + "type": "bool", + "description": "Whether Photon is enabled for this pipeline.", + "required": false + }, + "restart_window": { + "name": "restart_window", + "type": "*RestartWindow", + "description": "Restart window of this pipeline.", + "required": false + }, + "root_path": { + "name": "root_path", + "type": "string", + "description": "Root path for this pipeline. This is used as the root directory when\nediting the pipeline in the Databricks user interface and it is added to\nsys.path when executing Python sources during pipeline execution.", + "required": false + }, + "schema": { + "name": "schema", + "type": "string", + "description": "The default schema (database) where tables are read from or published to.", + "required": false + }, + "serverless": { + "name": "serverless", + "type": "bool", + "description": "Whether serverless compute is enabled for this pipeline.", + "required": false + }, + "storage": { + "name": "storage", + "type": "string", + "description": "DBFS root directory for storing checkpoints and tables.", + "required": false + }, + "tags": { + "name": "tags", + "type": "map[string]string", + "description": "A map of tags associated with the pipeline. These are forwarded to the\ncluster as cluster tags, and are therefore subject to the same\nlimitations. A maximum of 25 tags can be added to the pipeline.", + "required": false + }, + "target": { + "name": "target", + "type": "string", + "description": "Target schema (database) to add tables in this pipeline to. Exactly one\nof `schema` or `target` must be specified. To publish to Unity Catalog,\nalso specify `catalog`. This legacy field is deprecated for pipeline\ncreation in favor of the `schema` field.", + "required": false + }, + "trigger": { + "name": "trigger", + "type": "*PipelineTrigger", + "description": "Which pipeline trigger to use. Deprecated: Use `continuous` instead.", + "required": false + }, + "usage_policy_id": { + "name": "usage_policy_id", + "type": "string", + "description": "Usage policy of this pipeline.", + "required": false + } + } + }, + "pipelines.ClonePipelineResponse": { + "name": "ClonePipelineResponse", + "package": "pipelines", + "description": "", + "fields": { + "pipeline_id": { + "name": "pipeline_id", + "type": "string", + "description": "The pipeline id of the cloned pipeline", + "required": false + } + } + }, + "pipelines.ConnectionParameters": { + "name": "ConnectionParameters", + "package": "pipelines", + "description": "", + "fields": { + "source_catalog": { + "name": "source_catalog", + "type": "string", + "description": "Source catalog for initial connection. This is necessary for schema\nexploration in some database systems like Oracle, and optional but\nnice-to-have in some other database systems like Postgres. 
For Oracle\ndatabases, this maps to a service name.", + "required": false + } + } + }, + "pipelines.CreatePipeline": { + "name": "CreatePipeline", + "package": "pipelines", + "description": "", + "fields": { + "allow_duplicate_names": { + "name": "allow_duplicate_names", + "type": "bool", + "description": "If false, deployment will fail if name conflicts with that of another\npipeline.", + "required": false + }, + "budget_policy_id": { + "name": "budget_policy_id", + "type": "string", + "description": "Budget policy of this pipeline.", + "required": false + }, + "catalog": { + "name": "catalog", + "type": "string", + "description": "A catalog in Unity Catalog to publish data from this pipeline to. If\n`target` is specified, tables in this pipeline are published to a\n`target` schema inside `catalog` (for example,\n`catalog`.`target`.`table`). If `target` is not specified, no data is\npublished to Unity Catalog.", + "required": false + }, + "channel": { + "name": "channel", + "type": "string", + "description": "DLT Release Channel that specifies which version to use.", + "required": false + }, + "clusters": { + "name": "clusters", + "type": "[]PipelineCluster", + "description": "Cluster settings for this pipeline deployment.", + "required": false + }, + "configuration": { + "name": "configuration", + "type": "map[string]string", + "description": "String-String configuration for this pipeline execution.", + "required": false + }, + "continuous": { + "name": "continuous", + "type": "bool", + "description": "Whether the pipeline is continuous or triggered. This replaces `trigger`.", + "required": false + }, + "deployment": { + "name": "deployment", + "type": "*PipelineDeployment", + "description": "Deployment type of this pipeline.", + "required": false + }, + "development": { + "name": "development", + "type": "bool", + "description": "Whether the pipeline is in Development mode. Defaults to false.", + "required": false + }, + "dry_run": { + "name": "dry_run", + "type": "bool", + "description": "", + "required": false + }, + "edition": { + "name": "edition", + "type": "string", + "description": "Pipeline product edition.", + "required": false + }, + "environment": { + "name": "environment", + "type": "*PipelinesEnvironment", + "description": "Environment specification for this pipeline used to install dependencies.", + "required": false + }, + "event_log": { + "name": "event_log", + "type": "*EventLogSpec", + "description": "Event log configuration for this pipeline", + "required": false + }, + "filters": { + "name": "filters", + "type": "*Filters", + "description": "Filters on which Pipeline packages to include in the deployed graph.", + "required": false + }, + "gateway_definition": { + "name": "gateway_definition", + "type": "*IngestionGatewayPipelineDefinition", + "description": "The definition of a gateway pipeline to support change data capture.", + "required": false + }, + "id": { + "name": "id", + "type": "string", + "description": "Unique identifier for this pipeline.", + "required": false + }, + "ingestion_definition": { + "name": "ingestion_definition", + "type": "*IngestionPipelineDefinition", + "description": "The configuration for a managed ingestion pipeline. 
These settings cannot\nbe used with the 'libraries', 'schema', 'target', or 'catalog' settings.", + "required": false + }, + "libraries": { + "name": "libraries", + "type": "[]PipelineLibrary", + "description": "Libraries or code needed by this deployment.", + "required": false + }, + "name": { + "name": "name", + "type": "string", + "description": "Friendly identifier for this pipeline.", + "required": false + }, + "notifications": { + "name": "notifications", + "type": "[]Notifications", + "description": "List of notification settings for this pipeline.", + "required": false + }, + "photon": { + "name": "photon", + "type": "bool", + "description": "Whether Photon is enabled for this pipeline.", + "required": false + }, + "restart_window": { + "name": "restart_window", + "type": "*RestartWindow", + "description": "Restart window of this pipeline.", + "required": false + }, + "root_path": { + "name": "root_path", + "type": "string", + "description": "Root path for this pipeline. This is used as the root directory when\nediting the pipeline in the Databricks user interface and it is added to\nsys.path when executing Python sources during pipeline execution.", + "required": false + }, + "run_as": { + "name": "run_as", + "type": "*RunAs", + "description": "", + "required": false + }, + "schema": { + "name": "schema", + "type": "string", + "description": "The default schema (database) where tables are read from or published to.", + "required": false + }, + "serverless": { + "name": "serverless", + "type": "bool", + "description": "Whether serverless compute is enabled for this pipeline.", + "required": false + }, + "storage": { + "name": "storage", + "type": "string", + "description": "DBFS root directory for storing checkpoints and tables.", + "required": false + }, + "tags": { + "name": "tags", + "type": "map[string]string", + "description": "A map of tags associated with the pipeline. These are forwarded to the\ncluster as cluster tags, and are therefore subject to the same\nlimitations. A maximum of 25 tags can be added to the pipeline.", + "required": false + }, + "target": { + "name": "target", + "type": "string", + "description": "Target schema (database) to add tables in this pipeline to. Exactly one\nof `schema` or `target` must be specified. To publish to Unity Catalog,\nalso specify `catalog`. This legacy field is deprecated for pipeline\ncreation in favor of the `schema` field.", + "required": false + }, + "trigger": { + "name": "trigger", + "type": "*PipelineTrigger", + "description": "Which pipeline trigger to use. Deprecated: Use `continuous` instead.", + "required": false + }, + "usage_policy_id": { + "name": "usage_policy_id", + "type": "string", + "description": "Usage policy of this pipeline.", + "required": false + } + } + }, + "pipelines.CreatePipelineResponse": { + "name": "CreatePipelineResponse", + "package": "pipelines", + "description": "", + "fields": { + "effective_settings": { + "name": "effective_settings", + "type": "*PipelineSpec", + "description": "Only returned when dry_run is true.", + "required": false + }, + "pipeline_id": { + "name": "pipeline_id", + "type": "string", + "description": "The unique identifier for the newly created pipeline. 
Only returned when\ndry_run is false.", + "required": false + } + } + }, + "pipelines.CronTrigger": { + "name": "CronTrigger", + "package": "pipelines", + "description": "", + "fields": { + "quartz_cron_schedule": { + "name": "quartz_cron_schedule", + "type": "string", + "description": "", + "required": false + }, + "timezone_id": { + "name": "timezone_id", + "type": "string", + "description": "", + "required": false + } + } + }, + "pipelines.DataPlaneId": { + "name": "DataPlaneId", + "package": "pipelines", + "description": "", + "fields": { + "instance": { + "name": "instance", + "type": "string", + "description": "The instance name of the data plane emitting an event.", + "required": false + }, + "seq_no": { + "name": "seq_no", + "type": "int64", + "description": "A sequence number, unique and increasing within the data plane instance.", + "required": false + } + } + }, + "pipelines.DayOfWeek": { + "name": "DayOfWeek", + "package": "pipelines", + "description": "Days of week in which the window is allowed to happen.\nIf not specified all days of the week will be used.", + "fields": {} + }, + "pipelines.DeploymentKind": { + "name": "DeploymentKind", + "package": "pipelines", + "description": "The deployment method that manages the pipeline:\n- BUNDLE: The pipeline is managed by a Databricks Asset Bundle.", + "fields": {} + }, + "pipelines.EditPipeline": { + "name": "EditPipeline", + "package": "pipelines", + "description": "", + "fields": { + "allow_duplicate_names": { + "name": "allow_duplicate_names", + "type": "bool", + "description": "If false, deployment will fail if name has changed and conflicts the name\nof another pipeline.", + "required": false + }, + "budget_policy_id": { + "name": "budget_policy_id", + "type": "string", + "description": "Budget policy of this pipeline.", + "required": false + }, + "catalog": { + "name": "catalog", + "type": "string", + "description": "A catalog in Unity Catalog to publish data from this pipeline to. If\n`target` is specified, tables in this pipeline are published to a\n`target` schema inside `catalog` (for example,\n`catalog`.`target`.`table`). If `target` is not specified, no data is\npublished to Unity Catalog.", + "required": false + }, + "channel": { + "name": "channel", + "type": "string", + "description": "DLT Release Channel that specifies which version to use.", + "required": false + }, + "clusters": { + "name": "clusters", + "type": "[]PipelineCluster", + "description": "Cluster settings for this pipeline deployment.", + "required": false + }, + "configuration": { + "name": "configuration", + "type": "map[string]string", + "description": "String-String configuration for this pipeline execution.", + "required": false + }, + "continuous": { + "name": "continuous", + "type": "bool", + "description": "Whether the pipeline is continuous or triggered. This replaces `trigger`.", + "required": false + }, + "deployment": { + "name": "deployment", + "type": "*PipelineDeployment", + "description": "Deployment type of this pipeline.", + "required": false + }, + "development": { + "name": "development", + "type": "bool", + "description": "Whether the pipeline is in Development mode. 
Defaults to false.", + "required": false + }, + "edition": { + "name": "edition", + "type": "string", + "description": "Pipeline product edition.", + "required": false + }, + "environment": { + "name": "environment", + "type": "*PipelinesEnvironment", + "description": "Environment specification for this pipeline used to install dependencies.", + "required": false + }, + "event_log": { + "name": "event_log", + "type": "*EventLogSpec", + "description": "Event log configuration for this pipeline", + "required": false + }, + "expected_last_modified": { + "name": "expected_last_modified", + "type": "int64", + "description": "If present, the last-modified time of the pipeline settings before the\nedit. If the settings were modified after that time, then the request\nwill fail with a conflict.", + "required": false + }, + "filters": { + "name": "filters", + "type": "*Filters", + "description": "Filters on which Pipeline packages to include in the deployed graph.", + "required": false + }, + "gateway_definition": { + "name": "gateway_definition", + "type": "*IngestionGatewayPipelineDefinition", + "description": "The definition of a gateway pipeline to support change data capture.", + "required": false + }, + "id": { + "name": "id", + "type": "string", + "description": "Unique identifier for this pipeline.", + "required": false + }, + "ingestion_definition": { + "name": "ingestion_definition", + "type": "*IngestionPipelineDefinition", + "description": "The configuration for a managed ingestion pipeline. These settings cannot\nbe used with the 'libraries', 'schema', 'target', or 'catalog' settings.", + "required": false + }, + "libraries": { + "name": "libraries", + "type": "[]PipelineLibrary", + "description": "Libraries or code needed by this deployment.", + "required": false + }, + "name": { + "name": "name", + "type": "string", + "description": "Friendly identifier for this pipeline.", + "required": false + }, + "notifications": { + "name": "notifications", + "type": "[]Notifications", + "description": "List of notification settings for this pipeline.", + "required": false + }, + "photon": { + "name": "photon", + "type": "bool", + "description": "Whether Photon is enabled for this pipeline.", + "required": false + }, + "restart_window": { + "name": "restart_window", + "type": "*RestartWindow", + "description": "Restart window of this pipeline.", + "required": false + }, + "root_path": { + "name": "root_path", + "type": "string", + "description": "Root path for this pipeline. This is used as the root directory when\nediting the pipeline in the Databricks user interface and it is added to\nsys.path when executing Python sources during pipeline execution.", + "required": false + }, + "run_as": { + "name": "run_as", + "type": "*RunAs", + "description": "", + "required": false + }, + "schema": { + "name": "schema", + "type": "string", + "description": "The default schema (database) where tables are read from or published to.", + "required": false + }, + "serverless": { + "name": "serverless", + "type": "bool", + "description": "Whether serverless compute is enabled for this pipeline.", + "required": false + }, + "storage": { + "name": "storage", + "type": "string", + "description": "DBFS root directory for storing checkpoints and tables.", + "required": false + }, + "tags": { + "name": "tags", + "type": "map[string]string", + "description": "A map of tags associated with the pipeline. These are forwarded to the\ncluster as cluster tags, and are therefore subject to the same\nlimitations. 
A maximum of 25 tags can be added to the pipeline.", + "required": false + }, + "target": { + "name": "target", + "type": "string", + "description": "Target schema (database) to add tables in this pipeline to. Exactly one\nof `schema` or `target` must be specified. To publish to Unity Catalog,\nalso specify `catalog`. This legacy field is deprecated for pipeline\ncreation in favor of the `schema` field.", + "required": false + }, + "trigger": { + "name": "trigger", + "type": "*PipelineTrigger", + "description": "Which pipeline trigger to use. Deprecated: Use `continuous` instead.", + "required": false + }, + "usage_policy_id": { + "name": "usage_policy_id", + "type": "string", + "description": "Usage policy of this pipeline.", + "required": false + } + } + }, + "pipelines.ErrorDetail": { + "name": "ErrorDetail", + "package": "pipelines", + "description": "", + "fields": { + "exceptions": { + "name": "exceptions", + "type": "[]SerializedException", + "description": "The exception thrown for this error, with its chain of cause.", + "required": false + }, + "fatal": { + "name": "fatal", + "type": "bool", + "description": "Whether this error is considered fatal, that is, unrecoverable.", + "required": false + } + } + }, + "pipelines.EventLogSpec": { + "name": "EventLogSpec", + "package": "pipelines", + "description": "Configurable event log parameters.", + "fields": { + "catalog": { + "name": "catalog", + "type": "string", + "description": "The UC catalog the event log is published under.", + "required": false + }, + "name": { + "name": "name", + "type": "string", + "description": "The name the event log is published to in UC.", + "required": false + }, + "schema": { + "name": "schema", + "type": "string", + "description": "The UC schema the event log is published under.", + "required": false + } + } + }, + "pipelines.FileLibrary": { + "name": "FileLibrary", + "package": "pipelines", + "description": "", + "fields": { + "path": { + "name": "path", + "type": "string", + "description": "The absolute path of the source code.", + "required": false + } + } + }, + "pipelines.Filters": { + "name": "Filters", + "package": "pipelines", + "description": "", + "fields": { + "exclude": { + "name": "exclude", + "type": "[]string", + "description": "Paths to exclude.", + "required": false + }, + "include": { + "name": "include", + "type": "[]string", + "description": "Paths to include.", + "required": false + } + } + }, + "pipelines.GetPipelinePermissionLevelsResponse": { + "name": "GetPipelinePermissionLevelsResponse", + "package": "pipelines", + "description": "", + "fields": { + "permission_levels": { + "name": "permission_levels", + "type": "[]PipelinePermissionsDescription", + "description": "Specific permission levels", + "required": false + } + } + }, + "pipelines.GetPipelineResponse": { + "name": "GetPipelineResponse", + "package": "pipelines", + "description": "", + "fields": { + "cause": { + "name": "cause", + "type": "string", + "description": "An optional message detailing the cause of the pipeline state.", + "required": false + }, + "cluster_id": { + "name": "cluster_id", + "type": "string", + "description": "The ID of the cluster that the pipeline is running on.", + "required": false + }, + "creator_user_name": { + "name": "creator_user_name", + "type": "string", + "description": "The username of the pipeline creator.", + "required": false + }, + "effective_budget_policy_id": { + "name": "effective_budget_policy_id", + "type": "string", + "description": "Serverless budget policy ID of this 
pipeline.", + "required": false + }, + "health": { + "name": "health", + "type": "GetPipelineResponseHealth", + "description": "The health of a pipeline.", + "required": false + }, + "last_modified": { + "name": "last_modified", + "type": "int64", + "description": "The last time the pipeline settings were modified or created.", + "required": false + }, + "latest_updates": { + "name": "latest_updates", + "type": "[]UpdateStateInfo", + "description": "Status of the latest updates for the pipeline. Ordered with the newest\nupdate first.", + "required": false + }, + "name": { + "name": "name", + "type": "string", + "description": "A human friendly identifier for the pipeline, taken from the `spec`.", + "required": false + }, + "pipeline_id": { + "name": "pipeline_id", + "type": "string", + "description": "The ID of the pipeline.", + "required": false + }, + "run_as": { + "name": "run_as", + "type": "*RunAs", + "description": "The user or service principal that the pipeline runs as, if specified in\nthe request. This field indicates the explicit configuration of `run_as`\nfor the pipeline. To find the value in all cases, explicit or implicit,\nuse `run_as_user_name`.", + "required": false + }, + "run_as_user_name": { + "name": "run_as_user_name", + "type": "string", + "description": "Username of the user that the pipeline will run on behalf of.", + "required": false + }, + "spec": { + "name": "spec", + "type": "*PipelineSpec", + "description": "The pipeline specification. This field is not returned when called by\n`ListPipelines`.", + "required": false + }, + "state": { + "name": "state", + "type": "PipelineState", + "description": "The pipeline state.", + "required": false + } + } + }, + "pipelines.GetUpdateResponse": { + "name": "GetUpdateResponse", + "package": "pipelines", + "description": "", + "fields": { + "update": { + "name": "update", + "type": "*UpdateInfo", + "description": "The current update info.", + "required": false + } + } + }, + "pipelines.IngestionConfig": { + "name": "IngestionConfig", + "package": "pipelines", + "description": "", + "fields": { + "report": { + "name": "report", + "type": "*ReportSpec", + "description": "Select a specific source report.", + "required": false + }, + "schema": { + "name": "schema", + "type": "*SchemaSpec", + "description": "Select all tables from a specific source schema.", + "required": false + }, + "table": { + "name": "table", + "type": "*TableSpec", + "description": "Select a specific source table.", + "required": false + } + } + }, + "pipelines.IngestionGatewayPipelineDefinition": { + "name": "IngestionGatewayPipelineDefinition", + "package": "pipelines", + "description": "", + "fields": { + "connection_id": { + "name": "connection_id", + "type": "string", + "description": "[Deprecated, use connection_name instead] Immutable. The Unity Catalog\nconnection that this gateway pipeline uses to communicate with the\nsource.", + "required": false + }, + "connection_name": { + "name": "connection_name", + "type": "string", + "description": "Immutable. The Unity Catalog connection that this gateway pipeline uses\nto communicate with the source.", + "required": false + }, + "connection_parameters": { + "name": "connection_parameters", + "type": "*ConnectionParameters", + "description": "Optional, Internal. Parameters required to establish an initial\nconnection with the source.", + "required": false + }, + "gateway_storage_catalog": { + "name": "gateway_storage_catalog", + "type": "string", + "description": "Required, Immutable. 
The name of the catalog for the gateway pipeline's\nstorage location.", + "required": false + }, + "gateway_storage_name": { + "name": "gateway_storage_name", + "type": "string", + "description": "Optional. The Unity Catalog-compatible name for the gateway storage\nlocation. This is the destination to use for the data that is extracted\nby the gateway. Spark Declarative Pipelines system will automatically\ncreate the storage location under the catalog and schema.", + "required": false + }, + "gateway_storage_schema": { + "name": "gateway_storage_schema", + "type": "string", + "description": "Required, Immutable. The name of the schema for the gateway pipelines's\nstorage location.", + "required": false + } + } + }, + "pipelines.IngestionPipelineDefinition": { + "name": "IngestionPipelineDefinition", + "package": "pipelines", + "description": "", + "fields": { + "connection_name": { + "name": "connection_name", + "type": "string", + "description": "Immutable. The Unity Catalog connection that this ingestion pipeline uses\nto communicate with the source. This is used with connectors for\napplications like Salesforce, Workday, and so on.", + "required": false + }, + "ingest_from_uc_foreign_catalog": { + "name": "ingest_from_uc_foreign_catalog", + "type": "bool", + "description": "Immutable. If set to true, the pipeline will ingest tables from the UC\nforeign catalogs directly without the need to specify a UC connection or\ningestion gateway. The `source_catalog` fields in objects of\nIngestionConfig are interpreted as the UC foreign catalogs to ingest\nfrom.", + "required": false + }, + "ingestion_gateway_id": { + "name": "ingestion_gateway_id", + "type": "string", + "description": "Immutable. Identifier for the gateway that is used by this ingestion\npipeline to communicate with the source database. This is used with\nconnectors to databases like SQL Server.", + "required": false + }, + "netsuite_jar_path": { + "name": "netsuite_jar_path", + "type": "string", + "description": "Netsuite only configuration. When the field is set for a netsuite\nconnector, the jar stored in the field will be validated and added to the\nclasspath of pipeline's cluster.", + "required": false + }, + "objects": { + "name": "objects", + "type": "[]IngestionConfig", + "description": "Required. Settings specifying tables to replicate and the destination for\nthe replicated tables.", + "required": false + }, + "source_configurations": { + "name": "source_configurations", + "type": "[]SourceConfig", + "description": "Top-level source configurations", + "required": false + }, + "source_type": { + "name": "source_type", + "type": "IngestionSourceType", + "description": "The type of the foreign source. The source type will be inferred from the\nsource connection or ingestion gateway. This field is output only and\nwill be ignored if provided.", + "required": false + }, + "table_configuration": { + "name": "table_configuration", + "type": "*TableSpecificConfig", + "description": "Configuration settings to control the ingestion of tables. 
These settings\nare applied to all tables in the pipeline.", + "required": false + } + } + }, + "pipelines.IngestionPipelineDefinitionTableSpecificConfigQueryBasedConnectorConfig": { + "name": "IngestionPipelineDefinitionTableSpecificConfigQueryBasedConnectorConfig", + "package": "pipelines", + "description": "Configurations that are only applicable for query-based ingestion connectors.", + "fields": { + "cursor_columns": { + "name": "cursor_columns", + "type": "[]string", + "description": "The names of the monotonically increasing columns in the source table\nthat are used to enable the table to be read and ingested incrementally\nthrough structured streaming. The columns are allowed to have repeated\nvalues but have to be non-decreasing. If the source data is merged into\nthe destination (e.g., using SCD Type 1 or Type 2), these columns will\nimplicitly define the `sequence_by` behavior. You can still explicitly\nset `sequence_by` to override this default.", + "required": false + }, + "deletion_condition": { + "name": "deletion_condition", + "type": "string", + "description": "Specifies a SQL WHERE condition that specifies that the source row has\nbeen deleted. This is sometimes referred to as \"soft-deletes\". For\nexample: \"Operation = 'DELETE'\" or \"is_deleted = true\". This field is\northogonal to `hard_deletion_sync_interval_in_seconds`, one for\nsoft-deletes and the other for hard-deletes. See also the\nhard_deletion_sync_min_interval_in_seconds field for handling of \"hard\ndeletes\" where the source rows are physically removed from the table.", + "required": false + }, + "hard_deletion_sync_min_interval_in_seconds": { + "name": "hard_deletion_sync_min_interval_in_seconds", + "type": "int64", + "description": "Specifies the minimum interval (in seconds) between snapshots on primary\nkeys for detecting and synchronizing hard deletions—i.e., rows that\nhave been physically removed from the source table. This interval acts as\na lower bound. If ingestion runs less frequently than this value, hard\ndeletion synchronization will align with the actual ingestion frequency\ninstead of happening more often. If not set, hard deletion\nsynchronization via snapshots is disabled. This field is mutable and can\nbe up...", + "required": false + } + } + }, + "pipelines.IngestionPipelineDefinitionWorkdayReportParameters": { + "name": "IngestionPipelineDefinitionWorkdayReportParameters", + "package": "pipelines", + "description": "", + "fields": { + "incremental": { + "name": "incremental", + "type": "bool", + "description": "(Optional) Marks the report as incremental. This field is deprecated and\nshould not be used. Use `parameters` instead. The incremental behavior is\nnow controlled by the `parameters` field.", + "required": false + }, + "parameters": { + "name": "parameters", + "type": "map[string]string", + "description": "Parameters for the Workday report. Each key represents the parameter name\n(e.g., \"start_date\", \"end_date\"), and the corresponding value is a\nSQL-like expression used to compute the parameter value at runtime.\nExample: { \"start_date\": \"{ coalesce(current_offset(),\ndate(\\\"2025-02-01\\\")) }\", \"end_date\": \"{ current_date() - INTERVAL 1 DAY\n}\" }", + "required": false + }, + "report_parameters": { + "name": "report_parameters", + "type": "[]IngestionPipelineDefinitionWorkdayReportParametersQueryKeyValue", + "description": "(Optional) Additional custom parameters for Workday Report This field is\ndeprecated and should not be used. 
Use `parameters` instead.", + "required": false + } + } + }, + "pipelines.IngestionPipelineDefinitionWorkdayReportParametersQueryKeyValue": { + "name": "IngestionPipelineDefinitionWorkdayReportParametersQueryKeyValue", + "package": "pipelines", + "description": "", + "fields": { + "key": { + "name": "key", + "type": "string", + "description": "Key for the report parameter, can be a column name or other metadata", + "required": false + }, + "value": { + "name": "value", + "type": "string", + "description": "Value for the report parameter. Possible values it can take are these sql\nfunctions: 1. coalesce(current_offset(), date(\"YYYY-MM-DD\")) -\u003e if\ncurrent_offset() is null, then the passed date, else current_offset() 2.\ncurrent_date() 3. date_sub(current_date(), x) -\u003e subtract x (some\nnon-negative integer) days from current date", + "required": false + } + } + }, + "pipelines.ListPipelineEventsResponse": { + "name": "ListPipelineEventsResponse", + "package": "pipelines", + "description": "", + "fields": { + "events": { + "name": "events", + "type": "[]PipelineEvent", + "description": "The list of events matching the request criteria.", + "required": false + }, + "next_page_token": { + "name": "next_page_token", + "type": "string", + "description": "If present, a token to fetch the next page of events.", + "required": false + }, + "prev_page_token": { + "name": "prev_page_token", + "type": "string", + "description": "If present, a token to fetch the previous page of events.", + "required": false + } + } + }, + "pipelines.ListPipelinesResponse": { + "name": "ListPipelinesResponse", + "package": "pipelines", + "description": "", + "fields": { + "next_page_token": { + "name": "next_page_token", + "type": "string", + "description": "If present, a token to fetch the next page of events.", + "required": false + }, + "statuses": { + "name": "statuses", + "type": "[]PipelineStateInfo", + "description": "The list of events matching the request criteria.", + "required": false + } + } + }, + "pipelines.ListUpdatesResponse": { + "name": "ListUpdatesResponse", + "package": "pipelines", + "description": "", + "fields": { + "next_page_token": { + "name": "next_page_token", + "type": "string", + "description": "If present, then there are more results, and this a token to be used in a\nsubsequent request to fetch the next page.", + "required": false + }, + "prev_page_token": { + "name": "prev_page_token", + "type": "string", + "description": "If present, then this token can be used in a subsequent request to fetch\nthe previous page.", + "required": false + }, + "updates": { + "name": "updates", + "type": "[]UpdateInfo", + "description": "", + "required": false + } + } + }, + "pipelines.NotebookLibrary": { + "name": "NotebookLibrary", + "package": "pipelines", + "description": "", + "fields": { + "path": { + "name": "path", + "type": "string", + "description": "The absolute path of the source code.", + "required": false + } + } + }, + "pipelines.Notifications": { + "name": "Notifications", + "package": "pipelines", + "description": "", + "fields": { + "alerts": { + "name": "alerts", + "type": "[]string", + "description": "A list of alerts that trigger the sending of notifications to the\nconfigured destinations. The supported alerts are:\n\n* `on-update-success`: A pipeline update completes successfully. *\n`on-update-failure`: Each time a pipeline update fails. *\n`on-update-fatal-failure`: A pipeline update fails with a non-retryable\n(fatal) error. 
* `on-flow-failure`: A single data flow fails.", + "required": false + }, + "email_recipients": { + "name": "email_recipients", + "type": "[]string", + "description": "A list of email addresses notified when a configured alert is triggered.", + "required": false + } + } + }, + "pipelines.Origin": { + "name": "Origin", + "package": "pipelines", + "description": "", + "fields": { + "batch_id": { + "name": "batch_id", + "type": "int64", + "description": "The id of a batch. Unique within a flow.", + "required": false + }, + "cloud": { + "name": "cloud", + "type": "string", + "description": "The cloud provider, e.g., AWS or Azure.", + "required": false + }, + "cluster_id": { + "name": "cluster_id", + "type": "string", + "description": "The id of the cluster where an execution happens. Unique within a region.", + "required": false + }, + "dataset_name": { + "name": "dataset_name", + "type": "string", + "description": "The name of a dataset. Unique within a pipeline.", + "required": false + }, + "flow_id": { + "name": "flow_id", + "type": "string", + "description": "The id of the flow. Globally unique. Incremental queries will generally\nreuse the same id while complete queries will have a new id per update.", + "required": false + }, + "flow_name": { + "name": "flow_name", + "type": "string", + "description": "The name of the flow. Not unique.", + "required": false + }, + "host": { + "name": "host", + "type": "string", + "description": "The optional host name where the event was triggered", + "required": false + }, + "maintenance_id": { + "name": "maintenance_id", + "type": "string", + "description": "The id of a maintenance run. Globally unique.", + "required": false + }, + "materialization_name": { + "name": "materialization_name", + "type": "string", + "description": "Materialization name.", + "required": false + }, + "org_id": { + "name": "org_id", + "type": "int64", + "description": "The org id of the user. Unique within a cloud.", + "required": false + }, + "pipeline_id": { + "name": "pipeline_id", + "type": "string", + "description": "The id of the pipeline. Globally unique.", + "required": false + }, + "pipeline_name": { + "name": "pipeline_name", + "type": "string", + "description": "The name of the pipeline. Not unique.", + "required": false + }, + "region": { + "name": "region", + "type": "string", + "description": "The cloud region.", + "required": false + }, + "request_id": { + "name": "request_id", + "type": "string", + "description": "The id of the request that caused an update.", + "required": false + }, + "table_id": { + "name": "table_id", + "type": "string", + "description": "The id of a (delta) table. Globally unique.", + "required": false + }, + "uc_resource_id": { + "name": "uc_resource_id", + "type": "string", + "description": "The Unity Catalog id of the MV or ST being updated.", + "required": false + }, + "update_id": { + "name": "update_id", + "type": "string", + "description": "The id of an execution. 
Globally unique.", + "required": false + } + } + }, + "pipelines.PathPattern": { + "name": "PathPattern", + "package": "pipelines", + "description": "", + "fields": { + "include": { + "name": "include", + "type": "string", + "description": "The source code to include for pipelines", + "required": false + } + } + }, + "pipelines.PipelineAccessControlRequest": { + "name": "PipelineAccessControlRequest", + "package": "pipelines", + "description": "", + "fields": { + "group_name": { + "name": "group_name", + "type": "string", + "description": "name of the group", + "required": false + }, + "permission_level": { + "name": "permission_level", + "type": "PipelinePermissionLevel", + "description": "", + "required": false + }, + "service_principal_name": { + "name": "service_principal_name", + "type": "string", + "description": "application ID of a service principal", + "required": false + }, + "user_name": { + "name": "user_name", + "type": "string", + "description": "name of the user", + "required": false + } + } + }, + "pipelines.PipelineAccessControlResponse": { + "name": "PipelineAccessControlResponse", + "package": "pipelines", + "description": "", + "fields": { + "all_permissions": { + "name": "all_permissions", + "type": "[]PipelinePermission", + "description": "All permissions.", + "required": false + }, + "display_name": { + "name": "display_name", + "type": "string", + "description": "Display name of the user or service principal.", + "required": false + }, + "group_name": { + "name": "group_name", + "type": "string", + "description": "name of the group", + "required": false + }, + "service_principal_name": { + "name": "service_principal_name", + "type": "string", + "description": "Name of the service principal.", + "required": false + }, + "user_name": { + "name": "user_name", + "type": "string", + "description": "name of the user", + "required": false + } + } + }, + "pipelines.PipelineCluster": { + "name": "PipelineCluster", + "package": "pipelines", + "description": "", + "fields": { + "apply_policy_default_values": { + "name": "apply_policy_default_values", + "type": "bool", + "description": "Note: This field won't be persisted. Only API users will check this\nfield.", + "required": false + }, + "autoscale": { + "name": "autoscale", + "type": "*PipelineClusterAutoscale", + "description": "Parameters needed in order to automatically scale clusters up and down\nbased on load. Note: autoscaling works best with DB runtime versions 3.0\nor later.", + "required": false + }, + "aws_attributes": { + "name": "aws_attributes", + "type": "*compute.AwsAttributes", + "description": "Attributes related to clusters running on Amazon Web Services. If not\nspecified at cluster creation, a set of default values will be used.", + "required": false + }, + "azure_attributes": { + "name": "azure_attributes", + "type": "*compute.AzureAttributes", + "description": "Attributes related to clusters running on Microsoft Azure. If not\nspecified at cluster creation, a set of default values will be used.", + "required": false + }, + "cluster_log_conf": { + "name": "cluster_log_conf", + "type": "*compute.ClusterLogConf", + "description": "The configuration for delivering spark logs to a long-term storage\ndestination. Only dbfs destinations are supported. Only one destination\ncan be specified for one cluster. If the conf is given, the logs will be\ndelivered to the destination every `5 mins`. 
The destination of driver\nlogs is `$destination/$clusterId/driver`, while the destination of\nexecutor logs is `$destination/$clusterId/executor`.", + "required": false + }, + "custom_tags": { + "name": "custom_tags", + "type": "map[string]string", + "description": "Additional tags for cluster resources. Databricks will tag all cluster\nresources (e.g., AWS instances and EBS volumes) with these tags in\naddition to `default_tags`. Notes:\n\n- Currently, Databricks allows at most 45 custom tags\n\n- Clusters can only reuse cloud resources if the resources' tags are a\nsubset of the cluster tags", + "required": false + }, + "driver_instance_pool_id": { + "name": "driver_instance_pool_id", + "type": "string", + "description": "The optional ID of the instance pool for the driver of the cluster\nbelongs. The pool cluster uses the instance pool with id\n(instance_pool_id) if the driver pool is not assigned.", + "required": false + }, + "driver_node_type_id": { + "name": "driver_node_type_id", + "type": "string", + "description": "The node type of the Spark driver. Note that this field is optional; if\nunset, the driver node type will be set as the same value as\n`node_type_id` defined above.", + "required": false + }, + "enable_local_disk_encryption": { + "name": "enable_local_disk_encryption", + "type": "bool", + "description": "Whether to enable local disk encryption for the cluster.", + "required": false + }, + "gcp_attributes": { + "name": "gcp_attributes", + "type": "*compute.GcpAttributes", + "description": "Attributes related to clusters running on Google Cloud Platform. If not\nspecified at cluster creation, a set of default values will be used.", + "required": false + }, + "init_scripts": { + "name": "init_scripts", + "type": "[]compute.InitScriptInfo", + "description": "The configuration for storing init scripts. Any number of destinations\ncan be specified. The scripts are executed sequentially in the order\nprovided. If `cluster_log_conf` is specified, init script logs are sent\nto `\u003cdestination\u003e/\u003ccluster-ID\u003e/init_scripts`.", + "required": false + }, + "instance_pool_id": { + "name": "instance_pool_id", + "type": "string", + "description": "The optional ID of the instance pool to which the cluster belongs.", + "required": false + }, + "label": { + "name": "label", + "type": "string", + "description": "A label for the cluster specification, either `default` to configure the\ndefault cluster, or `maintenance` to configure the maintenance cluster.\nThis field is optional. The default value is `default`.", + "required": false + }, + "node_type_id": { + "name": "node_type_id", + "type": "string", + "description": "This field encodes, through a single value, the resources available to\neach of the Spark nodes in this cluster. For example, the Spark nodes can\nbe provisioned and optimized for memory or compute intensive workloads. A\nlist of available node types can be retrieved by using the\n:method:clusters/listNodeTypes API call.", + "required": false + }, + "num_workers": { + "name": "num_workers", + "type": "int", + "description": "Number of worker nodes that this cluster should have. A cluster has one\nSpark Driver and `num_workers` Executors for a total of `num_workers` + 1\nSpark nodes.\n\nNote: When reading the properties of a cluster, this field reflects the\ndesired number of workers rather than the actual current number of\nworkers. 
For instance, if a cluster is resized from 5 to 10 workers, this\nfield will immediately be updated to reflect the target size of 10\nworkers, whereas the workers listed in `spark_info` will ...", + "required": false + }, + "policy_id": { + "name": "policy_id", + "type": "string", + "description": "The ID of the cluster policy used to create the cluster if applicable.", + "required": false + }, + "spark_conf": { + "name": "spark_conf", + "type": "map[string]string", + "description": "An object containing a set of optional, user-specified Spark\nconfiguration key-value pairs. See :method:clusters/create for more\ndetails.", + "required": false + }, + "spark_env_vars": { + "name": "spark_env_vars", + "type": "map[string]string", + "description": "An object containing a set of optional, user-specified environment\nvariable key-value pairs. Please note that key-value pair of the form\n(X,Y) will be exported as is (i.e., `export X='Y'`) while launching the\ndriver and workers.\n\nIn order to specify an additional set of `SPARK_DAEMON_JAVA_OPTS`, we\nrecommend appending them to `$SPARK_DAEMON_JAVA_OPTS` as shown in the\nexample below. This ensures that all default databricks managed\nenvironmental variables are included as well.\n\nExample Spark en...", + "required": false + }, + "ssh_public_keys": { + "name": "ssh_public_keys", + "type": "[]string", + "description": "SSH public key contents that will be added to each Spark node in this\ncluster. The corresponding private keys can be used to login with the\nuser name `ubuntu` on port `2200`. Up to 10 keys can be specified.", + "required": false + } + } + }, + "pipelines.PipelineClusterAutoscale": { + "name": "PipelineClusterAutoscale", + "package": "pipelines", + "description": "", + "fields": { + "max_workers": { + "name": "max_workers", + "type": "int", + "description": "The maximum number of workers to which the cluster can scale up when\noverloaded. `max_workers` must be strictly greater than `min_workers`.", + "required": false + }, + "min_workers": { + "name": "min_workers", + "type": "int", + "description": "The minimum number of workers the cluster can scale down to when\nunderutilized. It is also the initial number of workers the cluster will\nhave after creation.", + "required": false + }, + "mode": { + "name": "mode", + "type": "PipelineClusterAutoscaleMode", + "description": "Databricks Enhanced Autoscaling optimizes cluster utilization by\nautomatically allocating cluster resources based on workload volume, with\nminimal impact to the data processing latency of your pipelines. Enhanced\nAutoscaling is available for `updates` clusters only. The legacy\nautoscaling feature is used for `maintenance` clusters.", + "required": false + } + } + }, + "pipelines.PipelineClusterAutoscaleMode": { + "name": "PipelineClusterAutoscaleMode", + "package": "pipelines", + "description": "Databricks Enhanced Autoscaling optimizes cluster utilization by automatically\nallocating cluster resources based on workload volume, with minimal impact to\nthe data processing latency of your pipelines. Enhanced Autoscaling is available\nfor `updates` clusters only. 
The legacy autoscaling feature is used for `maintenance`\nclusters.", + "fields": {} + }, + "pipelines.PipelineDeployment": { + "name": "PipelineDeployment", + "package": "pipelines", + "description": "", + "fields": { + "kind": { + "name": "kind", + "type": "DeploymentKind", + "description": "The deployment method that manages the pipeline.", + "required": false + }, + "metadata_file_path": { + "name": "metadata_file_path", + "type": "string", + "description": "The path to the file containing metadata about the deployment.", + "required": false + } + } + }, + "pipelines.PipelineEvent": { + "name": "PipelineEvent", + "package": "pipelines", + "description": "", + "fields": { + "error": { + "name": "error", + "type": "*ErrorDetail", + "description": "Information about an error captured by the event.", + "required": false + }, + "event_type": { + "name": "event_type", + "type": "string", + "description": "The event type. Should always correspond to the details", + "required": false + }, + "id": { + "name": "id", + "type": "string", + "description": "A time-based, globally unique id.", + "required": false + }, + "level": { + "name": "level", + "type": "EventLevel", + "description": "The severity level of the event.", + "required": false + }, + "maturity_level": { + "name": "maturity_level", + "type": "MaturityLevel", + "description": "Maturity level for event_type.", + "required": false + }, + "message": { + "name": "message", + "type": "string", + "description": "The display message associated with the event.", + "required": false + }, + "origin": { + "name": "origin", + "type": "*Origin", + "description": "Describes where the event originates from.", + "required": false + }, + "sequence": { + "name": "sequence", + "type": "*Sequencing", + "description": "A sequencing object to identify and order events.", + "required": false + }, + "timestamp": { + "name": "timestamp", + "type": "string", + "description": "The time of the event.", + "required": false + }, + "truncation": { + "name": "truncation", + "type": "*Truncation", + "description": "Information about which fields were truncated from this event due to size\nconstraints. If empty or absent, no truncation occurred. See\nhttps://docs.databricks.com/en/ldp/monitor-event-logs for information on\nretrieving complete event data.", + "required": false + } + } + }, + "pipelines.PipelineLibrary": { + "name": "PipelineLibrary", + "package": "pipelines", + "description": "", + "fields": { + "file": { + "name": "file", + "type": "*FileLibrary", + "description": "The path to a file that defines a pipeline and is stored in the\nDatabricks Repos.", + "required": false + }, + "glob": { + "name": "glob", + "type": "*PathPattern", + "description": "The unified field to include source codes. Each entry can be a notebook\npath, a file path, or a folder path that ends `/**`. This field cannot be\nused together with `notebook` or `file`.", + "required": false + }, + "jar": { + "name": "jar", + "type": "string", + "description": "URI of the jar to be installed. 
Currently only DBFS is supported.", + "required": false + }, + "maven": { + "name": "maven", + "type": "*compute.MavenLibrary", + "description": "Specification of a maven library to be installed.", + "required": false + }, + "notebook": { + "name": "notebook", + "type": "*NotebookLibrary", + "description": "The path to a notebook that defines a pipeline and is stored in the\nDatabricks workspace.", + "required": false + }, + "whl": { + "name": "whl", + "type": "string", + "description": "URI of the whl to be installed.", + "required": false + } + } + }, + "pipelines.PipelinePermission": { + "name": "PipelinePermission", + "package": "pipelines", + "description": "", + "fields": { + "inherited": { + "name": "inherited", + "type": "bool", + "description": "", + "required": false + }, + "inherited_from_object": { + "name": "inherited_from_object", + "type": "[]string", + "description": "", + "required": false + }, + "permission_level": { + "name": "permission_level", + "type": "PipelinePermissionLevel", + "description": "", + "required": false + } + } + }, + "pipelines.PipelinePermissions": { + "name": "PipelinePermissions", + "package": "pipelines", + "description": "", + "fields": { + "access_control_list": { + "name": "access_control_list", + "type": "[]PipelineAccessControlResponse", + "description": "", + "required": false + }, + "object_id": { + "name": "object_id", + "type": "string", + "description": "", + "required": false + }, + "object_type": { + "name": "object_type", + "type": "string", + "description": "", + "required": false + } + } + }, + "pipelines.PipelinePermissionsDescription": { + "name": "PipelinePermissionsDescription", + "package": "pipelines", + "description": "", + "fields": { + "description": { + "name": "description", + "type": "string", + "description": "", + "required": false + }, + "permission_level": { + "name": "permission_level", + "type": "PipelinePermissionLevel", + "description": "", + "required": false + } + } + }, + "pipelines.PipelinePermissionsRequest": { + "name": "PipelinePermissionsRequest", + "package": "pipelines", + "description": "", + "fields": { + "access_control_list": { + "name": "access_control_list", + "type": "[]PipelineAccessControlRequest", + "description": "", + "required": false + } + } + }, + "pipelines.PipelineSpec": { + "name": "PipelineSpec", + "package": "pipelines", + "description": "", + "fields": { + "budget_policy_id": { + "name": "budget_policy_id", + "type": "string", + "description": "Budget policy of this pipeline.", + "required": false + }, + "catalog": { + "name": "catalog", + "type": "string", + "description": "A catalog in Unity Catalog to publish data from this pipeline to. If\n`target` is specified, tables in this pipeline are published to a\n`target` schema inside `catalog` (for example,\n`catalog`.`target`.`table`). 
If `target` is not specified, no data is\npublished to Unity Catalog.", + "required": false + }, + "channel": { + "name": "channel", + "type": "string", + "description": "DLT Release Channel that specifies which version to use.", + "required": false + }, + "clusters": { + "name": "clusters", + "type": "[]PipelineCluster", + "description": "Cluster settings for this pipeline deployment.", + "required": false + }, + "configuration": { + "name": "configuration", + "type": "map[string]string", + "description": "String-String configuration for this pipeline execution.", + "required": false + }, + "continuous": { + "name": "continuous", + "type": "bool", + "description": "Whether the pipeline is continuous or triggered. This replaces `trigger`.", + "required": false + }, + "deployment": { + "name": "deployment", + "type": "*PipelineDeployment", + "description": "Deployment type of this pipeline.", + "required": false + }, + "development": { + "name": "development", + "type": "bool", + "description": "Whether the pipeline is in Development mode. Defaults to false.", + "required": false + }, + "edition": { + "name": "edition", + "type": "string", + "description": "Pipeline product edition.", + "required": false + }, + "environment": { + "name": "environment", + "type": "*PipelinesEnvironment", + "description": "Environment specification for this pipeline used to install dependencies.", + "required": false + }, + "event_log": { + "name": "event_log", + "type": "*EventLogSpec", + "description": "Event log configuration for this pipeline", + "required": false + }, + "filters": { + "name": "filters", + "type": "*Filters", + "description": "Filters on which Pipeline packages to include in the deployed graph.", + "required": false + }, + "gateway_definition": { + "name": "gateway_definition", + "type": "*IngestionGatewayPipelineDefinition", + "description": "The definition of a gateway pipeline to support change data capture.", + "required": false + }, + "id": { + "name": "id", + "type": "string", + "description": "Unique identifier for this pipeline.", + "required": false + }, + "ingestion_definition": { + "name": "ingestion_definition", + "type": "*IngestionPipelineDefinition", + "description": "The configuration for a managed ingestion pipeline. These settings cannot\nbe used with the 'libraries', 'schema', 'target', or 'catalog' settings.", + "required": false + }, + "libraries": { + "name": "libraries", + "type": "[]PipelineLibrary", + "description": "Libraries or code needed by this deployment.", + "required": false + }, + "name": { + "name": "name", + "type": "string", + "description": "Friendly identifier for this pipeline.", + "required": false + }, + "notifications": { + "name": "notifications", + "type": "[]Notifications", + "description": "List of notification settings for this pipeline.", + "required": false + }, + "photon": { + "name": "photon", + "type": "bool", + "description": "Whether Photon is enabled for this pipeline.", + "required": false + }, + "restart_window": { + "name": "restart_window", + "type": "*RestartWindow", + "description": "Restart window of this pipeline.", + "required": false + }, + "root_path": { + "name": "root_path", + "type": "string", + "description": "Root path for this pipeline. 
This is used as the root directory when\nediting the pipeline in the Databricks user interface and it is added to\nsys.path when executing Python sources during pipeline execution.", + "required": false + }, + "schema": { + "name": "schema", + "type": "string", + "description": "The default schema (database) where tables are read from or published to.", + "required": false + }, + "serverless": { + "name": "serverless", + "type": "bool", + "description": "Whether serverless compute is enabled for this pipeline.", + "required": false + }, + "storage": { + "name": "storage", + "type": "string", + "description": "DBFS root directory for storing checkpoints and tables.", + "required": false + }, + "tags": { + "name": "tags", + "type": "map[string]string", + "description": "A map of tags associated with the pipeline. These are forwarded to the\ncluster as cluster tags, and are therefore subject to the same\nlimitations. A maximum of 25 tags can be added to the pipeline.", + "required": false + }, + "target": { + "name": "target", + "type": "string", + "description": "Target schema (database) to add tables in this pipeline to. Exactly one\nof `schema` or `target` must be specified. To publish to Unity Catalog,\nalso specify `catalog`. This legacy field is deprecated for pipeline\ncreation in favor of the `schema` field.", + "required": false + }, + "trigger": { + "name": "trigger", + "type": "*PipelineTrigger", + "description": "Which pipeline trigger to use. Deprecated: Use `continuous` instead.", + "required": false + }, + "usage_policy_id": { + "name": "usage_policy_id", + "type": "string", + "description": "Usage policy of this pipeline.", + "required": false + } + } + }, + "pipelines.PipelineStateInfo": { + "name": "PipelineStateInfo", + "package": "pipelines", + "description": "", + "fields": { + "cluster_id": { + "name": "cluster_id", + "type": "string", + "description": "The unique identifier of the cluster running the pipeline.", + "required": false + }, + "creator_user_name": { + "name": "creator_user_name", + "type": "string", + "description": "The username of the pipeline creator.", + "required": false + }, + "health": { + "name": "health", + "type": "PipelineStateInfoHealth", + "description": "The health of a pipeline.", + "required": false + }, + "latest_updates": { + "name": "latest_updates", + "type": "[]UpdateStateInfo", + "description": "Status of the latest updates for the pipeline. Ordered with the newest\nupdate first.", + "required": false + }, + "name": { + "name": "name", + "type": "string", + "description": "The user-friendly name of the pipeline.", + "required": false + }, + "pipeline_id": { + "name": "pipeline_id", + "type": "string", + "description": "The unique identifier of the pipeline.", + "required": false + }, + "run_as_user_name": { + "name": "run_as_user_name", + "type": "string", + "description": "The username that the pipeline runs as. 
This is a read only value derived\nfrom the pipeline owner.", + "required": false + }, + "state": { + "name": "state", + "type": "PipelineState", + "description": "", + "required": false + } + } + }, + "pipelines.PipelineTrigger": { + "name": "PipelineTrigger", + "package": "pipelines", + "description": "", + "fields": { + "cron": { + "name": "cron", + "type": "*CronTrigger", + "description": "", + "required": false + }, + "manual": { + "name": "manual", + "type": "*ManualTrigger", + "description": "", + "required": false + } + } + }, + "pipelines.PipelinesEnvironment": { + "name": "PipelinesEnvironment", + "package": "pipelines", + "description": "The environment entity used to preserve serverless environment side panel, jobs' environment for non-notebook task, and DLT's environment for classic and serverless pipelines.\nIn this minimal environment spec, only pip dependencies are supported.", + "fields": { + "dependencies": { + "name": "dependencies", + "type": "[]string", + "description": "List of pip dependencies, as supported by the version of pip in this\nenvironment. Each dependency is a pip requirement file line\nhttps://pip.pypa.io/en/stable/reference/requirements-file-format/ Allowed\ndependency could be \u003crequirement specifier\u003e, \u003carchive url/path\u003e, \u003clocal\nproject path\u003e(WSFS or Volumes in Databricks), \u003cvcs project url\u003e", + "required": false + } + } + }, + "pipelines.PostgresCatalogConfig": { + "name": "PostgresCatalogConfig", + "package": "pipelines", + "description": "PG-specific catalog-level configuration parameters", + "fields": { + "slot_config": { + "name": "slot_config", + "type": "*PostgresSlotConfig", + "description": "Optional. The Postgres slot configuration to use for logical replication", + "required": false + } + } + }, + "pipelines.PostgresSlotConfig": { + "name": "PostgresSlotConfig", + "package": "pipelines", + "description": "PostgresSlotConfig contains the configuration for a Postgres logical replication slot", + "fields": { + "publication_name": { + "name": "publication_name", + "type": "string", + "description": "The name of the publication to use for the Postgres source", + "required": false + }, + "slot_name": { + "name": "slot_name", + "type": "string", + "description": "The name of the logical replication slot to use for the Postgres source", + "required": false + } + } + }, + "pipelines.ReportSpec": { + "name": "ReportSpec", + "package": "pipelines", + "description": "", + "fields": { + "destination_catalog": { + "name": "destination_catalog", + "type": "string", + "description": "Required. Destination catalog to store table.", + "required": false + }, + "destination_schema": { + "name": "destination_schema", + "type": "string", + "description": "Required. Destination schema to store table.", + "required": false + }, + "destination_table": { + "name": "destination_table", + "type": "string", + "description": "Required. Destination table name. The pipeline fails if a table with that\nname already exists.", + "required": false + }, + "source_url": { + "name": "source_url", + "type": "string", + "description": "Required. Report URL in the source system.", + "required": false + }, + "table_configuration": { + "name": "table_configuration", + "type": "*TableSpecificConfig", + "description": "Configuration settings to control the ingestion of tables. 
These settings\noverride the table_configuration defined in the\nIngestionPipelineDefinition object.", + "required": false + } + } + }, + "pipelines.RestartWindow": { + "name": "RestartWindow", + "package": "pipelines", + "description": "", + "fields": { + "days_of_week": { + "name": "days_of_week", + "type": "[]DayOfWeek", + "description": "Days of week in which the restart is allowed to happen (within a\nfive-hour window starting at start_hour). If not specified all days of\nthe week will be used.", + "required": false + }, + "start_hour": { + "name": "start_hour", + "type": "int", + "description": "An integer between 0 and 23 denoting the start hour for the restart\nwindow in the 24-hour day. Continuous pipeline restart is triggered only\nwithin a five-hour window starting at this hour.", + "required": false + }, + "time_zone_id": { + "name": "time_zone_id", + "type": "string", + "description": "Time zone id of restart window. See\nhttps://docs.databricks.com/sql/language-manual/sql-ref-syntax-aux-conf-mgmt-set-timezone.html\nfor details. If not specified, UTC will be used.", + "required": false + } + } + }, + "pipelines.RewindDatasetSpec": { + "name": "RewindDatasetSpec", + "package": "pipelines", + "description": "Configuration for rewinding a specific dataset.", + "fields": { + "cascade": { + "name": "cascade", + "type": "bool", + "description": "Whether to cascade the rewind to dependent datasets. Must be specified.", + "required": false + }, + "identifier": { + "name": "identifier", + "type": "string", + "description": "The identifier of the dataset (e.g., \"main.foo.tbl1\").", + "required": false + }, + "reset_checkpoints": { + "name": "reset_checkpoints", + "type": "bool", + "description": "Whether to reset checkpoints for this dataset.", + "required": false + } + } + }, + "pipelines.RewindSpec": { + "name": "RewindSpec", + "package": "pipelines", + "description": "Information about a rewind being requested for this pipeline or some of the\ndatasets in it.", + "fields": { + "datasets": { + "name": "datasets", + "type": "[]RewindDatasetSpec", + "description": "List of datasets to rewind with specific configuration for each. When not\nspecified, all datasets will be rewound with cascade = true and\nreset_checkpoints = true.", + "required": false + }, + "dry_run": { + "name": "dry_run", + "type": "bool", + "description": "If true, this is a dry run and we should emit the RewindSummary but not\nperform the rewind.", + "required": false + }, + "rewind_timestamp": { + "name": "rewind_timestamp", + "type": "string", + "description": "The base timestamp to rewind to. Must be specified.", + "required": false + } + } + }, + "pipelines.RunAs": { + "name": "RunAs", + "package": "pipelines", + "description": "Write-only setting, available only in Create/Update calls. Specifies the user or service principal that the pipeline runs as. If not specified, the pipeline runs as the user who created the pipeline.\n\nOnly `user_name` or `service_principal_name` can be specified. If both are specified, an error is thrown.", + "fields": { + "service_principal_name": { + "name": "service_principal_name", + "type": "string", + "description": "Application ID of an active service principal. Setting this field\nrequires the `servicePrincipal/user` role.", + "required": false + }, + "user_name": { + "name": "user_name", + "type": "string", + "description": "The email of an active workspace user. 
Users can only set this field to\ntheir own email.", + "required": false + } + } + }, + "pipelines.SchemaSpec": { + "name": "SchemaSpec", + "package": "pipelines", + "description": "", + "fields": { + "destination_catalog": { + "name": "destination_catalog", + "type": "string", + "description": "Required. Destination catalog to store tables.", + "required": false + }, + "destination_schema": { + "name": "destination_schema", + "type": "string", + "description": "Required. Destination schema to store tables in. Tables with the same\nname as the source tables are created in this destination schema. The\npipeline fails If a table with the same name already exists.", + "required": false + }, + "source_catalog": { + "name": "source_catalog", + "type": "string", + "description": "The source catalog name. Might be optional depending on the type of\nsource.", + "required": false + }, + "source_schema": { + "name": "source_schema", + "type": "string", + "description": "Required. Schema name in the source database.", + "required": false + }, + "table_configuration": { + "name": "table_configuration", + "type": "*TableSpecificConfig", + "description": "Configuration settings to control the ingestion of tables. These settings\nare applied to all tables in this schema and override the\ntable_configuration defined in the IngestionPipelineDefinition object.", + "required": false + } + } + }, + "pipelines.Sequencing": { + "name": "Sequencing", + "package": "pipelines", + "description": "", + "fields": { + "control_plane_seq_no": { + "name": "control_plane_seq_no", + "type": "int64", + "description": "A sequence number, unique and increasing per pipeline.", + "required": false + }, + "data_plane_id": { + "name": "data_plane_id", + "type": "*DataPlaneId", + "description": "the ID assigned by the data plane.", + "required": false + } + } + }, + "pipelines.SerializedException": { + "name": "SerializedException", + "package": "pipelines", + "description": "", + "fields": { + "class_name": { + "name": "class_name", + "type": "string", + "description": "Runtime class of the exception", + "required": false + }, + "message": { + "name": "message", + "type": "string", + "description": "Exception message", + "required": false + }, + "stack": { + "name": "stack", + "type": "[]StackFrame", + "description": "Stack trace consisting of a list of stack frames", + "required": false + } + } + }, + "pipelines.SourceCatalogConfig": { + "name": "SourceCatalogConfig", + "package": "pipelines", + "description": "SourceCatalogConfig contains catalog-level custom configuration parameters for each source", + "fields": { + "postgres": { + "name": "postgres", + "type": "*PostgresCatalogConfig", + "description": "Postgres-specific catalog-level configuration parameters", + "required": false + }, + "source_catalog": { + "name": "source_catalog", + "type": "string", + "description": "Source catalog name", + "required": false + } + } + }, + "pipelines.SourceConfig": { + "name": "SourceConfig", + "package": "pipelines", + "description": "", + "fields": { + "catalog": { + "name": "catalog", + "type": "*SourceCatalogConfig", + "description": "Catalog-level source configuration parameters", + "required": false + } + } + }, + "pipelines.StackFrame": { + "name": "StackFrame", + "package": "pipelines", + "description": "", + "fields": { + "declaring_class": { + "name": "declaring_class", + "type": "string", + "description": "Class from which the method call originated", + "required": false + }, + "file_name": { + "name": "file_name", + "type": 
"string", + "description": "File where the method is defined", + "required": false + }, + "line_number": { + "name": "line_number", + "type": "int", + "description": "Line from which the method was called", + "required": false + }, + "method_name": { + "name": "method_name", + "type": "string", + "description": "Name of the method which was called", + "required": false + } + } + }, + "pipelines.StartUpdate": { + "name": "StartUpdate", + "package": "pipelines", + "description": "", + "fields": { + "cause": { + "name": "cause", + "type": "StartUpdateCause", + "description": "", + "required": false + }, + "full_refresh": { + "name": "full_refresh", + "type": "bool", + "description": "If true, this update will reset all tables before running.", + "required": false + }, + "full_refresh_selection": { + "name": "full_refresh_selection", + "type": "[]string", + "description": "A list of tables to update with fullRefresh. If both refresh_selection\nand full_refresh_selection are empty, this is a full graph update. Full\nRefresh on a table means that the states of the table will be reset\nbefore the refresh.", + "required": false + }, + "refresh_selection": { + "name": "refresh_selection", + "type": "[]string", + "description": "A list of tables to update without fullRefresh. If both refresh_selection\nand full_refresh_selection are empty, this is a full graph update. Full\nRefresh on a table means that the states of the table will be reset\nbefore the refresh.", + "required": false + }, + "rewind_spec": { + "name": "rewind_spec", + "type": "*RewindSpec", + "description": "The information about the requested rewind operation. If specified this\nis a rewind mode update.", + "required": false + }, + "validate_only": { + "name": "validate_only", + "type": "bool", + "description": "If true, this update only validates the correctness of pipeline source\ncode but does not materialize or publish any datasets.", + "required": false + } + } + }, + "pipelines.StartUpdateResponse": { + "name": "StartUpdateResponse", + "package": "pipelines", + "description": "", + "fields": { + "update_id": { + "name": "update_id", + "type": "string", + "description": "", + "required": false + } + } + }, + "pipelines.TableSpec": { + "name": "TableSpec", + "package": "pipelines", + "description": "", + "fields": { + "destination_catalog": { + "name": "destination_catalog", + "type": "string", + "description": "Required. Destination catalog to store table.", + "required": false + }, + "destination_schema": { + "name": "destination_schema", + "type": "string", + "description": "Required. Destination schema to store table.", + "required": false + }, + "destination_table": { + "name": "destination_table", + "type": "string", + "description": "Optional. Destination table name. The pipeline fails if a table with that\nname already exists. If not set, the source table name is used.", + "required": false + }, + "source_catalog": { + "name": "source_catalog", + "type": "string", + "description": "Source catalog name. Might be optional depending on the type of source.", + "required": false + }, + "source_schema": { + "name": "source_schema", + "type": "string", + "description": "Schema name in the source database. Might be optional depending on the\ntype of source.", + "required": false + }, + "source_table": { + "name": "source_table", + "type": "string", + "description": "Required. 
Table name in the source database.", + "required": false + }, + "table_configuration": { + "name": "table_configuration", + "type": "*TableSpecificConfig", + "description": "Configuration settings to control the ingestion of tables. These settings\noverride the table_configuration defined in the\nIngestionPipelineDefinition object and the SchemaSpec.", + "required": false + } + } + }, + "pipelines.TableSpecificConfig": { + "name": "TableSpecificConfig", + "package": "pipelines", + "description": "", + "fields": { + "exclude_columns": { + "name": "exclude_columns", + "type": "[]string", + "description": "A list of column names to be excluded for the ingestion. When not\nspecified, include_columns fully controls what columns to be ingested.\nWhen specified, all other columns including future ones will be\nautomatically included for ingestion. This field in mutually exclusive\nwith `include_columns`.", + "required": false + }, + "include_columns": { + "name": "include_columns", + "type": "[]string", + "description": "A list of column names to be included for the ingestion. When not\nspecified, all columns except ones in exclude_columns will be included.\nFuture columns will be automatically included. When specified, all other\nfuture columns will be automatically excluded from ingestion. This field\nin mutually exclusive with `exclude_columns`.", + "required": false + }, + "primary_keys": { + "name": "primary_keys", + "type": "[]string", + "description": "The primary key of the table used to apply changes.", + "required": false + }, + "query_based_connector_config": { + "name": "query_based_connector_config", + "type": "*IngestionPipelineDefinitionTableSpecificConfigQueryBasedConnectorConfig", + "description": "Configurations that are only applicable for query-based ingestion connectors.", + "required": false + }, + "row_filter": { + "name": "row_filter", + "type": "string", + "description": "(Optional, Immutable) The row filter condition to be applied to the\ntable. It must not contain the WHERE keyword, only the actual filter\ncondition. It must be in DBSQL format.", + "required": false + }, + "salesforce_include_formula_fields": { + "name": "salesforce_include_formula_fields", + "type": "bool", + "description": "If true, formula fields defined in the table are included in the\ningestion. This setting is only valid for the Salesforce connector", + "required": false + }, + "scd_type": { + "name": "scd_type", + "type": "TableSpecificConfigScdType", + "description": "The SCD type to use to ingest the table.", + "required": false + }, + "sequence_by": { + "name": "sequence_by", + "type": "[]string", + "description": "The column names specifying the logical order of events in the source\ndata. 
Spark Declarative Pipelines uses this sequencing to handle change\nevents that arrive out of order.", + "required": false + }, + "workday_report_parameters": { + "name": "workday_report_parameters", + "type": "*IngestionPipelineDefinitionWorkdayReportParameters", + "description": "(Optional) Additional custom parameters for Workday Report", + "required": false + } + } + }, + "pipelines.TableSpecificConfigScdType": { + "name": "TableSpecificConfigScdType", + "package": "pipelines", + "description": "The SCD type to use to ingest the table.", + "fields": {} + }, + "pipelines.Truncation": { + "name": "Truncation", + "package": "pipelines", + "description": "Information about truncations applied to this event.", + "fields": { + "truncated_fields": { + "name": "truncated_fields", + "type": "[]TruncationTruncationDetail", + "description": "List of fields that were truncated from this event. If empty or absent,\nno truncation occurred.", + "required": false + } + } + }, + "pipelines.TruncationTruncationDetail": { + "name": "TruncationTruncationDetail", + "package": "pipelines", + "description": "Details about a specific field that was truncated.", + "fields": { + "field_name": { + "name": "field_name", + "type": "string", + "description": "The name of the truncated field (e.g., \"error\"). Corresponds to field\nnames in PipelineEvent.", + "required": false + } + } + }, + "pipelines.UpdateInfo": { + "name": "UpdateInfo", + "package": "pipelines", + "description": "", + "fields": { + "cause": { + "name": "cause", + "type": "UpdateInfoCause", + "description": "What triggered this update.", + "required": false + }, + "cluster_id": { + "name": "cluster_id", + "type": "string", + "description": "The ID of the cluster that the update is running on.", + "required": false + }, + "config": { + "name": "config", + "type": "*PipelineSpec", + "description": "The pipeline configuration with system defaults applied where unspecified\nby the user. Not returned by ListUpdates.", + "required": false + }, + "creation_time": { + "name": "creation_time", + "type": "int64", + "description": "The time when this update was created.", + "required": false + }, + "full_refresh": { + "name": "full_refresh", + "type": "bool", + "description": "If true, this update will reset all tables before running.", + "required": false + }, + "full_refresh_selection": { + "name": "full_refresh_selection", + "type": "[]string", + "description": "A list of tables to update with fullRefresh. If both refresh_selection\nand full_refresh_selection are empty, this is a full graph update. Full\nRefresh on a table means that the states of the table will be reset\nbefore the refresh.", + "required": false + }, + "pipeline_id": { + "name": "pipeline_id", + "type": "string", + "description": "The ID of the pipeline.", + "required": false + }, + "refresh_selection": { + "name": "refresh_selection", + "type": "[]string", + "description": "A list of tables to update without fullRefresh. If both refresh_selection\nand full_refresh_selection are empty, this is a full graph update. 
Full\nRefresh on a table means that the states of the table will be reset\nbefore the refresh.", + "required": false + }, + "state": { + "name": "state", + "type": "UpdateInfoState", + "description": "The update state.", + "required": false + }, + "update_id": { + "name": "update_id", + "type": "string", + "description": "The ID of this update.", + "required": false + }, + "validate_only": { + "name": "validate_only", + "type": "bool", + "description": "If true, this update only validates the correctness of pipeline source\ncode but does not materialize or publish any datasets.", + "required": false + } + } + }, + "pipelines.UpdateStateInfo": { + "name": "UpdateStateInfo", + "package": "pipelines", + "description": "", + "fields": { + "creation_time": { + "name": "creation_time", + "type": "string", + "description": "", + "required": false + }, + "state": { + "name": "state", + "type": "UpdateStateInfoState", + "description": "", + "required": false + }, + "update_id": { + "name": "update_id", + "type": "string", + "description": "", + "required": false + } + } + }, + "pipelines.WaitGetPipelineIdle": { + "name": "WaitGetPipelineIdle", + "package": "pipelines", + "description": "WaitGetPipelineIdle is a wrapper that calls [PipelinesAPI.WaitGetPipelineIdle] and waits to reach IDLE state.", + "fields": { + "Poll": { + "name": "Poll", + "type": "func(time.Duration, func(*GetPipelineResponse)) (*GetPipelineResponse, error)", + "description": "", + "required": false + }, + "Response": { + "name": "Response", + "type": "*R", + "description": "", + "required": false + }, + "callback": { + "name": "callback", + "type": "func(*GetPipelineResponse)", + "description": "", + "required": false + }, + "pipeline_id": { + "name": "pipeline_id", + "type": "string", + "description": "", + "required": false + }, + "timeout": { + "name": "timeout", + "type": "time.Duration", + "description": "", + "required": false + } + } + }, + "pipelines.pipelinesImpl": { + "name": "pipelinesImpl", + "package": "pipelines", + "description": "unexported type that holds implementations of just Pipelines API methods", + "fields": { + "client": { + "name": "client", + "type": "*client.DatabricksClient", + "description": "", + "required": false + } + } + }, + "serving.Ai21LabsConfig": { + "name": "Ai21LabsConfig", + "package": "serving", + "description": "", + "fields": { + "ai21labs_api_key": { + "name": "ai21labs_api_key", + "type": "string", + "description": "The Databricks secret key reference for an AI21 Labs API key. If you\nprefer to paste your API key directly, see `ai21labs_api_key_plaintext`.\nYou must provide an API key using one of the following fields:\n`ai21labs_api_key` or `ai21labs_api_key_plaintext`.", + "required": false + }, + "ai21labs_api_key_plaintext": { + "name": "ai21labs_api_key_plaintext", + "type": "string", + "description": "An AI21 Labs API key provided as a plaintext string. If you prefer to\nreference your key using Databricks Secrets, see `ai21labs_api_key`. 
You\nmust provide an API key using one of the following fields:\n`ai21labs_api_key` or `ai21labs_api_key_plaintext`.", + "required": false + } + } + }, + "serving.AiGatewayConfig": { + "name": "AiGatewayConfig", + "package": "serving", + "description": "", + "fields": { + "fallback_config": { + "name": "fallback_config", + "type": "*FallbackConfig", + "description": "Configuration for traffic fallback which auto fallbacks to other served\nentities if the request to a served entity fails with certain error\ncodes, to increase availability.", + "required": false + }, + "guardrails": { + "name": "guardrails", + "type": "*AiGatewayGuardrails", + "description": "Configuration for AI Guardrails to prevent unwanted data and unsafe data\nin requests and responses.", + "required": false + }, + "inference_table_config": { + "name": "inference_table_config", + "type": "*AiGatewayInferenceTableConfig", + "description": "Configuration for payload logging using inference tables. Use these\ntables to monitor and audit data being sent to and received from model\nAPIs and to improve model quality.", + "required": false + }, + "rate_limits": { + "name": "rate_limits", + "type": "[]AiGatewayRateLimit", + "description": "Configuration for rate limits which can be set to limit endpoint traffic.", + "required": false + }, + "usage_tracking_config": { + "name": "usage_tracking_config", + "type": "*AiGatewayUsageTrackingConfig", + "description": "Configuration to enable usage tracking using system tables. These tables\nallow you to monitor operational usage on endpoints and their associated\ncosts.", + "required": false + } + } + }, + "serving.AiGatewayGuardrailParameters": { + "name": "AiGatewayGuardrailParameters", + "package": "serving", + "description": "", + "fields": { + "invalid_keywords": { + "name": "invalid_keywords", + "type": "[]string", + "description": "List of invalid keywords. AI guardrail uses keyword or string matching to\ndecide if the keyword exists in the request or response content.", + "required": false + }, + "pii": { + "name": "pii", + "type": "*AiGatewayGuardrailPiiBehavior", + "description": "Configuration for guardrail PII filter.", + "required": false + }, + "safety": { + "name": "safety", + "type": "bool", + "description": "Indicates whether the safety filter is enabled.", + "required": false + }, + "valid_topics": { + "name": "valid_topics", + "type": "[]string", + "description": "The list of allowed topics. 
Given a chat request, this guardrail flags\nthe request if its topic is not in the allowed topics.", + "required": false + } + } + }, + "serving.AiGatewayGuardrailPiiBehavior": { + "name": "AiGatewayGuardrailPiiBehavior", + "package": "serving", + "description": "", + "fields": { + "behavior": { + "name": "behavior", + "type": "AiGatewayGuardrailPiiBehaviorBehavior", + "description": "Configuration for input guardrail filters.", + "required": false + } + } + }, + "serving.AiGatewayGuardrails": { + "name": "AiGatewayGuardrails", + "package": "serving", + "description": "", + "fields": { + "input": { + "name": "input", + "type": "*AiGatewayGuardrailParameters", + "description": "Configuration for input guardrail filters.", + "required": false + }, + "output": { + "name": "output", + "type": "*AiGatewayGuardrailParameters", + "description": "Configuration for output guardrail filters.", + "required": false + } + } + }, + "serving.AiGatewayInferenceTableConfig": { + "name": "AiGatewayInferenceTableConfig", + "package": "serving", + "description": "", + "fields": { + "catalog_name": { + "name": "catalog_name", + "type": "string", + "description": "The name of the catalog in Unity Catalog. Required when enabling\ninference tables. NOTE: On update, you have to disable inference table\nfirst in order to change the catalog name.", + "required": false + }, + "enabled": { + "name": "enabled", + "type": "bool", + "description": "Indicates whether the inference table is enabled.", + "required": false + }, + "schema_name": { + "name": "schema_name", + "type": "string", + "description": "The name of the schema in Unity Catalog. Required when enabling inference\ntables. NOTE: On update, you have to disable inference table first in\norder to change the schema name.", + "required": false + }, + "table_name_prefix": { + "name": "table_name_prefix", + "type": "string", + "description": "The prefix of the table in Unity Catalog. NOTE: On update, you have to\ndisable inference table first in order to change the prefix name.", + "required": false + } + } + }, + "serving.AiGatewayRateLimit": { + "name": "AiGatewayRateLimit", + "package": "serving", + "description": "", + "fields": { + "calls": { + "name": "calls", + "type": "int64", + "description": "Used to specify how many calls are allowed for a key within the\nrenewal_period.", + "required": false + }, + "key": { + "name": "key", + "type": "AiGatewayRateLimitKey", + "description": "Key field for a rate limit. Currently, 'user', 'user_group,\n'service_principal', and 'endpoint' are supported, with 'endpoint' being\nthe default if not specified.", + "required": false + }, + "principal": { + "name": "principal", + "type": "string", + "description": "Principal field for a user, user group, or service principal to apply\nrate limiting to. Accepts a user email, group name, or service principal\napplication ID.", + "required": false + }, + "renewal_period": { + "name": "renewal_period", + "type": "AiGatewayRateLimitRenewalPeriod", + "description": "Renewal period field for a rate limit. 
Currently, only 'minute' is\nsupported.", + "required": false + }, + "tokens": { + "name": "tokens", + "type": "int64", + "description": "Used to specify how many tokens are allowed for a key within the\nrenewal_period.", + "required": false + } + } + }, + "serving.AiGatewayUsageTrackingConfig": { + "name": "AiGatewayUsageTrackingConfig", + "package": "serving", + "description": "", + "fields": { + "enabled": { + "name": "enabled", + "type": "bool", + "description": "Whether to enable usage tracking.", + "required": false + } + } + }, + "serving.AmazonBedrockConfig": { + "name": "AmazonBedrockConfig", + "package": "serving", + "description": "", + "fields": { + "aws_access_key_id": { + "name": "aws_access_key_id", + "type": "string", + "description": "The Databricks secret key reference for an AWS access key ID with\npermissions to interact with Bedrock services. If you prefer to paste\nyour API key directly, see `aws_access_key_id_plaintext`. You must\nprovide an API key using one of the following fields: `aws_access_key_id`\nor `aws_access_key_id_plaintext`.", + "required": false + }, + "aws_access_key_id_plaintext": { + "name": "aws_access_key_id_plaintext", + "type": "string", + "description": "An AWS access key ID with permissions to interact with Bedrock services\nprovided as a plaintext string. If you prefer to reference your key using\nDatabricks Secrets, see `aws_access_key_id`. You must provide an API key\nusing one of the following fields: `aws_access_key_id` or\n`aws_access_key_id_plaintext`.", + "required": false + }, + "aws_region": { + "name": "aws_region", + "type": "string", + "description": "The AWS region to use. Bedrock has to be enabled there.", + "required": false + }, + "aws_secret_access_key": { + "name": "aws_secret_access_key", + "type": "string", + "description": "The Databricks secret key reference for an AWS secret access key paired\nwith the access key ID, with permissions to interact with Bedrock\nservices. If you prefer to paste your API key directly, see\n`aws_secret_access_key_plaintext`. You must provide an API key using one\nof the following fields: `aws_secret_access_key` or\n`aws_secret_access_key_plaintext`.", + "required": false + }, + "aws_secret_access_key_plaintext": { + "name": "aws_secret_access_key_plaintext", + "type": "string", + "description": "An AWS secret access key paired with the access key ID, with permissions\nto interact with Bedrock services provided as a plaintext string. If you\nprefer to reference your key using Databricks Secrets, see\n`aws_secret_access_key`. You must provide an API key using one of the\nfollowing fields: `aws_secret_access_key` or\n`aws_secret_access_key_plaintext`.", + "required": false + }, + "bedrock_provider": { + "name": "bedrock_provider", + "type": "AmazonBedrockConfigBedrockProvider", + "description": "The underlying provider in Amazon Bedrock. Supported values (case\ninsensitive) include: Anthropic, Cohere, AI21Labs, Amazon.", + "required": false + }, + "instance_profile_arn": { + "name": "instance_profile_arn", + "type": "string", + "description": "ARN of the instance profile that the external model will use to access\nAWS resources. You must authenticate using an instance profile or access\nkeys. 
If you prefer to authenticate using access keys, see\n`aws_access_key_id`, `aws_access_key_id_plaintext`,\n`aws_secret_access_key` and `aws_secret_access_key_plaintext`.", + "required": false + } + } + }, + "serving.AnthropicConfig": { + "name": "AnthropicConfig", + "package": "serving", + "description": "", + "fields": { + "anthropic_api_key": { + "name": "anthropic_api_key", + "type": "string", + "description": "The Databricks secret key reference for an Anthropic API key. If you\nprefer to paste your API key directly, see `anthropic_api_key_plaintext`.\nYou must provide an API key using one of the following fields:\n`anthropic_api_key` or `anthropic_api_key_plaintext`.", + "required": false + }, + "anthropic_api_key_plaintext": { + "name": "anthropic_api_key_plaintext", + "type": "string", + "description": "The Anthropic API key provided as a plaintext string. If you prefer to\nreference your key using Databricks Secrets, see `anthropic_api_key`. You\nmust provide an API key using one of the following fields:\n`anthropic_api_key` or `anthropic_api_key_plaintext`.", + "required": false + } + } + }, + "serving.ApiKeyAuth": { + "name": "ApiKeyAuth", + "package": "serving", + "description": "", + "fields": { + "key": { + "name": "key", + "type": "string", + "description": "The name of the API key parameter used for authentication.", + "required": false + }, + "value": { + "name": "value", + "type": "string", + "description": "The Databricks secret key reference for an API Key. If you prefer to\npaste your token directly, see `value_plaintext`.", + "required": false + }, + "value_plaintext": { + "name": "value_plaintext", + "type": "string", + "description": "The API Key provided as a plaintext string. If you prefer to reference\nyour token using Databricks Secrets, see `value`.", + "required": false + } + } + }, + "serving.AutoCaptureConfigInput": { + "name": "AutoCaptureConfigInput", + "package": "serving", + "description": "", + "fields": { + "catalog_name": { + "name": "catalog_name", + "type": "string", + "description": "The name of the catalog in Unity Catalog. NOTE: On update, you cannot\nchange the catalog name if the inference table is already enabled.", + "required": false + }, + "enabled": { + "name": "enabled", + "type": "bool", + "description": "Indicates whether the inference table is enabled.", + "required": false + }, + "schema_name": { + "name": "schema_name", + "type": "string", + "description": "The name of the schema in Unity Catalog. NOTE: On update, you cannot\nchange the schema name if the inference table is already enabled.", + "required": false + }, + "table_name_prefix": { + "name": "table_name_prefix", + "type": "string", + "description": "The prefix of the table in Unity Catalog. NOTE: On update, you cannot\nchange the prefix name if the inference table is already enabled.", + "required": false + } + } + }, + "serving.AutoCaptureConfigOutput": { + "name": "AutoCaptureConfigOutput", + "package": "serving", + "description": "", + "fields": { + "catalog_name": { + "name": "catalog_name", + "type": "string", + "description": "The name of the catalog in Unity Catalog. NOTE: On update, you cannot\nchange the catalog name if the inference table is already enabled.", + "required": false + }, + "enabled": { + "name": "enabled", + "type": "bool", + "description": "Indicates whether the inference table is enabled.", + "required": false + }, + "schema_name": { + "name": "schema_name", + "type": "string", + "description": "The name of the schema in Unity Catalog. 
NOTE: On update, you cannot\nchange the schema name if the inference table is already enabled.", + "required": false + }, + "state": { + "name": "state", + "type": "*AutoCaptureState", + "description": "", + "required": false + }, + "table_name_prefix": { + "name": "table_name_prefix", + "type": "string", + "description": "The prefix of the table in Unity Catalog. NOTE: On update, you cannot\nchange the prefix name if the inference table is already enabled.", + "required": false + } + } + }, + "serving.AutoCaptureState": { + "name": "AutoCaptureState", + "package": "serving", + "description": "", + "fields": { + "payload_table": { + "name": "payload_table", + "type": "*PayloadTable", + "description": "", + "required": false + } + } + }, + "serving.BearerTokenAuth": { + "name": "BearerTokenAuth", + "package": "serving", + "description": "", + "fields": { + "token": { + "name": "token", + "type": "string", + "description": "The Databricks secret key reference for a token. If you prefer to paste\nyour token directly, see `token_plaintext`.", + "required": false + }, + "token_plaintext": { + "name": "token_plaintext", + "type": "string", + "description": "The token provided as a plaintext string. If you prefer to reference your\ntoken using Databricks Secrets, see `token`.", + "required": false + } + } + }, + "serving.BuildLogsResponse": { + "name": "BuildLogsResponse", + "package": "serving", + "description": "", + "fields": { + "logs": { + "name": "logs", + "type": "string", + "description": "The logs associated with building the served entity's environment.", + "required": false + } + } + }, + "serving.ChatMessage": { + "name": "ChatMessage", + "package": "serving", + "description": "", + "fields": { + "content": { + "name": "content", + "type": "string", + "description": "The content of the message.", + "required": false + }, + "role": { + "name": "role", + "type": "ChatMessageRole", + "description": "The role of the message. One of [system, user, assistant].", + "required": false + } + } + }, + "serving.CohereConfig": { + "name": "CohereConfig", + "package": "serving", + "description": "", + "fields": { + "cohere_api_base": { + "name": "cohere_api_base", + "type": "string", + "description": "This is an optional field to provide a customized base URL for the Cohere\nAPI. If left unspecified, the standard Cohere base URL is used.", + "required": false + }, + "cohere_api_key": { + "name": "cohere_api_key", + "type": "string", + "description": "The Databricks secret key reference for a Cohere API key. If you prefer\nto paste your API key directly, see `cohere_api_key_plaintext`. You must\nprovide an API key using one of the following fields: `cohere_api_key` or\n`cohere_api_key_plaintext`.", + "required": false + }, + "cohere_api_key_plaintext": { + "name": "cohere_api_key_plaintext", + "type": "string", + "description": "The Cohere API key provided as a plaintext string. If you prefer to\nreference your key using Databricks Secrets, see `cohere_api_key`. 
You\nmust provide an API key using one of the following fields:\n`cohere_api_key` or `cohere_api_key_plaintext`.", + "required": false + } + } + }, + "serving.CreatePtEndpointRequest": { + "name": "CreatePtEndpointRequest", + "package": "serving", + "description": "", + "fields": { + "ai_gateway": { + "name": "ai_gateway", + "type": "*AiGatewayConfig", + "description": "The AI Gateway configuration for the serving endpoint.", + "required": false + }, + "budget_policy_id": { + "name": "budget_policy_id", + "type": "string", + "description": "The budget policy associated with the endpoint.", + "required": false + }, + "config": { + "name": "config", + "type": "PtEndpointCoreConfig", + "description": "The core config of the serving endpoint.", + "required": false + }, + "email_notifications": { + "name": "email_notifications", + "type": "*EmailNotifications", + "description": "Email notification settings.", + "required": false + }, + "name": { + "name": "name", + "type": "string", + "description": "The name of the serving endpoint. This field is required and must be\nunique across a Databricks workspace. An endpoint name can consist of\nalphanumeric characters, dashes, and underscores.", + "required": false + }, + "tags": { + "name": "tags", + "type": "[]EndpointTag", + "description": "Tags to be attached to the serving endpoint and automatically propagated\nto billing logs.", + "required": false + } + } + }, + "serving.CreateServingEndpoint": { + "name": "CreateServingEndpoint", + "package": "serving", + "description": "", + "fields": { + "ai_gateway": { + "name": "ai_gateway", + "type": "*AiGatewayConfig", + "description": "The AI Gateway configuration for the serving endpoint. NOTE: External\nmodel, provisioned throughput, and pay-per-token endpoints are fully\nsupported; agent endpoints currently only support inference tables.", + "required": false + }, + "budget_policy_id": { + "name": "budget_policy_id", + "type": "string", + "description": "The budget policy to be applied to the serving endpoint.", + "required": false + }, + "config": { + "name": "config", + "type": "*EndpointCoreConfigInput", + "description": "The core config of the serving endpoint.", + "required": false + }, + "description": { + "name": "description", + "type": "string", + "description": "", + "required": false + }, + "email_notifications": { + "name": "email_notifications", + "type": "*EmailNotifications", + "description": "Email notification settings.", + "required": false + }, + "name": { + "name": "name", + "type": "string", + "description": "The name of the serving endpoint. This field is required and must be\nunique across a Databricks workspace. An endpoint name can consist of\nalphanumeric characters, dashes, and underscores.", + "required": false + }, + "rate_limits": { + "name": "rate_limits", + "type": "[]RateLimit", + "description": "Rate limits to be applied to the serving endpoint. 
NOTE: this field is\ndeprecated, please use AI Gateway to manage rate limits.", + "required": false + }, + "route_optimized": { + "name": "route_optimized", + "type": "bool", + "description": "Enable route optimization for the serving endpoint.", + "required": false + }, + "tags": { + "name": "tags", + "type": "[]EndpointTag", + "description": "Tags to be attached to the serving endpoint and automatically propagated\nto billing logs.", + "required": false + } + } + }, + "serving.CustomProviderConfig": { + "name": "CustomProviderConfig", + "package": "serving", + "description": "Configs needed to create a custom provider model route.", + "fields": { + "api_key_auth": { + "name": "api_key_auth", + "type": "*ApiKeyAuth", + "description": "This is a field to provide API key authentication for the custom provider\nAPI. You can only specify one authentication method.", + "required": false + }, + "bearer_token_auth": { + "name": "bearer_token_auth", + "type": "*BearerTokenAuth", + "description": "This is a field to provide bearer token authentication for the custom\nprovider API. You can only specify one authentication method.", + "required": false + }, + "custom_provider_url": { + "name": "custom_provider_url", + "type": "string", + "description": "This is a field to provide the URL of the custom provider API.", + "required": false + } + } + }, + "serving.DataPlaneInfo": { + "name": "DataPlaneInfo", + "package": "serving", + "description": "Details necessary to query this object's API through the DataPlane APIs.", + "fields": { + "authorization_details": { + "name": "authorization_details", + "type": "string", + "description": "Authorization details as a string.", + "required": false + }, + "endpoint_url": { + "name": "endpoint_url", + "type": "string", + "description": "The URL of the endpoint for this operation in the dataplane.", + "required": false + } + } + }, + "serving.DatabricksModelServingConfig": { + "name": "DatabricksModelServingConfig", + "package": "serving", + "description": "", + "fields": { + "databricks_api_token": { + "name": "databricks_api_token", + "type": "string", + "description": "The Databricks secret key reference for a Databricks API token that\ncorresponds to a user or service principal with Can Query access to the\nmodel serving endpoint pointed to by this external model. If you prefer\nto paste your API key directly, see `databricks_api_token_plaintext`. You\nmust provide an API key using one of the following fields:\n`databricks_api_token` or `databricks_api_token_plaintext`.", + "required": false + }, + "databricks_api_token_plaintext": { + "name": "databricks_api_token_plaintext", + "type": "string", + "description": "The Databricks API token that corresponds to a user or service principal\nwith Can Query access to the model serving endpoint pointed to by this\nexternal model provided as a plaintext string. If you prefer to reference\nyour key using Databricks Secrets, see `databricks_api_token`. 
You must\nprovide an API key using one of the following fields:\n`databricks_api_token` or `databricks_api_token_plaintext`.", + "required": false + }, + "databricks_workspace_url": { + "name": "databricks_workspace_url", + "type": "string", + "description": "The URL of the Databricks workspace containing the model serving endpoint\npointed to by this external model.", + "required": false + } + } + }, + "serving.DataframeSplitInput": { + "name": "DataframeSplitInput", + "package": "serving", + "description": "", + "fields": { + "columns": { + "name": "columns", + "type": "[]any", + "description": "Columns array for the dataframe", + "required": false + }, + "data": { + "name": "data", + "type": "[]any", + "description": "Data array for the dataframe", + "required": false + }, + "index": { + "name": "index", + "type": "[]int", + "description": "Index array for the dataframe", + "required": false + } + } + }, + "serving.EmailNotifications": { + "name": "EmailNotifications", + "package": "serving", + "description": "", + "fields": { + "on_update_failure": { + "name": "on_update_failure", + "type": "[]string", + "description": "A list of email addresses to be notified when an endpoint fails to update\nits configuration or state.", + "required": false + }, + "on_update_success": { + "name": "on_update_success", + "type": "[]string", + "description": "A list of email addresses to be notified when an endpoint successfully\nupdates its configuration or state.", + "required": false + } + } + }, + "serving.EmbeddingsV1ResponseEmbeddingElement": { + "name": "EmbeddingsV1ResponseEmbeddingElement", + "package": "serving", + "description": "", + "fields": { + "embedding": { + "name": "embedding", + "type": "[]float64", + "description": "The embedding vector", + "required": false + }, + "index": { + "name": "index", + "type": "int", + "description": "The index of the embedding in the response.", + "required": false + }, + "object": { + "name": "object", + "type": "EmbeddingsV1ResponseEmbeddingElementObject", + "description": "This will always be 'embedding'.", + "required": false + } + } + }, + "serving.EndpointCoreConfigInput": { + "name": "EndpointCoreConfigInput", + "package": "serving", + "description": "", + "fields": { + "auto_capture_config": { + "name": "auto_capture_config", + "type": "*AutoCaptureConfigInput", + "description": "Configuration for Inference Tables which automatically logs requests and\nresponses to Unity Catalog. 
Note: this field is deprecated for creating\nnew provisioned throughput endpoints, or updating existing provisioned\nthroughput endpoints that never have inference table configured; in these\ncases please use AI Gateway to manage inference tables.", + "required": false + }, + "served_entities": { + "name": "served_entities", + "type": "[]ServedEntityInput", + "description": "The list of served entities under the serving endpoint config.", + "required": false + }, + "served_models": { + "name": "served_models", + "type": "[]ServedModelInput", + "description": "(Deprecated, use served_entities instead) The list of served models under\nthe serving endpoint config.", + "required": false + }, + "traffic_config": { + "name": "traffic_config", + "type": "*TrafficConfig", + "description": "The traffic configuration associated with the serving endpoint config.", + "required": false + } + } + }, + "serving.EndpointCoreConfigOutput": { + "name": "EndpointCoreConfigOutput", + "package": "serving", + "description": "", + "fields": { + "auto_capture_config": { + "name": "auto_capture_config", + "type": "*AutoCaptureConfigOutput", + "description": "Configuration for Inference Tables which automatically logs requests and\nresponses to Unity Catalog. Note: this field is deprecated for creating\nnew provisioned throughput endpoints, or updating existing provisioned\nthroughput endpoints that never have inference table configured; in these\ncases please use AI Gateway to manage inference tables.", + "required": false + }, + "config_version": { + "name": "config_version", + "type": "int64", + "description": "The config version that the serving endpoint is currently serving.", + "required": false + }, + "served_entities": { + "name": "served_entities", + "type": "[]ServedEntityOutput", + "description": "The list of served entities under the serving endpoint config.", + "required": false + }, + "served_models": { + "name": "served_models", + "type": "[]ServedModelOutput", + "description": "(Deprecated, use served_entities instead) The list of served models under\nthe serving endpoint config.", + "required": false + }, + "traffic_config": { + "name": "traffic_config", + "type": "*TrafficConfig", + "description": "The traffic configuration associated with the serving endpoint config.", + "required": false + } + } + }, + "serving.EndpointCoreConfigSummary": { + "name": "EndpointCoreConfigSummary", + "package": "serving", + "description": "", + "fields": { + "served_entities": { + "name": "served_entities", + "type": "[]ServedEntitySpec", + "description": "The list of served entities under the serving endpoint config.", + "required": false + }, + "served_models": { + "name": "served_models", + "type": "[]ServedModelSpec", + "description": "(Deprecated, use served_entities instead) The list of served models under\nthe serving endpoint config.", + "required": false + } + } + }, + "serving.EndpointPendingConfig": { + "name": "EndpointPendingConfig", + "package": "serving", + "description": "", + "fields": { + "auto_capture_config": { + "name": "auto_capture_config", + "type": "*AutoCaptureConfigOutput", + "description": "Configuration for Inference Tables which automatically logs requests and\nresponses to Unity Catalog. 
Note: this field is deprecated for creating\nnew provisioned throughput endpoints, or updating existing provisioned\nthroughput endpoints that never have inference table configured; in these\ncases please use AI Gateway to manage inference tables.", + "required": false + }, + "config_version": { + "name": "config_version", + "type": "int", + "description": "The config version that the serving endpoint is currently serving.", + "required": false + }, + "served_entities": { + "name": "served_entities", + "type": "[]ServedEntityOutput", + "description": "The list of served entities belonging to the last issued update to the\nserving endpoint.", + "required": false + }, + "served_models": { + "name": "served_models", + "type": "[]ServedModelOutput", + "description": "(Deprecated, use served_entities instead) The list of served models\nbelonging to the last issued update to the serving endpoint.", + "required": false + }, + "start_time": { + "name": "start_time", + "type": "int64", + "description": "The timestamp when the update to the pending config started.", + "required": false + }, + "traffic_config": { + "name": "traffic_config", + "type": "*TrafficConfig", + "description": "The traffic config defining how invocations to the serving endpoint\nshould be routed.", + "required": false + } + } + }, + "serving.EndpointState": { + "name": "EndpointState", + "package": "serving", + "description": "", + "fields": { + "config_update": { + "name": "config_update", + "type": "EndpointStateConfigUpdate", + "description": "The state of an endpoint's config update. This informs the user if the\npending_config is in progress, if the update failed, or if there is no\nupdate in progress. Note that if the endpoint's config_update state value\nis IN_PROGRESS, another update can not be made until the update completes\nor fails.", + "required": false + }, + "ready": { + "name": "ready", + "type": "EndpointStateReady", + "description": "The state of an endpoint, indicating whether or not the endpoint is\nqueryable. An endpoint is READY if all of the served entities in its\nactive configuration are ready. If any of the actively served entities\nare in a non-ready state, the endpoint state will be NOT_READY.", + "required": false + } + } + }, + "serving.EndpointTag": { + "name": "EndpointTag", + "package": "serving", + "description": "", + "fields": { + "key": { + "name": "key", + "type": "string", + "description": "Key field for a serving endpoint tag.", + "required": false + }, + "value": { + "name": "value", + "type": "string", + "description": "Optional value field for a serving endpoint tag.", + "required": false + } + } + }, + "serving.EndpointTags": { + "name": "EndpointTags", + "package": "serving", + "description": "", + "fields": { + "tags": { + "name": "tags", + "type": "[]EndpointTag", + "description": "", + "required": false + } + } + }, + "serving.ExternalFunctionRequest": { + "name": "ExternalFunctionRequest", + "package": "serving", + "description": "Simple Proto message for testing", + "fields": { + "connection_name": { + "name": "connection_name", + "type": "string", + "description": "The connection name to use. This is required to identify the external\nconnection.", + "required": false + }, + "headers": { + "name": "headers", + "type": "string", + "description": "Additional headers for the request. 
If not provided, only auth headers\nfrom connections would be passed.", + "required": false + }, + "json": { + "name": "json", + "type": "string", + "description": "The JSON payload to send in the request body.", + "required": false + }, + "method": { + "name": "method", + "type": "ExternalFunctionRequestHttpMethod", + "description": "The HTTP method to use (e.g., 'GET', 'POST').", + "required": false + }, + "params": { + "name": "params", + "type": "string", + "description": "Query parameters for the request.", + "required": false + }, + "path": { + "name": "path", + "type": "string", + "description": "The relative path for the API endpoint. This is required.", + "required": false + } + } + }, + "serving.ExternalModel": { + "name": "ExternalModel", + "package": "serving", + "description": "", + "fields": { + "ai21labs_config": { + "name": "ai21labs_config", + "type": "*Ai21LabsConfig", + "description": "AI21Labs Config. Only required if the provider is 'ai21labs'.", + "required": false + }, + "amazon_bedrock_config": { + "name": "amazon_bedrock_config", + "type": "*AmazonBedrockConfig", + "description": "Amazon Bedrock Config. Only required if the provider is 'amazon-bedrock'.", + "required": false + }, + "anthropic_config": { + "name": "anthropic_config", + "type": "*AnthropicConfig", + "description": "Anthropic Config. Only required if the provider is 'anthropic'.", + "required": false + }, + "cohere_config": { + "name": "cohere_config", + "type": "*CohereConfig", + "description": "Cohere Config. Only required if the provider is 'cohere'.", + "required": false + }, + "custom_provider_config": { + "name": "custom_provider_config", + "type": "*CustomProviderConfig", + "description": "Custom Provider Config. Only required if the provider is 'custom'.", + "required": false + }, + "databricks_model_serving_config": { + "name": "databricks_model_serving_config", + "type": "*DatabricksModelServingConfig", + "description": "Databricks Model Serving Config. Only required if the provider is\n'databricks-model-serving'.", + "required": false + }, + "google_cloud_vertex_ai_config": { + "name": "google_cloud_vertex_ai_config", + "type": "*GoogleCloudVertexAiConfig", + "description": "Google Cloud Vertex AI Config. Only required if the provider is\n'google-cloud-vertex-ai'.", + "required": false + }, + "name": { + "name": "name", + "type": "string", + "description": "The name of the external model.", + "required": false + }, + "openai_config": { + "name": "openai_config", + "type": "*OpenAiConfig", + "description": "OpenAI Config. Only required if the provider is 'openai'.", + "required": false + }, + "palm_config": { + "name": "palm_config", + "type": "*PaLmConfig", + "description": "PaLM Config. Only required if the provider is 'palm'.", + "required": false + }, + "provider": { + "name": "provider", + "type": "ExternalModelProvider", + "description": "The name of the provider for the external model. 
Currently, the supported\nproviders are 'ai21labs', 'anthropic', 'amazon-bedrock', 'cohere',\n'databricks-model-serving', 'google-cloud-vertex-ai', 'openai', 'palm',\nand 'custom'.", + "required": false + }, + "task": { + "name": "task", + "type": "string", + "description": "The task type of the external model.", + "required": false + } + } + }, + "serving.ExternalModelUsageElement": { + "name": "ExternalModelUsageElement", + "package": "serving", + "description": "", + "fields": { + "completion_tokens": { + "name": "completion_tokens", + "type": "int", + "description": "The number of tokens in the chat/completions response.", + "required": false + }, + "prompt_tokens": { + "name": "prompt_tokens", + "type": "int", + "description": "The number of tokens in the prompt.", + "required": false + }, + "total_tokens": { + "name": "total_tokens", + "type": "int", + "description": "The total number of tokens in the prompt and response.", + "required": false + } + } + }, + "serving.FallbackConfig": { + "name": "FallbackConfig", + "package": "serving", + "description": "", + "fields": { + "enabled": { + "name": "enabled", + "type": "bool", + "description": "Whether to enable traffic fallback. When a served entity in the serving\nendpoint returns specific error codes (e.g. 500), the request will\nautomatically be round-robin attempted with other served entities in the\nsame endpoint, following the order of served entity list, until a\nsuccessful response is returned. If all attempts fail, return the last\nresponse with the error code.", + "required": false + } + } + }, + "serving.FoundationModel": { + "name": "FoundationModel", + "package": "serving", + "description": "All fields are not sensitive as they are hard-coded in the system and made\navailable to customers.", + "fields": { + "description": { + "name": "description", + "type": "string", + "description": "", + "required": false + }, + "display_name": { + "name": "display_name", + "type": "string", + "description": "", + "required": false + }, + "docs": { + "name": "docs", + "type": "string", + "description": "", + "required": false + }, + "name": { + "name": "name", + "type": "string", + "description": "", + "required": false + } + } + }, + "serving.GetServingEndpointPermissionLevelsResponse": { + "name": "GetServingEndpointPermissionLevelsResponse", + "package": "serving", + "description": "", + "fields": { + "permission_levels": { + "name": "permission_levels", + "type": "[]ServingEndpointPermissionsDescription", + "description": "Specific permission levels", + "required": false + } + } + }, + "serving.GoogleCloudVertexAiConfig": { + "name": "GoogleCloudVertexAiConfig", + "package": "serving", + "description": "", + "fields": { + "private_key": { + "name": "private_key", + "type": "string", + "description": "The Databricks secret key reference for a private key for the service\naccount which has access to the Google Cloud Vertex AI Service. See [Best\npractices for managing service account keys]. If you prefer to paste your\nAPI key directly, see `private_key_plaintext`. 
You must provide an API\nkey using one of the following fields: `private_key` or\n`private_key_plaintext`\n\n[Best practices for managing service account keys]:\nhttps://cloud.google.com/iam/docs/best-practices-for-managing-service-accou...", + "required": false + }, + "private_key_plaintext": { + "name": "private_key_plaintext", + "type": "string", + "description": "The private key for the service account which has access to the Google\nCloud Vertex AI Service provided as a plaintext secret. See [Best\npractices for managing service account keys]. If you prefer to reference\nyour key using Databricks Secrets, see `private_key`. You must provide an\nAPI key using one of the following fields: `private_key` or\n`private_key_plaintext`.\n\n[Best practices for managing service account keys]:\nhttps://cloud.google.com/iam/docs/best-practices-for-managing-service-accou...", + "required": false + }, + "project_id": { + "name": "project_id", + "type": "string", + "description": "This is the Google Cloud project id that the service account is\nassociated with.", + "required": false + }, + "region": { + "name": "region", + "type": "string", + "description": "This is the region for the Google Cloud Vertex AI Service. See [supported\nregions] for more details. Some models are only available in specific\nregions.\n\n[supported regions]:\nhttps://cloud.google.com/vertex-ai/docs/general/locations", + "required": false + } + } + }, + "serving.ListEndpointsResponse": { + "name": "ListEndpointsResponse", + "package": "serving", + "description": "", + "fields": { + "endpoints": { + "name": "endpoints", + "type": "[]ServingEndpoint", + "description": "The list of endpoints.", + "required": false + } + } + }, + "serving.ModelDataPlaneInfo": { + "name": "ModelDataPlaneInfo", + "package": "serving", + "description": "A representation of all DataPlaneInfo for operations that can be done on a\nmodel through Data Plane APIs.", + "fields": { + "query_info": { + "name": "query_info", + "type": "*DataPlaneInfo", + "description": "Information required to query DataPlane API 'query' endpoint.", + "required": false + } + } + }, + "serving.OpenAiConfig": { + "name": "OpenAiConfig", + "package": "serving", + "description": "Configs needed to create an OpenAI model route.", + "fields": { + "microsoft_entra_client_id": { + "name": "microsoft_entra_client_id", + "type": "string", + "description": "This field is only required for Azure AD OpenAI and is the Microsoft\nEntra Client ID.", + "required": false + }, + "microsoft_entra_client_secret": { + "name": "microsoft_entra_client_secret", + "type": "string", + "description": "The Databricks secret key reference for a client secret used for\nMicrosoft Entra ID authentication. If you prefer to paste your client\nsecret directly, see `microsoft_entra_client_secret_plaintext`. You must\nprovide an API key using one of the following fields:\n`microsoft_entra_client_secret` or\n`microsoft_entra_client_secret_plaintext`.", + "required": false + }, + "microsoft_entra_client_secret_plaintext": { + "name": "microsoft_entra_client_secret_plaintext", + "type": "string", + "description": "The client secret used for Microsoft Entra ID authentication provided as\na plaintext string. If you prefer to reference your key using Databricks\nSecrets, see `microsoft_entra_client_secret`. 
You must provide an API key\nusing one of the following fields: `microsoft_entra_client_secret` or\n`microsoft_entra_client_secret_plaintext`.", + "required": false + }, + "microsoft_entra_tenant_id": { + "name": "microsoft_entra_tenant_id", + "type": "string", + "description": "This field is only required for Azure AD OpenAI and is the Microsoft\nEntra Tenant ID.", + "required": false + }, + "openai_api_base": { + "name": "openai_api_base", + "type": "string", + "description": "This is a field to provide a customized base URl for the OpenAI API. For\nAzure OpenAI, this field is required, and is the base URL for the Azure\nOpenAI API service provided by Azure. For other OpenAI API types, this\nfield is optional, and if left unspecified, the standard OpenAI base URL\nis used.", + "required": false + }, + "openai_api_key": { + "name": "openai_api_key", + "type": "string", + "description": "The Databricks secret key reference for an OpenAI API key using the\nOpenAI or Azure service. If you prefer to paste your API key directly,\nsee `openai_api_key_plaintext`. You must provide an API key using one of\nthe following fields: `openai_api_key` or `openai_api_key_plaintext`.", + "required": false + }, + "openai_api_key_plaintext": { + "name": "openai_api_key_plaintext", + "type": "string", + "description": "The OpenAI API key using the OpenAI or Azure service provided as a\nplaintext string. If you prefer to reference your key using Databricks\nSecrets, see `openai_api_key`. You must provide an API key using one of\nthe following fields: `openai_api_key` or `openai_api_key_plaintext`.", + "required": false + }, + "openai_api_type": { + "name": "openai_api_type", + "type": "string", + "description": "This is an optional field to specify the type of OpenAI API to use. For\nAzure OpenAI, this field is required, and adjust this parameter to\nrepresent the preferred security access validation protocol. For access\ntoken validation, use azure. For authentication using Azure Active\nDirectory (Azure AD) use, azuread.", + "required": false + }, + "openai_api_version": { + "name": "openai_api_version", + "type": "string", + "description": "This is an optional field to specify the OpenAI API version. For Azure\nOpenAI, this field is required, and is the version of the Azure OpenAI\nservice to utilize, specified by a date.", + "required": false + }, + "openai_deployment_name": { + "name": "openai_deployment_name", + "type": "string", + "description": "This field is only required for Azure OpenAI and is the name of the\ndeployment resource for the Azure OpenAI service.", + "required": false + }, + "openai_organization": { + "name": "openai_organization", + "type": "string", + "description": "This is an optional field to specify the organization in OpenAI or Azure\nOpenAI.", + "required": false + } + } + }, + "serving.PaLmConfig": { + "name": "PaLmConfig", + "package": "serving", + "description": "", + "fields": { + "palm_api_key": { + "name": "palm_api_key", + "type": "string", + "description": "The Databricks secret key reference for a PaLM API key. If you prefer to\npaste your API key directly, see `palm_api_key_plaintext`. You must\nprovide an API key using one of the following fields: `palm_api_key` or\n`palm_api_key_plaintext`.", + "required": false + }, + "palm_api_key_plaintext": { + "name": "palm_api_key_plaintext", + "type": "string", + "description": "The PaLM API key provided as a plaintext string. If you prefer to\nreference your key using Databricks Secrets, see `palm_api_key`. 
You must\nprovide an API key using one of the following fields: `palm_api_key` or\n`palm_api_key_plaintext`.", + "required": false + } + } + }, + "serving.PatchServingEndpointTags": { + "name": "PatchServingEndpointTags", + "package": "serving", + "description": "", + "fields": { + "add_tags": { + "name": "add_tags", + "type": "[]EndpointTag", + "description": "List of endpoint tags to add", + "required": false + }, + "delete_tags": { + "name": "delete_tags", + "type": "[]string", + "description": "List of tag keys to delete", + "required": false + } + } + }, + "serving.PayloadTable": { + "name": "PayloadTable", + "package": "serving", + "description": "", + "fields": { + "name": { + "name": "name", + "type": "string", + "description": "", + "required": false + }, + "status": { + "name": "status", + "type": "string", + "description": "", + "required": false + }, + "status_message": { + "name": "status_message", + "type": "string", + "description": "", + "required": false + } + } + }, + "serving.PtEndpointCoreConfig": { + "name": "PtEndpointCoreConfig", + "package": "serving", + "description": "", + "fields": { + "served_entities": { + "name": "served_entities", + "type": "[]PtServedModel", + "description": "The list of served entities under the serving endpoint config.", + "required": false + }, + "traffic_config": { + "name": "traffic_config", + "type": "*TrafficConfig", + "description": "", + "required": false + } + } + }, + "serving.PtServedModel": { + "name": "PtServedModel", + "package": "serving", + "description": "", + "fields": { + "entity_name": { + "name": "entity_name", + "type": "string", + "description": "The name of the entity to be served. The entity may be a model in the\nDatabricks Model Registry, a model in the Unity Catalog (UC), or a\nfunction of type FEATURE_SPEC in the UC. If it is a UC object, the full\nname of the object should be given in the form of\n**catalog_name.schema_name.model_name**.", + "required": false + }, + "entity_version": { + "name": "entity_version", + "type": "string", + "description": "", + "required": false + }, + "name": { + "name": "name", + "type": "string", + "description": "The name of a served entity. It must be unique across an endpoint. A\nserved entity name can consist of alphanumeric characters, dashes, and\nunderscores. If not specified for an external model, this field defaults\nto external_model.name, with '.' 
and ':' replaced with '-', and if not\nspecified for other entities, it defaults to entity_name-entity_version.", + "required": false + }, + "provisioned_model_units": { + "name": "provisioned_model_units", + "type": "int64", + "description": "The number of model units to be provisioned.", + "required": false + } + } + }, + "serving.PutAiGatewayRequest": { + "name": "PutAiGatewayRequest", + "package": "serving", + "description": "", + "fields": { + "fallback_config": { + "name": "fallback_config", + "type": "*FallbackConfig", + "description": "Configuration for traffic fallback which auto fallbacks to other served\nentities if the request to a served entity fails with certain error\ncodes, to increase availability.", + "required": false + }, + "guardrails": { + "name": "guardrails", + "type": "*AiGatewayGuardrails", + "description": "Configuration for AI Guardrails to prevent unwanted data and unsafe data\nin requests and responses.", + "required": false + }, + "inference_table_config": { + "name": "inference_table_config", + "type": "*AiGatewayInferenceTableConfig", + "description": "Configuration for payload logging using inference tables. Use these\ntables to monitor and audit data being sent to and received from model\nAPIs and to improve model quality.", + "required": false + }, + "rate_limits": { + "name": "rate_limits", + "type": "[]AiGatewayRateLimit", + "description": "Configuration for rate limits which can be set to limit endpoint traffic.", + "required": false + }, + "usage_tracking_config": { + "name": "usage_tracking_config", + "type": "*AiGatewayUsageTrackingConfig", + "description": "Configuration to enable usage tracking using system tables. These tables\nallow you to monitor operational usage on endpoints and their associated\ncosts.", + "required": false + } + } + }, + "serving.PutAiGatewayResponse": { + "name": "PutAiGatewayResponse", + "package": "serving", + "description": "", + "fields": { + "fallback_config": { + "name": "fallback_config", + "type": "*FallbackConfig", + "description": "Configuration for traffic fallback which auto fallbacks to other served\nentities if the request to a served entity fails with certain error\ncodes, to increase availability.", + "required": false + }, + "guardrails": { + "name": "guardrails", + "type": "*AiGatewayGuardrails", + "description": "Configuration for AI Guardrails to prevent unwanted data and unsafe data\nin requests and responses.", + "required": false + }, + "inference_table_config": { + "name": "inference_table_config", + "type": "*AiGatewayInferenceTableConfig", + "description": "Configuration for payload logging using inference tables. Use these\ntables to monitor and audit data being sent to and received from model\nAPIs and to improve model quality.", + "required": false + }, + "rate_limits": { + "name": "rate_limits", + "type": "[]AiGatewayRateLimit", + "description": "Configuration for rate limits which can be set to limit endpoint traffic.", + "required": false + }, + "usage_tracking_config": { + "name": "usage_tracking_config", + "type": "*AiGatewayUsageTrackingConfig", + "description": "Configuration to enable usage tracking using system tables. 
These tables\nallow you to monitor operational usage on endpoints and their associated\ncosts.", + "required": false + } + } + }, + "serving.PutRequest": { + "name": "PutRequest", + "package": "serving", + "description": "", + "fields": { + "rate_limits": { + "name": "rate_limits", + "type": "[]RateLimit", + "description": "The list of endpoint rate limits.", + "required": false + } + } + }, + "serving.PutResponse": { + "name": "PutResponse", + "package": "serving", + "description": "", + "fields": { + "rate_limits": { + "name": "rate_limits", + "type": "[]RateLimit", + "description": "The list of endpoint rate limits.", + "required": false + } + } + }, + "serving.QueryEndpointInput": { + "name": "QueryEndpointInput", + "package": "serving", + "description": "", + "fields": { + "client_request_id": { + "name": "client_request_id", + "type": "string", + "description": "Optional user-provided request identifier that will be recorded in the\ninference table and the usage tracking table.", + "required": false + }, + "dataframe_records": { + "name": "dataframe_records", + "type": "[]any", + "description": "Pandas Dataframe input in the records orientation.", + "required": false + }, + "dataframe_split": { + "name": "dataframe_split", + "type": "*DataframeSplitInput", + "description": "Pandas Dataframe input in the split orientation.", + "required": false + }, + "extra_params": { + "name": "extra_params", + "type": "map[string]string", + "description": "The extra parameters field used ONLY for __completions, chat,__ and\n__embeddings external \u0026 foundation model__ serving endpoints. This is a\nmap of strings and should only be used with other external/foundation\nmodel query fields.", + "required": false + }, + "input": { + "name": "input", + "type": "any", + "description": "The input string (or array of strings) field used ONLY for __embeddings\nexternal \u0026 foundation model__ serving endpoints and is the only field\n(along with extra_params if needed) used by embeddings queries.", + "required": false + }, + "inputs": { + "name": "inputs", + "type": "any", + "description": "Tensor-based input in columnar format.", + "required": false + }, + "instances": { + "name": "instances", + "type": "[]any", + "description": "Tensor-based input in row format.", + "required": false + }, + "max_tokens": { + "name": "max_tokens", + "type": "int", + "description": "The max tokens field used ONLY for __completions__ and __chat external \u0026\nfoundation model__ serving endpoints. This is an integer and should only\nbe used with other chat/completions query fields.", + "required": false + }, + "messages": { + "name": "messages", + "type": "[]ChatMessage", + "description": "The messages field used ONLY for __chat external \u0026 foundation model__\nserving endpoints. This is an array of ChatMessage objects and should\nonly be used with other chat query fields.", + "required": false + }, + "n": { + "name": "n", + "type": "int", + "description": "The n (number of candidates) field used ONLY for __completions__ and\n__chat external \u0026 foundation model__ serving endpoints. 
This is an\ninteger between 1 and 5 with a default of 1 and should only be used with\nother chat/completions query fields.", + "required": false + }, + "prompt": { + "name": "prompt", + "type": "any", + "description": "The prompt string (or array of strings) field used ONLY for __completions\nexternal \u0026 foundation model__ serving endpoints and should only be used\nwith other completions query fields.", + "required": false + }, + "stop": { + "name": "stop", + "type": "[]string", + "description": "The stop sequences field used ONLY for __completions__ and __chat\nexternal \u0026 foundation model__ serving endpoints. This is a list of\nstrings and should only be used with other chat/completions query fields.", + "required": false + }, + "stream": { + "name": "stream", + "type": "bool", + "description": "The stream field used ONLY for __completions__ and __chat external \u0026\nfoundation model__ serving endpoints. This is a boolean defaulting to\nfalse and should only be used with other chat/completions query fields.", + "required": false + }, + "temperature": { + "name": "temperature", + "type": "float64", + "description": "The temperature field used ONLY for __completions__ and __chat external \u0026\nfoundation model__ serving endpoints. This is a float between 0.0 and 2.0\nwith a default of 1.0 and should only be used with other chat/completions\nquery fields.", + "required": false + }, + "usage_context": { + "name": "usage_context", + "type": "map[string]string", + "description": "Optional user-provided context that will be recorded in the usage\ntracking table.", + "required": false + } + } + }, + "serving.QueryEndpointResponse": { + "name": "QueryEndpointResponse", + "package": "serving", + "description": "", + "fields": { + "choices": { + "name": "choices", + "type": "[]V1ResponseChoiceElement", + "description": "The list of choices returned by the __chat or completions\nexternal/foundation model__ serving endpoint.", + "required": false + }, + "created": { + "name": "created", + "type": "int64", + "description": "The timestamp in seconds when the query was created in Unix time returned\nby a __completions or chat external/foundation model__ serving endpoint.", + "required": false + }, + "data": { + "name": "data", + "type": "[]EmbeddingsV1ResponseEmbeddingElement", + "description": "The list of the embeddings returned by the __embeddings\nexternal/foundation model__ serving endpoint.", + "required": false + }, + "id": { + "name": "id", + "type": "string", + "description": "The ID of the query that may be returned by a __completions or chat\nexternal/foundation model__ serving endpoint.", + "required": false + }, + "model": { + "name": "model", + "type": "string", + "description": "The name of the __external/foundation model__ used for querying. This is\nthe name of the model that was specified in the endpoint config.", + "required": false + }, + "object": { + "name": "object", + "type": "QueryEndpointResponseObject", + "description": "The type of object returned by the __external/foundation model__ serving\nendpoint, one of [text_completion, chat.completion, list (of\nembeddings)].", + "required": false + }, + "predictions": { + "name": "predictions", + "type": "[]any", + "description": "The predictions returned by the serving endpoint.", + "required": false + }, + "usage": { + "name": "usage", + "type": "*ExternalModelUsageElement", + "description": "The usage object that may be returned by the __external/foundation\nmodel__ serving endpoint. 
This contains information about the number of\ntokens used in the prompt and response.", + "required": false + } + } + }, + "serving.RateLimit": { + "name": "RateLimit", + "package": "serving", + "description": "", + "fields": { + "calls": { + "name": "calls", + "type": "int64", + "description": "Used to specify how many calls are allowed for a key within the\nrenewal_period.", + "required": false + }, + "key": { + "name": "key", + "type": "RateLimitKey", + "description": "Key field for a serving endpoint rate limit. Currently, only 'user' and\n'endpoint' are supported, with 'endpoint' being the default if not\nspecified.", + "required": false + }, + "renewal_period": { + "name": "renewal_period", + "type": "RateLimitRenewalPeriod", + "description": "Renewal period field for a serving endpoint rate limit. Currently, only\n'minute' is supported.", + "required": false + } + } + }, + "serving.Route": { + "name": "Route", + "package": "serving", + "description": "", + "fields": { + "served_entity_name": { + "name": "served_entity_name", + "type": "string", + "description": "", + "required": false + }, + "served_model_name": { + "name": "served_model_name", + "type": "string", + "description": "The name of the served model this route configures traffic for.", + "required": false + }, + "traffic_percentage": { + "name": "traffic_percentage", + "type": "int", + "description": "The percentage of endpoint traffic to send to this route. It must be an\ninteger between 0 and 100 inclusive.", + "required": false + } + } + }, + "serving.ServedEntityInput": { + "name": "ServedEntityInput", + "package": "serving", + "description": "", + "fields": { + "entity_name": { + "name": "entity_name", + "type": "string", + "description": "The name of the entity to be served. The entity may be a model in the\nDatabricks Model Registry, a model in the Unity Catalog (UC), or a\nfunction of type FEATURE_SPEC in the UC. If it is a UC object, the full\nname of the object should be given in the form of\n**catalog_name.schema_name.model_name**.", + "required": false + }, + "entity_version": { + "name": "entity_version", + "type": "string", + "description": "", + "required": false + }, + "environment_vars": { + "name": "environment_vars", + "type": "map[string]string", + "description": "An object containing a set of optional, user-specified environment\nvariable key-value pairs used for serving this entity. Note: this is an\nexperimental feature and subject to change. Example entity environment\nvariables that refer to Databricks secrets: `{\"OPENAI_API_KEY\":\n\"{{secrets/my_scope/my_key}}\", \"DATABRICKS_TOKEN\":\n\"{{secrets/my_scope2/my_key2}}\"}`", + "required": false + }, + "external_model": { + "name": "external_model", + "type": "*ExternalModel", + "description": "The external model to be served. NOTE: Only one of external_model and\n(entity_name, entity_version, workload_size, workload_type, and\nscale_to_zero_enabled) can be specified with the latter set being used\nfor custom model serving for a Databricks registered model. For an\nexisting endpoint with external_model, it cannot be updated to an\nendpoint without external_model. If the endpoint is created without\nexternal_model, users cannot update it to add external_model later. 
The\ntask type of all ex...", + "required": false + }, + "instance_profile_arn": { + "name": "instance_profile_arn", + "type": "string", + "description": "ARN of the instance profile that the served entity uses to access AWS\nresources.", + "required": false + }, + "max_provisioned_concurrency": { + "name": "max_provisioned_concurrency", + "type": "int", + "description": "The maximum provisioned concurrency that the endpoint can scale up to. Do\nnot use if workload_size is specified.", + "required": false + }, + "max_provisioned_throughput": { + "name": "max_provisioned_throughput", + "type": "int", + "description": "The maximum tokens per second that the endpoint can scale up to.", + "required": false + }, + "min_provisioned_concurrency": { + "name": "min_provisioned_concurrency", + "type": "int", + "description": "The minimum provisioned concurrency that the endpoint can scale down to.\nDo not use if workload_size is specified.", + "required": false + }, + "min_provisioned_throughput": { + "name": "min_provisioned_throughput", + "type": "int", + "description": "The minimum tokens per second that the endpoint can scale down to.", + "required": false + }, + "name": { + "name": "name", + "type": "string", + "description": "The name of a served entity. It must be unique across an endpoint. A\nserved entity name can consist of alphanumeric characters, dashes, and\nunderscores. If not specified for an external model, this field defaults\nto external_model.name, with '.' and ':' replaced with '-', and if not\nspecified for other entities, it defaults to entity_name-entity_version.", + "required": false + }, + "provisioned_model_units": { + "name": "provisioned_model_units", + "type": "int64", + "description": "The number of model units provisioned.", + "required": false + }, + "scale_to_zero_enabled": { + "name": "scale_to_zero_enabled", + "type": "bool", + "description": "Whether the compute resources for the served entity should scale down to\nzero.", + "required": false + }, + "workload_size": { + "name": "workload_size", + "type": "string", + "description": "The workload size of the served entity. The workload size corresponds to\na range of provisioned concurrency that the compute autoscales between. A\nsingle unit of provisioned concurrency can process one request at a time.\nValid workload sizes are \"Small\" (4 - 4 provisioned concurrency),\n\"Medium\" (8 - 16 provisioned concurrency), and \"Large\" (16 - 64\nprovisioned concurrency). Additional custom workload sizes can also be\nused when available in the workspace. If scale-to-zero is enabled, the\nlowe...", + "required": false + }, + "workload_type": { + "name": "workload_type", + "type": "ServingModelWorkloadType", + "description": "The workload type of the served entity. The workload type selects which\ntype of compute to use in the endpoint. The default value for this\nparameter is \"CPU\". For deep learning workloads, GPU acceleration is\navailable by selecting workload types like GPU_SMALL and others. 
See the\navailable [GPU types].\n\n[GPU types]: https://docs.databricks.com/en/machine-learning/model-serving/create-manage-serving-endpoints.html#gpu-workload-types", + "required": false + } + } + }, + "serving.ServedEntityOutput": { + "name": "ServedEntityOutput", + "package": "serving", + "description": "", + "fields": { + "creation_timestamp": { + "name": "creation_timestamp", + "type": "int64", + "description": "", + "required": false + }, + "creator": { + "name": "creator", + "type": "string", + "description": "", + "required": false + }, + "entity_name": { + "name": "entity_name", + "type": "string", + "description": "The name of the entity to be served. The entity may be a model in the\nDatabricks Model Registry, a model in the Unity Catalog (UC), or a\nfunction of type FEATURE_SPEC in the UC. If it is a UC object, the full\nname of the object should be given in the form of\n**catalog_name.schema_name.model_name**.", + "required": false + }, + "entity_version": { + "name": "entity_version", + "type": "string", + "description": "", + "required": false + }, + "environment_vars": { + "name": "environment_vars", + "type": "map[string]string", + "description": "An object containing a set of optional, user-specified environment\nvariable key-value pairs used for serving this entity. Note: this is an\nexperimental feature and subject to change. Example entity environment\nvariables that refer to Databricks secrets: `{\"OPENAI_API_KEY\":\n\"{{secrets/my_scope/my_key}}\", \"DATABRICKS_TOKEN\":\n\"{{secrets/my_scope2/my_key2}}\"}`", + "required": false + }, + "external_model": { + "name": "external_model", + "type": "*ExternalModel", + "description": "The external model to be served. NOTE: Only one of external_model and\n(entity_name, entity_version, workload_size, workload_type, and\nscale_to_zero_enabled) can be specified with the latter set being used\nfor custom model serving for a Databricks registered model. For an\nexisting endpoint with external_model, it cannot be updated to an\nendpoint without external_model. If the endpoint is created without\nexternal_model, users cannot update it to add external_model later. The\ntask type of all ex...", + "required": false + }, + "foundation_model": { + "name": "foundation_model", + "type": "*FoundationModel", + "description": "", + "required": false + }, + "instance_profile_arn": { + "name": "instance_profile_arn", + "type": "string", + "description": "ARN of the instance profile that the served entity uses to access AWS\nresources.", + "required": false + }, + "max_provisioned_concurrency": { + "name": "max_provisioned_concurrency", + "type": "int", + "description": "The maximum provisioned concurrency that the endpoint can scale up to. 
Do\nnot use if workload_size is specified.", + "required": false + }, + "max_provisioned_throughput": { + "name": "max_provisioned_throughput", + "type": "int", + "description": "The maximum tokens per second that the endpoint can scale up to.", + "required": false + }, + "min_provisioned_concurrency": { + "name": "min_provisioned_concurrency", + "type": "int", + "description": "The minimum provisioned concurrency that the endpoint can scale down to.\nDo not use if workload_size is specified.", + "required": false + }, + "min_provisioned_throughput": { + "name": "min_provisioned_throughput", + "type": "int", + "description": "The minimum tokens per second that the endpoint can scale down to.", + "required": false + }, + "name": { + "name": "name", + "type": "string", + "description": "The name of a served entity. It must be unique across an endpoint. A\nserved entity name can consist of alphanumeric characters, dashes, and\nunderscores. If not specified for an external model, this field defaults\nto external_model.name, with '.' and ':' replaced with '-', and if not\nspecified for other entities, it defaults to entity_name-entity_version.", + "required": false + }, + "provisioned_model_units": { + "name": "provisioned_model_units", + "type": "int64", + "description": "The number of model units provisioned.", + "required": false + }, + "scale_to_zero_enabled": { + "name": "scale_to_zero_enabled", + "type": "bool", + "description": "Whether the compute resources for the served entity should scale down to\nzero.", + "required": false + }, + "state": { + "name": "state", + "type": "*ServedModelState", + "description": "", + "required": false + }, + "workload_size": { + "name": "workload_size", + "type": "string", + "description": "The workload size of the served entity. The workload size corresponds to\na range of provisioned concurrency that the compute autoscales between. A\nsingle unit of provisioned concurrency can process one request at a time.\nValid workload sizes are \"Small\" (4 - 4 provisioned concurrency),\n\"Medium\" (8 - 16 provisioned concurrency), and \"Large\" (16 - 64\nprovisioned concurrency). Additional custom workload sizes can also be\nused when available in the workspace. If scale-to-zero is enabled, the\nlowe...", + "required": false + }, + "workload_type": { + "name": "workload_type", + "type": "ServingModelWorkloadType", + "description": "The workload type of the served entity. The workload type selects which\ntype of compute to use in the endpoint. The default value for this\nparameter is \"CPU\". For deep learning workloads, GPU acceleration is\navailable by selecting workload types like GPU_SMALL and others. 
See the\navailable [GPU types].\n\n[GPU types]: https://docs.databricks.com/en/machine-learning/model-serving/create-manage-serving-endpoints.html#gpu-workload-types", + "required": false + } + } + }, + "serving.ServedEntitySpec": { + "name": "ServedEntitySpec", + "package": "serving", + "description": "", + "fields": { + "entity_name": { + "name": "entity_name", + "type": "string", + "description": "", + "required": false + }, + "entity_version": { + "name": "entity_version", + "type": "string", + "description": "", + "required": false + }, + "external_model": { + "name": "external_model", + "type": "*ExternalModel", + "description": "", + "required": false + }, + "foundation_model": { + "name": "foundation_model", + "type": "*FoundationModel", + "description": "", + "required": false + }, + "name": { + "name": "name", + "type": "string", + "description": "", + "required": false + } + } + }, + "serving.ServedModelInput": { + "name": "ServedModelInput", + "package": "serving", + "description": "", + "fields": { + "environment_vars": { + "name": "environment_vars", + "type": "map[string]string", + "description": "An object containing a set of optional, user-specified environment\nvariable key-value pairs used for serving this entity. Note: this is an\nexperimental feature and subject to change. Example entity environment\nvariables that refer to Databricks secrets: `{\"OPENAI_API_KEY\":\n\"{{secrets/my_scope/my_key}}\", \"DATABRICKS_TOKEN\":\n\"{{secrets/my_scope2/my_key2}}\"}`", + "required": false + }, + "instance_profile_arn": { + "name": "instance_profile_arn", + "type": "string", + "description": "ARN of the instance profile that the served entity uses to access AWS\nresources.", + "required": false + }, + "max_provisioned_concurrency": { + "name": "max_provisioned_concurrency", + "type": "int", + "description": "The maximum provisioned concurrency that the endpoint can scale up to. Do\nnot use if workload_size is specified.", + "required": false + }, + "max_provisioned_throughput": { + "name": "max_provisioned_throughput", + "type": "int", + "description": "The maximum tokens per second that the endpoint can scale up to.", + "required": false + }, + "min_provisioned_concurrency": { + "name": "min_provisioned_concurrency", + "type": "int", + "description": "The minimum provisioned concurrency that the endpoint can scale down to.\nDo not use if workload_size is specified.", + "required": false + }, + "min_provisioned_throughput": { + "name": "min_provisioned_throughput", + "type": "int", + "description": "The minimum tokens per second that the endpoint can scale down to.", + "required": false + }, + "model_name": { + "name": "model_name", + "type": "string", + "description": "", + "required": false + }, + "model_version": { + "name": "model_version", + "type": "string", + "description": "", + "required": false + }, + "name": { + "name": "name", + "type": "string", + "description": "The name of a served entity. It must be unique across an endpoint. A\nserved entity name can consist of alphanumeric characters, dashes, and\nunderscores. If not specified for an external model, this field defaults\nto external_model.name, with '.' 
and ':' replaced with '-', and if not\nspecified for other entities, it defaults to entity_name-entity_version.", + "required": false + }, + "provisioned_model_units": { + "name": "provisioned_model_units", + "type": "int64", + "description": "The number of model units provisioned.", + "required": false + }, + "scale_to_zero_enabled": { + "name": "scale_to_zero_enabled", + "type": "bool", + "description": "Whether the compute resources for the served entity should scale down to\nzero.", + "required": false + }, + "workload_size": { + "name": "workload_size", + "type": "string", + "description": "The workload size of the served entity. The workload size corresponds to\na range of provisioned concurrency that the compute autoscales between. A\nsingle unit of provisioned concurrency can process one request at a time.\nValid workload sizes are \"Small\" (4 - 4 provisioned concurrency),\n\"Medium\" (8 - 16 provisioned concurrency), and \"Large\" (16 - 64\nprovisioned concurrency). Additional custom workload sizes can also be\nused when available in the workspace. If scale-to-zero is enabled, the\nlowe...", + "required": false + }, + "workload_type": { + "name": "workload_type", + "type": "ServedModelInputWorkloadType", + "description": "The workload type of the served entity. The workload type selects which\ntype of compute to use in the endpoint. The default value for this\nparameter is \"CPU\". For deep learning workloads, GPU acceleration is\navailable by selecting workload types like GPU_SMALL and others. See the\navailable [GPU types].\n\n[GPU types]: https://docs.databricks.com/en/machine-learning/model-serving/create-manage-serving-endpoints.html#gpu-workload-types", + "required": false + } + } + }, + "serving.ServedModelInputWorkloadType": { + "name": "ServedModelInputWorkloadType", + "package": "serving", + "description": "Please keep this in sync with with workload types in InferenceEndpointEntities.scala", + "fields": {} + }, + "serving.ServedModelOutput": { + "name": "ServedModelOutput", + "package": "serving", + "description": "", + "fields": { + "creation_timestamp": { + "name": "creation_timestamp", + "type": "int64", + "description": "", + "required": false + }, + "creator": { + "name": "creator", + "type": "string", + "description": "", + "required": false + }, + "environment_vars": { + "name": "environment_vars", + "type": "map[string]string", + "description": "An object containing a set of optional, user-specified environment\nvariable key-value pairs used for serving this entity. Note: this is an\nexperimental feature and subject to change. Example entity environment\nvariables that refer to Databricks secrets: `{\"OPENAI_API_KEY\":\n\"{{secrets/my_scope/my_key}}\", \"DATABRICKS_TOKEN\":\n\"{{secrets/my_scope2/my_key2}}\"}`", + "required": false + }, + "instance_profile_arn": { + "name": "instance_profile_arn", + "type": "string", + "description": "ARN of the instance profile that the served entity uses to access AWS\nresources.", + "required": false + }, + "max_provisioned_concurrency": { + "name": "max_provisioned_concurrency", + "type": "int", + "description": "The maximum provisioned concurrency that the endpoint can scale up to. 
Do\nnot use if workload_size is specified.", + "required": false + }, + "min_provisioned_concurrency": { + "name": "min_provisioned_concurrency", + "type": "int", + "description": "The minimum provisioned concurrency that the endpoint can scale down to.\nDo not use if workload_size is specified.", + "required": false + }, + "model_name": { + "name": "model_name", + "type": "string", + "description": "", + "required": false + }, + "model_version": { + "name": "model_version", + "type": "string", + "description": "", + "required": false + }, + "name": { + "name": "name", + "type": "string", + "description": "The name of a served entity. It must be unique across an endpoint. A\nserved entity name can consist of alphanumeric characters, dashes, and\nunderscores. If not specified for an external model, this field defaults\nto external_model.name, with '.' and ':' replaced with '-', and if not\nspecified for other entities, it defaults to entity_name-entity_version.", + "required": false + }, + "provisioned_model_units": { + "name": "provisioned_model_units", + "type": "int64", + "description": "The number of model units provisioned.", + "required": false + }, + "scale_to_zero_enabled": { + "name": "scale_to_zero_enabled", + "type": "bool", + "description": "Whether the compute resources for the served entity should scale down to\nzero.", + "required": false + }, + "state": { + "name": "state", + "type": "*ServedModelState", + "description": "", + "required": false + }, + "workload_size": { + "name": "workload_size", + "type": "string", + "description": "The workload size of the served entity. The workload size corresponds to\na range of provisioned concurrency that the compute autoscales between. A\nsingle unit of provisioned concurrency can process one request at a time.\nValid workload sizes are \"Small\" (4 - 4 provisioned concurrency),\n\"Medium\" (8 - 16 provisioned concurrency), and \"Large\" (16 - 64\nprovisioned concurrency). Additional custom workload sizes can also be\nused when available in the workspace. If scale-to-zero is enabled, the\nlowe...", + "required": false + }, + "workload_type": { + "name": "workload_type", + "type": "ServingModelWorkloadType", + "description": "The workload type of the served entity. The workload type selects which\ntype of compute to use in the endpoint. The default value for this\nparameter is \"CPU\". For deep learning workloads, GPU acceleration is\navailable by selecting workload types like GPU_SMALL and others. 
See the\navailable [GPU types].\n\n[GPU types]: https://docs.databricks.com/en/machine-learning/model-serving/create-manage-serving-endpoints.html#gpu-workload-types", + "required": false + } + } + }, + "serving.ServedModelSpec": { + "name": "ServedModelSpec", + "package": "serving", + "description": "", + "fields": { + "model_name": { + "name": "model_name", + "type": "string", + "description": "Only one of model_name and entity_name should be populated", + "required": false + }, + "model_version": { + "name": "model_version", + "type": "string", + "description": "Only one of model_version and entity_version should be populated", + "required": false + }, + "name": { + "name": "name", + "type": "string", + "description": "", + "required": false + } + } + }, + "serving.ServedModelState": { + "name": "ServedModelState", + "package": "serving", + "description": "", + "fields": { + "deployment": { + "name": "deployment", + "type": "ServedModelStateDeployment", + "description": "", + "required": false + }, + "deployment_state_message": { + "name": "deployment_state_message", + "type": "string", + "description": "", + "required": false + } + } + }, + "serving.ServerLogsResponse": { + "name": "ServerLogsResponse", + "package": "serving", + "description": "", + "fields": { + "logs": { + "name": "logs", + "type": "string", + "description": "The most recent log lines of the model server processing invocation\nrequests.", + "required": false + } + } + }, + "serving.ServingEndpoint": { + "name": "ServingEndpoint", + "package": "serving", + "description": "", + "fields": { + "ai_gateway": { + "name": "ai_gateway", + "type": "*AiGatewayConfig", + "description": "The AI Gateway configuration for the serving endpoint. NOTE: External\nmodel, provisioned throughput, and pay-per-token endpoints are fully\nsupported; agent endpoints currently only support inference tables.", + "required": false + }, + "budget_policy_id": { + "name": "budget_policy_id", + "type": "string", + "description": "The budget policy associated with the endpoint.", + "required": false + }, + "config": { + "name": "config", + "type": "*EndpointCoreConfigSummary", + "description": "The config that is currently being served by the endpoint.", + "required": false + }, + "creation_timestamp": { + "name": "creation_timestamp", + "type": "int64", + "description": "The timestamp when the endpoint was created in Unix time.", + "required": false + }, + "creator": { + "name": "creator", + "type": "string", + "description": "The email of the user who created the serving endpoint.", + "required": false + }, + "description": { + "name": "description", + "type": "string", + "description": "Description of the endpoint", + "required": false + }, + "id": { + "name": "id", + "type": "string", + "description": "System-generated ID of the endpoint, included to be used by the\nPermissions API.", + "required": false + }, + "last_updated_timestamp": { + "name": "last_updated_timestamp", + "type": "int64", + "description": "The timestamp when the endpoint was last updated by a user in Unix time.", + "required": false + }, + "name": { + "name": "name", + "type": "string", + "description": "The name of the serving endpoint.", + "required": false + }, + "state": { + "name": "state", + "type": "*EndpointState", + "description": "Information corresponding to the state of the serving endpoint.", + "required": false + }, + "tags": { + "name": "tags", + "type": "[]EndpointTag", + "description": "Tags attached to the serving endpoint.", + "required": false + }, + 
"task": { + "name": "task", + "type": "string", + "description": "The task type of the serving endpoint.", + "required": false + }, + "usage_policy_id": { + "name": "usage_policy_id", + "type": "string", + "description": "The usage policy associated with serving endpoint.", + "required": false + } + } + }, + "serving.ServingEndpointAccessControlRequest": { + "name": "ServingEndpointAccessControlRequest", + "package": "serving", + "description": "", + "fields": { + "group_name": { + "name": "group_name", + "type": "string", + "description": "name of the group", + "required": false + }, + "permission_level": { + "name": "permission_level", + "type": "ServingEndpointPermissionLevel", + "description": "", + "required": false + }, + "service_principal_name": { + "name": "service_principal_name", + "type": "string", + "description": "application ID of a service principal", + "required": false + }, + "user_name": { + "name": "user_name", + "type": "string", + "description": "name of the user", + "required": false + } + } + }, + "serving.ServingEndpointAccessControlResponse": { + "name": "ServingEndpointAccessControlResponse", + "package": "serving", + "description": "", + "fields": { + "all_permissions": { + "name": "all_permissions", + "type": "[]ServingEndpointPermission", + "description": "All permissions.", + "required": false + }, + "display_name": { + "name": "display_name", + "type": "string", + "description": "Display name of the user or service principal.", + "required": false + }, + "group_name": { + "name": "group_name", + "type": "string", + "description": "name of the group", + "required": false + }, + "service_principal_name": { + "name": "service_principal_name", + "type": "string", + "description": "Name of the service principal.", + "required": false + }, + "user_name": { + "name": "user_name", + "type": "string", + "description": "name of the user", + "required": false + } + } + }, + "serving.ServingEndpointDetailed": { + "name": "ServingEndpointDetailed", + "package": "serving", + "description": "", + "fields": { + "ai_gateway": { + "name": "ai_gateway", + "type": "*AiGatewayConfig", + "description": "The AI Gateway configuration for the serving endpoint. 
NOTE: External\nmodel, provisioned throughput, and pay-per-token endpoints are fully\nsupported; agent endpoints currently only support inference tables.", + "required": false + }, + "budget_policy_id": { + "name": "budget_policy_id", + "type": "string", + "description": "The budget policy associated with the endpoint.", + "required": false + }, + "config": { + "name": "config", + "type": "*EndpointCoreConfigOutput", + "description": "The config that is currently being served by the endpoint.", + "required": false + }, + "creation_timestamp": { + "name": "creation_timestamp", + "type": "int64", + "description": "The timestamp when the endpoint was created in Unix time.", + "required": false + }, + "creator": { + "name": "creator", + "type": "string", + "description": "The email of the user who created the serving endpoint.", + "required": false + }, + "data_plane_info": { + "name": "data_plane_info", + "type": "*ModelDataPlaneInfo", + "description": "Information required to query DataPlane APIs.", + "required": false + }, + "description": { + "name": "description", + "type": "string", + "description": "Description of the serving model", + "required": false + }, + "email_notifications": { + "name": "email_notifications", + "type": "*EmailNotifications", + "description": "Email notification settings.", + "required": false + }, + "endpoint_url": { + "name": "endpoint_url", + "type": "string", + "description": "Endpoint invocation url if route optimization is enabled for endpoint", + "required": false + }, + "id": { + "name": "id", + "type": "string", + "description": "System-generated ID of the endpoint. This is used to refer to the\nendpoint in the Permissions API", + "required": false + }, + "last_updated_timestamp": { + "name": "last_updated_timestamp", + "type": "int64", + "description": "The timestamp when the endpoint was last updated by a user in Unix time.", + "required": false + }, + "name": { + "name": "name", + "type": "string", + "description": "The name of the serving endpoint.", + "required": false + }, + "pending_config": { + "name": "pending_config", + "type": "*EndpointPendingConfig", + "description": "The config that the endpoint is attempting to update to.", + "required": false + }, + "permission_level": { + "name": "permission_level", + "type": "ServingEndpointDetailedPermissionLevel", + "description": "The permission level of the principal making the request.", + "required": false + }, + "route_optimized": { + "name": "route_optimized", + "type": "bool", + "description": "Boolean representing if route optimization has been enabled for the\nendpoint", + "required": false + }, + "state": { + "name": "state", + "type": "*EndpointState", + "description": "Information corresponding to the state of the serving endpoint.", + "required": false + }, + "tags": { + "name": "tags", + "type": "[]EndpointTag", + "description": "Tags attached to the serving endpoint.", + "required": false + }, + "task": { + "name": "task", + "type": "string", + "description": "The task type of the serving endpoint.", + "required": false + } + } + }, + "serving.ServingEndpointPermission": { + "name": "ServingEndpointPermission", + "package": "serving", + "description": "", + "fields": { + "inherited": { + "name": "inherited", + "type": "bool", + "description": "", + "required": false + }, + "inherited_from_object": { + "name": "inherited_from_object", + "type": "[]string", + "description": "", + "required": false + }, + "permission_level": { + "name": "permission_level", + "type": 
"ServingEndpointPermissionLevel", + "description": "", + "required": false + } + } + }, + "serving.ServingEndpointPermissions": { + "name": "ServingEndpointPermissions", + "package": "serving", + "description": "", + "fields": { + "access_control_list": { + "name": "access_control_list", + "type": "[]ServingEndpointAccessControlResponse", + "description": "", + "required": false + }, + "object_id": { + "name": "object_id", + "type": "string", + "description": "", + "required": false + }, + "object_type": { + "name": "object_type", + "type": "string", + "description": "", + "required": false + } + } + }, + "serving.ServingEndpointPermissionsDescription": { + "name": "ServingEndpointPermissionsDescription", + "package": "serving", + "description": "", + "fields": { + "description": { + "name": "description", + "type": "string", + "description": "", + "required": false + }, + "permission_level": { + "name": "permission_level", + "type": "ServingEndpointPermissionLevel", + "description": "", + "required": false + } + } + }, + "serving.ServingEndpointPermissionsRequest": { + "name": "ServingEndpointPermissionsRequest", + "package": "serving", + "description": "", + "fields": { + "access_control_list": { + "name": "access_control_list", + "type": "[]ServingEndpointAccessControlRequest", + "description": "", + "required": false + } + } + }, + "serving.ServingModelWorkloadType": { + "name": "ServingModelWorkloadType", + "package": "serving", + "description": "Please keep this in sync with with workload types in InferenceEndpointEntities.scala", + "fields": {} + }, + "serving.TrafficConfig": { + "name": "TrafficConfig", + "package": "serving", + "description": "", + "fields": { + "routes": { + "name": "routes", + "type": "[]Route", + "description": "The list of routes that define traffic to each served entity.", + "required": false + } + } + }, + "serving.UpdateInferenceEndpointNotifications": { + "name": "UpdateInferenceEndpointNotifications", + "package": "serving", + "description": "", + "fields": { + "email_notifications": { + "name": "email_notifications", + "type": "*EmailNotifications", + "description": "The email notification settings to update. 
Specify email addresses to\nnotify when endpoint state changes occur.", + "required": false + } + } + }, + "serving.UpdateInferenceEndpointNotificationsResponse": { + "name": "UpdateInferenceEndpointNotificationsResponse", + "package": "serving", + "description": "", + "fields": { + "email_notifications": { + "name": "email_notifications", + "type": "*EmailNotifications", + "description": "", + "required": false + }, + "name": { + "name": "name", + "type": "string", + "description": "", + "required": false + } + } + }, + "serving.UpdateProvisionedThroughputEndpointConfigRequest": { + "name": "UpdateProvisionedThroughputEndpointConfigRequest", + "package": "serving", + "description": "", + "fields": { + "config": { + "name": "config", + "type": "PtEndpointCoreConfig", + "description": "", + "required": false + } + } + }, + "serving.V1ResponseChoiceElement": { + "name": "V1ResponseChoiceElement", + "package": "serving", + "description": "", + "fields": { + "finishReason": { + "name": "finishReason", + "type": "string", + "description": "The finish reason returned by the endpoint.", + "required": false + }, + "index": { + "name": "index", + "type": "int", + "description": "The index of the choice in the __chat or completions__ response.", + "required": false + }, + "logprobs": { + "name": "logprobs", + "type": "int", + "description": "The logprobs returned only by the __completions__ endpoint.", + "required": false + }, + "message": { + "name": "message", + "type": "*ChatMessage", + "description": "The message response from the __chat__ endpoint.", + "required": false + }, + "text": { + "name": "text", + "type": "string", + "description": "The text response from the __completions__ endpoint.", + "required": false + } + } + }, + "serving.WaitGetServingEndpointNotUpdating": { + "name": "WaitGetServingEndpointNotUpdating", + "package": "serving", + "description": "WaitGetServingEndpointNotUpdating is a wrapper that calls [ServingEndpointsAPI.WaitGetServingEndpointNotUpdating] and waits to reach NOT_UPDATING state.", + "fields": { + "Poll": { + "name": "Poll", + "type": "func(time.Duration, func(*ServingEndpointDetailed)) (*ServingEndpointDetailed, error)", + "description": "", + "required": false + }, + "Response": { + "name": "Response", + "type": "*R", + "description": "", + "required": false + }, + "callback": { + "name": "callback", + "type": "func(*ServingEndpointDetailed)", + "description": "", + "required": false + }, + "name": { + "name": "name", + "type": "string", + "description": "", + "required": false + }, + "timeout": { + "name": "timeout", + "type": "time.Duration", + "description": "", + "required": false + } + } + }, + "serving.dataPlaneServiceImpl": { + "name": "dataPlaneServiceImpl", + "package": "serving", + "description": "", + "fields": { + "infos": { + "name": "infos", + "type": "map[string]*DataPlaneInfo", + "description": "", + "required": false + }, + "mu": { + "name": "mu", + "type": "sync.Mutex", + "description": "This class can be shared across multiple threads.\nThis mutex is used to synchronize access to the infos and tokens maps.", + "required": false + }, + "tokens": { + "name": "tokens", + "type": "map[string]*goauth.Token", + "description": "", + "required": false + } + } + }, + "serving.servingEndpointsDataPlaneImpl": { + "name": "servingEndpointsDataPlaneImpl", + "package": "serving", + "description": "unexported type that holds implementations of just ServingEndpointsDataPlane API methods", + "fields": { + "client": { + "name": "client", + "type": 
"*client.DatabricksClient", + "description": "", + "required": false + }, + "controlPlane": { + "name": "controlPlane", + "type": "*ServingEndpointsAPI", + "description": "", + "required": false + }, + "dpts": { + "name": "dpts", + "type": "dataplane.EndpointTokenSource", + "description": "", + "required": false + }, + "infos": { + "name": "infos", + "type": "sync.Map", + "description": "", + "required": false + } + } + }, + "serving.servingEndpointsImpl": { + "name": "servingEndpointsImpl", + "package": "serving", + "description": "unexported type that holds implementations of just ServingEndpoints API methods", + "fields": { + "client": { + "name": "client", + "type": "*client.DatabricksClient", + "description": "", + "required": false + } + } + }, + "sql.AccessControl": { + "name": "AccessControl", + "package": "sql", + "description": "", + "fields": { + "group_name": { + "name": "group_name", + "type": "string", + "description": "", + "required": false + }, + "permission_level": { + "name": "permission_level", + "type": "PermissionLevel", + "description": "* `CAN_VIEW`: Can view the query * `CAN_RUN`: Can run the query *\n`CAN_EDIT`: Can edit the query * `CAN_MANAGE`: Can manage the query", + "required": false + }, + "user_name": { + "name": "user_name", + "type": "string", + "description": "", + "required": false + } + } + }, + "sql.Alert": { + "name": "Alert", + "package": "sql", + "description": "", + "fields": { + "condition": { + "name": "condition", + "type": "*AlertCondition", + "description": "Trigger conditions of the alert.", + "required": false + }, + "create_time": { + "name": "create_time", + "type": "string", + "description": "The timestamp indicating when the alert was created.", + "required": false + }, + "custom_body": { + "name": "custom_body", + "type": "string", + "description": "Custom body of alert notification, if it exists. See [here] for custom\ntemplating instructions.\n\n[here]: https://docs.databricks.com/sql/user/alerts/index.html", + "required": false + }, + "custom_subject": { + "name": "custom_subject", + "type": "string", + "description": "Custom subject of alert notification, if it exists. This can include\nemail subject entries and Slack notification headers, for example. See\n[here] for custom templating instructions.\n\n[here]: https://docs.databricks.com/sql/user/alerts/index.html", + "required": false + }, + "display_name": { + "name": "display_name", + "type": "string", + "description": "The display name of the alert.", + "required": false + }, + "id": { + "name": "id", + "type": "string", + "description": "UUID identifying the alert.", + "required": false + }, + "lifecycle_state": { + "name": "lifecycle_state", + "type": "LifecycleState", + "description": "The workspace state of the alert. Used for tracking trashed status.", + "required": false + }, + "notify_on_ok": { + "name": "notify_on_ok", + "type": "bool", + "description": "Whether to notify alert subscribers when alert returns back to normal.", + "required": false + }, + "owner_user_name": { + "name": "owner_user_name", + "type": "string", + "description": "The owner's username. 
This field is set to \"Unavailable\" if the user has\nbeen deleted.", + "required": false + }, + "parent_path": { + "name": "parent_path", + "type": "string", + "description": "The workspace path of the folder containing the alert.", + "required": false + }, + "query_id": { + "name": "query_id", + "type": "string", + "description": "UUID of the query attached to the alert.", + "required": false + }, + "seconds_to_retrigger": { + "name": "seconds_to_retrigger", + "type": "int", + "description": "Number of seconds an alert must wait after being triggered to rearm\nitself. After rearming, it can be triggered again. If 0 or not specified,\nthe alert will not be triggered again.", + "required": false + }, + "state": { + "name": "state", + "type": "AlertState", + "description": "Current state of the alert's trigger status. This field is set to UNKNOWN\nif the alert has not yet been evaluated or ran into an error during the\nlast evaluation.", + "required": false + }, + "trigger_time": { + "name": "trigger_time", + "type": "string", + "description": "Timestamp when the alert was last triggered, if the alert has been\ntriggered before.", + "required": false + }, + "update_time": { + "name": "update_time", + "type": "string", + "description": "The timestamp indicating when the alert was updated.", + "required": false + } + } + }, + "sql.AlertCondition": { + "name": "AlertCondition", + "package": "sql", + "description": "", + "fields": { + "empty_result_state": { + "name": "empty_result_state", + "type": "AlertState", + "description": "Alert state if result is empty.", + "required": false + }, + "op": { + "name": "op", + "type": "AlertOperator", + "description": "Operator used for comparison in alert evaluation.", + "required": false + }, + "operand": { + "name": "operand", + "type": "*AlertConditionOperand", + "description": "Name of the column from the query result to use for comparison in alert\nevaluation.", + "required": false + }, + "threshold": { + "name": "threshold", + "type": "*AlertConditionThreshold", + "description": "Threshold value used for comparison in alert evaluation.", + "required": false + } + } + }, + "sql.AlertConditionOperand": { + "name": "AlertConditionOperand", + "package": "sql", + "description": "", + "fields": { + "column": { + "name": "column", + "type": "*AlertOperandColumn", + "description": "", + "required": false + } + } + }, + "sql.AlertConditionThreshold": { + "name": "AlertConditionThreshold", + "package": "sql", + "description": "", + "fields": { + "value": { + "name": "value", + "type": "*AlertOperandValue", + "description": "", + "required": false + } + } + }, + "sql.AlertEvaluationState": { + "name": "AlertEvaluationState", + "package": "sql", + "description": "UNSPECIFIED - default unspecify value for proto enum, do not use it in the code\nUNKNOWN - alert not yet evaluated\nTRIGGERED - alert is triggered\nOK - alert is not triggered\nERROR - alert evaluation failed", + "fields": {} + }, + "sql.AlertOperandColumn": { + "name": "AlertOperandColumn", + "package": "sql", + "description": "", + "fields": { + "name": { + "name": "name", + "type": "string", + "description": "", + "required": false + } + } + }, + "sql.AlertOperandValue": { + "name": "AlertOperandValue", + "package": "sql", + "description": "", + "fields": { + "bool_value": { + "name": "bool_value", + "type": "bool", + "description": "", + "required": false + }, + "double_value": { + "name": "double_value", + "type": "float64", + "description": "", + "required": false + }, + "string_value": { + 
"name": "string_value", + "type": "string", + "description": "", + "required": false + } + } + }, + "sql.AlertOptions": { + "name": "AlertOptions", + "package": "sql", + "description": "Alert configuration options.", + "fields": { + "column": { + "name": "column", + "type": "string", + "description": "Name of column in the query result to compare in alert evaluation.", + "required": false + }, + "custom_body": { + "name": "custom_body", + "type": "string", + "description": "Custom body of alert notification, if it exists. See [here] for custom\ntemplating instructions.\n\n[here]: https://docs.databricks.com/sql/user/alerts/index.html", + "required": false + }, + "custom_subject": { + "name": "custom_subject", + "type": "string", + "description": "Custom subject of alert notification, if it exists. This includes email\nsubject, Slack notification header, etc. See [here] for custom templating\ninstructions.\n\n[here]: https://docs.databricks.com/sql/user/alerts/index.html", + "required": false + }, + "empty_result_state": { + "name": "empty_result_state", + "type": "AlertOptionsEmptyResultState", + "description": "State that alert evaluates to when query result is empty.", + "required": false + }, + "muted": { + "name": "muted", + "type": "bool", + "description": "Whether or not the alert is muted. If an alert is muted, it will not\nnotify users and notification destinations when triggered.", + "required": false + }, + "op": { + "name": "op", + "type": "string", + "description": "Operator used to compare in alert evaluation: `\u003e`, `\u003e=`, `\u003c`, `\u003c=`, `==`,\n`!=`", + "required": false + }, + "value": { + "name": "value", + "type": "any", + "description": "Value used to compare in alert evaluation. Supported types include\nstrings (eg. 'foobar'), floats (eg. 123.4), and booleans (true).", + "required": false + } + } + }, + "sql.AlertQuery": { + "name": "AlertQuery", + "package": "sql", + "description": "", + "fields": { + "created_at": { + "name": "created_at", + "type": "string", + "description": "The timestamp when this query was created.", + "required": false + }, + "data_source_id": { + "name": "data_source_id", + "type": "string", + "description": "Data source ID maps to the ID of the data source used by the resource and\nis distinct from the warehouse ID. [Learn more]\n\n[Learn more]: https://docs.databricks.com/api/workspace/datasources/list", + "required": false + }, + "description": { + "name": "description", + "type": "string", + "description": "General description that conveys additional information about this query\nsuch as usage notes.", + "required": false + }, + "id": { + "name": "id", + "type": "string", + "description": "Query ID.", + "required": false + }, + "is_archived": { + "name": "is_archived", + "type": "bool", + "description": "Indicates whether the query is trashed. Trashed queries can't be used in\ndashboards, or appear in search results. If this boolean is `true`, the\n`options` property for this query includes a `moved_to_trash_at`\ntimestamp. Trashed queries are permanently deleted after 30 days.", + "required": false + }, + "is_draft": { + "name": "is_draft", + "type": "bool", + "description": "Whether the query is a draft. Draft queries only appear in list views for\ntheir owners. Visualizations from draft queries cannot appear on\ndashboards.", + "required": false + }, + "is_safe": { + "name": "is_safe", + "type": "bool", + "description": "Text parameter types are not safe from SQL injection for all types of\ndata source. 
Set this Boolean parameter to `true` if a query either does\nnot use any text type parameters or uses a data source type where text\ntype parameters are handled safely.", + "required": false + }, + "name": { + "name": "name", + "type": "string", + "description": "The title of this query that appears in list views, widget headings, and\non the query page.", + "required": false + }, + "options": { + "name": "options", + "type": "*QueryOptions", + "description": "", + "required": false + }, + "query": { + "name": "query", + "type": "string", + "description": "The text of the query to be run.", + "required": false + }, + "tags": { + "name": "tags", + "type": "[]string", + "description": "", + "required": false + }, + "updated_at": { + "name": "updated_at", + "type": "string", + "description": "The timestamp at which this query was last updated.", + "required": false + }, + "user_id": { + "name": "user_id", + "type": "int", + "description": "The ID of the user who owns the query.", + "required": false + } + } + }, + "sql.AlertV2": { + "name": "AlertV2", + "package": "sql", + "description": "", + "fields": { + "create_time": { + "name": "create_time", + "type": "string", + "description": "The timestamp indicating when the alert was created.", + "required": false + }, + "custom_description": { + "name": "custom_description", + "type": "string", + "description": "Custom description for the alert. support mustache template.", + "required": false + }, + "custom_summary": { + "name": "custom_summary", + "type": "string", + "description": "Custom summary for the alert. support mustache template.", + "required": false + }, + "display_name": { + "name": "display_name", + "type": "string", + "description": "The display name of the alert.", + "required": false + }, + "effective_run_as": { + "name": "effective_run_as", + "type": "*AlertV2RunAs", + "description": "The actual identity that will be used to execute the alert. This is an\noutput-only field that shows the resolved run-as identity after applying\npermissions and defaults.", + "required": false + }, + "evaluation": { + "name": "evaluation", + "type": "AlertV2Evaluation", + "description": "", + "required": false + }, + "id": { + "name": "id", + "type": "string", + "description": "UUID identifying the alert.", + "required": false + }, + "lifecycle_state": { + "name": "lifecycle_state", + "type": "AlertLifecycleState", + "description": "Indicates whether the query is trashed.", + "required": false + }, + "owner_user_name": { + "name": "owner_user_name", + "type": "string", + "description": "The owner's username. This field is set to \"Unavailable\" if the user has\nbeen deleted.", + "required": false + }, + "parent_path": { + "name": "parent_path", + "type": "string", + "description": "The workspace path of the folder containing the alert. Can only be set on\ncreate, and cannot be updated.", + "required": false + }, + "query_text": { + "name": "query_text", + "type": "string", + "description": "Text of the query to be run.", + "required": false + }, + "run_as": { + "name": "run_as", + "type": "*AlertV2RunAs", + "description": "Specifies the identity that will be used to run the alert. This field\nallows you to configure alerts to run as a specific user or service\nprincipal. - For user identity: Set `user_name` to the email of an active\nworkspace user. Users can only set this to their own email. - For service\nprincipal: Set `service_principal_name` to the application ID. Requires\nthe `servicePrincipal/user` role. 
If not specified, the alert will run as\nthe request user.", + "required": false + }, + "run_as_user_name": { + "name": "run_as_user_name", + "type": "string", + "description": "The run as username or application ID of service principal. On Create and\nUpdate, this field can be set to application ID of an active service\nprincipal. Setting this field requires the servicePrincipal/user role.\nDeprecated: Use `run_as` field instead. This field will be removed in a\nfuture release.", + "required": false + }, + "schedule": { + "name": "schedule", + "type": "CronSchedule", + "description": "", + "required": false + }, + "update_time": { + "name": "update_time", + "type": "string", + "description": "The timestamp indicating when the alert was updated.", + "required": false + }, + "warehouse_id": { + "name": "warehouse_id", + "type": "string", + "description": "ID of the SQL warehouse attached to the alert.", + "required": false + } + } + }, + "sql.AlertV2Evaluation": { + "name": "AlertV2Evaluation", + "package": "sql", + "description": "", + "fields": { + "comparison_operator": { + "name": "comparison_operator", + "type": "ComparisonOperator", + "description": "Operator used for comparison in alert evaluation.", + "required": false + }, + "empty_result_state": { + "name": "empty_result_state", + "type": "AlertEvaluationState", + "description": "Alert state if result is empty. Please avoid setting this field to be\n`UNKNOWN` because `UNKNOWN` state is planned to be deprecated.", + "required": false + }, + "last_evaluated_at": { + "name": "last_evaluated_at", + "type": "string", + "description": "Timestamp of the last evaluation.", + "required": false + }, + "notification": { + "name": "notification", + "type": "*AlertV2Notification", + "description": "User or Notification Destination to notify when alert is triggered.", + "required": false + }, + "source": { + "name": "source", + "type": "AlertV2OperandColumn", + "description": "Source column from result to use to evaluate alert", + "required": false + }, + "state": { + "name": "state", + "type": "AlertEvaluationState", + "description": "Latest state of alert evaluation.", + "required": false + }, + "threshold": { + "name": "threshold", + "type": "*AlertV2Operand", + "description": "Threshold to user for alert evaluation, can be a column or a value.", + "required": false + } + } + }, + "sql.AlertV2Notification": { + "name": "AlertV2Notification", + "package": "sql", + "description": "", + "fields": { + "notify_on_ok": { + "name": "notify_on_ok", + "type": "bool", + "description": "Whether to notify alert subscribers when alert returns back to normal.", + "required": false + }, + "retrigger_seconds": { + "name": "retrigger_seconds", + "type": "int", + "description": "Number of seconds an alert waits after being triggered before it is\nallowed to send another notification. 
If set to 0 or omitted, the alert\nwill not send any further notifications after the first trigger Setting\nthis value to 1 allows the alert to send a notification on every\nevaluation where the condition is met, effectively making it always\nretrigger for notification purposes.", + "required": false + }, + "subscriptions": { + "name": "subscriptions", + "type": "[]AlertV2Subscription", + "description": "", + "required": false + } + } + }, + "sql.AlertV2Operand": { + "name": "AlertV2Operand", + "package": "sql", + "description": "", + "fields": { + "column": { + "name": "column", + "type": "*AlertV2OperandColumn", + "description": "", + "required": false + }, + "value": { + "name": "value", + "type": "*AlertV2OperandValue", + "description": "", + "required": false + } + } + }, + "sql.AlertV2OperandColumn": { + "name": "AlertV2OperandColumn", + "package": "sql", + "description": "", + "fields": { + "aggregation": { + "name": "aggregation", + "type": "Aggregation", + "description": "If not set, the behavior is equivalent to using `First row` in the UI.", + "required": false + }, + "display": { + "name": "display", + "type": "string", + "description": "", + "required": false + }, + "name": { + "name": "name", + "type": "string", + "description": "", + "required": false + } + } + }, + "sql.AlertV2OperandValue": { + "name": "AlertV2OperandValue", + "package": "sql", + "description": "", + "fields": { + "bool_value": { + "name": "bool_value", + "type": "bool", + "description": "", + "required": false + }, + "double_value": { + "name": "double_value", + "type": "float64", + "description": "", + "required": false + }, + "string_value": { + "name": "string_value", + "type": "string", + "description": "", + "required": false + } + } + }, + "sql.AlertV2RunAs": { + "name": "AlertV2RunAs", + "package": "sql", + "description": "", + "fields": { + "service_principal_name": { + "name": "service_principal_name", + "type": "string", + "description": "Application ID of an active service principal. Setting this field\nrequires the `servicePrincipal/user` role.", + "required": false + }, + "user_name": { + "name": "user_name", + "type": "string", + "description": "The email of an active workspace user. Can only set this field to their\nown email.", + "required": false + } + } + }, + "sql.AlertV2Subscription": { + "name": "AlertV2Subscription", + "package": "sql", + "description": "", + "fields": { + "destination_id": { + "name": "destination_id", + "type": "string", + "description": "", + "required": false + }, + "user_email": { + "name": "user_email", + "type": "string", + "description": "", + "required": false + } + } + }, + "sql.BaseChunkInfo": { + "name": "BaseChunkInfo", + "package": "sql", + "description": "", + "fields": { + "byte_count": { + "name": "byte_count", + "type": "int64", + "description": "The number of bytes in the result chunk. 
This field is not available when\nusing `INLINE` disposition.", + "required": false + }, + "chunk_index": { + "name": "chunk_index", + "type": "int", + "description": "The position within the sequence of result set chunks.", + "required": false + }, + "row_count": { + "name": "row_count", + "type": "int64", + "description": "The number of rows within the result chunk.", + "required": false + }, + "row_offset": { + "name": "row_offset", + "type": "int64", + "description": "The starting row offset within the result set.", + "required": false + } + } + }, + "sql.Channel": { + "name": "Channel", + "package": "sql", + "description": "Configures the channel name and DBSQL version of the warehouse. CHANNEL_NAME_CUSTOM should be chosen only when `dbsql_version` is specified.", + "fields": { + "dbsql_version": { + "name": "dbsql_version", + "type": "string", + "description": "", + "required": false + }, + "name": { + "name": "name", + "type": "ChannelName", + "description": "", + "required": false + } + } + }, + "sql.ChannelInfo": { + "name": "ChannelInfo", + "package": "sql", + "description": "Details about a Channel.", + "fields": { + "dbsql_version": { + "name": "dbsql_version", + "type": "string", + "description": "DB SQL Version the Channel is mapped to.", + "required": false + }, + "name": { + "name": "name", + "type": "ChannelName", + "description": "Name of the channel", + "required": false + } + } + }, + "sql.ClientConfig": { + "name": "ClientConfig", + "package": "sql", + "description": "", + "fields": { + "allow_custom_js_visualizations": { + "name": "allow_custom_js_visualizations", + "type": "bool", + "description": "", + "required": false + }, + "allow_downloads": { + "name": "allow_downloads", + "type": "bool", + "description": "", + "required": false + }, + "allow_external_shares": { + "name": "allow_external_shares", + "type": "bool", + "description": "", + "required": false + }, + "allow_subscriptions": { + "name": "allow_subscriptions", + "type": "bool", + "description": "", + "required": false + }, + "date_format": { + "name": "date_format", + "type": "string", + "description": "", + "required": false + }, + "date_time_format": { + "name": "date_time_format", + "type": "string", + "description": "", + "required": false + }, + "disable_publish": { + "name": "disable_publish", + "type": "bool", + "description": "", + "required": false + }, + "enable_legacy_autodetect_types": { + "name": "enable_legacy_autodetect_types", + "type": "bool", + "description": "", + "required": false + }, + "feature_show_permissions_control": { + "name": "feature_show_permissions_control", + "type": "bool", + "description": "", + "required": false + }, + "hide_plotly_mode_bar": { + "name": "hide_plotly_mode_bar", + "type": "bool", + "description": "", + "required": false + } + } + }, + "sql.ColumnInfo": { + "name": "ColumnInfo", + "package": "sql", + "description": "", + "fields": { + "name": { + "name": "name", + "type": "string", + "description": "The name of the column.", + "required": false + }, + "position": { + "name": "position", + "type": "int", + "description": "The ordinal position of the column (starting at position 0).", + "required": false + }, + "type_interval_type": { + "name": "type_interval_type", + "type": "string", + "description": "The format of the interval type.", + "required": false + }, + "type_name": { + "name": "type_name", + "type": "ColumnInfoTypeName", + "description": "The name of the base data type. 
This doesn't include details for complex\ntypes such as STRUCT, MAP or ARRAY.", + "required": false + }, + "type_precision": { + "name": "type_precision", + "type": "int", + "description": "Specifies the number of digits in a number. This applies to the DECIMAL\ntype.", + "required": false + }, + "type_scale": { + "name": "type_scale", + "type": "int", + "description": "Specifies the number of digits to the right of the decimal point in a\nnumber. This applies to the DECIMAL type.", + "required": false + }, + "type_text": { + "name": "type_text", + "type": "string", + "description": "The full SQL type specification.", + "required": false + } + } + }, + "sql.CreateAlert": { + "name": "CreateAlert", + "package": "sql", + "description": "", + "fields": { + "name": { + "name": "name", + "type": "string", + "description": "Name of the alert.", + "required": false + }, + "options": { + "name": "options", + "type": "AlertOptions", + "description": "Alert configuration options.", + "required": false + }, + "parent": { + "name": "parent", + "type": "string", + "description": "The identifier of the workspace folder containing the object.", + "required": false + }, + "query_id": { + "name": "query_id", + "type": "string", + "description": "Query ID.", + "required": false + }, + "rearm": { + "name": "rearm", + "type": "int", + "description": "Number of seconds after being triggered before the alert rearms itself\nand can be triggered again. If `null`, alert will never be triggered\nagain.", + "required": false + } + } + }, + "sql.CreateAlertRequest": { + "name": "CreateAlertRequest", + "package": "sql", + "description": "", + "fields": { + "alert": { + "name": "alert", + "type": "*CreateAlertRequestAlert", + "description": "", + "required": false + }, + "auto_resolve_display_name": { + "name": "auto_resolve_display_name", + "type": "bool", + "description": "If true, automatically resolve alert display name conflicts. Otherwise,\nfail the request if the alert's display name conflicts with an existing\nalert's display name.", + "required": false + } + } + }, + "sql.CreateAlertRequestAlert": { + "name": "CreateAlertRequestAlert", + "package": "sql", + "description": "", + "fields": { + "condition": { + "name": "condition", + "type": "*AlertCondition", + "description": "Trigger conditions of the alert.", + "required": false + }, + "custom_body": { + "name": "custom_body", + "type": "string", + "description": "Custom body of alert notification, if it exists. See [here] for custom\ntemplating instructions.\n\n[here]: https://docs.databricks.com/sql/user/alerts/index.html", + "required": false + }, + "custom_subject": { + "name": "custom_subject", + "type": "string", + "description": "Custom subject of alert notification, if it exists. This can include\nemail subject entries and Slack notification headers, for example. 
See\n[here] for custom templating instructions.\n\n[here]: https://docs.databricks.com/sql/user/alerts/index.html", + "required": false + }, + "display_name": { + "name": "display_name", + "type": "string", + "description": "The display name of the alert.", + "required": false + }, + "notify_on_ok": { + "name": "notify_on_ok", + "type": "bool", + "description": "Whether to notify alert subscribers when alert returns back to normal.", + "required": false + }, + "parent_path": { + "name": "parent_path", + "type": "string", + "description": "The workspace path of the folder containing the alert.", + "required": false + }, + "query_id": { + "name": "query_id", + "type": "string", + "description": "UUID of the query attached to the alert.", + "required": false + }, + "seconds_to_retrigger": { + "name": "seconds_to_retrigger", + "type": "int", + "description": "Number of seconds an alert must wait after being triggered to rearm\nitself. After rearming, it can be triggered again. If 0 or not specified,\nthe alert will not be triggered again.", + "required": false + } + } + }, + "sql.CreateAlertV2Request": { + "name": "CreateAlertV2Request", + "package": "sql", + "description": "", + "fields": { + "alert": { + "name": "alert", + "type": "AlertV2", + "description": "", + "required": false + } + } + }, + "sql.CreateQueryRequest": { + "name": "CreateQueryRequest", + "package": "sql", + "description": "", + "fields": { + "auto_resolve_display_name": { + "name": "auto_resolve_display_name", + "type": "bool", + "description": "If true, automatically resolve query display name conflicts. Otherwise,\nfail the request if the query's display name conflicts with an existing\nquery's display name.", + "required": false + }, + "query": { + "name": "query", + "type": "*CreateQueryRequestQuery", + "description": "", + "required": false + } + } + }, + "sql.CreateQueryRequestQuery": { + "name": "CreateQueryRequestQuery", + "package": "sql", + "description": "", + "fields": { + "apply_auto_limit": { + "name": "apply_auto_limit", + "type": "bool", + "description": "Whether to apply a 1000 row limit to the query result.", + "required": false + }, + "catalog": { + "name": "catalog", + "type": "string", + "description": "Name of the catalog where this query will be executed.", + "required": false + }, + "description": { + "name": "description", + "type": "string", + "description": "General description that conveys additional information about this query\nsuch as usage notes.", + "required": false + }, + "display_name": { + "name": "display_name", + "type": "string", + "description": "Display name of the query that appears in list views, widget headings,\nand on the query page.", + "required": false + }, + "parameters": { + "name": "parameters", + "type": "[]QueryParameter", + "description": "List of query parameter definitions.", + "required": false + }, + "parent_path": { + "name": "parent_path", + "type": "string", + "description": "Workspace path of the workspace folder containing the object.", + "required": false + }, + "query_text": { + "name": "query_text", + "type": "string", + "description": "Text of the query to be run.", + "required": false + }, + "run_as_mode": { + "name": "run_as_mode", + "type": "RunAsMode", + "description": "Sets the \"Run as\" role for the object.", + "required": false + }, + "schema": { + "name": "schema", + "type": "string", + "description": "Name of the schema where this query will be executed.", + "required": false + }, + "tags": { + "name": "tags", + "type": "[]string", + 
"description": "", + "required": false + }, + "warehouse_id": { + "name": "warehouse_id", + "type": "string", + "description": "ID of the SQL warehouse attached to the query.", + "required": false + } + } + }, + "sql.CreateQueryVisualizationsLegacyRequest": { + "name": "CreateQueryVisualizationsLegacyRequest", + "package": "sql", + "description": "Add visualization to a query", + "fields": { + "description": { + "name": "description", + "type": "string", + "description": "A short description of this visualization. This is not displayed in the\nUI.", + "required": false + }, + "name": { + "name": "name", + "type": "string", + "description": "The name of the visualization that appears on dashboards and the query\nscreen.", + "required": false + }, + "options": { + "name": "options", + "type": "any", + "description": "The options object varies widely from one visualization type to the next\nand is unsupported. Databricks does not recommend modifying visualization\nsettings in JSON.", + "required": false + }, + "query_id": { + "name": "query_id", + "type": "string", + "description": "The identifier returned by :method:queries/create", + "required": false + }, + "type": { + "name": "type", + "type": "string", + "description": "The type of visualization: chart, table, pivot table, and so on.", + "required": false + } + } + }, + "sql.CreateVisualizationRequest": { + "name": "CreateVisualizationRequest", + "package": "sql", + "description": "", + "fields": { + "visualization": { + "name": "visualization", + "type": "*CreateVisualizationRequestVisualization", + "description": "", + "required": false + } + } + }, + "sql.CreateVisualizationRequestVisualization": { + "name": "CreateVisualizationRequestVisualization", + "package": "sql", + "description": "", + "fields": { + "display_name": { + "name": "display_name", + "type": "string", + "description": "The display name of the visualization.", + "required": false + }, + "query_id": { + "name": "query_id", + "type": "string", + "description": "UUID of the query that the visualization is attached to.", + "required": false + }, + "serialized_options": { + "name": "serialized_options", + "type": "string", + "description": "The visualization options varies widely from one visualization type to\nthe next and is unsupported. Databricks does not recommend modifying\nvisualization options directly.", + "required": false + }, + "serialized_query_plan": { + "name": "serialized_query_plan", + "type": "string", + "description": "The visualization query plan varies widely from one visualization type to\nthe next and is unsupported. 
Databricks does not recommend modifying the\nvisualization query plan directly.", + "required": false + }, + "type": { + "name": "type", + "type": "string", + "description": "The type of visualization: counter, table, funnel, and so on.", + "required": false + } + } + }, + "sql.CreateWarehouseRequest": { + "name": "CreateWarehouseRequest", + "package": "sql", + "description": "Creates a new SQL warehouse.", + "fields": { + "auto_stop_mins": { + "name": "auto_stop_mins", + "type": "int", + "description": "The amount of time in minutes that a SQL warehouse must be idle (i.e., no\nRUNNING queries) before it is automatically stopped.\n\nSupported values: - Must be == 0 or \u003e= 10 mins - 0 indicates no autostop.\n\nDefaults to 120 mins", + "required": false + }, + "channel": { + "name": "channel", + "type": "*Channel", + "description": "Channel Details", + "required": false + }, + "cluster_size": { + "name": "cluster_size", + "type": "string", + "description": "Size of the clusters allocated for this warehouse. Increasing the size of\na spark cluster allows you to run larger queries on it. If you want to\nincrease the number of concurrent queries, please tune max_num_clusters.\n\nSupported values: - 2X-Small - X-Small - Small - Medium - Large - X-Large\n- 2X-Large - 3X-Large - 4X-Large", + "required": false + }, + "creator_name": { + "name": "creator_name", + "type": "string", + "description": "warehouse creator name", + "required": false + }, + "enable_photon": { + "name": "enable_photon", + "type": "bool", + "description": "Configures whether the warehouse should use Photon optimized clusters.\n\nDefaults to false.", + "required": false + }, + "enable_serverless_compute": { + "name": "enable_serverless_compute", + "type": "bool", + "description": "Configures whether the warehouse should use serverless compute", + "required": false + }, + "instance_profile_arn": { + "name": "instance_profile_arn", + "type": "string", + "description": "Deprecated. Instance profile used to pass IAM role to the cluster", + "required": false + }, + "max_num_clusters": { + "name": "max_num_clusters", + "type": "int", + "description": "Maximum number of clusters that the autoscaler will create to handle\nconcurrent queries.\n\nSupported values: - Must be \u003e= min_num_clusters - Must be \u003c= 40.\n\nDefaults to min_clusters if unset.", + "required": false + }, + "min_num_clusters": { + "name": "min_num_clusters", + "type": "int", + "description": "Minimum number of available clusters that will be maintained for this SQL\nwarehouse. Increasing this will ensure that a larger number of clusters\nare always running and therefore may reduce the cold start time for new\nqueries. This is similar to reserved vs. revocable cores in a resource\nmanager.\n\nSupported values: - Must be \u003e 0 - Must be \u003c= min(max_num_clusters, 30)\n\nDefaults to 1", + "required": false + }, + "name": { + "name": "name", + "type": "string", + "description": "Logical name for the cluster.\n\nSupported values: - Must be unique within an org. 
- Must be less than 100\ncharacters.", + "required": false + }, + "spot_instance_policy": { + "name": "spot_instance_policy", + "type": "SpotInstancePolicy", + "description": "Configurations whether the endpoint should use spot instances.", + "required": false + }, + "tags": { + "name": "tags", + "type": "*EndpointTags", + "description": "A set of key-value pairs that will be tagged on all resources (e.g., AWS\ninstances and EBS volumes) associated with this SQL warehouse.\n\nSupported values: - Number of tags \u003c 45.", + "required": false + }, + "warehouse_type": { + "name": "warehouse_type", + "type": "CreateWarehouseRequestWarehouseType", + "description": "Warehouse type: `PRO` or `CLASSIC`. If you want to use serverless\ncompute, you must set to `PRO` and also set the field\n`enable_serverless_compute` to `true`.", + "required": false + } + } + }, + "sql.CreateWarehouseResponse": { + "name": "CreateWarehouseResponse", + "package": "sql", + "description": "", + "fields": { + "id": { + "name": "id", + "type": "string", + "description": "Id for the SQL warehouse. This value is unique across all SQL warehouses.", + "required": false + } + } + }, + "sql.CreateWidget": { + "name": "CreateWidget", + "package": "sql", + "description": "", + "fields": { + "dashboard_id": { + "name": "dashboard_id", + "type": "string", + "description": "Dashboard ID returned by :method:dashboards/create.", + "required": false + }, + "options": { + "name": "options", + "type": "WidgetOptions", + "description": "", + "required": false + }, + "text": { + "name": "text", + "type": "string", + "description": "If this is a textbox widget, the application displays this text. This\nfield is ignored if the widget contains a visualization in the\n`visualization` field.", + "required": false + }, + "visualization_id": { + "name": "visualization_id", + "type": "string", + "description": "Query Vizualization ID returned by :method:queryvisualizations/create.", + "required": false + }, + "width": { + "name": "width", + "type": "int", + "description": "Width of a widget", + "required": false + } + } + }, + "sql.CronSchedule": { + "name": "CronSchedule", + "package": "sql", + "description": "", + "fields": { + "pause_status": { + "name": "pause_status", + "type": "SchedulePauseStatus", + "description": "Indicate whether this schedule is paused or not.", + "required": false + }, + "quartz_cron_schedule": { + "name": "quartz_cron_schedule", + "type": "string", + "description": "A cron expression using quartz syntax that specifies the schedule for\nthis pipeline. Should use the quartz format described here:\nhttp://www.quartz-scheduler.org/documentation/quartz-2.1.7/tutorials/tutorial-lesson-06.html", + "required": false + }, + "timezone_id": { + "name": "timezone_id", + "type": "string", + "description": "A Java timezone id. The schedule will be resolved using this timezone.\nThis will be combined with the quartz_cron_schedule to determine the\nschedule. 
See\nhttps://docs.databricks.com/sql/language-manual/sql-ref-syntax-aux-conf-mgmt-set-timezone.html\nfor details.", + "required": false + } + } + }, + "sql.Dashboard": { + "name": "Dashboard", + "package": "sql", + "description": "A JSON representing a dashboard containing widgets of visualizations and text\nboxes.", + "fields": { + "can_edit": { + "name": "can_edit", + "type": "bool", + "description": "Whether the authenticated user can edit the query definition.", + "required": false + }, + "created_at": { + "name": "created_at", + "type": "string", + "description": "Timestamp when this dashboard was created.", + "required": false + }, + "dashboard_filters_enabled": { + "name": "dashboard_filters_enabled", + "type": "bool", + "description": "In the web application, query filters that share a name are coupled to a\nsingle selection box if this value is `true`.", + "required": false + }, + "id": { + "name": "id", + "type": "string", + "description": "The ID for this dashboard.", + "required": false + }, + "is_archived": { + "name": "is_archived", + "type": "bool", + "description": "Indicates whether a dashboard is trashed. Trashed dashboards won't appear\nin list views. If this boolean is `true`, the `options` property for this\ndashboard includes a `moved_to_trash_at` timestamp. Items in trash are\npermanently deleted after 30 days.", + "required": false + }, + "is_draft": { + "name": "is_draft", + "type": "bool", + "description": "Whether a dashboard is a draft. Draft dashboards only appear in list\nviews for their owners.", + "required": false + }, + "is_favorite": { + "name": "is_favorite", + "type": "bool", + "description": "Indicates whether this query object appears in the current user's\nfavorites list. This flag determines whether the star icon for favorites\nis selected.", + "required": false + }, + "name": { + "name": "name", + "type": "string", + "description": "The title of the dashboard that appears in list views and at the top of\nthe dashboard page.", + "required": false + }, + "options": { + "name": "options", + "type": "*DashboardOptions", + "description": "", + "required": false + }, + "parent": { + "name": "parent", + "type": "string", + "description": "The identifier of the workspace folder containing the object.", + "required": false + }, + "permission_tier": { + "name": "permission_tier", + "type": "PermissionLevel", + "description": "* `CAN_VIEW`: Can view the query * `CAN_RUN`: Can run the query *\n`CAN_EDIT`: Can edit the query * `CAN_MANAGE`: Can manage the query", + "required": false + }, + "slug": { + "name": "slug", + "type": "string", + "description": "URL slug. Usually mirrors the query name with dashes (`-`) instead of\nspaces. 
Appears in the URL for this query.", + "required": false + }, + "tags": { + "name": "tags", + "type": "[]string", + "description": "", + "required": false + }, + "updated_at": { + "name": "updated_at", + "type": "string", + "description": "Timestamp when this dashboard was last updated.", + "required": false + }, + "user": { + "name": "user", + "type": "*User", + "description": "", + "required": false + }, + "user_id": { + "name": "user_id", + "type": "int", + "description": "The ID of the user who owns the dashboard.", + "required": false + }, + "widgets": { + "name": "widgets", + "type": "[]Widget", + "description": "", + "required": false + } + } + }, + "sql.DashboardEditContent": { + "name": "DashboardEditContent", + "package": "sql", + "description": "", + "fields": { + "name": { + "name": "name", + "type": "string", + "description": "The title of this dashboard that appears in list views and at the top of\nthe dashboard page.", + "required": false + }, + "run_as_role": { + "name": "run_as_role", + "type": "RunAsRole", + "description": "Sets the **Run as** role for the object. Must be set to one of `\"viewer\"`\n(signifying \"run as viewer\" behavior) or `\"owner\"` (signifying \"run as\nowner\" behavior)", + "required": false + }, + "tags": { + "name": "tags", + "type": "[]string", + "description": "", + "required": false + } + } + }, + "sql.DashboardOptions": { + "name": "DashboardOptions", + "package": "sql", + "description": "", + "fields": { + "moved_to_trash_at": { + "name": "moved_to_trash_at", + "type": "string", + "description": "The timestamp when this dashboard was moved to trash. Only present when\nthe `is_archived` property is `true`. Trashed items are deleted after\nthirty days.", + "required": false + } + } + }, + "sql.DataSource": { + "name": "DataSource", + "package": "sql", + "description": "A JSON object representing a DBSQL data source / SQL warehouse.", + "fields": { + "id": { + "name": "id", + "type": "string", + "description": "Data source ID maps to the ID of the data source used by the resource and\nis distinct from the warehouse ID. [Learn more]\n\n[Learn more]: https://docs.databricks.com/api/workspace/datasources/list", + "required": false + }, + "name": { + "name": "name", + "type": "string", + "description": "The string name of this data source / SQL warehouse as it appears in the\nDatabricks SQL web application.", + "required": false + }, + "pause_reason": { + "name": "pause_reason", + "type": "string", + "description": "Reserved for internal use.", + "required": false + }, + "paused": { + "name": "paused", + "type": "int", + "description": "Reserved for internal use.", + "required": false + }, + "supports_auto_limit": { + "name": "supports_auto_limit", + "type": "bool", + "description": "Reserved for internal use.", + "required": false + }, + "syntax": { + "name": "syntax", + "type": "string", + "description": "Reserved for internal use.", + "required": false + }, + "type": { + "name": "type", + "type": "string", + "description": "The type of data source. 
For SQL warehouses, this will be\n`databricks_internal`.", + "required": false + }, + "view_only": { + "name": "view_only", + "type": "bool", + "description": "Reserved for internal use.", + "required": false + }, + "warehouse_id": { + "name": "warehouse_id", + "type": "string", + "description": "The ID of the associated SQL warehouse, if this data source is backed by\na SQL warehouse.", + "required": false + } + } + }, + "sql.DateRange": { + "name": "DateRange", + "package": "sql", + "description": "", + "fields": { + "end": { + "name": "end", + "type": "string", + "description": "", + "required": false + }, + "start": { + "name": "start", + "type": "string", + "description": "", + "required": false + } + } + }, + "sql.DateRangeValue": { + "name": "DateRangeValue", + "package": "sql", + "description": "", + "fields": { + "date_range_value": { + "name": "date_range_value", + "type": "*DateRange", + "description": "Manually specified date-time range value.", + "required": false + }, + "dynamic_date_range_value": { + "name": "dynamic_date_range_value", + "type": "DateRangeValueDynamicDateRange", + "description": "Dynamic date-time range value based on current date-time.", + "required": false + }, + "precision": { + "name": "precision", + "type": "DatePrecision", + "description": "Date-time precision to format the value into when the query is run.\nDefaults to DAY_PRECISION (YYYY-MM-DD).", + "required": false + }, + "start_day_of_week": { + "name": "start_day_of_week", + "type": "int", + "description": "", + "required": false + } + } + }, + "sql.DateValue": { + "name": "DateValue", + "package": "sql", + "description": "", + "fields": { + "date_value": { + "name": "date_value", + "type": "string", + "description": "Manually specified date-time value.", + "required": false + }, + "dynamic_date_value": { + "name": "dynamic_date_value", + "type": "DateValueDynamicDate", + "description": "Dynamic date-time value based on current date-time.", + "required": false + }, + "precision": { + "name": "precision", + "type": "DatePrecision", + "description": "Date-time precision to format the value into when the query is run.\nDefaults to DAY_PRECISION (YYYY-MM-DD).", + "required": false + } + } + }, + "sql.EditAlert": { + "name": "EditAlert", + "package": "sql", + "description": "", + "fields": { + "name": { + "name": "name", + "type": "string", + "description": "Name of the alert.", + "required": false + }, + "options": { + "name": "options", + "type": "AlertOptions", + "description": "Alert configuration options.", + "required": false + }, + "query_id": { + "name": "query_id", + "type": "string", + "description": "Query ID.", + "required": false + }, + "rearm": { + "name": "rearm", + "type": "int", + "description": "Number of seconds after being triggered before the alert rearms itself\nand can be triggered again. If `null`, alert will never be triggered\nagain.", + "required": false + } + } + }, + "sql.EditWarehouseRequest": { + "name": "EditWarehouseRequest", + "package": "sql", + "description": "This is an incremental edit functionality, so all fields except id are\noptional. If a field is set, the corresponding configuration in the SQL\nwarehouse is modified. If a field is unset, the existing configuration value\nin the SQL warehouse is retained. 
Thus, this API is not idempotent.", + "fields": { + "auto_stop_mins": { + "name": "auto_stop_mins", + "type": "int", + "description": "The amount of time in minutes that a SQL warehouse must be idle (i.e., no\nRUNNING queries) before it is automatically stopped.\n\nSupported values: - Must be == 0 or \u003e= 10 mins - 0 indicates no autostop.\n\nDefaults to 120 mins", + "required": false + }, + "channel": { + "name": "channel", + "type": "*Channel", + "description": "Channel Details", + "required": false + }, + "cluster_size": { + "name": "cluster_size", + "type": "string", + "description": "Size of the clusters allocated for this warehouse. Increasing the size of\na spark cluster allows you to run larger queries on it. If you want to\nincrease the number of concurrent queries, please tune max_num_clusters.\n\nSupported values: - 2X-Small - X-Small - Small - Medium - Large - X-Large\n- 2X-Large - 3X-Large - 4X-Large", + "required": false + }, + "creator_name": { + "name": "creator_name", + "type": "string", + "description": "warehouse creator name", + "required": false + }, + "enable_photon": { + "name": "enable_photon", + "type": "bool", + "description": "Configures whether the warehouse should use Photon optimized clusters.\n\nDefaults to false.", + "required": false + }, + "enable_serverless_compute": { + "name": "enable_serverless_compute", + "type": "bool", + "description": "Configures whether the warehouse should use serverless compute", + "required": false + }, + "instance_profile_arn": { + "name": "instance_profile_arn", + "type": "string", + "description": "Deprecated. Instance profile used to pass IAM role to the cluster", + "required": false + }, + "max_num_clusters": { + "name": "max_num_clusters", + "type": "int", + "description": "Maximum number of clusters that the autoscaler will create to handle\nconcurrent queries.\n\nSupported values: - Must be \u003e= min_num_clusters - Must be \u003c= 40.\n\nDefaults to min_clusters if unset.", + "required": false + }, + "min_num_clusters": { + "name": "min_num_clusters", + "type": "int", + "description": "Minimum number of available clusters that will be maintained for this SQL\nwarehouse. Increasing this will ensure that a larger number of clusters\nare always running and therefore may reduce the cold start time for new\nqueries. This is similar to reserved vs. revocable cores in a resource\nmanager.\n\nSupported values: - Must be \u003e 0 - Must be \u003c= min(max_num_clusters, 30)\n\nDefaults to 1", + "required": false + }, + "name": { + "name": "name", + "type": "string", + "description": "Logical name for the cluster.\n\nSupported values: - Must be unique within an org. - Must be less than 100\ncharacters.", + "required": false + }, + "spot_instance_policy": { + "name": "spot_instance_policy", + "type": "SpotInstancePolicy", + "description": "Configurations whether the endpoint should use spot instances.", + "required": false + }, + "tags": { + "name": "tags", + "type": "*EndpointTags", + "description": "A set of key-value pairs that will be tagged on all resources (e.g., AWS\ninstances and EBS volumes) associated with this SQL warehouse.\n\nSupported values: - Number of tags \u003c 45.", + "required": false + }, + "warehouse_type": { + "name": "warehouse_type", + "type": "EditWarehouseRequestWarehouseType", + "description": "Warehouse type: `PRO` or `CLASSIC`. 
If you want to use serverless\ncompute, you must set to `PRO` and also set the field\n`enable_serverless_compute` to `true`.", + "required": false + } + } + }, + "sql.EndpointConfPair": { + "name": "EndpointConfPair", + "package": "sql", + "description": "", + "fields": { + "key": { + "name": "key", + "type": "string", + "description": "", + "required": false + }, + "value": { + "name": "value", + "type": "string", + "description": "", + "required": false + } + } + }, + "sql.EndpointHealth": { + "name": "EndpointHealth", + "package": "sql", + "description": "", + "fields": { + "details": { + "name": "details", + "type": "string", + "description": "Details about errors that are causing current degraded/failed status.", + "required": false + }, + "failure_reason": { + "name": "failure_reason", + "type": "*TerminationReason", + "description": "The reason for failure to bring up clusters for this warehouse. This is\navailable when status is 'FAILED' and sometimes when it is DEGRADED.", + "required": false + }, + "message": { + "name": "message", + "type": "string", + "description": "Deprecated. split into summary and details for security", + "required": false + }, + "status": { + "name": "status", + "type": "Status", + "description": "Health status of the endpoint.", + "required": false + }, + "summary": { + "name": "summary", + "type": "string", + "description": "A short summary of the health status in case of degraded/failed\nwarehouses.", + "required": false + } + } + }, + "sql.EndpointInfo": { + "name": "EndpointInfo", + "package": "sql", + "description": "", + "fields": { + "auto_stop_mins": { + "name": "auto_stop_mins", + "type": "int", + "description": "The amount of time in minutes that a SQL warehouse must be idle (i.e., no\nRUNNING queries) before it is automatically stopped.\n\nSupported values: - Must be == 0 or \u003e= 10 mins - 0 indicates no autostop.\n\nDefaults to 120 mins", + "required": false + }, + "channel": { + "name": "channel", + "type": "*Channel", + "description": "Channel Details", + "required": false + }, + "cluster_size": { + "name": "cluster_size", + "type": "string", + "description": "Size of the clusters allocated for this warehouse. Increasing the size of\na spark cluster allows you to run larger queries on it. If you want to\nincrease the number of concurrent queries, please tune max_num_clusters.\n\nSupported values: - 2X-Small - X-Small - Small - Medium - Large - X-Large\n- 2X-Large - 3X-Large - 4X-Large", + "required": false + }, + "creator_name": { + "name": "creator_name", + "type": "string", + "description": "warehouse creator name", + "required": false + }, + "enable_photon": { + "name": "enable_photon", + "type": "bool", + "description": "Configures whether the warehouse should use Photon optimized clusters.\n\nDefaults to false.", + "required": false + }, + "enable_serverless_compute": { + "name": "enable_serverless_compute", + "type": "bool", + "description": "Configures whether the warehouse should use serverless compute", + "required": false + }, + "health": { + "name": "health", + "type": "*EndpointHealth", + "description": "Optional health status. Assume the warehouse is healthy if this field is\nnot set.", + "required": false + }, + "id": { + "name": "id", + "type": "string", + "description": "unique identifier for warehouse", + "required": false + }, + "instance_profile_arn": { + "name": "instance_profile_arn", + "type": "string", + "description": "Deprecated. 
Instance profile used to pass IAM role to the cluster", + "required": false + }, + "jdbc_url": { + "name": "jdbc_url", + "type": "string", + "description": "the jdbc connection string for this warehouse", + "required": false + }, + "max_num_clusters": { + "name": "max_num_clusters", + "type": "int", + "description": "Maximum number of clusters that the autoscaler will create to handle\nconcurrent queries.\n\nSupported values: - Must be \u003e= min_num_clusters - Must be \u003c= 40.\n\nDefaults to min_clusters if unset.", + "required": false + }, + "min_num_clusters": { + "name": "min_num_clusters", + "type": "int", + "description": "Minimum number of available clusters that will be maintained for this SQL\nwarehouse. Increasing this will ensure that a larger number of clusters\nare always running and therefore may reduce the cold start time for new\nqueries. This is similar to reserved vs. revocable cores in a resource\nmanager.\n\nSupported values: - Must be \u003e 0 - Must be \u003c= min(max_num_clusters, 30)\n\nDefaults to 1", + "required": false + }, + "name": { + "name": "name", + "type": "string", + "description": "Logical name for the cluster.\n\nSupported values: - Must be unique within an org. - Must be less than 100\ncharacters.", + "required": false + }, + "num_active_sessions": { + "name": "num_active_sessions", + "type": "int64", + "description": "Deprecated. current number of active sessions for the warehouse", + "required": false + }, + "num_clusters": { + "name": "num_clusters", + "type": "int", + "description": "current number of clusters running for the service", + "required": false + }, + "odbc_params": { + "name": "odbc_params", + "type": "*OdbcParams", + "description": "ODBC parameters for the SQL warehouse", + "required": false + }, + "spot_instance_policy": { + "name": "spot_instance_policy", + "type": "SpotInstancePolicy", + "description": "Configurations whether the endpoint should use spot instances.", + "required": false + }, + "state": { + "name": "state", + "type": "State", + "description": "state of the endpoint", + "required": false + }, + "tags": { + "name": "tags", + "type": "*EndpointTags", + "description": "A set of key-value pairs that will be tagged on all resources (e.g., AWS\ninstances and EBS volumes) associated with this SQL warehouse.\n\nSupported values: - Number of tags \u003c 45.", + "required": false + }, + "warehouse_type": { + "name": "warehouse_type", + "type": "EndpointInfoWarehouseType", + "description": "Warehouse type: `PRO` or `CLASSIC`. 
If you want to use serverless\ncompute, you must set to `PRO` and also set the field\n`enable_serverless_compute` to `true`.", + "required": false + } + } + }, + "sql.EndpointTagPair": { + "name": "EndpointTagPair", + "package": "sql", + "description": "", + "fields": { + "key": { + "name": "key", + "type": "string", + "description": "", + "required": false + }, + "value": { + "name": "value", + "type": "string", + "description": "", + "required": false + } + } + }, + "sql.EndpointTags": { + "name": "EndpointTags", + "package": "sql", + "description": "", + "fields": { + "custom_tags": { + "name": "custom_tags", + "type": "[]EndpointTagPair", + "description": "", + "required": false + } + } + }, + "sql.EnumValue": { + "name": "EnumValue", + "package": "sql", + "description": "", + "fields": { + "enum_options": { + "name": "enum_options", + "type": "string", + "description": "List of valid query parameter values, newline delimited.", + "required": false + }, + "multi_values_options": { + "name": "multi_values_options", + "type": "*MultiValuesOptions", + "description": "If specified, allows multiple values to be selected for this parameter.", + "required": false + }, + "values": { + "name": "values", + "type": "[]string", + "description": "List of selected query parameter values.", + "required": false + } + } + }, + "sql.ExecuteStatementRequest": { + "name": "ExecuteStatementRequest", + "package": "sql", + "description": "", + "fields": { + "byte_limit": { + "name": "byte_limit", + "type": "int64", + "description": "Applies the given byte limit to the statement's result size. Byte counts\nare based on internal data representations and might not match the final\nsize in the requested `format`. If the result was truncated due to the\nbyte limit, then `truncated` in the response is set to `true`. When using\n`EXTERNAL_LINKS` disposition, a default `byte_limit` of 100 GiB is\napplied if `byte_limit` is not explicitly set.", + "required": false + }, + "catalog": { + "name": "catalog", + "type": "string", + "description": "Sets default catalog for statement execution, similar to [`USE CATALOG`]\nin SQL.\n\n[`USE CATALOG`]: https://docs.databricks.com/sql/language-manual/sql-ref-syntax-ddl-use-catalog.html", + "required": false + }, + "disposition": { + "name": "disposition", + "type": "Disposition", + "description": "The fetch disposition provides two modes of fetching results: `INLINE`\nand `EXTERNAL_LINKS`.\n\nStatements executed with `INLINE` disposition will return result data\ninline, in `JSON_ARRAY` format, in a series of chunks. If a given\nstatement produces a result set with a size larger than 25 MiB, that\nstatement execution is aborted, and no result set will be available.\n\n**NOTE** Byte limits are computed based upon internal representations of\nthe result set data, and might not match the sizes visi...", + "required": false + }, + "format": { + "name": "format", + "type": "Format", + "description": "Statement execution supports three result formats: `JSON_ARRAY`\n(default), `ARROW_STREAM`, and `CSV`.\n\nImportant: The formats `ARROW_STREAM` and `CSV` are supported only with\n`EXTERNAL_LINKS` disposition. `JSON_ARRAY` is supported in `INLINE` and\n`EXTERNAL_LINKS` disposition.\n\nWhen specifying `format=JSON_ARRAY`, result data will be formatted as an\narray of arrays of values, where each value is either the *string\nrepresentation* of a value, or `null`. 
For example, the output of `SELECT\nconcat...", + "required": false + }, + "on_wait_timeout": { + "name": "on_wait_timeout", + "type": "ExecuteStatementRequestOnWaitTimeout", + "description": "When `wait_timeout \u003e 0s`, the call will block up to the specified time.\nIf the statement execution doesn't finish within this time,\n`on_wait_timeout` determines whether the execution should continue or be\ncanceled. When set to `CONTINUE`, the statement execution continues\nasynchronously and the call returns a statement ID which can be used for\npolling with :method:statementexecution/getStatement. When set to\n`CANCEL`, the statement execution is canceled and the call returns with a\n`CANCELED` ...", + "required": false + }, + "parameters": { + "name": "parameters", + "type": "[]StatementParameterListItem", + "description": "A list of parameters to pass into a SQL statement containing parameter\nmarkers. A parameter consists of a name, a value, and optionally a type.\nTo represent a NULL value, the `value` field may be omitted or set to\n`null` explicitly. If the `type` field is omitted, the value is\ninterpreted as a string.\n\nIf the type is given, parameters will be checked for type correctness\naccording to the given type. A value is correct if the provided string\ncan be converted to the requested type using the `ca...", + "required": false + }, + "row_limit": { + "name": "row_limit", + "type": "int64", + "description": "Applies the given row limit to the statement's result set, but unlike the\n`LIMIT` clause in SQL, it also sets the `truncated` field in the response\nto indicate whether the result was trimmed due to the limit or not.", + "required": false + }, + "schema": { + "name": "schema", + "type": "string", + "description": "Sets default schema for statement execution, similar to [`USE SCHEMA`] in\nSQL.\n\n[`USE SCHEMA`]: https://docs.databricks.com/sql/language-manual/sql-ref-syntax-ddl-use-schema.html", + "required": false + }, + "statement": { + "name": "statement", + "type": "string", + "description": "The SQL statement to execute. The statement can optionally be\nparameterized, see `parameters`. The maximum query text size is 16 MiB.", + "required": false + }, + "wait_timeout": { + "name": "wait_timeout", + "type": "string", + "description": "The time in seconds the call will wait for the statement's result set as\n`Ns`, where `N` can be set to 0 or to a value between 5 and 50.\n\nWhen set to `0s`, the statement will execute in asynchronous mode and the\ncall will not wait for the execution to finish. In this case, the call\nreturns directly with `PENDING` state and a statement ID which can be\nused for polling with :method:statementexecution/getStatement.\n\nWhen set between 5 and 50 seconds, the call will behave synchronously up\nto this...", + "required": false + }, + "warehouse_id": { + "name": "warehouse_id", + "type": "string", + "description": "Warehouse upon which to execute a statement. See also [What are SQL\nwarehouses?]\n\n[What are SQL warehouses?]: https://docs.databricks.com/sql/admin/warehouse-type.html", + "required": false + } + } + }, + "sql.ExternalLink": { + "name": "ExternalLink", + "package": "sql", + "description": "", + "fields": { + "byte_count": { + "name": "byte_count", + "type": "int64", + "description": "The number of bytes in the result chunk. 
This field is not available when\nusing `INLINE` disposition.", + "required": false + }, + "chunk_index": { + "name": "chunk_index", + "type": "int", + "description": "The position within the sequence of result set chunks.", + "required": false + }, + "expiration": { + "name": "expiration", + "type": "string", + "description": "Indicates the date-time that the given external link will expire and\nbecomes invalid, after which point a new `external_link` must be\nrequested.", + "required": false + }, + "external_link": { + "name": "external_link", + "type": "string", + "description": "A URL pointing to a chunk of result data, hosted by an external service,\nwith a short expiration time (\u003c= 15 minutes). As this URL contains a\ntemporary credential, it should be considered sensitive and the client\nshould not expose this URL in a log.", + "required": false + }, + "http_headers": { + "name": "http_headers", + "type": "map[string]string", + "description": "HTTP headers that must be included with a GET request to the\n`external_link`. Each header is provided as a key-value pair. Headers are\ntypically used to pass a decryption key to the external service. The\nvalues of these headers should be considered sensitive and the client\nshould not expose these values in a log.", + "required": false + }, + "next_chunk_index": { + "name": "next_chunk_index", + "type": "int", + "description": "When fetching, provides the `chunk_index` for the _next_ chunk. If\nabsent, indicates there are no more chunks. The next chunk can be fetched\nwith a :method:statementexecution/getstatementresultchunkn request.", + "required": false + }, + "next_chunk_internal_link": { + "name": "next_chunk_internal_link", + "type": "string", + "description": "When fetching, provides a link to fetch the _next_ chunk. If absent,\nindicates there are no more chunks. This link is an absolute `path` to be\njoined with your `$DATABRICKS_HOST`, and should be treated as an opaque\nlink. 
This is an alternative to using `next_chunk_index`.", + "required": false + }, + "row_count": { + "name": "row_count", + "type": "int64", + "description": "The number of rows within the result chunk.", + "required": false + }, + "row_offset": { + "name": "row_offset", + "type": "int64", + "description": "The starting row offset within the result set.", + "required": false + } + } + }, + "sql.ExternalQuerySource": { + "name": "ExternalQuerySource", + "package": "sql", + "description": "", + "fields": { + "alert_id": { + "name": "alert_id", + "type": "string", + "description": "The canonical identifier for this SQL alert", + "required": false + }, + "dashboard_id": { + "name": "dashboard_id", + "type": "string", + "description": "The canonical identifier for this Lakeview dashboard", + "required": false + }, + "genie_space_id": { + "name": "genie_space_id", + "type": "string", + "description": "The canonical identifier for this Genie space", + "required": false + }, + "job_info": { + "name": "job_info", + "type": "*ExternalQuerySourceJobInfo", + "description": "", + "required": false + }, + "legacy_dashboard_id": { + "name": "legacy_dashboard_id", + "type": "string", + "description": "The canonical identifier for this legacy dashboard", + "required": false + }, + "notebook_id": { + "name": "notebook_id", + "type": "string", + "description": "The canonical identifier for this notebook", + "required": false + }, + "sql_query_id": { + "name": "sql_query_id", + "type": "string", + "description": "The canonical identifier for this SQL query", + "required": false + } + } + }, + "sql.ExternalQuerySourceJobInfo": { + "name": "ExternalQuerySourceJobInfo", + "package": "sql", + "description": "", + "fields": { + "job_id": { + "name": "job_id", + "type": "string", + "description": "The canonical identifier for this job.", + "required": false + }, + "job_run_id": { + "name": "job_run_id", + "type": "string", + "description": "The canonical identifier of the run. 
This ID is unique across all runs of\nall jobs.", + "required": false + }, + "job_task_run_id": { + "name": "job_task_run_id", + "type": "string", + "description": "The canonical identifier of the task run.", + "required": false + } + } + }, + "sql.GetResponse": { + "name": "GetResponse", + "package": "sql", + "description": "", + "fields": { + "access_control_list": { + "name": "access_control_list", + "type": "[]AccessControl", + "description": "", + "required": false + }, + "object_id": { + "name": "object_id", + "type": "string", + "description": "An object's type and UUID, separated by a forward slash (/) character.", + "required": false + }, + "object_type": { + "name": "object_type", + "type": "ObjectType", + "description": "A singular noun object type.", + "required": false + } + } + }, + "sql.GetWarehousePermissionLevelsResponse": { + "name": "GetWarehousePermissionLevelsResponse", + "package": "sql", + "description": "", + "fields": { + "permission_levels": { + "name": "permission_levels", + "type": "[]WarehousePermissionsDescription", + "description": "Specific permission levels", + "required": false + } + } + }, + "sql.GetWarehouseResponse": { + "name": "GetWarehouseResponse", + "package": "sql", + "description": "", + "fields": { + "auto_stop_mins": { + "name": "auto_stop_mins", + "type": "int", + "description": "The amount of time in minutes that a SQL warehouse must be idle (i.e., no\nRUNNING queries) before it is automatically stopped.\n\nSupported values: - Must be == 0 or \u003e= 10 mins - 0 indicates no autostop.\n\nDefaults to 120 mins", + "required": false + }, + "channel": { + "name": "channel", + "type": "*Channel", + "description": "Channel Details", + "required": false + }, + "cluster_size": { + "name": "cluster_size", + "type": "string", + "description": "Size of the clusters allocated for this warehouse. Increasing the size of\na spark cluster allows you to run larger queries on it. If you want to\nincrease the number of concurrent queries, please tune max_num_clusters.\n\nSupported values: - 2X-Small - X-Small - Small - Medium - Large - X-Large\n- 2X-Large - 3X-Large - 4X-Large", + "required": false + }, + "creator_name": { + "name": "creator_name", + "type": "string", + "description": "warehouse creator name", + "required": false + }, + "enable_photon": { + "name": "enable_photon", + "type": "bool", + "description": "Configures whether the warehouse should use Photon optimized clusters.\n\nDefaults to false.", + "required": false + }, + "enable_serverless_compute": { + "name": "enable_serverless_compute", + "type": "bool", + "description": "Configures whether the warehouse should use serverless compute", + "required": false + }, + "health": { + "name": "health", + "type": "*EndpointHealth", + "description": "Optional health status. Assume the warehouse is healthy if this field is\nnot set.", + "required": false + }, + "id": { + "name": "id", + "type": "string", + "description": "unique identifier for warehouse", + "required": false + }, + "instance_profile_arn": { + "name": "instance_profile_arn", + "type": "string", + "description": "Deprecated. 
Instance profile used to pass IAM role to the cluster", + "required": false + }, + "jdbc_url": { + "name": "jdbc_url", + "type": "string", + "description": "the jdbc connection string for this warehouse", + "required": false + }, + "max_num_clusters": { + "name": "max_num_clusters", + "type": "int", + "description": "Maximum number of clusters that the autoscaler will create to handle\nconcurrent queries.\n\nSupported values: - Must be \u003e= min_num_clusters - Must be \u003c= 40.\n\nDefaults to min_clusters if unset.", + "required": false + }, + "min_num_clusters": { + "name": "min_num_clusters", + "type": "int", + "description": "Minimum number of available clusters that will be maintained for this SQL\nwarehouse. Increasing this will ensure that a larger number of clusters\nare always running and therefore may reduce the cold start time for new\nqueries. This is similar to reserved vs. revocable cores in a resource\nmanager.\n\nSupported values: - Must be \u003e 0 - Must be \u003c= min(max_num_clusters, 30)\n\nDefaults to 1", + "required": false + }, + "name": { + "name": "name", + "type": "string", + "description": "Logical name for the cluster.\n\nSupported values: - Must be unique within an org. - Must be less than 100\ncharacters.", + "required": false + }, + "num_active_sessions": { + "name": "num_active_sessions", + "type": "int64", + "description": "Deprecated. current number of active sessions for the warehouse", + "required": false + }, + "num_clusters": { + "name": "num_clusters", + "type": "int", + "description": "current number of clusters running for the service", + "required": false + }, + "odbc_params": { + "name": "odbc_params", + "type": "*OdbcParams", + "description": "ODBC parameters for the SQL warehouse", + "required": false + }, + "spot_instance_policy": { + "name": "spot_instance_policy", + "type": "SpotInstancePolicy", + "description": "Configurations whether the endpoint should use spot instances.", + "required": false + }, + "state": { + "name": "state", + "type": "State", + "description": "state of the endpoint", + "required": false + }, + "tags": { + "name": "tags", + "type": "*EndpointTags", + "description": "A set of key-value pairs that will be tagged on all resources (e.g., AWS\ninstances and EBS volumes) associated with this SQL warehouse.\n\nSupported values: - Number of tags \u003c 45.", + "required": false + }, + "warehouse_type": { + "name": "warehouse_type", + "type": "GetWarehouseResponseWarehouseType", + "description": "Warehouse type: `PRO` or `CLASSIC`. 
If you want to use serverless\ncompute, you must set to `PRO` and also set the field\n`enable_serverless_compute` to `true`.", + "required": false + } + } + }, + "sql.GetWorkspaceWarehouseConfigResponse": { + "name": "GetWorkspaceWarehouseConfigResponse", + "package": "sql", + "description": "", + "fields": { + "channel": { + "name": "channel", + "type": "*Channel", + "description": "Optional: Channel selection details", + "required": false + }, + "config_param": { + "name": "config_param", + "type": "*RepeatedEndpointConfPairs", + "description": "Deprecated: Use sql_configuration_parameters", + "required": false + }, + "data_access_config": { + "name": "data_access_config", + "type": "[]EndpointConfPair", + "description": "Spark confs for external hive metastore configuration JSON serialized\nsize must be less than \u003c= 512K", + "required": false + }, + "enable_serverless_compute": { + "name": "enable_serverless_compute", + "type": "bool", + "description": "Enable Serverless compute for SQL warehouses", + "required": false + }, + "enabled_warehouse_types": { + "name": "enabled_warehouse_types", + "type": "[]WarehouseTypePair", + "description": "List of Warehouse Types allowed in this workspace (limits allowed value\nof the type field in CreateWarehouse and EditWarehouse). Note: Some types\ncannot be disabled, they don't need to be specified in\nSetWorkspaceWarehouseConfig. Note: Disabling a type may cause existing\nwarehouses to be converted to another type. Used by frontend to save\nspecific type availability in the warehouse create and edit form UI.", + "required": false + }, + "global_param": { + "name": "global_param", + "type": "*RepeatedEndpointConfPairs", + "description": "Deprecated: Use sql_configuration_parameters", + "required": false + }, + "google_service_account": { + "name": "google_service_account", + "type": "string", + "description": "GCP only: Google Service Account used to pass to cluster to access Google\nCloud Storage", + "required": false + }, + "instance_profile_arn": { + "name": "instance_profile_arn", + "type": "string", + "description": "AWS Only: The instance profile used to pass an IAM role to the SQL\nwarehouses. 
This configuration is also applied to the workspace's\nserverless compute for notebooks and jobs.", + "required": false + }, + "security_policy": { + "name": "security_policy", + "type": "GetWorkspaceWarehouseConfigResponseSecurityPolicy", + "description": "Security policy for warehouses", + "required": false + }, + "sql_configuration_parameters": { + "name": "sql_configuration_parameters", + "type": "*RepeatedEndpointConfPairs", + "description": "SQL configuration parameters", + "required": false + } + } + }, + "sql.LegacyAlert": { + "name": "LegacyAlert", + "package": "sql", + "description": "", + "fields": { + "created_at": { + "name": "created_at", + "type": "string", + "description": "Timestamp when the alert was created.", + "required": false + }, + "id": { + "name": "id", + "type": "string", + "description": "Alert ID.", + "required": false + }, + "last_triggered_at": { + "name": "last_triggered_at", + "type": "string", + "description": "Timestamp when the alert was last triggered.", + "required": false + }, + "name": { + "name": "name", + "type": "string", + "description": "Name of the alert.", + "required": false + }, + "options": { + "name": "options", + "type": "*AlertOptions", + "description": "Alert configuration options.", + "required": false + }, + "parent": { + "name": "parent", + "type": "string", + "description": "The identifier of the workspace folder containing the object.", + "required": false + }, + "query": { + "name": "query", + "type": "*AlertQuery", + "description": "", + "required": false + }, + "rearm": { + "name": "rearm", + "type": "int", + "description": "Number of seconds after being triggered before the alert rearms itself\nand can be triggered again. If `null`, alert will never be triggered\nagain.", + "required": false + }, + "state": { + "name": "state", + "type": "LegacyAlertState", + "description": "State of the alert. Possible values are: `unknown` (yet to be evaluated),\n`triggered` (evaluated and fulfilled trigger conditions), or `ok`\n(evaluated and did not fulfill trigger conditions).", + "required": false + }, + "updated_at": { + "name": "updated_at", + "type": "string", + "description": "Timestamp when the alert was last updated.", + "required": false + }, + "user": { + "name": "user", + "type": "*User", + "description": "", + "required": false + } + } + }, + "sql.LegacyQuery": { + "name": "LegacyQuery", + "package": "sql", + "description": "", + "fields": { + "can_edit": { + "name": "can_edit", + "type": "bool", + "description": "Describes whether the authenticated user is allowed to edit the\ndefinition of this query.", + "required": false + }, + "created_at": { + "name": "created_at", + "type": "string", + "description": "The timestamp when this query was created.", + "required": false + }, + "data_source_id": { + "name": "data_source_id", + "type": "string", + "description": "Data source ID maps to the ID of the data source used by the resource and\nis distinct from the warehouse ID. [Learn more]\n\n[Learn more]: https://docs.databricks.com/api/workspace/datasources/list", + "required": false + }, + "description": { + "name": "description", + "type": "string", + "description": "General description that conveys additional information about this query\nsuch as usage notes.", + "required": false + }, + "id": { + "name": "id", + "type": "string", + "description": "Query ID.", + "required": false + }, + "is_archived": { + "name": "is_archived", + "type": "bool", + "description": "Indicates whether the query is trashed. 
Trashed queries can't be used in\ndashboards, or appear in search results. If this boolean is `true`, the\n`options` property for this query includes a `moved_to_trash_at`\ntimestamp. Trashed queries are permanently deleted after 30 days.", + "required": false + }, + "is_draft": { + "name": "is_draft", + "type": "bool", + "description": "Whether the query is a draft. Draft queries only appear in list views for\ntheir owners. Visualizations from draft queries cannot appear on\ndashboards.", + "required": false + }, + "is_favorite": { + "name": "is_favorite", + "type": "bool", + "description": "Whether this query object appears in the current user's favorites list.\nThis flag determines whether the star icon for favorites is selected.", + "required": false + }, + "is_safe": { + "name": "is_safe", + "type": "bool", + "description": "Text parameter types are not safe from SQL injection for all types of\ndata source. Set this Boolean parameter to `true` if a query either does\nnot use any text type parameters or uses a data source type where text\ntype parameters are handled safely.", + "required": false + }, + "last_modified_by": { + "name": "last_modified_by", + "type": "*User", + "description": "", + "required": false + }, + "last_modified_by_id": { + "name": "last_modified_by_id", + "type": "int", + "description": "The ID of the user who last saved changes to this query.", + "required": false + }, + "latest_query_data_id": { + "name": "latest_query_data_id", + "type": "string", + "description": "If there is a cached result for this query and user, this field includes\nthe query result ID. If this query uses parameters, this field is always\nnull.", + "required": false + }, + "name": { + "name": "name", + "type": "string", + "description": "The title of this query that appears in list views, widget headings, and\non the query page.", + "required": false + }, + "options": { + "name": "options", + "type": "*QueryOptions", + "description": "", + "required": false + }, + "parent": { + "name": "parent", + "type": "string", + "description": "The identifier of the workspace folder containing the object.", + "required": false + }, + "permission_tier": { + "name": "permission_tier", + "type": "PermissionLevel", + "description": "* `CAN_VIEW`: Can view the query * `CAN_RUN`: Can run the query *\n`CAN_EDIT`: Can edit the query * `CAN_MANAGE`: Can manage the query", + "required": false + }, + "query": { + "name": "query", + "type": "string", + "description": "The text of the query to be run.", + "required": false + }, + "query_hash": { + "name": "query_hash", + "type": "string", + "description": "A SHA-256 hash of the query text along with the authenticated user ID.", + "required": false + }, + "run_as_role": { + "name": "run_as_role", + "type": "RunAsRole", + "description": "Sets the **Run as** role for the object. 
Must be set to one of `\"viewer\"`\n(signifying \"run as viewer\" behavior) or `\"owner\"` (signifying \"run as\nowner\" behavior)", + "required": false + }, + "tags": { + "name": "tags", + "type": "[]string", + "description": "", + "required": false + }, + "updated_at": { + "name": "updated_at", + "type": "string", + "description": "The timestamp at which this query was last updated.", + "required": false + }, + "user": { + "name": "user", + "type": "*User", + "description": "", + "required": false + }, + "user_id": { + "name": "user_id", + "type": "int", + "description": "The ID of the user who owns the query.", + "required": false + }, + "visualizations": { + "name": "visualizations", + "type": "[]LegacyVisualization", + "description": "", + "required": false + } + } + }, + "sql.LegacyVisualization": { + "name": "LegacyVisualization", + "package": "sql", + "description": "The visualization description API changes frequently and is unsupported. You\ncan duplicate a visualization by copying description objects received _from\nthe API_ and then using them to create a new one with a POST request to the\nsame endpoint. Databricks does not recommend constructing ad-hoc\nvisualizations entirely in JSON.", + "fields": { + "created_at": { + "name": "created_at", + "type": "string", + "description": "", + "required": false + }, + "description": { + "name": "description", + "type": "string", + "description": "A short description of this visualization. This is not displayed in the\nUI.", + "required": false + }, + "id": { + "name": "id", + "type": "string", + "description": "The UUID for this visualization.", + "required": false + }, + "name": { + "name": "name", + "type": "string", + "description": "The name of the visualization that appears on dashboards and the query\nscreen.", + "required": false + }, + "options": { + "name": "options", + "type": "any", + "description": "The options object varies widely from one visualization type to the next\nand is unsupported. Databricks does not recommend modifying visualization\nsettings in JSON.", + "required": false + }, + "query": { + "name": "query", + "type": "*LegacyQuery", + "description": "", + "required": false + }, + "type": { + "name": "type", + "type": "string", + "description": "The type of visualization: chart, table, pivot table, and so on.", + "required": false + }, + "updated_at": { + "name": "updated_at", + "type": "string", + "description": "", + "required": false + } + } + }, + "sql.ListAlertsResponse": { + "name": "ListAlertsResponse", + "package": "sql", + "description": "", + "fields": { + "next_page_token": { + "name": "next_page_token", + "type": "string", + "description": "", + "required": false + }, + "results": { + "name": "results", + "type": "[]ListAlertsResponseAlert", + "description": "", + "required": false + } + } + }, + "sql.ListAlertsResponseAlert": { + "name": "ListAlertsResponseAlert", + "package": "sql", + "description": "", + "fields": { + "condition": { + "name": "condition", + "type": "*AlertCondition", + "description": "Trigger conditions of the alert.", + "required": false + }, + "create_time": { + "name": "create_time", + "type": "string", + "description": "The timestamp indicating when the alert was created.", + "required": false + }, + "custom_body": { + "name": "custom_body", + "type": "string", + "description": "Custom body of alert notification, if it exists. 
See [here] for custom\ntemplating instructions.\n\n[here]: https://docs.databricks.com/sql/user/alerts/index.html", + "required": false + }, + "custom_subject": { + "name": "custom_subject", + "type": "string", + "description": "Custom subject of alert notification, if it exists. This can include\nemail subject entries and Slack notification headers, for example. See\n[here] for custom templating instructions.\n\n[here]: https://docs.databricks.com/sql/user/alerts/index.html", + "required": false + }, + "display_name": { + "name": "display_name", + "type": "string", + "description": "The display name of the alert.", + "required": false + }, + "id": { + "name": "id", + "type": "string", + "description": "UUID identifying the alert.", + "required": false + }, + "lifecycle_state": { + "name": "lifecycle_state", + "type": "LifecycleState", + "description": "The workspace state of the alert. Used for tracking trashed status.", + "required": false + }, + "notify_on_ok": { + "name": "notify_on_ok", + "type": "bool", + "description": "Whether to notify alert subscribers when alert returns back to normal.", + "required": false + }, + "owner_user_name": { + "name": "owner_user_name", + "type": "string", + "description": "The owner's username. This field is set to \"Unavailable\" if the user has\nbeen deleted.", + "required": false + }, + "query_id": { + "name": "query_id", + "type": "string", + "description": "UUID of the query attached to the alert.", + "required": false + }, + "seconds_to_retrigger": { + "name": "seconds_to_retrigger", + "type": "int", + "description": "Number of seconds an alert must wait after being triggered to rearm\nitself. After rearming, it can be triggered again. If 0 or not specified,\nthe alert will not be triggered again.", + "required": false + }, + "state": { + "name": "state", + "type": "AlertState", + "description": "Current state of the alert's trigger status. 
This field is set to UNKNOWN\nif the alert has not yet been evaluated or ran into an error during the\nlast evaluation.", + "required": false + }, + "trigger_time": { + "name": "trigger_time", + "type": "string", + "description": "Timestamp when the alert was last triggered, if the alert has been\ntriggered before.", + "required": false + }, + "update_time": { + "name": "update_time", + "type": "string", + "description": "The timestamp indicating when the alert was updated.", + "required": false + } + } + }, + "sql.ListAlertsV2Response": { + "name": "ListAlertsV2Response", + "package": "sql", + "description": "", + "fields": { + "alerts": { + "name": "alerts", + "type": "[]AlertV2", + "description": "", + "required": false + }, + "next_page_token": { + "name": "next_page_token", + "type": "string", + "description": "", + "required": false + } + } + }, + "sql.ListQueriesResponse": { + "name": "ListQueriesResponse", + "package": "sql", + "description": "", + "fields": { + "has_next_page": { + "name": "has_next_page", + "type": "bool", + "description": "Whether there is another page of results.", + "required": false + }, + "next_page_token": { + "name": "next_page_token", + "type": "string", + "description": "A token that can be used to get the next page of results.", + "required": false + }, + "res": { + "name": "res", + "type": "[]QueryInfo", + "description": "", + "required": false + } + } + }, + "sql.ListQueryObjectsResponse": { + "name": "ListQueryObjectsResponse", + "package": "sql", + "description": "", + "fields": { + "next_page_token": { + "name": "next_page_token", + "type": "string", + "description": "", + "required": false + }, + "results": { + "name": "results", + "type": "[]ListQueryObjectsResponseQuery", + "description": "", + "required": false + } + } + }, + "sql.ListQueryObjectsResponseQuery": { + "name": "ListQueryObjectsResponseQuery", + "package": "sql", + "description": "", + "fields": { + "apply_auto_limit": { + "name": "apply_auto_limit", + "type": "bool", + "description": "Whether to apply a 1000 row limit to the query result.", + "required": false + }, + "catalog": { + "name": "catalog", + "type": "string", + "description": "Name of the catalog where this query will be executed.", + "required": false + }, + "create_time": { + "name": "create_time", + "type": "string", + "description": "Timestamp when this query was created.", + "required": false + }, + "description": { + "name": "description", + "type": "string", + "description": "General description that conveys additional information about this query\nsuch as usage notes.", + "required": false + }, + "display_name": { + "name": "display_name", + "type": "string", + "description": "Display name of the query that appears in list views, widget headings,\nand on the query page.", + "required": false + }, + "id": { + "name": "id", + "type": "string", + "description": "UUID identifying the query.", + "required": false + }, + "last_modifier_user_name": { + "name": "last_modifier_user_name", + "type": "string", + "description": "Username of the user who last saved changes to this query.", + "required": false + }, + "lifecycle_state": { + "name": "lifecycle_state", + "type": "LifecycleState", + "description": "Indicates whether the query is trashed.", + "required": false + }, + "owner_user_name": { + "name": "owner_user_name", + "type": "string", + "description": "Username of the user that owns the query.", + "required": false + }, + "parameters": { + "name": "parameters", + "type": "[]QueryParameter", + "description": "List 
of query parameter definitions.", + "required": false + }, + "query_text": { + "name": "query_text", + "type": "string", + "description": "Text of the query to be run.", + "required": false + }, + "run_as_mode": { + "name": "run_as_mode", + "type": "RunAsMode", + "description": "Sets the \"Run as\" role for the object.", + "required": false + }, + "schema": { + "name": "schema", + "type": "string", + "description": "Name of the schema where this query will be executed.", + "required": false + }, + "tags": { + "name": "tags", + "type": "[]string", + "description": "", + "required": false + }, + "update_time": { + "name": "update_time", + "type": "string", + "description": "Timestamp when this query was last updated.", + "required": false + }, + "warehouse_id": { + "name": "warehouse_id", + "type": "string", + "description": "ID of the SQL warehouse attached to the query.", + "required": false + } + } + }, + "sql.ListResponse": { + "name": "ListResponse", + "package": "sql", + "description": "", + "fields": { + "count": { + "name": "count", + "type": "int", + "description": "The total number of dashboards.", + "required": false + }, + "page": { + "name": "page", + "type": "int", + "description": "The current page being displayed.", + "required": false + }, + "page_size": { + "name": "page_size", + "type": "int", + "description": "The number of dashboards per page.", + "required": false + }, + "results": { + "name": "results", + "type": "[]Dashboard", + "description": "List of dashboards returned.", + "required": false + } + } + }, + "sql.ListVisualizationsForQueryResponse": { + "name": "ListVisualizationsForQueryResponse", + "package": "sql", + "description": "", + "fields": { + "next_page_token": { + "name": "next_page_token", + "type": "string", + "description": "", + "required": false + }, + "results": { + "name": "results", + "type": "[]Visualization", + "description": "", + "required": false + } + } + }, + "sql.ListWarehousesResponse": { + "name": "ListWarehousesResponse", + "package": "sql", + "description": "", + "fields": { + "next_page_token": { + "name": "next_page_token", + "type": "string", + "description": "A token, which can be sent as `page_token` to retrieve the next page. If\nthis field is omitted, there are no subsequent pages.", + "required": false + }, + "warehouses": { + "name": "warehouses", + "type": "[]EndpointInfo", + "description": "A list of warehouses and their configurations.", + "required": false + } + } + }, + "sql.MultiValuesOptions": { + "name": "MultiValuesOptions", + "package": "sql", + "description": "", + "fields": { + "prefix": { + "name": "prefix", + "type": "string", + "description": "Character that prefixes each selected parameter value.", + "required": false + }, + "separator": { + "name": "separator", + "type": "string", + "description": "Character that separates each selected parameter value. 
Defaults to a\ncomma.", + "required": false + }, + "suffix": { + "name": "suffix", + "type": "string", + "description": "Character that suffixes each selected parameter value.", + "required": false + } + } + }, + "sql.NumericValue": { + "name": "NumericValue", + "package": "sql", + "description": "", + "fields": { + "value": { + "name": "value", + "type": "float64", + "description": "", + "required": false + } + } + }, + "sql.OdbcParams": { + "name": "OdbcParams", + "package": "sql", + "description": "", + "fields": { + "hostname": { + "name": "hostname", + "type": "string", + "description": "", + "required": false + }, + "path": { + "name": "path", + "type": "string", + "description": "", + "required": false + }, + "port": { + "name": "port", + "type": "int", + "description": "", + "required": false + }, + "protocol": { + "name": "protocol", + "type": "string", + "description": "", + "required": false + } + } + }, + "sql.Parameter": { + "name": "Parameter", + "package": "sql", + "description": "", + "fields": { + "enumOptions": { + "name": "enumOptions", + "type": "string", + "description": "List of valid parameter values, newline delimited. Only applies for\ndropdown list parameters.", + "required": false + }, + "multiValuesOptions": { + "name": "multiValuesOptions", + "type": "*MultiValuesOptions", + "description": "If specified, allows multiple values to be selected for this parameter.\nOnly applies to dropdown list and query-based dropdown list parameters.", + "required": false + }, + "name": { + "name": "name", + "type": "string", + "description": "The literal parameter marker that appears between double curly braces in\nthe query text.", + "required": false + }, + "queryId": { + "name": "queryId", + "type": "string", + "description": "The UUID of the query that provides the parameter values. 
Only applies\nfor query-based dropdown list parameters.", + "required": false + }, + "title": { + "name": "title", + "type": "string", + "description": "The text displayed in a parameter picking widget.", + "required": false + }, + "type": { + "name": "type", + "type": "ParameterType", + "description": "Parameters can have several different types.", + "required": false + }, + "value": { + "name": "value", + "type": "any", + "description": "The default value for this parameter.", + "required": false + } + } + }, + "sql.Query": { + "name": "Query", + "package": "sql", + "description": "", + "fields": { + "apply_auto_limit": { + "name": "apply_auto_limit", + "type": "bool", + "description": "Whether to apply a 1000 row limit to the query result.", + "required": false + }, + "catalog": { + "name": "catalog", + "type": "string", + "description": "Name of the catalog where this query will be executed.", + "required": false + }, + "create_time": { + "name": "create_time", + "type": "string", + "description": "Timestamp when this query was created.", + "required": false + }, + "description": { + "name": "description", + "type": "string", + "description": "General description that conveys additional information about this query\nsuch as usage notes.", + "required": false + }, + "display_name": { + "name": "display_name", + "type": "string", + "description": "Display name of the query that appears in list views, widget headings,\nand on the query page.", + "required": false + }, + "id": { + "name": "id", + "type": "string", + "description": "UUID identifying the query.", + "required": false + }, + "last_modifier_user_name": { + "name": "last_modifier_user_name", + "type": "string", + "description": "Username of the user who last saved changes to this query.", + "required": false + }, + "lifecycle_state": { + "name": "lifecycle_state", + "type": "LifecycleState", + "description": "Indicates whether the query is trashed.", + "required": false + }, + "owner_user_name": { + "name": "owner_user_name", + "type": "string", + "description": "Username of the user that owns the query.", + "required": false + }, + "parameters": { + "name": "parameters", + "type": "[]QueryParameter", + "description": "List of query parameter definitions.", + "required": false + }, + "parent_path": { + "name": "parent_path", + "type": "string", + "description": "Workspace path of the workspace folder containing the object.", + "required": false + }, + "query_text": { + "name": "query_text", + "type": "string", + "description": "Text of the query to be run.", + "required": false + }, + "run_as_mode": { + "name": "run_as_mode", + "type": "RunAsMode", + "description": "Sets the \"Run as\" role for the object.", + "required": false + }, + "schema": { + "name": "schema", + "type": "string", + "description": "Name of the schema where this query will be executed.", + "required": false + }, + "tags": { + "name": "tags", + "type": "[]string", + "description": "", + "required": false + }, + "update_time": { + "name": "update_time", + "type": "string", + "description": "Timestamp when this query was last updated.", + "required": false + }, + "warehouse_id": { + "name": "warehouse_id", + "type": "string", + "description": "ID of the SQL warehouse attached to the query.", + "required": false + } + } + }, + "sql.QueryBackedValue": { + "name": "QueryBackedValue", + "package": "sql", + "description": "", + "fields": { + "multi_values_options": { + "name": "multi_values_options", + "type": "*MultiValuesOptions", + "description": "If specified, 
allows multiple values to be selected for this parameter.", + "required": false + }, + "query_id": { + "name": "query_id", + "type": "string", + "description": "UUID of the query that provides the parameter values.", + "required": false + }, + "values": { + "name": "values", + "type": "[]string", + "description": "List of selected query parameter values.", + "required": false + } + } + }, + "sql.QueryEditContent": { + "name": "QueryEditContent", + "package": "sql", + "description": "", + "fields": { + "data_source_id": { + "name": "data_source_id", + "type": "string", + "description": "Data source ID maps to the ID of the data source used by the resource and\nis distinct from the warehouse ID. [Learn more]\n\n[Learn more]: https://docs.databricks.com/api/workspace/datasources/list", + "required": false + }, + "description": { + "name": "description", + "type": "string", + "description": "General description that conveys additional information about this query\nsuch as usage notes.", + "required": false + }, + "name": { + "name": "name", + "type": "string", + "description": "The title of this query that appears in list views, widget headings, and\non the query page.", + "required": false + }, + "options": { + "name": "options", + "type": "any", + "description": "Exclusively used for storing a list parameter definitions. A parameter is\nan object with `title`, `name`, `type`, and `value` properties. The\n`value` field here is the default value. It can be overridden at runtime.", + "required": false + }, + "query": { + "name": "query", + "type": "string", + "description": "The text of the query to be run.", + "required": false + }, + "run_as_role": { + "name": "run_as_role", + "type": "RunAsRole", + "description": "Sets the **Run as** role for the object. Must be set to one of `\"viewer\"`\n(signifying \"run as viewer\" behavior) or `\"owner\"` (signifying \"run as\nowner\" behavior)", + "required": false + }, + "tags": { + "name": "tags", + "type": "[]string", + "description": "", + "required": false + } + } + }, + "sql.QueryFilter": { + "name": "QueryFilter", + "package": "sql", + "description": "", + "fields": { + "query_start_time_range": { + "name": "query_start_time_range", + "type": "*TimeRange", + "description": "A range filter for query submitted time. The time range must be less than\nor equal to 30 days.", + "required": false + }, + "statement_ids": { + "name": "statement_ids", + "type": "[]string", + "description": "A list of statement IDs.", + "required": false + }, + "statuses": { + "name": "statuses", + "type": "[]QueryStatus", + "description": "A list of statuses (QUEUED, RUNNING, CANCELED, FAILED, FINISHED) to match\nquery results. Corresponds to the `status` field in the response.\nFiltering for multiple statuses is not recommended. 
Instead, opt to\nfilter by a single status multiple times and then combine the results.", + "required": false + }, + "user_ids": { + "name": "user_ids", + "type": "[]int64", + "description": "A list of user IDs who ran the queries.", + "required": false + }, + "warehouse_ids": { + "name": "warehouse_ids", + "type": "[]string", + "description": "A list of warehouse IDs.", + "required": false + } + } + }, + "sql.QueryInfo": { + "name": "QueryInfo", + "package": "sql", + "description": "", + "fields": { + "cache_query_id": { + "name": "cache_query_id", + "type": "string", + "description": "The ID of the cached query if this result retrieved from cache", + "required": false + }, + "channel_used": { + "name": "channel_used", + "type": "*ChannelInfo", + "description": "SQL Warehouse channel information at the time of query execution", + "required": false + }, + "client_application": { + "name": "client_application", + "type": "string", + "description": "Client application that ran the statement. For example: Databricks SQL\nEditor, Tableau, and Power BI. This field is derived from information\nprovided by client applications. While values are expected to remain\nstatic over time, this cannot be guaranteed.", + "required": false + }, + "duration": { + "name": "duration", + "type": "int64", + "description": "Total time of the statement execution. This value does not include the\ntime taken to retrieve the results, which can result in a discrepancy\nbetween this value and the start-to-finish wall-clock time.", + "required": false + }, + "endpoint_id": { + "name": "endpoint_id", + "type": "string", + "description": "Alias for `warehouse_id`.", + "required": false + }, + "error_message": { + "name": "error_message", + "type": "string", + "description": "Message describing why the query could not complete.", + "required": false + }, + "executed_as_user_id": { + "name": "executed_as_user_id", + "type": "int64", + "description": "The ID of the user whose credentials were used to run the query.", + "required": false + }, + "executed_as_user_name": { + "name": "executed_as_user_name", + "type": "string", + "description": "The email address or username of the user whose credentials were used to\nrun the query.", + "required": false + }, + "execution_end_time_ms": { + "name": "execution_end_time_ms", + "type": "int64", + "description": "The time execution of the query ended.", + "required": false + }, + "is_final": { + "name": "is_final", + "type": "bool", + "description": "Whether more updates for the query are expected.", + "required": false + }, + "lookup_key": { + "name": "lookup_key", + "type": "string", + "description": "A key that can be used to look up query details.", + "required": false + }, + "metrics": { + "name": "metrics", + "type": "*QueryMetrics", + "description": "Metrics about query execution.", + "required": false + }, + "plans_state": { + "name": "plans_state", + "type": "PlansState", + "description": "Whether plans exist for the execution, or the reason why they are missing", + "required": false + }, + "query_end_time_ms": { + "name": "query_end_time_ms", + "type": "int64", + "description": "The time the query ended.", + "required": false + }, + "query_id": { + "name": "query_id", + "type": "string", + "description": "The query ID.", + "required": false + }, + "query_source": { + "name": "query_source", + "type": "*ExternalQuerySource", + "description": "A struct that contains key-value pairs representing Databricks entities\nthat were involved in the execution of this statement, such 
as jobs,\nnotebooks, or dashboards. This field only records Databricks entities.", + "required": false + }, + "query_start_time_ms": { + "name": "query_start_time_ms", + "type": "int64", + "description": "The time the query started.", + "required": false + }, + "query_text": { + "name": "query_text", + "type": "string", + "description": "The text of the query.", + "required": false + }, + "rows_produced": { + "name": "rows_produced", + "type": "int64", + "description": "The number of results returned by the query.", + "required": false + }, + "spark_ui_url": { + "name": "spark_ui_url", + "type": "string", + "description": "URL to the Spark UI query plan.", + "required": false + }, + "statement_type": { + "name": "statement_type", + "type": "QueryStatementType", + "description": "Type of statement for this query", + "required": false + }, + "status": { + "name": "status", + "type": "QueryStatus", + "description": "Query status with one the following values:\n\n- `QUEUED`: Query has been received and queued. - `RUNNING`: Query has\nstarted. - `CANCELED`: Query has been cancelled by the user. - `FAILED`:\nQuery has failed. - `FINISHED`: Query has completed.", + "required": false + }, + "user_id": { + "name": "user_id", + "type": "int64", + "description": "The ID of the user who ran the query.", + "required": false + }, + "user_name": { + "name": "user_name", + "type": "string", + "description": "The email address or username of the user who ran the query.", + "required": false + }, + "warehouse_id": { + "name": "warehouse_id", + "type": "string", + "description": "Warehouse ID.", + "required": false + } + } + }, + "sql.QueryList": { + "name": "QueryList", + "package": "sql", + "description": "", + "fields": { + "count": { + "name": "count", + "type": "int", + "description": "The total number of queries.", + "required": false + }, + "page": { + "name": "page", + "type": "int", + "description": "The page number that is currently displayed.", + "required": false + }, + "page_size": { + "name": "page_size", + "type": "int", + "description": "The number of queries per page.", + "required": false + }, + "results": { + "name": "results", + "type": "[]LegacyQuery", + "description": "List of queries returned.", + "required": false + } + } + }, + "sql.QueryMetrics": { + "name": "QueryMetrics", + "package": "sql", + "description": "A query metric that encapsulates a set of measurements for a single query.\nMetrics come from the driver and are stored in the history service database.", + "fields": { + "compilation_time_ms": { + "name": "compilation_time_ms", + "type": "int64", + "description": "Time spent loading metadata and optimizing the query, in milliseconds.", + "required": false + }, + "execution_time_ms": { + "name": "execution_time_ms", + "type": "int64", + "description": "Time spent executing the query, in milliseconds.", + "required": false + }, + "network_sent_bytes": { + "name": "network_sent_bytes", + "type": "int64", + "description": "Total amount of data sent over the network between executor nodes during\nshuffle, in bytes.", + "required": false + }, + "overloading_queue_start_timestamp": { + "name": "overloading_queue_start_timestamp", + "type": "int64", + "description": "Timestamp of when the query was enqueued waiting while the warehouse was\nat max load. 
This field is optional and will not appear if the query\nskipped the overloading queue.", + "required": false + }, + "photon_total_time_ms": { + "name": "photon_total_time_ms", + "type": "int64", + "description": "Total execution time for all individual Photon query engine tasks in the\nquery, in milliseconds.", + "required": false + }, + "projected_remaining_task_total_time_ms": { + "name": "projected_remaining_task_total_time_ms", + "type": "int64", + "description": "projected remaining work to be done aggregated across all stages in the\nquery, in milliseconds", + "required": false + }, + "projected_remaining_wallclock_time_ms": { + "name": "projected_remaining_wallclock_time_ms", + "type": "int64", + "description": "projected lower bound on remaining total task time based on\nprojected_remaining_task_total_time_ms / maximum concurrency", + "required": false + }, + "provisioning_queue_start_timestamp": { + "name": "provisioning_queue_start_timestamp", + "type": "int64", + "description": "Timestamp of when the query was enqueued waiting for a cluster to be\nprovisioned for the warehouse. This field is optional and will not appear\nif the query skipped the provisioning queue.", + "required": false + }, + "pruned_bytes": { + "name": "pruned_bytes", + "type": "int64", + "description": "Total number of file bytes in all tables not read due to pruning", + "required": false + }, + "pruned_files_count": { + "name": "pruned_files_count", + "type": "int64", + "description": "Total number of files from all tables not read due to pruning", + "required": false + }, + "query_compilation_start_timestamp": { + "name": "query_compilation_start_timestamp", + "type": "int64", + "description": "Timestamp of when the underlying compute started compilation of the\nquery.", + "required": false + }, + "read_bytes": { + "name": "read_bytes", + "type": "int64", + "description": "Total size of data read by the query, in bytes.", + "required": false + }, + "read_cache_bytes": { + "name": "read_cache_bytes", + "type": "int64", + "description": "Size of persistent data read from the cache, in bytes.", + "required": false + }, + "read_files_bytes": { + "name": "read_files_bytes", + "type": "int64", + "description": "Total number of file bytes in all tables read", + "required": false + }, + "read_files_count": { + "name": "read_files_count", + "type": "int64", + "description": "Number of files read after pruning", + "required": false + }, + "read_partitions_count": { + "name": "read_partitions_count", + "type": "int64", + "description": "Number of partitions read after pruning.", + "required": false + }, + "read_remote_bytes": { + "name": "read_remote_bytes", + "type": "int64", + "description": "Size of persistent data read from cloud object storage on your cloud\ntenant, in bytes.", + "required": false + }, + "remaining_task_count": { + "name": "remaining_task_count", + "type": "int64", + "description": "number of remaining tasks to complete this is based on the current status\nand could be bigger or smaller in the future based on future updates", + "required": false + }, + "result_fetch_time_ms": { + "name": "result_fetch_time_ms", + "type": "int64", + "description": "Time spent fetching the query results after the execution finished, in\nmilliseconds.", + "required": false + }, + "result_from_cache": { + "name": "result_from_cache", + "type": "bool", + "description": "`true` if the query result was fetched from cache, `false` otherwise.", + "required": false + }, + "rows_produced_count": { + "name": 
"rows_produced_count", + "type": "int64", + "description": "Total number of rows returned by the query.", + "required": false + }, + "rows_read_count": { + "name": "rows_read_count", + "type": "int64", + "description": "Total number of rows read by the query.", + "required": false + }, + "runnable_tasks": { + "name": "runnable_tasks", + "type": "int64", + "description": "number of remaining tasks to complete, calculated by autoscaler\nStatementAnalysis.scala deprecated: use remaining_task_count instead", + "required": false + }, + "spill_to_disk_bytes": { + "name": "spill_to_disk_bytes", + "type": "int64", + "description": "Size of data temporarily written to disk while executing the query, in\nbytes.", + "required": false + }, + "task_time_over_time_range": { + "name": "task_time_over_time_range", + "type": "*TaskTimeOverRange", + "description": "sum of task times completed in a range of wall clock time, approximated\nto a configurable number of points aggregated over all stages and jobs in\nthe query (based on task_total_time_ms)", + "required": false + }, + "task_total_time_ms": { + "name": "task_total_time_ms", + "type": "int64", + "description": "Sum of execution time for all of the query’s tasks, in milliseconds.", + "required": false + }, + "total_time_ms": { + "name": "total_time_ms", + "type": "int64", + "description": "Total execution time of the query from the client’s point of view, in\nmilliseconds.", + "required": false + }, + "work_to_be_done": { + "name": "work_to_be_done", + "type": "int64", + "description": "remaining work to be done across all stages in the query, calculated by\nautoscaler StatementAnalysis.scala, in milliseconds deprecated: using\nprojected_remaining_task_total_time_ms instead", + "required": false + }, + "write_remote_bytes": { + "name": "write_remote_bytes", + "type": "int64", + "description": "Size pf persistent data written to cloud object storage in your cloud\ntenant, in bytes.", + "required": false + } + } + }, + "sql.QueryOptions": { + "name": "QueryOptions", + "package": "sql", + "description": "", + "fields": { + "catalog": { + "name": "catalog", + "type": "string", + "description": "The name of the catalog to execute this query in.", + "required": false + }, + "moved_to_trash_at": { + "name": "moved_to_trash_at", + "type": "string", + "description": "The timestamp when this query was moved to trash. Only present when the\n`is_archived` property is `true`. Trashed items are deleted after thirty\ndays.", + "required": false + }, + "parameters": { + "name": "parameters", + "type": "[]Parameter", + "description": "", + "required": false + }, + "schema": { + "name": "schema", + "type": "string", + "description": "The name of the schema to execute this query in.", + "required": false + } + } + }, + "sql.QueryParameter": { + "name": "QueryParameter", + "package": "sql", + "description": "", + "fields": { + "date_range_value": { + "name": "date_range_value", + "type": "*DateRangeValue", + "description": "Date-range query parameter value. Can only specify one of\n`dynamic_date_range_value` or `date_range_value`.", + "required": false + }, + "date_value": { + "name": "date_value", + "type": "*DateValue", + "description": "Date query parameter value. 
Can only specify one of `dynamic_date_value`\nor `date_value`.", + "required": false + }, + "enum_value": { + "name": "enum_value", + "type": "*EnumValue", + "description": "Dropdown query parameter value.", + "required": false + }, + "name": { + "name": "name", + "type": "string", + "description": "Literal parameter marker that appears between double curly braces in the\nquery text.", + "required": false + }, + "numeric_value": { + "name": "numeric_value", + "type": "*NumericValue", + "description": "Numeric query parameter value.", + "required": false + }, + "query_backed_value": { + "name": "query_backed_value", + "type": "*QueryBackedValue", + "description": "Query-based dropdown query parameter value.", + "required": false + }, + "text_value": { + "name": "text_value", + "type": "*TextValue", + "description": "Text query parameter value.", + "required": false + }, + "title": { + "name": "title", + "type": "string", + "description": "Text displayed in the user-facing parameter widget in the UI.", + "required": false + } + } + }, + "sql.QueryPostContent": { + "name": "QueryPostContent", + "package": "sql", + "description": "", + "fields": { + "data_source_id": { + "name": "data_source_id", + "type": "string", + "description": "Data source ID maps to the ID of the data source used by the resource and\nis distinct from the warehouse ID. [Learn more]\n\n[Learn more]: https://docs.databricks.com/api/workspace/datasources/list", + "required": false + }, + "description": { + "name": "description", + "type": "string", + "description": "General description that conveys additional information about this query\nsuch as usage notes.", + "required": false + }, + "name": { + "name": "name", + "type": "string", + "description": "The title of this query that appears in list views, widget headings, and\non the query page.", + "required": false + }, + "options": { + "name": "options", + "type": "any", + "description": "Exclusively used for storing a list parameter definitions. A parameter is\nan object with `title`, `name`, `type`, and `value` properties. The\n`value` field here is the default value. It can be overridden at runtime.", + "required": false + }, + "parent": { + "name": "parent", + "type": "string", + "description": "The identifier of the workspace folder containing the object.", + "required": false + }, + "query": { + "name": "query", + "type": "string", + "description": "The text of the query to be run.", + "required": false + }, + "run_as_role": { + "name": "run_as_role", + "type": "RunAsRole", + "description": "Sets the **Run as** role for the object. 
Must be set to one of `\"viewer\"`\n(signifying \"run as viewer\" behavior) or `\"owner\"` (signifying \"run as\nowner\" behavior)", + "required": false + }, + "tags": { + "name": "tags", + "type": "[]string", + "description": "", + "required": false + } + } + }, + "sql.RepeatedEndpointConfPairs": { + "name": "RepeatedEndpointConfPairs", + "package": "sql", + "description": "", + "fields": { + "config_pair": { + "name": "config_pair", + "type": "[]EndpointConfPair", + "description": "Deprecated: Use configuration_pairs", + "required": false + }, + "configuration_pairs": { + "name": "configuration_pairs", + "type": "[]EndpointConfPair", + "description": "", + "required": false + } + } + }, + "sql.ResultData": { + "name": "ResultData", + "package": "sql", + "description": "Contains the result data of a single chunk when using `INLINE` disposition.\nWhen using `EXTERNAL_LINKS` disposition, the array `external_links` is used\ninstead to provide URLs to the result data in cloud storage. Exactly one of\nthese alternatives is used. (While the `external_links` array prepares the\nAPI to return multiple links in a single response. Currently only a single\nlink is returned.)", + "fields": { + "byte_count": { + "name": "byte_count", + "type": "int64", + "description": "The number of bytes in the result chunk. This field is not available when\nusing `INLINE` disposition.", + "required": false + }, + "chunk_index": { + "name": "chunk_index", + "type": "int", + "description": "The position within the sequence of result set chunks.", + "required": false + }, + "data_array": { + "name": "data_array", + "type": "[][]string", + "description": "The `JSON_ARRAY` format is an array of arrays of values, where each\nnon-null value is formatted as a string. Null values are encoded as JSON\n`null`.", + "required": false + }, + "external_links": { + "name": "external_links", + "type": "[]ExternalLink", + "description": "", + "required": false + }, + "next_chunk_index": { + "name": "next_chunk_index", + "type": "int", + "description": "When fetching, provides the `chunk_index` for the _next_ chunk. If\nabsent, indicates there are no more chunks. The next chunk can be fetched\nwith a :method:statementexecution/getstatementresultchunkn request.", + "required": false + }, + "next_chunk_internal_link": { + "name": "next_chunk_internal_link", + "type": "string", + "description": "When fetching, provides a link to fetch the _next_ chunk. If absent,\nindicates there are no more chunks. This link is an absolute `path` to be\njoined with your `$DATABRICKS_HOST`, and should be treated as an opaque\nlink. 
This is an alternative to using `next_chunk_index`.", + "required": false + }, + "row_count": { + "name": "row_count", + "type": "int64", + "description": "The number of rows within the result chunk.", + "required": false + }, + "row_offset": { + "name": "row_offset", + "type": "int64", + "description": "The starting row offset within the result set.", + "required": false + } + } + }, + "sql.ResultManifest": { + "name": "ResultManifest", + "package": "sql", + "description": "The result manifest provides schema and metadata for the result set.", + "fields": { + "chunks": { + "name": "chunks", + "type": "[]BaseChunkInfo", + "description": "Array of result set chunk metadata.", + "required": false + }, + "format": { + "name": "format", + "type": "Format", + "description": "", + "required": false + }, + "schema": { + "name": "schema", + "type": "*ResultSchema", + "description": "", + "required": false + }, + "total_byte_count": { + "name": "total_byte_count", + "type": "int64", + "description": "The total number of bytes in the result set. This field is not available\nwhen using `INLINE` disposition.", + "required": false + }, + "total_chunk_count": { + "name": "total_chunk_count", + "type": "int", + "description": "The total number of chunks that the result set has been divided into.", + "required": false + }, + "total_row_count": { + "name": "total_row_count", + "type": "int64", + "description": "The total number of rows in the result set.", + "required": false + }, + "truncated": { + "name": "truncated", + "type": "bool", + "description": "Indicates whether the result is truncated due to `row_limit` or\n`byte_limit`.", + "required": false + } + } + }, + "sql.ResultSchema": { + "name": "ResultSchema", + "package": "sql", + "description": "The schema is an ordered list of column descriptions.", + "fields": { + "column_count": { + "name": "column_count", + "type": "int", + "description": "", + "required": false + }, + "columns": { + "name": "columns", + "type": "[]ColumnInfo", + "description": "", + "required": false + } + } + }, + "sql.ServiceError": { + "name": "ServiceError", + "package": "sql", + "description": "", + "fields": { + "error_code": { + "name": "error_code", + "type": "ServiceErrorCode", + "description": "", + "required": false + }, + "message": { + "name": "message", + "type": "string", + "description": "A brief summary of the error condition.", + "required": false + } + } + }, + "sql.SetRequest": { + "name": "SetRequest", + "package": "sql", + "description": "Set object ACL", + "fields": { + "access_control_list": { + "name": "access_control_list", + "type": "[]AccessControl", + "description": "", + "required": false + } + } + }, + "sql.SetResponse": { + "name": "SetResponse", + "package": "sql", + "description": "", + "fields": { + "access_control_list": { + "name": "access_control_list", + "type": "[]AccessControl", + "description": "", + "required": false + }, + "object_id": { + "name": "object_id", + "type": "string", + "description": "An object's type and UUID, separated by a forward slash (/) character.", + "required": false + }, + "object_type": { + "name": "object_type", + "type": "ObjectType", + "description": "A singular noun object type.", + "required": false + } + } + }, + "sql.SetWorkspaceWarehouseConfigRequest": { + "name": "SetWorkspaceWarehouseConfigRequest", + "package": "sql", + "description": "Sets the workspace level warehouse configuration that is shared by all SQL\nwarehouses in this workspace.\n\nThis is idempotent.", + "fields": { + "channel": { + 
"name": "channel", + "type": "*Channel", + "description": "Optional: Channel selection details", + "required": false + }, + "config_param": { + "name": "config_param", + "type": "*RepeatedEndpointConfPairs", + "description": "Deprecated: Use sql_configuration_parameters", + "required": false + }, + "data_access_config": { + "name": "data_access_config", + "type": "[]EndpointConfPair", + "description": "Spark confs for external hive metastore configuration JSON serialized\nsize must be less than \u003c= 512K", + "required": false + }, + "enable_serverless_compute": { + "name": "enable_serverless_compute", + "type": "bool", + "description": "Enable Serverless compute for SQL warehouses", + "required": false + }, + "enabled_warehouse_types": { + "name": "enabled_warehouse_types", + "type": "[]WarehouseTypePair", + "description": "List of Warehouse Types allowed in this workspace (limits allowed value\nof the type field in CreateWarehouse and EditWarehouse). Note: Some types\ncannot be disabled, they don't need to be specified in\nSetWorkspaceWarehouseConfig. Note: Disabling a type may cause existing\nwarehouses to be converted to another type. Used by frontend to save\nspecific type availability in the warehouse create and edit form UI.", + "required": false + }, + "global_param": { + "name": "global_param", + "type": "*RepeatedEndpointConfPairs", + "description": "Deprecated: Use sql_configuration_parameters", + "required": false + }, + "google_service_account": { + "name": "google_service_account", + "type": "string", + "description": "GCP only: Google Service Account used to pass to cluster to access Google\nCloud Storage", + "required": false + }, + "instance_profile_arn": { + "name": "instance_profile_arn", + "type": "string", + "description": "AWS Only: The instance profile used to pass an IAM role to the SQL\nwarehouses. 
This configuration is also applied to the workspace's\nserverless compute for notebooks and jobs.", + "required": false + }, + "security_policy": { + "name": "security_policy", + "type": "SetWorkspaceWarehouseConfigRequestSecurityPolicy", + "description": "Security policy for warehouses", + "required": false + }, + "sql_configuration_parameters": { + "name": "sql_configuration_parameters", + "type": "*RepeatedEndpointConfPairs", + "description": "SQL configuration parameters", + "required": false + } + } + }, + "sql.SpotInstancePolicy": { + "name": "SpotInstancePolicy", + "package": "sql", + "description": "EndpointSpotInstancePolicy configures whether the endpoint should use spot\ninstances.\n\nThe breakdown of how the EndpointSpotInstancePolicy converts to per cloud\nconfigurations is:\n\n+-------+--------------------------------------+--------------------------------+\n| Cloud | COST_OPTIMIZED | RELIABILITY_OPTIMIZED |\n+-------+--------------------------------------+--------------------------------+\n| AWS | On Demand Driver with Spot Executors | On Demand Driver and\nExecutors | | AZURE | On Demand Driver and Executors | On Demand Driver\nand Executors |\n+-------+--------------------------------------+--------------------------------+\n\nWhile including \"spot\" in the enum name may limit the the future\nextensibility of this field because it limits this enum to denoting \"spot or\nnot\", this is the field that PM recommends after discussion with customers\nper SC-48783.", + "fields": {} + }, + "sql.StatementParameterListItem": { + "name": "StatementParameterListItem", + "package": "sql", + "description": "", + "fields": { + "name": { + "name": "name", + "type": "string", + "description": "The name of a parameter marker to be substituted in the statement.", + "required": false + }, + "type": { + "name": "type", + "type": "string", + "description": "The data type, given as a string. For example: `INT`, `STRING`,\n`DECIMAL(10,2)`. If no type is given the type is assumed to be `STRING`.\nComplex types, such as `ARRAY`, `MAP`, and `STRUCT` are not supported.\nFor valid types, refer to the section [Data types] of the SQL language\nreference.\n\n[Data types]: https://docs.databricks.com/sql/language-manual/functions/cast.html", + "required": false + }, + "value": { + "name": "value", + "type": "string", + "description": "The value to substitute, represented as a string. 
If omitted, the value\nis interpreted as NULL.", + "required": false + } + } + }, + "sql.StatementResponse": { + "name": "StatementResponse", + "package": "sql", + "description": "", + "fields": { + "manifest": { + "name": "manifest", + "type": "*ResultManifest", + "description": "", + "required": false + }, + "result": { + "name": "result", + "type": "*ResultData", + "description": "", + "required": false + }, + "statement_id": { + "name": "statement_id", + "type": "string", + "description": "The statement ID is returned upon successfully submitting a SQL\nstatement, and is a required reference for all subsequent calls.", + "required": false + }, + "status": { + "name": "status", + "type": "*StatementStatus", + "description": "", + "required": false + } + } + }, + "sql.StatementStatus": { + "name": "StatementStatus", + "package": "sql", + "description": "The status response includes execution state and if relevant, error\ninformation.", + "fields": { + "error": { + "name": "error", + "type": "*ServiceError", + "description": "", + "required": false + }, + "state": { + "name": "state", + "type": "StatementState", + "description": "Statement execution state: - `PENDING`: waiting for warehouse -\n`RUNNING`: running - `SUCCEEDED`: execution was successful, result data\navailable for fetch - `FAILED`: execution failed; reason for failure\ndescribed in accompanying error message - `CANCELED`: user canceled; can\ncome from explicit cancel call, or timeout with `on_wait_timeout=CANCEL`\n- `CLOSED`: execution successful, and statement closed; result no longer\navailable for fetch", + "required": false + } + } + }, + "sql.Success": { + "name": "Success", + "package": "sql", + "description": "", + "fields": { + "message": { + "name": "message", + "type": "SuccessMessage", + "description": "", + "required": false + } + } + }, + "sql.TaskTimeOverRange": { + "name": "TaskTimeOverRange", + "package": "sql", + "description": "", + "fields": { + "entries": { + "name": "entries", + "type": "[]TaskTimeOverRangeEntry", + "description": "", + "required": false + }, + "interval": { + "name": "interval", + "type": "int64", + "description": "interval length for all entries (difference in start time and end time of\nan entry range) the same for all entries start time of first interval is\nquery_start_time_ms", + "required": false + } + } + }, + "sql.TaskTimeOverRangeEntry": { + "name": "TaskTimeOverRangeEntry", + "package": "sql", + "description": "", + "fields": { + "task_completed_time_ms": { + "name": "task_completed_time_ms", + "type": "int64", + "description": "total task completion time in this time range, aggregated over all stages\nand jobs in the query", + "required": false + } + } + }, + "sql.TerminationReason": { + "name": "TerminationReason", + "package": "sql", + "description": "", + "fields": { + "code": { + "name": "code", + "type": "TerminationReasonCode", + "description": "status code indicating why the cluster was terminated", + "required": false + }, + "parameters": { + "name": "parameters", + "type": "map[string]string", + "description": "list of parameters that provide additional information about why the\ncluster was terminated", + "required": false + }, + "type": { + "name": "type", + "type": "TerminationReasonType", + "description": "type of the termination", + "required": false + } + } + }, + "sql.TextValue": { + "name": "TextValue", + "package": "sql", + "description": "", + "fields": { + "value": { + "name": "value", + "type": "string", + "description": "", + "required": false + } + } + 
}, + "sql.TimeRange": { + "name": "TimeRange", + "package": "sql", + "description": "", + "fields": { + "end_time_ms": { + "name": "end_time_ms", + "type": "int64", + "description": "The end time in milliseconds.", + "required": false + }, + "start_time_ms": { + "name": "start_time_ms", + "type": "int64", + "description": "The start time in milliseconds.", + "required": false + } + } + }, + "sql.TransferOwnershipObjectId": { + "name": "TransferOwnershipObjectId", + "package": "sql", + "description": "", + "fields": { + "new_owner": { + "name": "new_owner", + "type": "string", + "description": "Email address for the new owner, who must exist in the workspace.", + "required": false + } + } + }, + "sql.TransferOwnershipRequest": { + "name": "TransferOwnershipRequest", + "package": "sql", + "description": "", + "fields": { + "new_owner": { + "name": "new_owner", + "type": "string", + "description": "Email address for the new owner, who must exist in the workspace.", + "required": false + } + } + }, + "sql.UpdateAlertRequest": { + "name": "UpdateAlertRequest", + "package": "sql", + "description": "", + "fields": { + "alert": { + "name": "alert", + "type": "*UpdateAlertRequestAlert", + "description": "", + "required": false + }, + "auto_resolve_display_name": { + "name": "auto_resolve_display_name", + "type": "bool", + "description": "If true, automatically resolve alert display name conflicts. Otherwise,\nfail the request if the alert's display name conflicts with an existing\nalert's display name.", + "required": false + }, + "update_mask": { + "name": "update_mask", + "type": "string", + "description": "The field mask must be a single string, with multiple fields separated by\ncommas (no spaces). The field path is relative to the resource object,\nusing a dot (`.`) to navigate sub-fields (e.g., `author.given_name`).\nSpecification of elements in sequence or map fields is not allowed, as\nonly the entire collection field can be specified. Field names must\nexactly match the resource field names.\n\nA field mask of `*` indicates full replacement. It’s recommended to\nalways explicitly list the field...", + "required": false + } + } + }, + "sql.UpdateAlertRequestAlert": { + "name": "UpdateAlertRequestAlert", + "package": "sql", + "description": "", + "fields": { + "condition": { + "name": "condition", + "type": "*AlertCondition", + "description": "Trigger conditions of the alert.", + "required": false + }, + "custom_body": { + "name": "custom_body", + "type": "string", + "description": "Custom body of alert notification, if it exists. See [here] for custom\ntemplating instructions.\n\n[here]: https://docs.databricks.com/sql/user/alerts/index.html", + "required": false + }, + "custom_subject": { + "name": "custom_subject", + "type": "string", + "description": "Custom subject of alert notification, if it exists. This can include\nemail subject entries and Slack notification headers, for example. See\n[here] for custom templating instructions.\n\n[here]: https://docs.databricks.com/sql/user/alerts/index.html", + "required": false + }, + "display_name": { + "name": "display_name", + "type": "string", + "description": "The display name of the alert.", + "required": false + }, + "notify_on_ok": { + "name": "notify_on_ok", + "type": "bool", + "description": "Whether to notify alert subscribers when alert returns back to normal.", + "required": false + }, + "owner_user_name": { + "name": "owner_user_name", + "type": "string", + "description": "The owner's username. 
This field is set to \"Unavailable\" if the user has\nbeen deleted.", + "required": false + }, + "query_id": { + "name": "query_id", + "type": "string", + "description": "UUID of the query attached to the alert.", + "required": false + }, + "seconds_to_retrigger": { + "name": "seconds_to_retrigger", + "type": "int", + "description": "Number of seconds an alert must wait after being triggered to rearm\nitself. After rearming, it can be triggered again. If 0 or not specified,\nthe alert will not be triggered again.", + "required": false + } + } + }, + "sql.UpdateAlertV2Request": { + "name": "UpdateAlertV2Request", + "package": "sql", + "description": "", + "fields": { + "alert": { + "name": "alert", + "type": "AlertV2", + "description": "", + "required": false + } + } + }, + "sql.UpdateQueryRequest": { + "name": "UpdateQueryRequest", + "package": "sql", + "description": "", + "fields": { + "auto_resolve_display_name": { + "name": "auto_resolve_display_name", + "type": "bool", + "description": "If true, automatically resolve alert display name conflicts. Otherwise,\nfail the request if the alert's display name conflicts with an existing\nalert's display name.", + "required": false + }, + "query": { + "name": "query", + "type": "*UpdateQueryRequestQuery", + "description": "", + "required": false + }, + "update_mask": { + "name": "update_mask", + "type": "string", + "description": "The field mask must be a single string, with multiple fields separated by\ncommas (no spaces). The field path is relative to the resource object,\nusing a dot (`.`) to navigate sub-fields (e.g., `author.given_name`).\nSpecification of elements in sequence or map fields is not allowed, as\nonly the entire collection field can be specified. Field names must\nexactly match the resource field names.\n\nA field mask of `*` indicates full replacement. 
It’s recommended to\nalways explicitly list the field...", + "required": false + } + } + }, + "sql.UpdateQueryRequestQuery": { + "name": "UpdateQueryRequestQuery", + "package": "sql", + "description": "", + "fields": { + "apply_auto_limit": { + "name": "apply_auto_limit", + "type": "bool", + "description": "Whether to apply a 1000 row limit to the query result.", + "required": false + }, + "catalog": { + "name": "catalog", + "type": "string", + "description": "Name of the catalog where this query will be executed.", + "required": false + }, + "description": { + "name": "description", + "type": "string", + "description": "General description that conveys additional information about this query\nsuch as usage notes.", + "required": false + }, + "display_name": { + "name": "display_name", + "type": "string", + "description": "Display name of the query that appears in list views, widget headings,\nand on the query page.", + "required": false + }, + "owner_user_name": { + "name": "owner_user_name", + "type": "string", + "description": "Username of the user that owns the query.", + "required": false + }, + "parameters": { + "name": "parameters", + "type": "[]QueryParameter", + "description": "List of query parameter definitions.", + "required": false + }, + "query_text": { + "name": "query_text", + "type": "string", + "description": "Text of the query to be run.", + "required": false + }, + "run_as_mode": { + "name": "run_as_mode", + "type": "RunAsMode", + "description": "Sets the \"Run as\" role for the object.", + "required": false + }, + "schema": { + "name": "schema", + "type": "string", + "description": "Name of the schema where this query will be executed.", + "required": false + }, + "tags": { + "name": "tags", + "type": "[]string", + "description": "", + "required": false + }, + "warehouse_id": { + "name": "warehouse_id", + "type": "string", + "description": "ID of the SQL warehouse attached to the query.", + "required": false + } + } + }, + "sql.UpdateVisualizationRequest": { + "name": "UpdateVisualizationRequest", + "package": "sql", + "description": "", + "fields": { + "update_mask": { + "name": "update_mask", + "type": "string", + "description": "The field mask must be a single string, with multiple fields separated by\ncommas (no spaces). The field path is relative to the resource object,\nusing a dot (`.`) to navigate sub-fields (e.g., `author.given_name`).\nSpecification of elements in sequence or map fields is not allowed, as\nonly the entire collection field can be specified. Field names must\nexactly match the resource field names.\n\nA field mask of `*` indicates full replacement. It’s recommended to\nalways explicitly list the field...", + "required": false + }, + "visualization": { + "name": "visualization", + "type": "*UpdateVisualizationRequestVisualization", + "description": "", + "required": false + } + } + }, + "sql.UpdateVisualizationRequestVisualization": { + "name": "UpdateVisualizationRequestVisualization", + "package": "sql", + "description": "", + "fields": { + "display_name": { + "name": "display_name", + "type": "string", + "description": "The display name of the visualization.", + "required": false + }, + "serialized_options": { + "name": "serialized_options", + "type": "string", + "description": "The visualization options varies widely from one visualization type to\nthe next and is unsupported. 
Databricks does not recommend modifying\nvisualization options directly.", + "required": false + }, + "serialized_query_plan": { + "name": "serialized_query_plan", + "type": "string", + "description": "The visualization query plan varies widely from one visualization type to\nthe next and is unsupported. Databricks does not recommend modifying the\nvisualization query plan directly.", + "required": false + }, + "type": { + "name": "type", + "type": "string", + "description": "The type of visualization: counter, table, funnel, and so on.", + "required": false + } + } + }, + "sql.UpdateWidgetRequest": { + "name": "UpdateWidgetRequest", + "package": "sql", + "description": "", + "fields": { + "dashboard_id": { + "name": "dashboard_id", + "type": "string", + "description": "Dashboard ID returned by :method:dashboards/create.", + "required": false + }, + "options": { + "name": "options", + "type": "WidgetOptions", + "description": "", + "required": false + }, + "text": { + "name": "text", + "type": "string", + "description": "If this is a textbox widget, the application displays this text. This\nfield is ignored if the widget contains a visualization in the\n`visualization` field.", + "required": false + }, + "visualization_id": { + "name": "visualization_id", + "type": "string", + "description": "Query Visualization ID returned by :method:queryvisualizations/create.", + "required": false + }, + "width": { + "name": "width", + "type": "int", + "description": "Width of a widget", + "required": false + } + } + }, + "sql.User": { + "name": "User", + "package": "sql", + "description": "", + "fields": { + "email": { + "name": "email", + "type": "string", + "description": "", + "required": false + }, + "id": { + "name": "id", + "type": "int", + "description": "", + "required": false + }, + "name": { + "name": "name", + "type": "string", + "description": "", + "required": false + } + } + }, + "sql.Visualization": { + "name": "Visualization", + "package": "sql", + "description": "", + "fields": { + "create_time": { + "name": "create_time", + "type": "string", + "description": "The timestamp indicating when the visualization was created.", + "required": false + }, + "display_name": { + "name": "display_name", + "type": "string", + "description": "The display name of the visualization.", + "required": false + }, + "id": { + "name": "id", + "type": "string", + "description": "UUID identifying the visualization.", + "required": false + }, + "query_id": { + "name": "query_id", + "type": "string", + "description": "UUID of the query that the visualization is attached to.", + "required": false + }, + "serialized_options": { + "name": "serialized_options", + "type": "string", + "description": "The visualization options varies widely from one visualization type to\nthe next and is unsupported. Databricks does not recommend modifying\nvisualization options directly.", + "required": false + }, + "serialized_query_plan": { + "name": "serialized_query_plan", + "type": "string", + "description": "The visualization query plan varies widely from one visualization type to\nthe next and is unsupported. 
Databricks does not recommend modifying the\nvisualization query plan directly.", + "required": false + }, + "type": { + "name": "type", + "type": "string", + "description": "The type of visualization: counter, table, funnel, and so on.", + "required": false + }, + "update_time": { + "name": "update_time", + "type": "string", + "description": "The timestamp indicating when the visualization was updated.", + "required": false + } + } + }, + "sql.WaitGetWarehouseRunning": { + "name": "WaitGetWarehouseRunning", + "package": "sql", + "description": "WaitGetWarehouseRunning is a wrapper that calls [WarehousesAPI.WaitGetWarehouseRunning] and waits to reach RUNNING state.", + "fields": { + "Poll": { + "name": "Poll", + "type": "func(time.Duration, func(*GetWarehouseResponse)) (*GetWarehouseResponse, error)", + "description": "", + "required": false + }, + "Response": { + "name": "Response", + "type": "*R", + "description": "", + "required": false + }, + "callback": { + "name": "callback", + "type": "func(*GetWarehouseResponse)", + "description": "", + "required": false + }, + "id": { + "name": "id", + "type": "string", + "description": "", + "required": false + }, + "timeout": { + "name": "timeout", + "type": "time.Duration", + "description": "", + "required": false + } + } + }, + "sql.WaitGetWarehouseStopped": { + "name": "WaitGetWarehouseStopped", + "package": "sql", + "description": "WaitGetWarehouseStopped is a wrapper that calls [WarehousesAPI.WaitGetWarehouseStopped] and waits to reach STOPPED state.", + "fields": { + "Poll": { + "name": "Poll", + "type": "func(time.Duration, func(*GetWarehouseResponse)) (*GetWarehouseResponse, error)", + "description": "", + "required": false + }, + "Response": { + "name": "Response", + "type": "*R", + "description": "", + "required": false + }, + "callback": { + "name": "callback", + "type": "func(*GetWarehouseResponse)", + "description": "", + "required": false + }, + "id": { + "name": "id", + "type": "string", + "description": "", + "required": false + }, + "timeout": { + "name": "timeout", + "type": "time.Duration", + "description": "", + "required": false + } + } + }, + "sql.WarehouseAccessControlRequest": { + "name": "WarehouseAccessControlRequest", + "package": "sql", + "description": "", + "fields": { + "group_name": { + "name": "group_name", + "type": "string", + "description": "name of the group", + "required": false + }, + "permission_level": { + "name": "permission_level", + "type": "WarehousePermissionLevel", + "description": "", + "required": false + }, + "service_principal_name": { + "name": "service_principal_name", + "type": "string", + "description": "application ID of a service principal", + "required": false + }, + "user_name": { + "name": "user_name", + "type": "string", + "description": "name of the user", + "required": false + } + } + }, + "sql.WarehouseAccessControlResponse": { + "name": "WarehouseAccessControlResponse", + "package": "sql", + "description": "", + "fields": { + "all_permissions": { + "name": "all_permissions", + "type": "[]WarehousePermission", + "description": "All permissions.", + "required": false + }, + "display_name": { + "name": "display_name", + "type": "string", + "description": "Display name of the user or service principal.", + "required": false + }, + "group_name": { + "name": "group_name", + "type": "string", + "description": "name of the group", + "required": false + }, + "service_principal_name": { + "name": "service_principal_name", + "type": "string", + "description": "Name of the service principal.", 
+ "required": false + }, + "user_name": { + "name": "user_name", + "type": "string", + "description": "name of the user", + "required": false + } + } + }, + "sql.WarehousePermission": { + "name": "WarehousePermission", + "package": "sql", + "description": "", + "fields": { + "inherited": { + "name": "inherited", + "type": "bool", + "description": "", + "required": false + }, + "inherited_from_object": { + "name": "inherited_from_object", + "type": "[]string", + "description": "", + "required": false + }, + "permission_level": { + "name": "permission_level", + "type": "WarehousePermissionLevel", + "description": "", + "required": false + } + } + }, + "sql.WarehousePermissions": { + "name": "WarehousePermissions", + "package": "sql", + "description": "", + "fields": { + "access_control_list": { + "name": "access_control_list", + "type": "[]WarehouseAccessControlResponse", + "description": "", + "required": false + }, + "object_id": { + "name": "object_id", + "type": "string", + "description": "", + "required": false + }, + "object_type": { + "name": "object_type", + "type": "string", + "description": "", + "required": false + } + } + }, + "sql.WarehousePermissionsDescription": { + "name": "WarehousePermissionsDescription", + "package": "sql", + "description": "", + "fields": { + "description": { + "name": "description", + "type": "string", + "description": "", + "required": false + }, + "permission_level": { + "name": "permission_level", + "type": "WarehousePermissionLevel", + "description": "", + "required": false + } + } + }, + "sql.WarehousePermissionsRequest": { + "name": "WarehousePermissionsRequest", + "package": "sql", + "description": "", + "fields": { + "access_control_list": { + "name": "access_control_list", + "type": "[]WarehouseAccessControlRequest", + "description": "", + "required": false + } + } + }, + "sql.WarehouseTypePair": { + "name": "WarehouseTypePair", + "package": "sql", + "description": "* Configuration values to enable or disable the access to specific warehouse\ntypes in the workspace.", + "fields": { + "enabled": { + "name": "enabled", + "type": "bool", + "description": "If set to false the specific warehouse type will not be be allowed as a\nvalue for warehouse_type in CreateWarehouse and EditWarehouse", + "required": false + }, + "warehouse_type": { + "name": "warehouse_type", + "type": "WarehouseTypePairWarehouseType", + "description": "", + "required": false + } + } + }, + "sql.Widget": { + "name": "Widget", + "package": "sql", + "description": "", + "fields": { + "id": { + "name": "id", + "type": "string", + "description": "The unique ID for this widget.", + "required": false + }, + "options": { + "name": "options", + "type": "*WidgetOptions", + "description": "", + "required": false + }, + "visualization": { + "name": "visualization", + "type": "*LegacyVisualization", + "description": "The visualization description API changes frequently and is unsupported.\nYou can duplicate a visualization by copying description objects received\n_from the API_ and then using them to create a new one with a POST\nrequest to the same endpoint. 
Databricks does not recommend constructing\nad-hoc visualizations entirely in JSON.", + "required": false + }, + "width": { + "name": "width", + "type": "int", + "description": "Unused field.", + "required": false + } + } + }, + "sql.WidgetOptions": { + "name": "WidgetOptions", + "package": "sql", + "description": "", + "fields": { + "created_at": { + "name": "created_at", + "type": "string", + "description": "Timestamp when this object was created", + "required": false + }, + "description": { + "name": "description", + "type": "string", + "description": "Custom description of the widget", + "required": false + }, + "isHidden": { + "name": "isHidden", + "type": "bool", + "description": "Whether this widget is hidden on the dashboard.", + "required": false + }, + "parameterMappings": { + "name": "parameterMappings", + "type": "any", + "description": "How parameters used by the visualization in this widget relate to other\nwidgets on the dashboard. Databricks does not recommend modifying this\ndefinition in JSON.", + "required": false + }, + "position": { + "name": "position", + "type": "*WidgetPosition", + "description": "Coordinates of this widget on a dashboard. This portion of the API\nchanges frequently and is unsupported.", + "required": false + }, + "title": { + "name": "title", + "type": "string", + "description": "Custom title of the widget", + "required": false + }, + "updated_at": { + "name": "updated_at", + "type": "string", + "description": "Timestamp of the last time this object was updated.", + "required": false + } + } + }, + "sql.WidgetPosition": { + "name": "WidgetPosition", + "package": "sql", + "description": "Coordinates of this widget on a dashboard. This portion of the API changes\nfrequently and is unsupported.", + "fields": { + "autoHeight": { + "name": "autoHeight", + "type": "bool", + "description": "reserved for internal use", + "required": false + }, + "col": { + "name": "col", + "type": "int", + "description": "column in the dashboard grid. Values start with 0", + "required": false + }, + "row": { + "name": "row", + "type": "int", + "description": "row in the dashboard grid. 
Values start with 0", + "required": false + }, + "sizeX": { + "name": "sizeX", + "type": "int", + "description": "width of the widget measured in dashboard grid cells", + "required": false + }, + "sizeY": { + "name": "sizeY", + "type": "int", + "description": "height of the widget measured in dashboard grid cells", + "required": false + } + } + }, + "sql.alertsImpl": { + "name": "alertsImpl", + "package": "sql", + "description": "unexported type that holds implementations of just Alerts API methods", + "fields": { + "client": { + "name": "client", + "type": "*client.DatabricksClient", + "description": "", + "required": false + } + } + }, + "sql.alertsLegacyImpl": { + "name": "alertsLegacyImpl", + "package": "sql", + "description": "unexported type that holds implementations of just AlertsLegacy API methods", + "fields": { + "client": { + "name": "client", + "type": "*client.DatabricksClient", + "description": "", + "required": false + } + } + }, + "sql.alertsV2Impl": { + "name": "alertsV2Impl", + "package": "sql", + "description": "unexported type that holds implementations of just AlertsV2 API methods", + "fields": { + "client": { + "name": "client", + "type": "*client.DatabricksClient", + "description": "", + "required": false + } + } + }, + "sql.dashboardWidgetsImpl": { + "name": "dashboardWidgetsImpl", + "package": "sql", + "description": "unexported type that holds implementations of just DashboardWidgets API methods", + "fields": { + "client": { + "name": "client", + "type": "*client.DatabricksClient", + "description": "", + "required": false + } + } + }, + "sql.dashboardsImpl": { + "name": "dashboardsImpl", + "package": "sql", + "description": "unexported type that holds implementations of just Dashboards API methods", + "fields": { + "client": { + "name": "client", + "type": "*client.DatabricksClient", + "description": "", + "required": false + } + } + }, + "sql.dataSourcesImpl": { + "name": "dataSourcesImpl", + "package": "sql", + "description": "unexported type that holds implementations of just DataSources API methods", + "fields": { + "client": { + "name": "client", + "type": "*client.DatabricksClient", + "description": "", + "required": false + } + } + }, + "sql.dbsqlPermissionsImpl": { + "name": "dbsqlPermissionsImpl", + "package": "sql", + "description": "unexported type that holds implementations of just DbsqlPermissions API methods", + "fields": { + "client": { + "name": "client", + "type": "*client.DatabricksClient", + "description": "", + "required": false + } + } + }, + "sql.queriesImpl": { + "name": "queriesImpl", + "package": "sql", + "description": "unexported type that holds implementations of just Queries API methods", + "fields": { + "client": { + "name": "client", + "type": "*client.DatabricksClient", + "description": "", + "required": false + } + } + }, + "sql.queriesLegacyImpl": { + "name": "queriesLegacyImpl", + "package": "sql", + "description": "unexported type that holds implementations of just QueriesLegacy API methods", + "fields": { + "client": { + "name": "client", + "type": "*client.DatabricksClient", + "description": "", + "required": false + } + } + }, + "sql.queryHistoryImpl": { + "name": "queryHistoryImpl", + "package": "sql", + "description": "unexported type that holds implementations of just QueryHistory API methods", + "fields": { + "client": { + "name": "client", + "type": "*client.DatabricksClient", + "description": "", + "required": false + } + } + }, + "sql.queryVisualizationsImpl": { + "name": "queryVisualizationsImpl", + "package": "sql", 
+ "description": "unexported type that holds implementations of just QueryVisualizations API methods", + "fields": { + "client": { + "name": "client", + "type": "*client.DatabricksClient", + "description": "", + "required": false + } + } + }, + "sql.queryVisualizationsLegacyImpl": { + "name": "queryVisualizationsLegacyImpl", + "package": "sql", + "description": "unexported type that holds implementations of just QueryVisualizationsLegacy API methods", + "fields": { + "client": { + "name": "client", + "type": "*client.DatabricksClient", + "description": "", + "required": false + } + } + }, + "sql.redashConfigImpl": { + "name": "redashConfigImpl", + "package": "sql", + "description": "unexported type that holds implementations of just RedashConfig API methods", + "fields": { + "client": { + "name": "client", + "type": "*client.DatabricksClient", + "description": "", + "required": false + } + } + }, + "sql.statementExecutionImpl": { + "name": "statementExecutionImpl", + "package": "sql", + "description": "unexported type that holds implementations of just StatementExecution API methods", + "fields": { + "client": { + "name": "client", + "type": "*client.DatabricksClient", + "description": "", + "required": false + } + } + }, + "sql.warehousesImpl": { + "name": "warehousesImpl", + "package": "sql", + "description": "unexported type that holds implementations of just Warehouses API methods", + "fields": { + "client": { + "name": "client", + "type": "*client.DatabricksClient", + "description": "", + "required": false + } + } + }, + "workspace.AclItem": { + "name": "AclItem", + "package": "workspace", + "description": "An item representing an ACL rule applied to the given principal (user or\ngroup) on the associated scope point.", + "fields": { + "permission": { + "name": "permission", + "type": "AclPermission", + "description": "The permission level applied to the principal.", + "required": false + }, + "principal": { + "name": "principal", + "type": "string", + "description": "The principal in which the permission is applied.", + "required": false + } + } + }, + "workspace.AzureKeyVaultSecretScopeMetadata": { + "name": "AzureKeyVaultSecretScopeMetadata", + "package": "workspace", + "description": "The metadata of the Azure KeyVault for a secret scope of type `AZURE_KEYVAULT`", + "fields": { + "dns_name": { + "name": "dns_name", + "type": "string", + "description": "The DNS of the KeyVault", + "required": false + }, + "resource_id": { + "name": "resource_id", + "type": "string", + "description": "The resource id of the azure KeyVault that user wants to associate the\nscope with.", + "required": false + } + } + }, + "workspace.CreateCredentialsRequest": { + "name": "CreateCredentialsRequest", + "package": "workspace", + "description": "", + "fields": { + "git_email": { + "name": "git_email", + "type": "string", + "description": "The authenticating email associated with your Git provider user account.\nUsed for authentication with the remote repository and also sets the\nauthor \u0026 committer identity for commits. Required for most Git providers\nexcept AWS CodeCommit. Learn more at\nhttps://docs.databricks.com/aws/en/repos/get-access-tokens-from-git-provider", + "required": false + }, + "git_provider": { + "name": "git_provider", + "type": "string", + "description": "Git provider. This field is case-insensitive. 
The available Git providers\nare `gitHub`, `bitbucketCloud`, `gitLab`, `azureDevOpsServices`,\n`gitHubEnterprise`, `bitbucketServer`, `gitLabEnterpriseEdition` and\n`awsCodeCommit`.", + "required": false + }, + "git_username": { + "name": "git_username", + "type": "string", + "description": "The username provided with your Git provider account and associated with\nthe credential. For most Git providers it is only used to set the Git\ncommitter \u0026 author names for commits, however it may be required for\nauthentication depending on your Git provider / token requirements.\nRequired for AWS CodeCommit.", + "required": false + }, + "is_default_for_provider": { + "name": "is_default_for_provider", + "type": "bool", + "description": "if the credential is the default for the given provider", + "required": false + }, + "name": { + "name": "name", + "type": "string", + "description": "the name of the git credential, used for identification and ease of\nlookup", + "required": false + }, + "personal_access_token": { + "name": "personal_access_token", + "type": "string", + "description": "The personal access token used to authenticate to the corresponding Git\nprovider. For certain providers, support may exist for other types of\nscoped access tokens. [Learn more].\n\n[Learn more]: https://docs.databricks.com/repos/get-access-tokens-from-git-provider.html", + "required": false + } + } + }, + "workspace.CreateCredentialsResponse": { + "name": "CreateCredentialsResponse", + "package": "workspace", + "description": "", + "fields": { + "credential_id": { + "name": "credential_id", + "type": "int64", + "description": "ID of the credential object in the workspace.", + "required": false + }, + "git_email": { + "name": "git_email", + "type": "string", + "description": "The authenticating email associated with your Git provider user account.\nUsed for authentication with the remote repository and also sets the\nauthor \u0026 committer identity for commits. Required for most Git providers\nexcept AWS CodeCommit. Learn more at\nhttps://docs.databricks.com/aws/en/repos/get-access-tokens-from-git-provider", + "required": false + }, + "git_provider": { + "name": "git_provider", + "type": "string", + "description": "The Git provider associated with the credential.", + "required": false + }, + "git_username": { + "name": "git_username", + "type": "string", + "description": "The username provided with your Git provider account and associated with\nthe credential. For most Git providers it is only used to set the Git\ncommitter \u0026 author names for commits, however it may be required for\nauthentication depending on your Git provider / token requirements.\nRequired for AWS CodeCommit.", + "required": false + }, + "is_default_for_provider": { + "name": "is_default_for_provider", + "type": "bool", + "description": "if the credential is the default for the given provider", + "required": false + }, + "name": { + "name": "name", + "type": "string", + "description": "the name of the git credential, used for identification and ease of\nlookup", + "required": false + } + } + }, + "workspace.CreateRepoRequest": { + "name": "CreateRepoRequest", + "package": "workspace", + "description": "", + "fields": { + "path": { + "name": "path", + "type": "string", + "description": "Desired path for the repo in the workspace. Almost any path in the\nworkspace can be chosen. 
If repo is created in `/Repos`, path must be in\nthe format `/Repos/{folder}/{repo-name}`.", + "required": false + }, + "provider": { + "name": "provider", + "type": "string", + "description": "Git provider. This field is case-insensitive. The available Git providers\nare `gitHub`, `bitbucketCloud`, `gitLab`, `azureDevOpsServices`,\n`gitHubEnterprise`, `bitbucketServer`, `gitLabEnterpriseEdition` and\n`awsCodeCommit`.", + "required": false + }, + "sparse_checkout": { + "name": "sparse_checkout", + "type": "*SparseCheckout", + "description": "If specified, the repo will be created with sparse checkout enabled. You\ncannot enable/disable sparse checkout after the repo is created.", + "required": false + }, + "url": { + "name": "url", + "type": "string", + "description": "URL of the Git repository to be linked.", + "required": false + } + } + }, + "workspace.CreateRepoResponse": { + "name": "CreateRepoResponse", + "package": "workspace", + "description": "", + "fields": { + "branch": { + "name": "branch", + "type": "string", + "description": "Branch that the Git folder (repo) is checked out to.", + "required": false + }, + "head_commit_id": { + "name": "head_commit_id", + "type": "string", + "description": "SHA-1 hash representing the commit ID of the current HEAD of the Git\nfolder (repo).", + "required": false + }, + "id": { + "name": "id", + "type": "int64", + "description": "ID of the Git folder (repo) object in the workspace.", + "required": false + }, + "path": { + "name": "path", + "type": "string", + "description": "Path of the Git folder (repo) in the workspace.", + "required": false + }, + "provider": { + "name": "provider", + "type": "string", + "description": "Git provider of the linked Git repository.", + "required": false + }, + "sparse_checkout": { + "name": "sparse_checkout", + "type": "*SparseCheckout", + "description": "Sparse checkout settings for the Git folder (repo).", + "required": false + }, + "url": { + "name": "url", + "type": "string", + "description": "URL of the linked Git repository.", + "required": false + } + } + }, + "workspace.CreateScope": { + "name": "CreateScope", + "package": "workspace", + "description": "", + "fields": { + "backend_azure_keyvault": { + "name": "backend_azure_keyvault", + "type": "*AzureKeyVaultSecretScopeMetadata", + "description": "The metadata for the secret scope if the type is ``AZURE_KEYVAULT``", + "required": false + }, + "initial_manage_principal": { + "name": "initial_manage_principal", + "type": "string", + "description": "The principal that is initially granted ``MANAGE`` permission to the\ncreated scope.", + "required": false + }, + "scope": { + "name": "scope", + "type": "string", + "description": "Scope name requested by the user. Scope names are unique.", + "required": false + }, + "scope_backend_type": { + "name": "scope_backend_type", + "type": "ScopeBackendType", + "description": "The backend type the scope will be created with. 
If not specified, will\ndefault to ``DATABRICKS``", + "required": false + } + } + }, + "workspace.CredentialInfo": { + "name": "CredentialInfo", + "package": "workspace", + "description": "", + "fields": { + "credential_id": { + "name": "credential_id", + "type": "int64", + "description": "ID of the credential object in the workspace.", + "required": false + }, + "git_email": { + "name": "git_email", + "type": "string", + "description": "The authenticating email associated with your Git provider user account.\nUsed for authentication with the remote repository and also sets the\nauthor \u0026 committer identity for commits. Required for most Git providers\nexcept AWS CodeCommit. Learn more at\nhttps://docs.databricks.com/aws/en/repos/get-access-tokens-from-git-provider", + "required": false + }, + "git_provider": { + "name": "git_provider", + "type": "string", + "description": "The Git provider associated with the credential.", + "required": false + }, + "git_username": { + "name": "git_username", + "type": "string", + "description": "The username provided with your Git provider account and associated with\nthe credential. For most Git providers it is only used to set the Git\ncommitter \u0026 author names for commits, however it may be required for\nauthentication depending on your Git provider / token requirements.\nRequired for AWS CodeCommit.", + "required": false + }, + "is_default_for_provider": { + "name": "is_default_for_provider", + "type": "bool", + "description": "if the credential is the default for the given provider", + "required": false + }, + "name": { + "name": "name", + "type": "string", + "description": "the name of the git credential, used for identification and ease of\nlookup", + "required": false + } + } + }, + "workspace.Delete": { + "name": "Delete", + "package": "workspace", + "description": "", + "fields": { + "path": { + "name": "path", + "type": "string", + "description": "The absolute path of the notebook or directory.", + "required": false + }, + "recursive": { + "name": "recursive", + "type": "bool", + "description": "The flag that specifies whether to delete the object recursively. It is\n`false` by default. Please note this deleting directory is not atomic. 
If\nit fails in the middle, some of objects under this directory may be\ndeleted and cannot be undone.", + "required": false + } + } + }, + "workspace.DeleteAcl": { + "name": "DeleteAcl", + "package": "workspace", + "description": "", + "fields": { + "principal": { + "name": "principal", + "type": "string", + "description": "The principal to remove an existing ACL from.", + "required": false + }, + "scope": { + "name": "scope", + "type": "string", + "description": "The name of the scope to remove permissions from.", + "required": false + } + } + }, + "workspace.DeleteScope": { + "name": "DeleteScope", + "package": "workspace", + "description": "", + "fields": { + "scope": { + "name": "scope", + "type": "string", + "description": "Name of the scope to delete.", + "required": false + } + } + }, + "workspace.DeleteSecret": { + "name": "DeleteSecret", + "package": "workspace", + "description": "", + "fields": { + "key": { + "name": "key", + "type": "string", + "description": "Name of the secret to delete.", + "required": false + }, + "scope": { + "name": "scope", + "type": "string", + "description": "The name of the scope that contains the secret to delete.", + "required": false + } + } + }, + "workspace.ExportResponse": { + "name": "ExportResponse", + "package": "workspace", + "description": "The request field `direct_download` determines whether a JSON response or\nbinary contents are returned by this endpoint.", + "fields": { + "content": { + "name": "content", + "type": "string", + "description": "The base64-encoded content. If the limit (10MB) is exceeded, exception\nwith error code **MAX_NOTEBOOK_SIZE_EXCEEDED** is thrown.", + "required": false + }, + "file_type": { + "name": "file_type", + "type": "string", + "description": "The file type of the exported file.", + "required": false + } + } + }, + "workspace.GetCredentialsResponse": { + "name": "GetCredentialsResponse", + "package": "workspace", + "description": "", + "fields": { + "credential_id": { + "name": "credential_id", + "type": "int64", + "description": "ID of the credential object in the workspace.", + "required": false + }, + "git_email": { + "name": "git_email", + "type": "string", + "description": "The authenticating email associated with your Git provider user account.\nUsed for authentication with the remote repository and also sets the\nauthor \u0026 committer identity for commits. Required for most Git providers\nexcept AWS CodeCommit. Learn more at\nhttps://docs.databricks.com/aws/en/repos/get-access-tokens-from-git-provider", + "required": false + }, + "git_provider": { + "name": "git_provider", + "type": "string", + "description": "The Git provider associated with the credential.", + "required": false + }, + "git_username": { + "name": "git_username", + "type": "string", + "description": "The username provided with your Git provider account and associated with\nthe credential. 
For most Git providers it is only used to set the Git\ncommitter \u0026 author names for commits, however it may be required for\nauthentication depending on your Git provider / token requirements.\nRequired for AWS CodeCommit.", + "required": false + }, + "is_default_for_provider": { + "name": "is_default_for_provider", + "type": "bool", + "description": "if the credential is the default for the given provider", + "required": false + }, + "name": { + "name": "name", + "type": "string", + "description": "the name of the git credential, used for identification and ease of\nlookup", + "required": false + } + } + }, + "workspace.GetRepoPermissionLevelsResponse": { + "name": "GetRepoPermissionLevelsResponse", + "package": "workspace", + "description": "", + "fields": { + "permission_levels": { + "name": "permission_levels", + "type": "[]RepoPermissionsDescription", + "description": "Specific permission levels", + "required": false + } + } + }, + "workspace.GetRepoResponse": { + "name": "GetRepoResponse", + "package": "workspace", + "description": "", + "fields": { + "branch": { + "name": "branch", + "type": "string", + "description": "Branch that the local version of the repo is checked out to.", + "required": false + }, + "head_commit_id": { + "name": "head_commit_id", + "type": "string", + "description": "SHA-1 hash representing the commit ID of the current HEAD of the repo.", + "required": false + }, + "id": { + "name": "id", + "type": "int64", + "description": "ID of the Git folder (repo) object in the workspace.", + "required": false + }, + "path": { + "name": "path", + "type": "string", + "description": "Path of the Git folder (repo) in the workspace.", + "required": false + }, + "provider": { + "name": "provider", + "type": "string", + "description": "Git provider of the linked Git repository.", + "required": false + }, + "sparse_checkout": { + "name": "sparse_checkout", + "type": "*SparseCheckout", + "description": "Sparse checkout settings for the Git folder (repo).", + "required": false + }, + "url": { + "name": "url", + "type": "string", + "description": "URL of the linked Git repository.", + "required": false + } + } + }, + "workspace.GetSecretResponse": { + "name": "GetSecretResponse", + "package": "workspace", + "description": "", + "fields": { + "key": { + "name": "key", + "type": "string", + "description": "A unique name to identify the secret.", + "required": false + }, + "value": { + "name": "value", + "type": "string", + "description": "The value of the secret in its byte representation.", + "required": false + } + } + }, + "workspace.GetWorkspaceObjectPermissionLevelsResponse": { + "name": "GetWorkspaceObjectPermissionLevelsResponse", + "package": "workspace", + "description": "", + "fields": { + "permission_levels": { + "name": "permission_levels", + "type": "[]WorkspaceObjectPermissionsDescription", + "description": "Specific permission levels", + "required": false + } + } + }, + "workspace.Import": { + "name": "Import", + "package": "workspace", + "description": "", + "fields": { + "content": { + "name": "content", + "type": "string", + "description": "The base64-encoded content. This has a limit of 10 MB.\n\nIf the limit (10MB) is exceeded, exception with error code\n**MAX_NOTEBOOK_SIZE_EXCEEDED** is thrown. 
This parameter might be absent,\nand instead a posted file is used.", + "required": false + }, + "format": { + "name": "format", + "type": "ImportFormat", + "description": "This specifies the format of the file to be imported.\n\nThe value is case sensitive.\n\n- `AUTO`: The item is imported depending on an analysis of the item's\nextension and the header content provided in the request. If the item is\nimported as a notebook, then the item's extension is automatically\nremoved. - `SOURCE`: The notebook or directory is imported as source\ncode. - `HTML`: The notebook is imported as an HTML file. - `JUPYTER`:\nThe notebook is imported as a Jupyter/IPython Notebook file. -...", + "required": false + }, + "language": { + "name": "language", + "type": "Language", + "description": "The language of the object. This value is set only if the object type is\n`NOTEBOOK`.", + "required": false + }, + "overwrite": { + "name": "overwrite", + "type": "bool", + "description": "The flag that specifies whether to overwrite existing object. It is\n`false` by default. For `DBC` format, `overwrite` is not supported since\nit may contain a directory.", + "required": false + }, + "path": { + "name": "path", + "type": "string", + "description": "The absolute path of the object or directory. Importing a directory is\nonly supported for the `DBC` and `SOURCE` formats.", + "required": false + } + } + }, + "workspace.ListAclsResponse": { + "name": "ListAclsResponse", + "package": "workspace", + "description": "", + "fields": { + "items": { + "name": "items", + "type": "[]AclItem", + "description": "The associated ACLs rule applied to principals in the given scope.", + "required": false + } + } + }, + "workspace.ListCredentialsResponse": { + "name": "ListCredentialsResponse", + "package": "workspace", + "description": "", + "fields": { + "credentials": { + "name": "credentials", + "type": "[]CredentialInfo", + "description": "List of credentials.", + "required": false + } + } + }, + "workspace.ListReposResponse": { + "name": "ListReposResponse", + "package": "workspace", + "description": "", + "fields": { + "next_page_token": { + "name": "next_page_token", + "type": "string", + "description": "Token that can be specified as a query parameter to the `GET /repos`\nendpoint to retrieve the next page of results.", + "required": false + }, + "repos": { + "name": "repos", + "type": "[]RepoInfo", + "description": "List of Git folders (repos).", + "required": false + } + } + }, + "workspace.ListResponse": { + "name": "ListResponse", + "package": "workspace", + "description": "", + "fields": { + "objects": { + "name": "objects", + "type": "[]ObjectInfo", + "description": "List of objects.", + "required": false + } + } + }, + "workspace.ListScopesResponse": { + "name": "ListScopesResponse", + "package": "workspace", + "description": "", + "fields": { + "scopes": { + "name": "scopes", + "type": "[]SecretScope", + "description": "The available secret scopes.", + "required": false + } + } + }, + "workspace.ListSecretsResponse": { + "name": "ListSecretsResponse", + "package": "workspace", + "description": "", + "fields": { + "secrets": { + "name": "secrets", + "type": "[]SecretMetadata", + "description": "Metadata information of all secrets contained within the given scope.", + "required": false + } + } + }, + "workspace.Mkdirs": { + "name": "Mkdirs", + "package": "workspace", + "description": "", + "fields": { + "path": { + "name": "path", + "type": "string", + "description": "The absolute path of the directory. 
If the parent directories do not\nexist, it will also create them. If the directory already exists, this\ncommand will do nothing and succeed.", + "required": false + } + } + }, + "workspace.ObjectInfo": { + "name": "ObjectInfo", + "package": "workspace", + "description": "The information of the object in workspace. It will be returned by “list“\nand “get-status“.", + "fields": { + "created_at": { + "name": "created_at", + "type": "int64", + "description": "Only applicable to files. The creation UTC timestamp.", + "required": false + }, + "language": { + "name": "language", + "type": "Language", + "description": "The language of the object. This value is set only if the object type is\n``NOTEBOOK``.", + "required": false + }, + "modified_at": { + "name": "modified_at", + "type": "int64", + "description": "Only applicable to files, the last modified UTC timestamp.", + "required": false + }, + "object_id": { + "name": "object_id", + "type": "int64", + "description": "Unique identifier for the object.", + "required": false + }, + "object_type": { + "name": "object_type", + "type": "ObjectType", + "description": "The type of the object in workspace.\n\n- `NOTEBOOK`: document that contains runnable code, visualizations, and\nexplanatory text. - `DIRECTORY`: directory - `LIBRARY`: library - `FILE`:\nfile - `REPO`: repository - `DASHBOARD`: Lakeview dashboard", + "required": false + }, + "path": { + "name": "path", + "type": "string", + "description": "The absolute path of the object.", + "required": false + }, + "resource_id": { + "name": "resource_id", + "type": "string", + "description": "A unique identifier for the object that is consistent across all\nDatabricks APIs.", + "required": false + }, + "size": { + "name": "size", + "type": "int64", + "description": "Only applicable to files. 
The file size in bytes can be returned.", + "required": false + } + } + }, + "workspace.PutAcl": { + "name": "PutAcl", + "package": "workspace", + "description": "", + "fields": { + "permission": { + "name": "permission", + "type": "AclPermission", + "description": "The permission level applied to the principal.", + "required": false + }, + "principal": { + "name": "principal", + "type": "string", + "description": "The principal in which the permission is applied.", + "required": false + }, + "scope": { + "name": "scope", + "type": "string", + "description": "The name of the scope to apply permissions to.", + "required": false + } + } + }, + "workspace.PutSecret": { + "name": "PutSecret", + "package": "workspace", + "description": "", + "fields": { + "bytes_value": { + "name": "bytes_value", + "type": "string", + "description": "If specified, value will be stored as bytes.", + "required": false + }, + "key": { + "name": "key", + "type": "string", + "description": "A unique name to identify the secret.", + "required": false + }, + "scope": { + "name": "scope", + "type": "string", + "description": "The name of the scope to which the secret will be associated with.", + "required": false + }, + "string_value": { + "name": "string_value", + "type": "string", + "description": "If specified, note that the value will be stored in UTF-8 (MB4) form.", + "required": false + } + } + }, + "workspace.RepoAccessControlRequest": { + "name": "RepoAccessControlRequest", + "package": "workspace", + "description": "", + "fields": { + "group_name": { + "name": "group_name", + "type": "string", + "description": "name of the group", + "required": false + }, + "permission_level": { + "name": "permission_level", + "type": "RepoPermissionLevel", + "description": "", + "required": false + }, + "service_principal_name": { + "name": "service_principal_name", + "type": "string", + "description": "application ID of a service principal", + "required": false + }, + "user_name": { + "name": "user_name", + "type": "string", + "description": "name of the user", + "required": false + } + } + }, + "workspace.RepoAccessControlResponse": { + "name": "RepoAccessControlResponse", + "package": "workspace", + "description": "", + "fields": { + "all_permissions": { + "name": "all_permissions", + "type": "[]RepoPermission", + "description": "All permissions.", + "required": false + }, + "display_name": { + "name": "display_name", + "type": "string", + "description": "Display name of the user or service principal.", + "required": false + }, + "group_name": { + "name": "group_name", + "type": "string", + "description": "name of the group", + "required": false + }, + "service_principal_name": { + "name": "service_principal_name", + "type": "string", + "description": "Name of the service principal.", + "required": false + }, + "user_name": { + "name": "user_name", + "type": "string", + "description": "name of the user", + "required": false + } + } + }, + "workspace.RepoInfo": { + "name": "RepoInfo", + "package": "workspace", + "description": "Git folder (repo) information.", + "fields": { + "branch": { + "name": "branch", + "type": "string", + "description": "Name of the current git branch of the git folder (repo).", + "required": false + }, + "head_commit_id": { + "name": "head_commit_id", + "type": "string", + "description": "Current git commit id of the git folder (repo).", + "required": false + }, + "id": { + "name": "id", + "type": "int64", + "description": "Id of the git folder (repo) in the Workspace.", + "required": false + }, + 
"path": { + "name": "path", + "type": "string", + "description": "Root path of the git folder (repo) in the Workspace.", + "required": false + }, + "provider": { + "name": "provider", + "type": "string", + "description": "Git provider of the remote git repository, e.g. `gitHub`.", + "required": false + }, + "sparse_checkout": { + "name": "sparse_checkout", + "type": "*SparseCheckout", + "description": "Sparse checkout config for the git folder (repo).", + "required": false + }, + "url": { + "name": "url", + "type": "string", + "description": "URL of the remote git repository.", + "required": false + } + } + }, + "workspace.RepoPermission": { + "name": "RepoPermission", + "package": "workspace", + "description": "", + "fields": { + "inherited": { + "name": "inherited", + "type": "bool", + "description": "", + "required": false + }, + "inherited_from_object": { + "name": "inherited_from_object", + "type": "[]string", + "description": "", + "required": false + }, + "permission_level": { + "name": "permission_level", + "type": "RepoPermissionLevel", + "description": "", + "required": false + } + } + }, + "workspace.RepoPermissions": { + "name": "RepoPermissions", + "package": "workspace", + "description": "", + "fields": { + "access_control_list": { + "name": "access_control_list", + "type": "[]RepoAccessControlResponse", + "description": "", + "required": false + }, + "object_id": { + "name": "object_id", + "type": "string", + "description": "", + "required": false + }, + "object_type": { + "name": "object_type", + "type": "string", + "description": "", + "required": false + } + } + }, + "workspace.RepoPermissionsDescription": { + "name": "RepoPermissionsDescription", + "package": "workspace", + "description": "", + "fields": { + "description": { + "name": "description", + "type": "string", + "description": "", + "required": false + }, + "permission_level": { + "name": "permission_level", + "type": "RepoPermissionLevel", + "description": "", + "required": false + } + } + }, + "workspace.RepoPermissionsRequest": { + "name": "RepoPermissionsRequest", + "package": "workspace", + "description": "", + "fields": { + "access_control_list": { + "name": "access_control_list", + "type": "[]RepoAccessControlRequest", + "description": "", + "required": false + } + } + }, + "workspace.ScopeBackendType": { + "name": "ScopeBackendType", + "package": "workspace", + "description": "The types of secret scope backends in the Secret Manager. Azure KeyVault backed secret scopes\nwill be supported in a later release.", + "fields": {} + }, + "workspace.SecretMetadata": { + "name": "SecretMetadata", + "package": "workspace", + "description": "The metadata about a secret. Returned when listing secrets. Does not contain\nthe actual secret value.", + "fields": { + "key": { + "name": "key", + "type": "string", + "description": "A unique name to identify the secret.", + "required": false + }, + "last_updated_timestamp": { + "name": "last_updated_timestamp", + "type": "int64", + "description": "The last updated timestamp (in milliseconds) for the secret.", + "required": false + } + } + }, + "workspace.SecretScope": { + "name": "SecretScope", + "package": "workspace", + "description": "An organizational resource for storing secrets. 
Secret scopes can be\ndifferent types (Databricks-managed, Azure KeyVault backed, etc), and ACLs\ncan be applied to control permissions for all secrets within a scope.", + "fields": { + "backend_type": { + "name": "backend_type", + "type": "ScopeBackendType", + "description": "The type of secret scope backend.", + "required": false + }, + "keyvault_metadata": { + "name": "keyvault_metadata", + "type": "*AzureKeyVaultSecretScopeMetadata", + "description": "The metadata for the secret scope if the type is ``AZURE_KEYVAULT``", + "required": false + }, + "name": { + "name": "name", + "type": "string", + "description": "A unique name to identify the secret scope.", + "required": false + } + } + }, + "workspace.SparseCheckout": { + "name": "SparseCheckout", + "package": "workspace", + "description": "Sparse checkout configuration, it contains options like cone patterns.", + "fields": { + "patterns": { + "name": "patterns", + "type": "[]string", + "description": "List of sparse checkout cone patterns, see [cone mode handling] for\ndetails.\n\n[cone mode handling]: https://git-scm.com/docs/git-sparse-checkout#_internalscone_mode_handling", + "required": false + } + } + }, + "workspace.SparseCheckoutUpdate": { + "name": "SparseCheckoutUpdate", + "package": "workspace", + "description": "Sparse checkout configuration, it contains options like cone patterns.", + "fields": { + "patterns": { + "name": "patterns", + "type": "[]string", + "description": "List of sparse checkout cone patterns, see [cone mode handling] for\ndetails.\n\n[cone mode handling]: https://git-scm.com/docs/git-sparse-checkout#_internalscone_mode_handling", + "required": false + } + } + }, + "workspace.UpdateCredentialsRequest": { + "name": "UpdateCredentialsRequest", + "package": "workspace", + "description": "", + "fields": { + "git_email": { + "name": "git_email", + "type": "string", + "description": "The authenticating email associated with your Git provider user account.\nUsed for authentication with the remote repository and also sets the\nauthor \u0026 committer identity for commits. Required for most Git providers\nexcept AWS CodeCommit. Learn more at\nhttps://docs.databricks.com/aws/en/repos/get-access-tokens-from-git-provider", + "required": false + }, + "git_provider": { + "name": "git_provider", + "type": "string", + "description": "Git provider. This field is case-insensitive. The available Git providers\nare `gitHub`, `bitbucketCloud`, `gitLab`, `azureDevOpsServices`,\n`gitHubEnterprise`, `bitbucketServer`, `gitLabEnterpriseEdition` and\n`awsCodeCommit`.", + "required": false + }, + "git_username": { + "name": "git_username", + "type": "string", + "description": "The username provided with your Git provider account and associated with\nthe credential. 
For most Git providers it is only used to set the Git\ncommitter \u0026 author names for commits, however it may be required for\nauthentication depending on your Git provider / token requirements.\nRequired for AWS CodeCommit.", + "required": false + }, + "is_default_for_provider": { + "name": "is_default_for_provider", + "type": "bool", + "description": "if the credential is the default for the given provider", + "required": false + }, + "name": { + "name": "name", + "type": "string", + "description": "the name of the git credential, used for identification and ease of\nlookup", + "required": false + }, + "personal_access_token": { + "name": "personal_access_token", + "type": "string", + "description": "The personal access token used to authenticate to the corresponding Git\nprovider. For certain providers, support may exist for other types of\nscoped access tokens. [Learn more].\n\n[Learn more]: https://docs.databricks.com/repos/get-access-tokens-from-git-provider.html", + "required": false + } + } + }, + "workspace.UpdateRepoRequest": { + "name": "UpdateRepoRequest", + "package": "workspace", + "description": "", + "fields": { + "branch": { + "name": "branch", + "type": "string", + "description": "Branch that the local version of the repo is checked out to.", + "required": false + }, + "sparse_checkout": { + "name": "sparse_checkout", + "type": "*SparseCheckoutUpdate", + "description": "If specified, update the sparse checkout settings. The update will fail\nif sparse checkout is not enabled for the repo.", + "required": false + }, + "tag": { + "name": "tag", + "type": "string", + "description": "Tag that the local version of the repo is checked out to. Updating the\nrepo to a tag puts the repo in a detached HEAD state. Before committing\nnew changes, you must update the repo to a branch instead of the detached\nHEAD.", + "required": false + } + } + }, + "workspace.WorkspaceObjectAccessControlRequest": { + "name": "WorkspaceObjectAccessControlRequest", + "package": "workspace", + "description": "", + "fields": { + "group_name": { + "name": "group_name", + "type": "string", + "description": "name of the group", + "required": false + }, + "permission_level": { + "name": "permission_level", + "type": "WorkspaceObjectPermissionLevel", + "description": "", + "required": false + }, + "service_principal_name": { + "name": "service_principal_name", + "type": "string", + "description": "application ID of a service principal", + "required": false + }, + "user_name": { + "name": "user_name", + "type": "string", + "description": "name of the user", + "required": false + } + } + }, + "workspace.WorkspaceObjectAccessControlResponse": { + "name": "WorkspaceObjectAccessControlResponse", + "package": "workspace", + "description": "", + "fields": { + "all_permissions": { + "name": "all_permissions", + "type": "[]WorkspaceObjectPermission", + "description": "All permissions.", + "required": false + }, + "display_name": { + "name": "display_name", + "type": "string", + "description": "Display name of the user or service principal.", + "required": false + }, + "group_name": { + "name": "group_name", + "type": "string", + "description": "name of the group", + "required": false + }, + "service_principal_name": { + "name": "service_principal_name", + "type": "string", + "description": "Name of the service principal.", + "required": false + }, + "user_name": { + "name": "user_name", + "type": "string", + "description": "name of the user", + "required": false + } + } + }, + 
"workspace.WorkspaceObjectPermission": { + "name": "WorkspaceObjectPermission", + "package": "workspace", + "description": "", + "fields": { + "inherited": { + "name": "inherited", + "type": "bool", + "description": "", + "required": false + }, + "inherited_from_object": { + "name": "inherited_from_object", + "type": "[]string", + "description": "", + "required": false + }, + "permission_level": { + "name": "permission_level", + "type": "WorkspaceObjectPermissionLevel", + "description": "", + "required": false + } + } + }, + "workspace.WorkspaceObjectPermissions": { + "name": "WorkspaceObjectPermissions", + "package": "workspace", + "description": "", + "fields": { + "access_control_list": { + "name": "access_control_list", + "type": "[]WorkspaceObjectAccessControlResponse", + "description": "", + "required": false + }, + "object_id": { + "name": "object_id", + "type": "string", + "description": "", + "required": false + }, + "object_type": { + "name": "object_type", + "type": "string", + "description": "", + "required": false + } + } + }, + "workspace.WorkspaceObjectPermissionsDescription": { + "name": "WorkspaceObjectPermissionsDescription", + "package": "workspace", + "description": "", + "fields": { + "description": { + "name": "description", + "type": "string", + "description": "", + "required": false + }, + "permission_level": { + "name": "permission_level", + "type": "WorkspaceObjectPermissionLevel", + "description": "", + "required": false + } + } + }, + "workspace.WorkspaceObjectPermissionsRequest": { + "name": "WorkspaceObjectPermissionsRequest", + "package": "workspace", + "description": "", + "fields": { + "access_control_list": { + "name": "access_control_list", + "type": "[]WorkspaceObjectAccessControlRequest", + "description": "", + "required": false + } + } + }, + "workspace.gitCredentialsImpl": { + "name": "gitCredentialsImpl", + "package": "workspace", + "description": "unexported type that holds implementations of just GitCredentials API methods", + "fields": { + "client": { + "name": "client", + "type": "*client.DatabricksClient", + "description": "", + "required": false + } + } + }, + "workspace.reposImpl": { + "name": "reposImpl", + "package": "workspace", + "description": "unexported type that holds implementations of just Repos API methods", + "fields": { + "client": { + "name": "client", + "type": "*client.DatabricksClient", + "description": "", + "required": false + } + } + }, + "workspace.secretsImpl": { + "name": "secretsImpl", + "package": "workspace", + "description": "unexported type that holds implementations of just Secrets API methods", + "fields": { + "client": { + "name": "client", + "type": "*client.DatabricksClient", + "description": "", + "required": false + } + } + }, + "workspace.workspaceImpl": { + "name": "workspaceImpl", + "package": "workspace", + "description": "unexported type that holds implementations of just Workspace API methods", + "fields": { + "client": { + "name": "client", + "type": "*client.DatabricksClient", + "description": "", + "required": false + } + } + } + }, + "enums": { + "apps.AppDeploymentMode": { + "name": "AppDeploymentMode", + "package": "apps", + "description": "", + "values": [ + "AutoSync", + "Snapshot" + ] + }, + "apps.AppDeploymentState": { + "name": "AppDeploymentState", + "package": "apps", + "description": "", + "values": [ + "Cancelled", + "Failed", + "InProgress", + "Succeeded" + ] + }, + "apps.AppManifestAppResourceExperimentSpecExperimentPermission": { + "name": 
"AppManifestAppResourceExperimentSpecExperimentPermission", + "package": "apps", + "description": "", + "values": [ + "CanEdit", + "CanManage", + "CanRead" + ] + }, + "apps.AppManifestAppResourceJobSpecJobPermission": { + "name": "AppManifestAppResourceJobSpecJobPermission", + "package": "apps", + "description": "", + "values": [ + "CanManage", + "CanManageRun", + "CanView", + "IsOwner" + ] + }, + "apps.AppManifestAppResourceSecretSpecSecretPermission": { + "name": "AppManifestAppResourceSecretSpecSecretPermission", + "package": "apps", + "description": "Permission to grant on the secret scope. Supported permissions are: \"READ\",\n\"WRITE\", \"MANAGE\".", + "values": [ + "Manage", + "Read", + "Write" + ] + }, + "apps.AppManifestAppResourceServingEndpointSpecServingEndpointPermission": { + "name": "AppManifestAppResourceServingEndpointSpecServingEndpointPermission", + "package": "apps", + "description": "", + "values": [ + "CanManage", + "CanQuery", + "CanView" + ] + }, + "apps.AppManifestAppResourceSqlWarehouseSpecSqlWarehousePermission": { + "name": "AppManifestAppResourceSqlWarehouseSpecSqlWarehousePermission", + "package": "apps", + "description": "", + "values": [ + "CanManage", + "CanUse", + "IsOwner" + ] + }, + "apps.AppManifestAppResourceUcSecurableSpecUcSecurablePermission": { + "name": "AppManifestAppResourceUcSecurableSpecUcSecurablePermission", + "package": "apps", + "description": "", + "values": [ + "Execute", + "Manage", + "ReadVolume", + "Select", + "UseConnection", + "WriteVolume" + ] + }, + "apps.AppManifestAppResourceUcSecurableSpecUcSecurableType": { + "name": "AppManifestAppResourceUcSecurableSpecUcSecurableType", + "package": "apps", + "description": "", + "values": [ + "Connection", + "Function", + "Table", + "Volume" + ] + }, + "apps.AppPermissionLevel": { + "name": "AppPermissionLevel", + "package": "apps", + "description": "Permission level", + "values": [ + "CanManage", + "CanUse" + ] + }, + "apps.AppResourceDatabaseDatabasePermission": { + "name": "AppResourceDatabaseDatabasePermission", + "package": "apps", + "description": "", + "values": [ + "CanConnectAndCreate" + ] + }, + "apps.AppResourceExperimentExperimentPermission": { + "name": "AppResourceExperimentExperimentPermission", + "package": "apps", + "description": "", + "values": [ + "CanEdit", + "CanManage", + "CanRead" + ] + }, + "apps.AppResourceGenieSpaceGenieSpacePermission": { + "name": "AppResourceGenieSpaceGenieSpacePermission", + "package": "apps", + "description": "", + "values": [ + "CanEdit", + "CanManage", + "CanRun", + "CanView" + ] + }, + "apps.AppResourceJobJobPermission": { + "name": "AppResourceJobJobPermission", + "package": "apps", + "description": "", + "values": [ + "CanManage", + "CanManageRun", + "CanView", + "IsOwner" + ] + }, + "apps.AppResourceSecretSecretPermission": { + "name": "AppResourceSecretSecretPermission", + "package": "apps", + "description": "Permission to grant on the secret scope. 
Supported permissions are: \"READ\",\n\"WRITE\", \"MANAGE\".", + "values": [ + "Manage", + "Read", + "Write" + ] + }, + "apps.AppResourceServingEndpointServingEndpointPermission": { + "name": "AppResourceServingEndpointServingEndpointPermission", + "package": "apps", + "description": "", + "values": [ + "CanManage", + "CanQuery", + "CanView" + ] + }, + "apps.AppResourceSqlWarehouseSqlWarehousePermission": { + "name": "AppResourceSqlWarehouseSqlWarehousePermission", + "package": "apps", + "description": "", + "values": [ + "CanManage", + "CanUse", + "IsOwner" + ] + }, + "apps.AppResourceUcSecurableUcSecurablePermission": { + "name": "AppResourceUcSecurableUcSecurablePermission", + "package": "apps", + "description": "", + "values": [ + "Execute", + "ReadVolume", + "Select", + "UseConnection", + "WriteVolume" + ] + }, + "apps.AppResourceUcSecurableUcSecurableType": { + "name": "AppResourceUcSecurableUcSecurableType", + "package": "apps", + "description": "", + "values": [ + "Connection", + "Function", + "Table", + "Volume" + ] + }, + "apps.AppUpdateUpdateStatusUpdateState": { + "name": "AppUpdateUpdateStatusUpdateState", + "package": "apps", + "description": "", + "values": [ + "Failed", + "InProgress", + "NotUpdated", + "Succeeded" + ] + }, + "apps.ApplicationState": { + "name": "ApplicationState", + "package": "apps", + "description": "", + "values": [ + "Crashed", + "Deploying", + "Running", + "Unavailable" + ] + }, + "apps.ComputeSize": { + "name": "ComputeSize", + "package": "apps", + "description": "", + "values": [ + "Large", + "Medium" + ] + }, + "apps.ComputeState": { + "name": "ComputeState", + "package": "apps", + "description": "", + "values": [ + "Active", + "Deleting", + "Error", + "Starting", + "Stopped", + "Stopping", + "Updating" + ] + }, + "catalog.ArtifactType": { + "name": "ArtifactType", + "package": "catalog", + "description": "The artifact type", + "values": [ + "InitScript", + "LibraryJar", + "LibraryMaven" + ] + }, + "catalog.CatalogIsolationMode": { + "name": "CatalogIsolationMode", + "package": "catalog", + "description": "", + "values": [ + "Isolated", + "Open" + ] + }, + "catalog.CatalogType": { + "name": "CatalogType", + "package": "catalog", + "description": "The type of the catalog.", + "values": [ + "DeltasharingCatalog", + "ForeignCatalog", + "InternalCatalog", + "ManagedCatalog", + "ManagedOnlineCatalog", + "SystemCatalog" + ] + }, + "catalog.ColumnTypeName": { + "name": "ColumnTypeName", + "package": "catalog", + "description": "", + "values": [ + "Array", + "Binary", + "Boolean", + "Byte", + "Char", + "Date", + "Decimal", + "Double", + "Float", + "Geography", + "Geometry", + "Int", + "Interval", + "Long", + "Map", + "Null", + "Short", + "String", + "Struct", + "TableType", + "Timestamp", + "TimestampNtz", + "UserDefinedType", + "Variant" + ] + }, + "catalog.ConnectionType": { + "name": "ConnectionType", + "package": "catalog", + "description": "Next Id: 53", + "values": [ + "Bigquery", + "Databricks", + "Ga4RawData", + "Glue", + "HiveMetastore", + "Http", + "Mysql", + "Oracle", + "Postgresql", + "PowerBi", + "Redshift", + "Salesforce", + "SalesforceDataCloud", + "Servicenow", + "Snowflake", + "Sqldw", + "Sqlserver", + "Teradata", + "UnknownConnectionType", + "WorkdayRaas" + ] + }, + "catalog.CreateFunctionParameterStyle": { + "name": "CreateFunctionParameterStyle", + "package": "catalog", + "description": "", + "values": [ + "S" + ] + }, + "catalog.CreateFunctionRoutineBody": { + "name": "CreateFunctionRoutineBody", + "package": "catalog", + 
"description": "", + "values": [ + "External", + "Sql" + ] + }, + "catalog.CreateFunctionSecurityType": { + "name": "CreateFunctionSecurityType", + "package": "catalog", + "description": "", + "values": [ + "Definer" + ] + }, + "catalog.CreateFunctionSqlDataAccess": { + "name": "CreateFunctionSqlDataAccess", + "package": "catalog", + "description": "", + "values": [ + "ContainsSql", + "NoSql", + "ReadsSqlData" + ] + }, + "catalog.CredentialPurpose": { + "name": "CredentialPurpose", + "package": "catalog", + "description": "", + "values": [ + "Service", + "Storage" + ] + }, + "catalog.CredentialType": { + "name": "CredentialType", + "package": "catalog", + "description": "Next Id: 14", + "values": [ + "AnyStaticCredential", + "BearerToken", + "OauthAccessToken", + "OauthM2m", + "OauthMtls", + "OauthRefreshToken", + "OauthResourceOwnerPassword", + "OauthU2m", + "OauthU2mMapping", + "OidcToken", + "PemPrivateKey", + "ServiceCredential", + "UnknownCredentialType", + "UsernamePassword" + ] + }, + "catalog.DataSourceFormat": { + "name": "DataSourceFormat", + "package": "catalog", + "description": "Data source format", + "values": [ + "Avro", + "BigqueryFormat", + "Csv", + "DatabricksFormat", + "DatabricksRowStoreFormat", + "Delta", + "DeltaUniformHudi", + "DeltaUniformIceberg", + "Deltasharing", + "Hive", + "Iceberg", + "Json", + "MongodbFormat", + "MysqlFormat", + "NetsuiteFormat", + "OracleFormat", + "Orc", + "Parquet", + "PostgresqlFormat", + "RedshiftFormat", + "SalesforceDataCloudFormat", + "SalesforceFormat", + "SnowflakeFormat", + "SqldwFormat", + "SqlserverFormat", + "TeradataFormat", + "Text", + "UnityCatalog", + "VectorIndexFormat", + "WorkdayRaasFormat" + ] + }, + "catalog.DeltaSharingScopeEnum": { + "name": "DeltaSharingScopeEnum", + "package": "catalog", + "description": "", + "values": [ + "Internal", + "InternalAndExternal" + ] + }, + "catalog.DestinationType": { + "name": "DestinationType", + "package": "catalog", + "description": "", + "values": [ + "Email", + "GenericWebhook", + "MicrosoftTeams", + "Slack", + "Url" + ] + }, + "catalog.EffectivePredictiveOptimizationFlagInheritedFromType": { + "name": "EffectivePredictiveOptimizationFlagInheritedFromType", + "package": "catalog", + "description": "", + "values": [ + "Catalog", + "Schema" + ] + }, + "catalog.EnablePredictiveOptimization": { + "name": "EnablePredictiveOptimization", + "package": "catalog", + "description": "", + "values": [ + "Disable", + "Enable", + "Inherit" + ] + }, + "catalog.FunctionInfoParameterStyle": { + "name": "FunctionInfoParameterStyle", + "package": "catalog", + "description": "", + "values": [ + "S" + ] + }, + "catalog.FunctionInfoRoutineBody": { + "name": "FunctionInfoRoutineBody", + "package": "catalog", + "description": "", + "values": [ + "External", + "Sql" + ] + }, + "catalog.FunctionInfoSecurityType": { + "name": "FunctionInfoSecurityType", + "package": "catalog", + "description": "", + "values": [ + "Definer" + ] + }, + "catalog.FunctionInfoSqlDataAccess": { + "name": "FunctionInfoSqlDataAccess", + "package": "catalog", + "description": "", + "values": [ + "ContainsSql", + "NoSql", + "ReadsSqlData" + ] + }, + "catalog.FunctionParameterMode": { + "name": "FunctionParameterMode", + "package": "catalog", + "description": "", + "values": [ + "In" + ] + }, + "catalog.FunctionParameterType": { + "name": "FunctionParameterType", + "package": "catalog", + "description": "", + "values": [ + "Column", + "Param" + ] + }, + "catalog.IsolationMode": { + "name": "IsolationMode", + "package": "catalog", + 
"description": "", + "values": [ + "IsolationModeIsolated", + "IsolationModeOpen" + ] + }, + "catalog.LineageDirection": { + "name": "LineageDirection", + "package": "catalog", + "description": "", + "values": [ + "Downstream", + "Upstream" + ] + }, + "catalog.MatchType": { + "name": "MatchType", + "package": "catalog", + "description": "The artifact pattern matching type", + "values": [ + "PrefixMatch" + ] + }, + "catalog.ModelVersionInfoStatus": { + "name": "ModelVersionInfoStatus", + "package": "catalog", + "description": "", + "values": [ + "FailedRegistration", + "ModelVersionStatusUnknown", + "PendingRegistration", + "Ready" + ] + }, + "catalog.MonitorCronSchedulePauseStatus": { + "name": "MonitorCronSchedulePauseStatus", + "package": "catalog", + "description": "Source link:\nhttps://src.dev.databricks.com/databricks/universe/-/blob/elastic-spark-common/api/messages/schedule.proto\nMonitoring workflow schedule pause status.", + "values": [ + "Paused", + "Unpaused", + "Unspecified" + ] + }, + "catalog.MonitorInferenceLogProblemType": { + "name": "MonitorInferenceLogProblemType", + "package": "catalog", + "description": "", + "values": [ + "ProblemTypeClassification", + "ProblemTypeRegression" + ] + }, + "catalog.MonitorInfoStatus": { + "name": "MonitorInfoStatus", + "package": "catalog", + "description": "", + "values": [ + "MonitorStatusActive", + "MonitorStatusDeletePending", + "MonitorStatusError", + "MonitorStatusFailed", + "MonitorStatusPending" + ] + }, + "catalog.MonitorMetricType": { + "name": "MonitorMetricType", + "package": "catalog", + "description": "Can only be one of “\\\"CUSTOM_METRIC_TYPE_AGGREGATE\\\"“,\n“\\\"CUSTOM_METRIC_TYPE_DERIVED\\\"“, or “\\\"CUSTOM_METRIC_TYPE_DRIFT\\\"“. The\n“\\\"CUSTOM_METRIC_TYPE_AGGREGATE\\\"“ and “\\\"CUSTOM_METRIC_TYPE_DERIVED\\\"“\nmetrics are computed on a single table, whereas the\n“\\\"CUSTOM_METRIC_TYPE_DRIFT\\\"“ compare metrics across baseline and input\ntable, or across the two consecutive time windows. -\nCUSTOM_METRIC_TYPE_AGGREGATE: only depend on the existing columns in your\ntable - CUSTOM_METRIC...", + "values": [ + "CustomMetricTypeAggregate", + "CustomMetricTypeDerived", + "CustomMetricTypeDrift" + ] + }, + "catalog.MonitorRefreshInfoState": { + "name": "MonitorRefreshInfoState", + "package": "catalog", + "description": "The current state of the refresh.", + "values": [ + "Canceled", + "Failed", + "Pending", + "Running", + "Success", + "Unknown" + ] + }, + "catalog.MonitorRefreshInfoTrigger": { + "name": "MonitorRefreshInfoTrigger", + "package": "catalog", + "description": "", + "values": [ + "Manual", + "Schedule", + "UnknownTrigger" + ] + }, + "catalog.OnlineTableState": { + "name": "OnlineTableState", + "package": "catalog", + "description": "The state of an online table.", + "values": [ + "Offline", + "OfflineFailed", + "Online", + "OnlineContinuousUpdate", + "OnlineNoPendingUpdate", + "OnlinePipelineFailed", + "OnlineTriggeredUpdate", + "OnlineUpdatingPipelineResources", + "Provisioning", + "ProvisioningInitialSnapshot", + "ProvisioningPipelineResources" + ] + }, + "catalog.OptionSpecOauthStage": { + "name": "OptionSpecOauthStage", + "package": "catalog", + "description": "During the OAuth flow, specifies which stage the option should be displayed\nin the UI. OAUTH_STAGE_UNSPECIFIED is the default value for options unrelated\nto the OAuth flow. BEFORE_AUTHORIZATION_CODE corresponds to options necessary\nto initiate the OAuth process. 
BEFORE_ACCESS_TOKEN corresponds to options\nthat are necessary to create a foreign connection, but that should be\ndisplayed after the authorization code has already been received.", + "values": [ + "BeforeAccessToken", + "BeforeAuthorizationCode" + ] + }, + "catalog.OptionSpecOptionType": { + "name": "OptionSpecOptionType", + "package": "catalog", + "description": "Type of the option, we purposely follow JavaScript types so that the UI can\nmap the options to JS types. https://www.w3schools.com/js/js_datatypes.asp\nEnum is a special case that it's just string with selections.", + "values": [ + "OptionBigint", + "OptionBoolean", + "OptionEnum", + "OptionMultilineString", + "OptionNumber", + "OptionServiceCredential", + "OptionString" + ] + }, + "catalog.PathOperation": { + "name": "PathOperation", + "package": "catalog", + "description": "", + "values": [ + "PathCreateTable", + "PathRead", + "PathReadWrite" + ] + }, + "catalog.PolicyType": { + "name": "PolicyType", + "package": "catalog", + "description": "", + "values": [ + "PolicyTypeColumnMask", + "PolicyTypeRowFilter" + ] + }, + "catalog.PrincipalType": { + "name": "PrincipalType", + "package": "catalog", + "description": "", + "values": [ + "GroupPrincipal", + "ServicePrincipal", + "UserPrincipal" + ] + }, + "catalog.Privilege": { + "name": "Privilege", + "package": "catalog", + "description": "", + "values": [ + "Access", + "AllPrivileges", + "ApplyTag", + "Browse", + "Create", + "CreateCatalog", + "CreateCleanRoom", + "CreateConnection", + "CreateExternalLocation", + "CreateExternalTable", + "CreateExternalVolume", + "CreateForeignCatalog", + "CreateForeignSecurable", + "CreateFunction", + "CreateManagedStorage", + "CreateMaterializedView", + "CreateModel", + "CreateProvider", + "CreateRecipient", + "CreateSchema", + "CreateServiceCredential", + "CreateShare", + "CreateStorageCredential", + "CreateTable", + "CreateView", + "CreateVolume", + "Execute", + "ExecuteCleanRoomTask", + "ExternalUseSchema", + "Manage", + "ManageAllowlist", + "Modify", + "ModifyCleanRoom", + "ReadFiles", + "ReadPrivateFiles", + "ReadVolume", + "Refresh", + "Select", + "SetSharePermission", + "Usage", + "UseCatalog", + "UseConnection", + "UseMarketplaceAssets", + "UseProvider", + "UseRecipient", + "UseSchema", + "UseShare", + "WriteFiles", + "WritePrivateFiles", + "WriteVolume" + ] + }, + "catalog.ProvisioningInfoState": { + "name": "ProvisioningInfoState", + "package": "catalog", + "description": "", + "values": [ + "Active", + "Degraded", + "Deleting", + "Failed", + "Provisioning", + "Updating" + ] + }, + "catalog.SecurableKind": { + "name": "SecurableKind", + "package": "catalog", + "description": "Latest kind: TABLE_DELTASHARING_OPEN_DIR_BASED = 290; Next id:291", + "values": [ + "TableDbStorage", + "TableDelta", + "TableDeltaExternal", + "TableDeltaIcebergDeltasharing", + "TableDeltaIcebergManaged", + "TableDeltaUniformHudiExternal", + "TableDeltaUniformIcebergExternal", + "TableDeltaUniformIcebergForeignDeltasharing", + "TableDeltaUniformIcebergForeignHiveMetastoreExternal", + "TableDeltaUniformIcebergForeignHiveMetastoreManaged", + "TableDeltaUniformIcebergForeignSnowflake", + "TableDeltasharing", + "TableDeltasharingMutable", + "TableDeltasharingOpenDirBased", + "TableExternal", + "TableFeatureStore", + "TableFeatureStoreExternal", + "TableForeignBigquery", + "TableForeignDatabricks", + "TableForeignDeltasharing", + "TableForeignHiveMetastore", + "TableForeignHiveMetastoreDbfsExternal", + "TableForeignHiveMetastoreDbfsManaged", + 
"TableForeignHiveMetastoreDbfsShallowCloneExternal", + "TableForeignHiveMetastoreDbfsShallowCloneManaged", + "TableForeignHiveMetastoreDbfsView", + "TableForeignHiveMetastoreExternal", + "TableForeignHiveMetastoreManaged", + "TableForeignHiveMetastoreShallowCloneExternal", + "TableForeignHiveMetastoreShallowCloneManaged", + "TableForeignHiveMetastoreView", + "TableForeignMongodb", + "TableForeignMysql", + "TableForeignNetsuite", + "TableForeignOracle", + "TableForeignPostgresql", + "TableForeignRedshift", + "TableForeignSalesforce", + "TableForeignSalesforceDataCloud", + "TableForeignSalesforceDataCloudFileSharing", + "TableForeignSalesforceDataCloudFileSharingView", + "TableForeignSnowflake", + "TableForeignSqldw", + "TableForeignSqlserver", + "TableForeignTeradata", + "TableForeignWorkdayRaas", + "TableIcebergUniformManaged", + "TableInternal", + "TableManagedPostgresql", + "TableMaterializedView", + "TableMaterializedViewDeltasharing", + "TableMetricView", + "TableMetricViewDeltasharing", + "TableOnlineVectorIndexDirect", + "TableOnlineVectorIndexReplica", + "TableOnlineView", + "TableStandard", + "TableStreamingLiveTable", + "TableStreamingLiveTableDeltasharing", + "TableSystem", + "TableSystemDeltasharing", + "TableView", + "TableViewDeltasharing" + ] + }, + "catalog.SecurableType": { + "name": "SecurableType", + "package": "catalog", + "description": "The type of Unity Catalog securable.", + "values": [ + "Catalog", + "CleanRoom", + "Connection", + "Credential", + "ExternalLocation", + "ExternalMetadata", + "Function", + "Metastore", + "Pipeline", + "Provider", + "Recipient", + "Schema", + "Share", + "StagingTable", + "StorageCredential", + "Table", + "Volume" + ] + }, + "catalog.SpecialDestination": { + "name": "SpecialDestination", + "package": "catalog", + "description": "", + "values": [ + "SpecialDestinationCatalogOwner", + "SpecialDestinationConnectionOwner", + "SpecialDestinationCredentialOwner", + "SpecialDestinationExternalLocationOwner", + "SpecialDestinationMetastoreOwner" + ] + }, + "catalog.SseEncryptionDetailsAlgorithm": { + "name": "SseEncryptionDetailsAlgorithm", + "package": "catalog", + "description": "", + "values": [ + "AwsSseKms", + "AwsSseS3" + ] + }, + "catalog.SystemType": { + "name": "SystemType", + "package": "catalog", + "description": "", + "values": [ + "AmazonRedshift", + "AzureSynapse", + "Confluent", + "Databricks", + "GoogleBigquery", + "Kafka", + "Looker", + "MicrosoftFabric", + "MicrosoftSqlServer", + "Mongodb", + "Mysql", + "Oracle", + "Other", + "Postgresql", + "PowerBi", + "Salesforce", + "Sap", + "Servicenow", + "Snowflake", + "StreamNative", + "Tableau", + "Teradata", + "Workday" + ] + }, + "catalog.TableOperation": { + "name": "TableOperation", + "package": "catalog", + "description": "", + "values": [ + "Read", + "ReadWrite" + ] + }, + "catalog.TableType": { + "name": "TableType", + "package": "catalog", + "description": "", + "values": [ + "External", + "ExternalShallowClone", + "Foreign", + "Managed", + "ManagedShallowClone", + "MaterializedView", + "MetricView", + "StreamingTable", + "View" + ] + }, + "catalog.ValidateCredentialResult": { + "name": "ValidateCredentialResult", + "package": "catalog", + "description": "A enum represents the result of the file operation", + "values": [ + "Fail", + "Pass", + "Skip" + ] + }, + "catalog.ValidationResultOperation": { + "name": "ValidationResultOperation", + "package": "catalog", + "description": "A enum represents the file operation performed on the external location with\nthe storage 
credential", + "values": [ + "Delete", + "List", + "PathExists", + "Read", + "Write" + ] + }, + "catalog.ValidationResultResult": { + "name": "ValidationResultResult", + "package": "catalog", + "description": "A enum represents the result of the file operation", + "values": [ + "Fail", + "Pass", + "Skip" + ] + }, + "catalog.VolumeType": { + "name": "VolumeType", + "package": "catalog", + "description": "", + "values": [ + "External", + "Managed" + ] + }, + "catalog.WorkspaceBindingBindingType": { + "name": "WorkspaceBindingBindingType", + "package": "catalog", + "description": "Using `BINDING_TYPE_` prefix here to avoid conflict with `TableOperation`\nenum in `credentials_common.proto`.", + "values": [ + "BindingTypeReadOnly", + "BindingTypeReadWrite" + ] + }, + "compute.AwsAvailability": { + "name": "AwsAvailability", + "package": "compute", + "description": "Availability type used for all subsequent nodes past the `first_on_demand`\nones.\n\nNote: If `first_on_demand` is zero, this availability type will be used for\nthe entire cluster.", + "values": [ + "OnDemand", + "Spot", + "SpotWithFallback" + ] + }, + "compute.AzureAvailability": { + "name": "AzureAvailability", + "package": "compute", + "description": "Availability type used for all subsequent nodes past the `first_on_demand`\nones. Note: If `first_on_demand` is zero, this availability type will be used\nfor the entire cluster.", + "values": [ + "OnDemandAzure", + "SpotAzure", + "SpotWithFallbackAzure" + ] + }, + "compute.CloudProviderNodeStatus": { + "name": "CloudProviderNodeStatus", + "package": "compute", + "description": "", + "values": [ + "NotAvailableInRegion", + "NotEnabledOnSubscription" + ] + }, + "compute.ClusterPermissionLevel": { + "name": "ClusterPermissionLevel", + "package": "compute", + "description": "Permission level", + "values": [ + "CanAttachTo", + "CanManage", + "CanRestart" + ] + }, + "compute.ClusterPolicyPermissionLevel": { + "name": "ClusterPolicyPermissionLevel", + "package": "compute", + "description": "Permission level", + "values": [ + "CanUse" + ] + }, + "compute.ClusterSource": { + "name": "ClusterSource", + "package": "compute", + "description": "Determines whether the cluster was created by a user through the UI, created\nby the Databricks Jobs Scheduler, or through an API request. This is the same\nas cluster_creator, but read only.", + "values": [ + "Api", + "Job", + "Models", + "Pipeline", + "PipelineMaintenance", + "Sql", + "Ui" + ] + }, + "compute.CommandStatus": { + "name": "CommandStatus", + "package": "compute", + "description": "", + "values": [ + "Cancelled", + "Cancelling", + "Error", + "Finished", + "Queued", + "Running" + ] + }, + "compute.ContextStatus": { + "name": "ContextStatus", + "package": "compute", + "description": "", + "values": [ + "Error", + "Pending", + "Running" + ] + }, + "compute.DataPlaneEventDetailsEventType": { + "name": "DataPlaneEventDetailsEventType", + "package": "compute", + "description": "", + "values": [ + "NodeBlacklisted", + "NodeExcludedDecommissioned" + ] + }, + "compute.DataSecurityMode": { + "name": "DataSecurityMode", + "package": "compute", + "description": "Data security mode decides what data governance model to use when accessing\ndata from a cluster.\n\nThe following modes can only be used when `kind = CLASSIC_PREVIEW`. *\n`DATA_SECURITY_MODE_AUTO`: Databricks will choose the most appropriate access\nmode depending on your compute configuration. *\n`DATA_SECURITY_MODE_STANDARD`: Alias for `USER_ISOLATION`. 
*\n`DATA_SECURITY_MODE_DEDICATED`: Alias for `SINGLE_USER`.\n\nThe following modes can be used regardless of `kind`. * `NONE`: No security\nisolatio...", + "values": [ + "DataSecurityModeAuto", + "DataSecurityModeDedicated", + "DataSecurityModeStandard", + "LegacyPassthrough", + "LegacySingleUser", + "LegacySingleUserStandard", + "LegacyTableAcl", + "None", + "SingleUser", + "UserIsolation" + ] + }, + "compute.DiskTypeAzureDiskVolumeType": { + "name": "DiskTypeAzureDiskVolumeType", + "package": "compute", + "description": "All Azure Disk types that Databricks supports. See\nhttps://docs.microsoft.com/en-us/azure/storage/storage-about-disks-and-vhds-linux#types-of-disks", + "values": [ + "PremiumLrs", + "StandardLrs" + ] + }, + "compute.DiskTypeEbsVolumeType": { + "name": "DiskTypeEbsVolumeType", + "package": "compute", + "description": "All EBS volume types that Databricks supports. See\nhttps://aws.amazon.com/ebs/details/ for details.", + "values": [ + "GeneralPurposeSsd", + "ThroughputOptimizedHdd" + ] + }, + "compute.EbsVolumeType": { + "name": "EbsVolumeType", + "package": "compute", + "description": "All EBS volume types that Databricks supports. See\nhttps://aws.amazon.com/ebs/details/ for details.", + "values": [ + "GeneralPurposeSsd", + "ThroughputOptimizedHdd" + ] + }, + "compute.EventDetailsCause": { + "name": "EventDetailsCause", + "package": "compute", + "description": "The cause of a change in target size.", + "values": [ + "Autorecovery", + "Autoscale", + "AutoscaleV2", + "ReplaceBadNodes", + "UserRequest" + ] + }, + "compute.EventType": { + "name": "EventType", + "package": "compute", + "description": "", + "values": [ + "AddNodesFailed", + "AutomaticClusterUpdate", + "AutoscalingBackoff", + "AutoscalingFailed", + "AutoscalingStatsReport", + "ClusterMigrated", + "Creating", + "DbfsDown", + "DecommissionEnded", + "DecommissionStarted", + "DidNotExpandDisk", + "DriverHealthy", + "DriverNotResponding", + "DriverUnavailable", + "Edited", + "ExpandedDisk", + "FailedToExpandDisk", + "InitScriptsFinished", + "InitScriptsStarted", + "MetastoreDown", + "NodeBlacklisted", + "NodeExcludedDecommissioned", + "NodesLost", + "Pinned", + "Resizing", + "Restarting", + "Running", + "SparkException", + "Starting", + "Terminating", + "Unpinned", + "UpsizeCompleted" + ] + }, + "compute.GcpAvailability": { + "name": "GcpAvailability", + "package": "compute", + "description": "This field determines whether the instance pool will contain preemptible VMs,\non-demand VMs, or preemptible VMs with a fallback to on-demand VMs if the\nformer is unavailable.", + "values": [ + "OnDemandGcp", + "PreemptibleGcp", + "PreemptibleWithFallbackGcp" + ] + }, + "compute.GetEventsOrder": { + "name": "GetEventsOrder", + "package": "compute", + "description": "", + "values": [ + "Asc", + "Desc" + ] + }, + "compute.InitScriptExecutionDetailsInitScriptExecutionStatus": { + "name": "InitScriptExecutionDetailsInitScriptExecutionStatus", + "package": "compute", + "description": "Result of attempted script execution", + "values": [ + "FailedExecution", + "FailedFetch", + "FuseMountFailed", + "NotExecuted", + "Skipped", + "Succeeded", + "Unknown" + ] + }, + "compute.InstancePoolAwsAttributesAvailability": { + "name": "InstancePoolAwsAttributesAvailability", + "package": "compute", + "description": "The set of AWS availability types supported when setting up nodes for a\ncluster.", + "values": [ + "OnDemand", + "Spot" + ] + }, + "compute.InstancePoolAzureAttributesAvailability": { + "name": 
"InstancePoolAzureAttributesAvailability", + "package": "compute", + "description": "The set of Azure availability types supported when setting up nodes for a\ncluster.", + "values": [ + "OnDemandAzure", + "SpotAzure" + ] + }, + "compute.InstancePoolPermissionLevel": { + "name": "InstancePoolPermissionLevel", + "package": "compute", + "description": "Permission level", + "values": [ + "CanAttachTo", + "CanManage" + ] + }, + "compute.InstancePoolState": { + "name": "InstancePoolState", + "package": "compute", + "description": "The state of a Cluster. The current allowable state transitions are as\nfollows:\n\n- “ACTIVE“ -\u003e “STOPPED“ - “ACTIVE“ -\u003e “DELETED“ - “STOPPED“ -\u003e\n“ACTIVE“ - “STOPPED“ -\u003e “DELETED“", + "values": [ + "Active", + "Deleted", + "Stopped" + ] + }, + "compute.Kind": { + "name": "Kind", + "package": "compute", + "description": "The kind of compute described by this compute specification.\n\nDepending on `kind`, different validations and default values will be\napplied.\n\nClusters with `kind = CLASSIC_PREVIEW` support the following fields, whereas\nclusters with no specified `kind` do not. *\n[is_single_node](/api/workspace/clusters/create#is_single_node) *\n[use_ml_runtime](/api/workspace/clusters/create#use_ml_runtime) *\n[data_security_mode](/api/workspace/clusters/create#data_security_mode) set\nto `DATA_SECURITY_MODE_AUT...", + "values": [ + "ClassicPreview" + ] + }, + "compute.Language": { + "name": "Language", + "package": "compute", + "description": "", + "values": [ + "Python", + "R", + "Scala", + "Sql" + ] + }, + "compute.LibraryInstallStatus": { + "name": "LibraryInstallStatus", + "package": "compute", + "description": "The status of a library on a specific cluster.", + "values": [ + "Failed", + "Installed", + "Installing", + "Pending", + "Resolving", + "Restored", + "Skipped", + "UninstallOnRestart" + ] + }, + "compute.ListClustersSortByDirection": { + "name": "ListClustersSortByDirection", + "package": "compute", + "description": "", + "values": [ + "Asc", + "Desc" + ] + }, + "compute.ListClustersSortByField": { + "name": "ListClustersSortByField", + "package": "compute", + "description": "", + "values": [ + "ClusterName", + "Default" + ] + }, + "compute.ListSortColumn": { + "name": "ListSortColumn", + "package": "compute", + "description": "", + "values": [ + "PolicyCreationTime", + "PolicyName" + ] + }, + "compute.ListSortOrder": { + "name": "ListSortOrder", + "package": "compute", + "description": "", + "values": [ + "Asc", + "Desc" + ] + }, + "compute.ResultType": { + "name": "ResultType", + "package": "compute", + "description": "", + "values": [ + "Error", + "Image", + "Images", + "Table", + "Text" + ] + }, + "compute.RuntimeEngine": { + "name": "RuntimeEngine", + "package": "compute", + "description": "", + "values": [ + "Null", + "Photon", + "Standard" + ] + }, + "compute.State": { + "name": "State", + "package": "compute", + "description": "The state of a Cluster. 
The current allowable state transitions are as\nfollows:\n\n- `PENDING` -\u003e `RUNNING` - `PENDING` -\u003e `TERMINATING` - `RUNNING` -\u003e\n`RESIZING` - `RUNNING` -\u003e `RESTARTING` - `RUNNING` -\u003e `TERMINATING` -\n`RESTARTING` -\u003e `RUNNING` - `RESTARTING` -\u003e `TERMINATING` - `RESIZING` -\u003e\n`RUNNING` - `RESIZING` -\u003e `TERMINATING` - `TERMINATING` -\u003e `TERMINATED`", + "values": [ + "Error", + "Pending", + "Resizing", + "Restarting", + "Running", + "Terminated", + "Terminating", + "Unknown" + ] + }, + "compute.TerminationReasonCode": { + "name": "TerminationReasonCode", + "package": "compute", + "description": "The status code indicating why the cluster was terminated", + "values": [ + "AbuseDetected", + "AccessTokenFailure", + "AllocationTimeout", + "AllocationTimeoutNoHealthyAndWarmedUpClusters", + "AllocationTimeoutNoHealthyClusters", + "AllocationTimeoutNoMatchedClusters", + "AllocationTimeoutNoReadyClusters", + "AllocationTimeoutNoUnallocatedClusters", + "AllocationTimeoutNoWarmedUpClusters", + "AllocationTimeoutNodeDaemonNotReady", + "AttachProjectFailure", + "AwsAuthorizationFailure", + "AwsInaccessibleKmsKeyFailure", + "AwsInstanceProfileUpdateFailure", + "AwsInsufficientFreeAddressesInSubnetFailure", + "AwsInsufficientInstanceCapacityFailure", + "AwsInvalidKeyPair", + "AwsInvalidKmsKeyState", + "AwsMaxSpotInstanceCountExceededFailure", + "AwsRequestLimitExceeded", + "AwsResourceQuotaExceeded", + "AwsUnsupportedFailure", + "AzureByokKeyPermissionFailure", + "AzureEphemeralDiskFailure", + "AzureInvalidDeploymentTemplate", + "AzureOperationNotAllowedException", + "AzurePackedDeploymentPartialFailure", + "AzureQuotaExceededException", + "AzureResourceManagerThrottling", + "AzureResourceProviderThrottling", + "AzureUnexpectedDeploymentTemplateFailure", + "AzureVmExtensionFailure", + "AzureVnetConfigurationFailure", + "BootstrapTimeout", + "BootstrapTimeoutCloudProviderException", + "BootstrapTimeoutDueToMisconfig", + "BudgetPolicyLimitEnforcementActivated", + "BudgetPolicyResolutionFailure", + "CloudAccountPodQuotaExceeded", + "CloudAccountSetupFailure", + "CloudOperationCancelled", + "CloudProviderDiskSetupFailure", + "CloudProviderInstanceNotLaunched", + "CloudProviderLaunchFailure", + "CloudProviderLaunchFailureDueToMisconfig", + "CloudProviderResourceStockout", + "CloudProviderResourceStockoutDueToMisconfig", + "CloudProviderShutdown", + "ClusterOperationThrottled", + "ClusterOperationTimeout", + "CommunicationLost", + "ContainerLaunchFailure", + "ControlPlaneConnectionFailure", + "ControlPlaneConnectionFailureDueToMisconfig", + "ControlPlaneRequestFailure", + "ControlPlaneRequestFailureDueToMisconfig", + "DataAccessConfigChanged", + "DatabaseConnectionFailure", + "DbfsComponentUnhealthy", + "DbrImageResolutionFailure", + "DisasterRecoveryReplication", + "DnsResolutionError", + "DockerContainerCreationException", + "DockerImagePullFailure", + "DockerImageTooLargeForInstanceException", + "DockerInvalidOsException", + "DriverEviction", + "DriverLaunchTimeout", + "DriverNodeUnreachable", + "DriverOutOfDisk", + "DriverOutOfMemory", + "DriverPodCreationFailure", + "DriverUnexpectedFailure", + "DriverUnhealthy", + "DriverUnreachable", + "DriverUnresponsive", + "DynamicSparkConfSizeExceeded", + "EosSparkImage", + "ExecutionComponentUnhealthy", + "ExecutorPodUnscheduled", + "GcpApiRateQuotaExceeded", + "GcpDeniedByOrgPolicy", + "GcpForbidden", + "GcpIamTimeout", + "GcpInaccessibleKmsKeyFailure", + "GcpInsufficientCapacity", + "GcpIpSpaceExhausted", + 
"GcpKmsKeyPermissionDenied", + "GcpNotFound", + "GcpQuotaExceeded", + "GcpResourceQuotaExceeded", + "GcpServiceAccountAccessDenied", + "GcpServiceAccountDeleted", + "GcpServiceAccountNotFound", + "GcpSubnetNotReady", + "GcpTrustedImageProjectsViolated", + "GkeBasedClusterTermination", + "GlobalInitScriptFailure", + "HiveMetastoreProvisioningFailure", + "ImagePullPermissionDenied", + "InPenaltyBox", + "Inactivity", + "InitContainerNotFinished", + "InitScriptFailure", + "InstancePoolClusterFailure", + "InstancePoolMaxCapacityReached", + "InstancePoolNotFound", + "InstanceUnreachable", + "InstanceUnreachableDueToMisconfig", + "InternalCapacityFailure", + "InternalError", + "InvalidArgument", + "InvalidAwsParameter", + "InvalidInstancePlacementProtocol", + "InvalidSparkImage", + "InvalidWorkerImageFailure", + "IpExhaustionFailure", + "JobFinished", + "K8sActivePodQuotaExceeded", + "K8sAutoscalingFailure", + "K8sDbrClusterLaunchTimeout", + "LazyAllocationTimeout", + "MaintenanceMode", + "MetastoreComponentUnhealthy", + "NephosResourceManagement", + "NetvisorSetupTimeout", + "NetworkCheckControlPlaneFailure", + "NetworkCheckControlPlaneFailureDueToMisconfig", + "NetworkCheckDnsServerFailure", + "NetworkCheckDnsServerFailureDueToMisconfig", + "NetworkCheckMetadataEndpointFailure", + "NetworkCheckMetadataEndpointFailureDueToMisconfig", + "NetworkCheckMultipleComponentsFailure", + "NetworkCheckMultipleComponentsFailureDueToMisconfig", + "NetworkCheckNicFailure", + "NetworkCheckNicFailureDueToMisconfig", + "NetworkCheckStorageFailure", + "NetworkCheckStorageFailureDueToMisconfig", + "NetworkConfigurationFailure", + "NfsMountFailure", + "NoMatchedK8s", + "NoMatchedK8sTestingTag", + "NpipTunnelSetupFailure", + "NpipTunnelTokenFailure", + "PodAssignmentFailure", + "PodSchedulingFailure", + "RequestRejected", + "RequestThrottled", + "ResourceUsageBlocked", + "SecretCreationFailure", + "SecretPermissionDenied", + "SecretResolutionError", + "SecurityDaemonRegistrationException", + "SelfBootstrapFailure", + "ServerlessLongRunningTerminated", + "SkippedSlowNodes", + "SlowImageDownload", + "SparkError", + "SparkImageDownloadFailure", + "SparkImageDownloadThrottled", + "SparkImageNotFound", + "SparkStartupFailure", + "SpotInstanceTermination", + "SshBootstrapFailure", + "StorageDownloadFailure", + "StorageDownloadFailureDueToMisconfig", + "StorageDownloadFailureSlow", + "StorageDownloadFailureThrottled", + "StsClientSetupFailure", + "SubnetExhaustedFailure", + "TemporarilyUnavailable", + "TrialExpired", + "UnexpectedLaunchFailure", + "UnexpectedPodRecreation", + "Unknown", + "UnsupportedInstanceType", + "UpdateInstanceProfileFailure", + "UsagePolicyEntitlementDenied", + "UserInitiatedVmTermination", + "UserRequest", + "WorkerSetupFailure", + "WorkspaceCancelledError", + "WorkspaceConfigurationError", + "WorkspaceUpdate" + ] + }, + "compute.TerminationReasonType": { + "name": "TerminationReasonType", + "package": "compute", + "description": "type of the termination", + "values": [ + "ClientError", + "CloudFailure", + "ServiceFault", + "Success" + ] + }, + "files.FileMode": { + "name": "FileMode", + "package": "files", + "description": "FileMode conveys user intent when opening a file.", + "values": [ + "Overwrite", + "Read", + "Write" + ] + }, + "iam.GetSortOrder": { + "name": "GetSortOrder", + "package": "iam", + "description": "", + "values": [ + "Ascending", + "Descending" + ] + }, + "iam.GroupSchema": { + "name": "GroupSchema", + "package": "iam", + "description": "", + "values": [ + 
"UrnIetfParamsScimSchemasCore20Group" + ] + }, + "iam.ListResponseSchema": { + "name": "ListResponseSchema", + "package": "iam", + "description": "", + "values": [ + "UrnIetfParamsScimApiMessages20ListResponse" + ] + }, + "iam.ListSortOrder": { + "name": "ListSortOrder", + "package": "iam", + "description": "", + "values": [ + "Ascending", + "Descending" + ] + }, + "iam.PasswordPermissionLevel": { + "name": "PasswordPermissionLevel", + "package": "iam", + "description": "Permission level", + "values": [ + "CanUse" + ] + }, + "iam.PatchOp": { + "name": "PatchOp", + "package": "iam", + "description": "Type of patch operation.", + "values": [ + "Add", + "Remove", + "Replace" + ] + }, + "iam.PatchSchema": { + "name": "PatchSchema", + "package": "iam", + "description": "", + "values": [ + "UrnIetfParamsScimApiMessages20PatchOp" + ] + }, + "iam.PermissionLevel": { + "name": "PermissionLevel", + "package": "iam", + "description": "Permission level", + "values": [ + "CanAttachTo", + "CanBind", + "CanCreate", + "CanEdit", + "CanEditMetadata", + "CanManage", + "CanManageProductionVersions", + "CanManageRun", + "CanManageStagingVersions", + "CanMonitor", + "CanMonitorOnly", + "CanQuery", + "CanRead", + "CanRestart", + "CanRun", + "CanUse", + "CanView", + "CanViewMetadata", + "IsOwner" + ] + }, + "iam.RequestAuthzIdentity": { + "name": "RequestAuthzIdentity", + "package": "iam", + "description": "Defines the identity to be used for authZ of the request on the server side.\nSee one pager for for more information: http://go/acl/service-identity", + "values": [ + "RequestAuthzIdentityServiceIdentity", + "RequestAuthzIdentityUserContext" + ] + }, + "iam.ServicePrincipalSchema": { + "name": "ServicePrincipalSchema", + "package": "iam", + "description": "", + "values": [ + "UrnIetfParamsScimSchemasCore20ServicePrincipal" + ] + }, + "iam.UserSchema": { + "name": "UserSchema", + "package": "iam", + "description": "", + "values": [ + "UrnIetfParamsScimSchemasCore20User", + "UrnIetfParamsScimSchemasExtensionWorkspace20User" + ] + }, + "iam.WorkspacePermission": { + "name": "WorkspacePermission", + "package": "iam", + "description": "", + "values": [ + "Admin", + "Unknown", + "User" + ] + }, + "jobs.AuthenticationMethod": { + "name": "AuthenticationMethod", + "package": "jobs", + "description": "", + "values": [ + "Oauth", + "Pat" + ] + }, + "jobs.CleanRoomTaskRunLifeCycleState": { + "name": "CleanRoomTaskRunLifeCycleState", + "package": "jobs", + "description": "Copied from elastic-spark-common/api/messages/runs.proto. Using the original\ndefinition to remove coupling with jobs API definition", + "values": [ + "Blocked", + "InternalError", + "Pending", + "Queued", + "RunLifeCycleStateUnspecified", + "Running", + "Skipped", + "Terminated", + "Terminating", + "WaitingForRetry" + ] + }, + "jobs.CleanRoomTaskRunResultState": { + "name": "CleanRoomTaskRunResultState", + "package": "jobs", + "description": "Copied from elastic-spark-common/api/messages/runs.proto. 
Using the original\ndefinition to avoid cyclic dependency.", + "values": [ + "Canceled", + "Disabled", + "Evicted", + "Excluded", + "Failed", + "MaximumConcurrentRunsReached", + "RunResultStateUnspecified", + "Success", + "SuccessWithFailures", + "Timedout", + "UpstreamCanceled", + "UpstreamEvicted", + "UpstreamFailed" + ] + }, + "jobs.Condition": { + "name": "Condition", + "package": "jobs", + "description": "", + "values": [ + "AllUpdated", + "AnyUpdated" + ] + }, + "jobs.ConditionTaskOp": { + "name": "ConditionTaskOp", + "package": "jobs", + "description": "* `EQUAL_TO`, `NOT_EQUAL` operators perform string comparison of their\noperands. This means that `“12.0” == “12”` will evaluate to `false`.\n* `GREATER_THAN`, `GREATER_THAN_OR_EQUAL`, `LESS_THAN`, `LESS_THAN_OR_EQUAL`\noperators perform numeric comparison of their operands. `“12.0” \u003e=\n“12”` will evaluate to `true`, `“10.0” \u003e= “12”` will evaluate to\n`false`.\n\nThe boolean comparison to task values can be implemented with operators\n`EQUAL_TO`, `NOT_EQUAL`. If a task value w...", + "values": [ + "EqualTo", + "GreaterThan", + "GreaterThanOrEqual", + "LessThan", + "LessThanOrEqual", + "NotEqual" + ] + }, + "jobs.DbtPlatformRunStatus": { + "name": "DbtPlatformRunStatus", + "package": "jobs", + "description": "Response enumeration from calling the dbt platform API, for inclusion in\noutput", + "values": [ + "Cancelled", + "Error", + "Queued", + "Running", + "Starting", + "Success" + ] + }, + "jobs.Format": { + "name": "Format", + "package": "jobs", + "description": "", + "values": [ + "MultiTask", + "SingleTask" + ] + }, + "jobs.GitProvider": { + "name": "GitProvider", + "package": "jobs", + "description": "", + "values": [ + "AwsCodeCommit", + "AzureDevOpsServices", + "BitbucketCloud", + "BitbucketServer", + "GitHub", + "GitHubEnterprise", + "GitLab", + "GitLabEnterpriseEdition" + ] + }, + "jobs.JobDeploymentKind": { + "name": "JobDeploymentKind", + "package": "jobs", + "description": "* `BUNDLE`: The job is managed by Databricks Asset Bundle.", + "values": [ + "Bundle" + ] + }, + "jobs.JobEditMode": { + "name": "JobEditMode", + "package": "jobs", + "description": "Edit mode of the job.\n\n* `UI_LOCKED`: The job is in a locked UI state and cannot be modified. *\n`EDITABLE`: The job is in an editable state and can be modified.", + "values": [ + "Editable", + "UiLocked" + ] + }, + "jobs.JobPermissionLevel": { + "name": "JobPermissionLevel", + "package": "jobs", + "description": "Permission level", + "values": [ + "CanManage", + "CanManageRun", + "CanView", + "IsOwner" + ] + }, + "jobs.JobSourceDirtyState": { + "name": "JobSourceDirtyState", + "package": "jobs", + "description": "Dirty state indicates the job is not fully synced with the job specification\nin the remote repository.\n\nPossible values are: * `NOT_SYNCED`: The job is not yet synced with the\nremote job specification. Import the remote job specification from UI to make\nthe job fully synced. * `DISCONNECTED`: The job is temporary disconnected\nfrom the remote job specification and is allowed for live edit. Import the\nremote job specification again from UI to make the job fully synced.", + "values": [ + "Disconnected", + "NotSynced" + ] + }, + "jobs.JobsHealthMetric": { + "name": "JobsHealthMetric", + "package": "jobs", + "description": "Specifies the health metric that is being evaluated for a particular health\nrule.\n\n* `RUN_DURATION_SECONDS`: Expected total time for a run in seconds. 
*\n`STREAMING_BACKLOG_BYTES`: An estimate of the maximum bytes of data waiting\nto be consumed across all streams. This metric is in Public Preview. *\n`STREAMING_BACKLOG_RECORDS`: An estimate of the maximum offset lag across all\nstreams. This metric is in Public Preview. * `STREAMING_BACKLOG_SECONDS`: An\nestimate of the maximum consumer delay acr...", + "values": [ + "RunDurationSeconds", + "StreamingBacklogBytes", + "StreamingBacklogFiles", + "StreamingBacklogRecords", + "StreamingBacklogSeconds" + ] + }, + "jobs.JobsHealthOperator": { + "name": "JobsHealthOperator", + "package": "jobs", + "description": "Specifies the operator used to compare the health metric value with the\nspecified threshold.", + "values": [ + "GreaterThan" + ] + }, + "jobs.ModelTriggerConfigurationCondition": { + "name": "ModelTriggerConfigurationCondition", + "package": "jobs", + "description": "", + "values": [ + "ModelAliasSet", + "ModelCreated", + "ModelVersionReady" + ] + }, + "jobs.PauseStatus": { + "name": "PauseStatus", + "package": "jobs", + "description": "", + "values": [ + "Paused", + "Unpaused" + ] + }, + "jobs.PerformanceTarget": { + "name": "PerformanceTarget", + "package": "jobs", + "description": "PerformanceTarget defines how performant (lower latency) or cost efficient\nthe execution of run on serverless compute should be. The performance mode on\nthe job or pipeline should map to a performance setting that is passed to\nCluster Manager (see cluster-common PerformanceTarget).", + "values": [ + "PerformanceOptimized", + "Standard" + ] + }, + "jobs.PeriodicTriggerConfigurationTimeUnit": { + "name": "PeriodicTriggerConfigurationTimeUnit", + "package": "jobs", + "description": "", + "values": [ + "Days", + "Hours", + "Weeks" + ] + }, + "jobs.QueueDetailsCodeCode": { + "name": "QueueDetailsCodeCode", + "package": "jobs", + "description": "The reason for queuing the run. * `ACTIVE_RUNS_LIMIT_REACHED`: The run was\nqueued due to reaching the workspace limit of active task runs. *\n`MAX_CONCURRENT_RUNS_REACHED`: The run was queued due to reaching the per-job\nlimit of concurrent job runs. * `ACTIVE_RUN_JOB_TASKS_LIMIT_REACHED`: The run\nwas queued due to reaching the workspace limit of active run job tasks.", + "values": [ + "ActiveRunJobTasksLimitReached", + "ActiveRunsLimitReached", + "MaxConcurrentRunsReached" + ] + }, + "jobs.RepairHistoryItemType": { + "name": "RepairHistoryItemType", + "package": "jobs", + "description": "The repair history item type. Indicates whether a run is the original run or\na repair run.", + "values": [ + "Original", + "Repair" + ] + }, + "jobs.RunIf": { + "name": "RunIf", + "package": "jobs", + "description": "An optional value indicating the condition that determines whether the task\nshould be run once its dependencies have been completed. When omitted,\ndefaults to `ALL_SUCCESS`.\n\nPossible values are: * `ALL_SUCCESS`: All dependencies have executed and\nsucceeded * `AT_LEAST_ONE_SUCCESS`: At least one dependency has succeeded *\n`NONE_FAILED`: None of the dependencies have failed and at least one was\nexecuted * `ALL_DONE`: All dependencies have been completed *\n`AT_LEAST_ONE_FAILED`: At least one de...", + "values": [ + "AllDone", + "AllFailed", + "AllSuccess", + "AtLeastOneFailed", + "AtLeastOneSuccess", + "NoneFailed" + ] + }, + "jobs.RunLifeCycleState": { + "name": "RunLifeCycleState", + "package": "jobs", + "description": "A value indicating the run's lifecycle state. The possible values are: *\n`QUEUED`: The run is queued. 
* `PENDING`: The run is waiting to be executed\nwhile the cluster and execution context are being prepared. * `RUNNING`: The\ntask of this run is being executed. * `TERMINATING`: The task of this run has\ncompleted, and the cluster and execution context are being cleaned up. *\n`TERMINATED`: The task of this run has completed, and the cluster and\nexecution context have been cleaned up. This state...", + "values": [ + "Blocked", + "InternalError", + "Pending", + "Queued", + "Running", + "Skipped", + "Terminated", + "Terminating", + "WaitingForRetry" + ] + }, + "jobs.RunLifecycleStateV2State": { + "name": "RunLifecycleStateV2State", + "package": "jobs", + "description": "The current state of the run.", + "values": [ + "Blocked", + "Pending", + "Queued", + "Running", + "Terminated", + "Terminating", + "Waiting" + ] + }, + "jobs.RunResultState": { + "name": "RunResultState", + "package": "jobs", + "description": "A value indicating the run's result. The possible values are: * `SUCCESS`:\nThe task completed successfully. * `FAILED`: The task completed with an\nerror. * `TIMEDOUT`: The run was stopped after reaching the timeout. *\n`CANCELED`: The run was canceled at user request. *\n`MAXIMUM_CONCURRENT_RUNS_REACHED`: The run was skipped because the maximum\nconcurrent runs were reached. * `EXCLUDED`: The run was skipped because the\nnecessary conditions were not met. * `SUCCESS_WITH_FAILURES`: The job run\nco...", + "values": [ + "Canceled", + "Disabled", + "Excluded", + "Failed", + "MaximumConcurrentRunsReached", + "Success", + "SuccessWithFailures", + "Timedout", + "UpstreamCanceled", + "UpstreamFailed" + ] + }, + "jobs.RunType": { + "name": "RunType", + "package": "jobs", + "description": "The type of a run. * `JOB_RUN`: Normal job run. A run created with\n:method:jobs/runNow. * `WORKFLOW_RUN`: Workflow run. A run created with\n[dbutils.notebook.run]. * `SUBMIT_RUN`: Submit run. A run created with\n:method:jobs/submit.\n\n[dbutils.notebook.run]: https://docs.databricks.com/dev-tools/databricks-utils.html#dbutils-workflow", + "values": [ + "JobRun", + "SubmitRun", + "WorkflowRun" + ] + }, + "jobs.Source": { + "name": "Source", + "package": "jobs", + "description": "Optional location type of the SQL file. When set to `WORKSPACE`, the SQL file\nwill be retrieved\\ from the local Databricks workspace. When set to `GIT`,\nthe SQL file will be retrieved from a Git repository defined in `git_source`.\nIf the value is empty, the task will use `GIT` if `git_source` is defined and\n`WORKSPACE` otherwise.\n\n* `WORKSPACE`: SQL file is located in Databricks workspace. 
* `GIT`: SQL file\nis located in cloud Git provider.", + "values": [ + "Git", + "Workspace" + ] + }, + "jobs.SqlAlertState": { + "name": "SqlAlertState", + "package": "jobs", + "description": "The state of the SQL alert.\n\n* UNKNOWN: alert yet to be evaluated * OK: alert evaluated and did not\nfulfill trigger conditions * TRIGGERED: alert evaluated and fulfilled trigger\nconditions", + "values": [ + "Ok", + "Triggered", + "Unknown" + ] + }, + "jobs.SqlDashboardWidgetOutputStatus": { + "name": "SqlDashboardWidgetOutputStatus", + "package": "jobs", + "description": "", + "values": [ + "Cancelled", + "Failed", + "Pending", + "Running", + "Success" + ] + }, + "jobs.StorageMode": { + "name": "StorageMode", + "package": "jobs", + "description": "", + "values": [ + "DirectQuery", + "Dual", + "Import" + ] + }, + "jobs.TaskRetryMode": { + "name": "TaskRetryMode", + "package": "jobs", + "description": "task retry mode of the continuous job * NEVER: The failed task will not be\nretried. * ON_FAILURE: Retry a failed task if at least one other task in the\njob is still running its first attempt. When this condition is no longer met\nor the retry limit is reached, the job run is cancelled and a new run is\nstarted.", + "values": [ + "Never", + "OnFailure" + ] + }, + "jobs.TerminationCodeCode": { + "name": "TerminationCodeCode", + "package": "jobs", + "description": "The code indicates why the run was terminated. Additional codes might be\nintroduced in future releases. * `SUCCESS`: The run was completed\nsuccessfully. * `SUCCESS_WITH_FAILURES`: The run was completed successfully\nbut some child runs failed. * `USER_CANCELED`: The run was successfully\ncanceled during execution by a user. * `CANCELED`: The run was canceled\nduring execution by the Databricks platform; for example, if the maximum run\nduration was exceeded. * `SKIPPED`: Run was never executed, f...", + "values": [ + "BudgetPolicyLimitExceeded", + "Canceled", + "CloudFailure", + "ClusterError", + "ClusterRequestLimitExceeded", + "Disabled", + "DriverError", + "FeatureDisabled", + "InternalError", + "InvalidClusterRequest", + "InvalidRunConfiguration", + "LibraryInstallationError", + "MaxConcurrentRunsExceeded", + "MaxJobQueueSizeExceeded", + "MaxSparkContextsExceeded", + "RepositoryCheckoutFailed", + "ResourceNotFound", + "RunExecutionError", + "Skipped", + "StorageAccessError", + "Success", + "SuccessWithFailures", + "UnauthorizedError", + "UserCanceled", + "WorkspaceRunLimitExceeded" + ] + }, + "jobs.TerminationTypeType": { + "name": "TerminationTypeType", + "package": "jobs", + "description": "* `SUCCESS`: The run terminated without any issues * `INTERNAL_ERROR`: An\nerror occurred in the Databricks platform. Please look at the [status page]\nor contact support if the issue persists. * `CLIENT_ERROR`: The run was\nterminated because of an error caused by user input or the job configuration.\n* `CLOUD_FAILURE`: The run was terminated because of an issue with your cloud\nprovider.\n\n[status page]: https://status.databricks.com/", + "values": [ + "ClientError", + "CloudFailure", + "InternalError", + "Success" + ] + }, + "jobs.TriggerType": { + "name": "TriggerType", + "package": "jobs", + "description": "The type of trigger that fired this run.\n\n* `PERIODIC`: Schedules that periodically trigger runs, such as a cron\nscheduler. * `ONE_TIME`: One time triggers that fire a single run. This\noccurs you triggered a single run on demand through the UI or the API. 
*\n`RETRY`: Indicates a run that is triggered as a retry of a previously failed\nrun. This occurs when you request to re-run the job in case of failures. *\n`RUN_JOB_TASK`: Indicates a run that is triggered using a Run Job task. *\n`FILE_ARRIVAL...", + "values": [ + "Continuous", + "ContinuousRestart", + "FileArrival", + "OneTime", + "Periodic", + "Retry", + "RunJobTask", + "Table" + ] + }, + "jobs.ViewType": { + "name": "ViewType", + "package": "jobs", + "description": "* `NOTEBOOK`: Notebook view item. * `DASHBOARD`: Dashboard view item.", + "values": [ + "Dashboard", + "Notebook" + ] + }, + "jobs.ViewsToExport": { + "name": "ViewsToExport", + "package": "jobs", + "description": "* `CODE`: Code view of the notebook. * `DASHBOARDS`: All dashboard views of\nthe notebook. * `ALL`: All views of the notebook.", + "values": [ + "All", + "Code", + "Dashboards" + ] + }, + "ml.ActivityAction": { + "name": "ActivityAction", + "package": "ml", + "description": "An action that a user (with sufficient permissions) could take on an activity\nor comment.\n\nFor activities, valid values are: * `APPROVE_TRANSITION_REQUEST`: Approve a\ntransition request\n\n* `REJECT_TRANSITION_REQUEST`: Reject a transition request\n\n* `CANCEL_TRANSITION_REQUEST`: Cancel (delete) a transition request\n\nFor comments, valid values are: * `EDIT_COMMENT`: Edit the comment\n\n* `DELETE_COMMENT`: Delete the comment", + "values": [ + "ApproveTransitionRequest", + "CancelTransitionRequest", + "DeleteComment", + "EditComment", + "RejectTransitionRequest" + ] + }, + "ml.ActivityType": { + "name": "ActivityType", + "package": "ml", + "description": "Type of activity. Valid values are: * `APPLIED_TRANSITION`: User applied the\ncorresponding stage transition.\n\n* `REQUESTED_TRANSITION`: User requested the corresponding stage transition.\n\n* `CANCELLED_REQUEST`: User cancelled an existing transition request.\n\n* `APPROVED_REQUEST`: User approved the corresponding stage transition.\n\n* `REJECTED_REQUEST`: User rejected the coressponding stage transition.\n\n* `SYSTEM_TRANSITION`: For events performed as a side effect, such as\narchiving existing mod...", + "values": [ + "AppliedTransition", + "ApprovedRequest", + "CancelledRequest", + "NewComment", + "RejectedRequest", + "RequestedTransition", + "SystemTransition" + ] + }, + "ml.CommentActivityAction": { + "name": "CommentActivityAction", + "package": "ml", + "description": "An action that a user (with sufficient permissions) could take on an activity\nor comment.\n\nFor activities, valid values are: * `APPROVE_TRANSITION_REQUEST`: Approve a\ntransition request\n\n* `REJECT_TRANSITION_REQUEST`: Reject a transition request\n\n* `CANCEL_TRANSITION_REQUEST`: Cancel (delete) a transition request\n\nFor comments, valid values are: * `EDIT_COMMENT`: Edit the comment\n\n* `DELETE_COMMENT`: Delete the comment", + "values": [ + "ApproveTransitionRequest", + "CancelTransitionRequest", + "DeleteComment", + "EditComment", + "RejectTransitionRequest" + ] + }, + "ml.ExperimentPermissionLevel": { + "name": "ExperimentPermissionLevel", + "package": "ml", + "description": "Permission level", + "values": [ + "CanEdit", + "CanManage", + "CanRead" + ] + }, + "ml.ForecastingExperimentState": { + "name": "ForecastingExperimentState", + "package": "ml", + "description": "", + "values": [ + "Cancelled", + "Failed", + "Pending", + "Running", + "Succeeded" + ] + }, + "ml.FunctionFunctionType": { + "name": "FunctionFunctionType", + "package": "ml", + "description": "", + "values": [ + "ApproxCountDistinct", 
+ "ApproxPercentile", + "Avg", + "Count", + "First", + "Last", + "Max", + "Min", + "StddevPop", + "StddevSamp", + "Sum", + "VarPop", + "VarSamp" + ] + }, + "ml.LoggedModelStatus": { + "name": "LoggedModelStatus", + "package": "ml", + "description": "A LoggedModelStatus enum value represents the status of a logged model.", + "values": [ + "LoggedModelPending", + "LoggedModelReady", + "LoggedModelUploadFailed" + ] + }, + "ml.MaterializedFeaturePipelineScheduleState": { + "name": "MaterializedFeaturePipelineScheduleState", + "package": "ml", + "description": "", + "values": [ + "Active", + "Paused", + "Snapshot" + ] + }, + "ml.ModelVersionStatus": { + "name": "ModelVersionStatus", + "package": "ml", + "description": "The status of the model version. Valid values are: * `PENDING_REGISTRATION`:\nRequest to register a new model version is pending as server performs\nbackground tasks.\n\n* `FAILED_REGISTRATION`: Request to register a new model version has failed.\n\n* `READY`: Model version is ready for use.", + "values": [ + "FailedRegistration", + "PendingRegistration", + "Ready" + ] + }, + "ml.OnlineStoreState": { + "name": "OnlineStoreState", + "package": "ml", + "description": "", + "values": [ + "Available", + "Deleting", + "FailingOver", + "Starting", + "Stopped", + "Updating" + ] + }, + "ml.PermissionLevel": { + "name": "PermissionLevel", + "package": "ml", + "description": "Permission level of the requesting user on the object. For what is allowed at\neach level, see [MLflow Model permissions](..).", + "values": [ + "CanCreateRegisteredModel", + "CanEdit", + "CanManage", + "CanManageProductionVersions", + "CanManageStagingVersions", + "CanRead" + ] + }, + "ml.PublishSpecPublishMode": { + "name": "PublishSpecPublishMode", + "package": "ml", + "description": "", + "values": [ + "Continuous", + "Snapshot", + "Triggered" + ] + }, + "ml.RegisteredModelPermissionLevel": { + "name": "RegisteredModelPermissionLevel", + "package": "ml", + "description": "Permission level", + "values": [ + "CanEdit", + "CanManage", + "CanManageProductionVersions", + "CanManageStagingVersions", + "CanRead" + ] + }, + "ml.RegistryEmailSubscriptionType": { + "name": "RegistryEmailSubscriptionType", + "package": "ml", + "description": ".. note:: Experimental: This entity may change or be removed in a future\nrelease without warning. Email subscription types for registry notifications:\n- `ALL_EVENTS`: Subscribed to all events. - `DEFAULT`: Default subscription\ntype. - `SUBSCRIBED`: Subscribed to notifications. 
- `UNSUBSCRIBED`: Not\nsubscribed to notifications.", + "values": [ + "AllEvents", + "Default", + "Subscribed", + "Unsubscribed" + ] + }, + "ml.RegistryWebhookEvent": { + "name": "RegistryWebhookEvent", + "package": "ml", + "description": "", + "values": [ + "CommentCreated", + "ModelVersionCreated", + "ModelVersionTagSet", + "ModelVersionTransitionedStage", + "ModelVersionTransitionedToArchived", + "ModelVersionTransitionedToProduction", + "ModelVersionTransitionedToStaging", + "RegisteredModelCreated", + "TransitionRequestCreated", + "TransitionRequestToArchivedCreated", + "TransitionRequestToProductionCreated", + "TransitionRequestToStagingCreated" + ] + }, + "ml.RegistryWebhookStatus": { + "name": "RegistryWebhookStatus", + "package": "ml", + "description": "Enable or disable triggering the webhook, or put the webhook into test mode.\nThe default is `ACTIVE`: * `ACTIVE`: Webhook is triggered when an associated\nevent happens.\n\n* `DISABLED`: Webhook is not triggered.\n\n* `TEST_MODE`: Webhook can be triggered through the test endpoint, but is not\ntriggered on a real event.", + "values": [ + "Active", + "Disabled", + "TestMode" + ] + }, + "ml.RunInfoStatus": { + "name": "RunInfoStatus", + "package": "ml", + "description": "Status of a run.", + "values": [ + "Failed", + "Finished", + "Killed", + "Running", + "Scheduled" + ] + }, + "ml.Status": { + "name": "Status", + "package": "ml", + "description": "The status of the model version. Valid values are: * `PENDING_REGISTRATION`:\nRequest to register a new model version is pending as server performs\nbackground tasks.\n\n* `FAILED_REGISTRATION`: Request to register a new model version has failed.\n\n* `READY`: Model version is ready for use.", + "values": [ + "FailedRegistration", + "PendingRegistration", + "Ready" + ] + }, + "ml.UpdateRunStatus": { + "name": "UpdateRunStatus", + "package": "ml", + "description": "Status of a run.", + "values": [ + "Failed", + "Finished", + "Killed", + "Running", + "Scheduled" + ] + }, + "ml.ViewType": { + "name": "ViewType", + "package": "ml", + "description": "Qualifier for the view type.", + "values": [ + "ActiveOnly", + "All", + "DeletedOnly" + ] + }, + "pipelines.DayOfWeek": { + "name": "DayOfWeek", + "package": "pipelines", + "description": "Days of week in which the window is allowed to happen. 
If not specified all\ndays of the week will be used.", + "values": [ + "Friday", + "Monday", + "Saturday", + "Sunday", + "Thursday", + "Tuesday", + "Wednesday" + ] + }, + "pipelines.DeploymentKind": { + "name": "DeploymentKind", + "package": "pipelines", + "description": "The deployment method that manages the pipeline: - BUNDLE: The pipeline is\nmanaged by a Databricks Asset Bundle.", + "values": [ + "Bundle" + ] + }, + "pipelines.EventLevel": { + "name": "EventLevel", + "package": "pipelines", + "description": "The severity level of the event.", + "values": [ + "Error", + "Info", + "Metrics", + "Warn" + ] + }, + "pipelines.GetPipelineResponseHealth": { + "name": "GetPipelineResponseHealth", + "package": "pipelines", + "description": "The health of a pipeline.", + "values": [ + "Healthy", + "Unhealthy" + ] + }, + "pipelines.IngestionSourceType": { + "name": "IngestionSourceType", + "package": "pipelines", + "description": "", + "values": [ + "Bigquery", + "Dynamics365", + "ForeignCatalog", + "Ga4RawData", + "ManagedPostgresql", + "Mysql", + "Netsuite", + "Oracle", + "Postgresql", + "Salesforce", + "Servicenow", + "Sharepoint", + "Sqlserver", + "Teradata", + "WorkdayRaas" + ] + }, + "pipelines.MaturityLevel": { + "name": "MaturityLevel", + "package": "pipelines", + "description": "Maturity level for EventDetails.", + "values": [ + "Deprecated", + "Evolving", + "Stable" + ] + }, + "pipelines.PipelineClusterAutoscaleMode": { + "name": "PipelineClusterAutoscaleMode", + "package": "pipelines", + "description": "Databricks Enhanced Autoscaling optimizes cluster utilization by\nautomatically allocating cluster resources based on workload volume, with\nminimal impact to the data processing latency of your pipelines. Enhanced\nAutoscaling is available for `updates` clusters only. 
The legacy autoscaling\nfeature is used for `maintenance` clusters.", + "values": [ + "Enhanced", + "Legacy" + ] + }, + "pipelines.PipelinePermissionLevel": { + "name": "PipelinePermissionLevel", + "package": "pipelines", + "description": "Permission level", + "values": [ + "CanManage", + "CanRun", + "CanView", + "IsOwner" + ] + }, + "pipelines.PipelineState": { + "name": "PipelineState", + "package": "pipelines", + "description": "The pipeline state.", + "values": [ + "Deleted", + "Deploying", + "Failed", + "Idle", + "Recovering", + "Resetting", + "Running", + "Starting", + "Stopping" + ] + }, + "pipelines.PipelineStateInfoHealth": { + "name": "PipelineStateInfoHealth", + "package": "pipelines", + "description": "The health of a pipeline.", + "values": [ + "Healthy", + "Unhealthy" + ] + }, + "pipelines.StartUpdateCause": { + "name": "StartUpdateCause", + "package": "pipelines", + "description": "What triggered this update.", + "values": [ + "ApiCall", + "InfrastructureMaintenance", + "JobTask", + "RetryOnFailure", + "SchemaChange", + "ServiceUpgrade", + "UserAction" + ] + }, + "pipelines.TableSpecificConfigScdType": { + "name": "TableSpecificConfigScdType", + "package": "pipelines", + "description": "The SCD type to use to ingest the table.", + "values": [ + "AppendOnly", + "ScdType1", + "ScdType2" + ] + }, + "pipelines.UpdateInfoCause": { + "name": "UpdateInfoCause", + "package": "pipelines", + "description": "What triggered this update.", + "values": [ + "ApiCall", + "InfrastructureMaintenance", + "JobTask", + "RetryOnFailure", + "SchemaChange", + "ServiceUpgrade", + "UserAction" + ] + }, + "pipelines.UpdateInfoState": { + "name": "UpdateInfoState", + "package": "pipelines", + "description": "The update state.", + "values": [ + "Canceled", + "Completed", + "Created", + "Failed", + "Initializing", + "Queued", + "Resetting", + "Running", + "SettingUpTables", + "Stopping", + "WaitingForResources" + ] + }, + "pipelines.UpdateStateInfoState": { + "name": "UpdateStateInfoState", + "package": "pipelines", + "description": "The update state.", + "values": [ + "Canceled", + "Completed", + "Created", + "Failed", + "Initializing", + "Queued", + "Resetting", + "Running", + "SettingUpTables", + "Stopping", + "WaitingForResources" + ] + }, + "serving.AiGatewayGuardrailPiiBehaviorBehavior": { + "name": "AiGatewayGuardrailPiiBehaviorBehavior", + "package": "serving", + "description": "", + "values": [ + "Block", + "Mask", + "None" + ] + }, + "serving.AiGatewayRateLimitKey": { + "name": "AiGatewayRateLimitKey", + "package": "serving", + "description": "", + "values": [ + "Endpoint", + "ServicePrincipal", + "User", + "UserGroup" + ] + }, + "serving.AiGatewayRateLimitRenewalPeriod": { + "name": "AiGatewayRateLimitRenewalPeriod", + "package": "serving", + "description": "", + "values": [ + "Minute" + ] + }, + "serving.AmazonBedrockConfigBedrockProvider": { + "name": "AmazonBedrockConfigBedrockProvider", + "package": "serving", + "description": "", + "values": [ + "Ai21labs", + "Amazon", + "Anthropic", + "Cohere" + ] + }, + "serving.ChatMessageRole": { + "name": "ChatMessageRole", + "package": "serving", + "description": "The role of the message. 
One of [system, user, assistant].", + "values": [ + "Assistant", + "System", + "User" + ] + }, + "serving.EmbeddingsV1ResponseEmbeddingElementObject": { + "name": "EmbeddingsV1ResponseEmbeddingElementObject", + "package": "serving", + "description": "This will always be 'embedding'.", + "values": [ + "Embedding" + ] + }, + "serving.EndpointStateConfigUpdate": { + "name": "EndpointStateConfigUpdate", + "package": "serving", + "description": "", + "values": [ + "InProgress", + "NotUpdating", + "UpdateCanceled", + "UpdateFailed" + ] + }, + "serving.EndpointStateReady": { + "name": "EndpointStateReady", + "package": "serving", + "description": "", + "values": [ + "NotReady", + "Ready" + ] + }, + "serving.ExternalFunctionRequestHttpMethod": { + "name": "ExternalFunctionRequestHttpMethod", + "package": "serving", + "description": "", + "values": [ + "Delete", + "Get", + "Patch", + "Post", + "Put" + ] + }, + "serving.ExternalModelProvider": { + "name": "ExternalModelProvider", + "package": "serving", + "description": "", + "values": [ + "Ai21labs", + "AmazonBedrock", + "Anthropic", + "Cohere", + "Custom", + "DatabricksModelServing", + "GoogleCloudVertexAi", + "Openai", + "Palm" + ] + }, + "serving.QueryEndpointResponseObject": { + "name": "QueryEndpointResponseObject", + "package": "serving", + "description": "The type of object returned by the __external/foundation model__ serving\nendpoint, one of [text_completion, chat.completion, list (of embeddings)].", + "values": [ + "ChatCompletion", + "List", + "TextCompletion" + ] + }, + "serving.RateLimitKey": { + "name": "RateLimitKey", + "package": "serving", + "description": "", + "values": [ + "Endpoint", + "User" + ] + }, + "serving.RateLimitRenewalPeriod": { + "name": "RateLimitRenewalPeriod", + "package": "serving", + "description": "", + "values": [ + "Minute" + ] + }, "serving.ServedModelInputWorkloadType": { "name": "ServedModelInputWorkloadType", "package": "serving", - "description": "Please keep this in sync with with workload types in InferenceEndpointEntities.scala", - "fields": {} + "description": "Please keep this in sync with with workload types in\nInferenceEndpointEntities.scala", + "values": [ + "Cpu", + "GpuLarge", + "GpuMedium", + "GpuSmall", + "MultigpuMedium" + ] }, - "serving.ServingModelWorkloadType": { - "name": "ServingModelWorkloadType", + "serving.ServedModelStateDeployment": { + "name": "ServedModelStateDeployment", "package": "serving", - "description": "Please keep this in sync with with workload types in InferenceEndpointEntities.scala", - "fields": {} + "description": "", + "values": [ + "DeploymentAborted", + "DeploymentCreating", + "DeploymentFailed", + "DeploymentReady", + "DeploymentRecovering" + ] }, - "serving.TrafficConfig": { - "name": "TrafficConfig", + "serving.ServingEndpointDetailedPermissionLevel": { + "name": "ServingEndpointDetailedPermissionLevel", "package": "serving", - "description": "traffic config configuration.", - "fields": { - "routes": { - "name": "routes", - "type": "any", - "description": "The list of routes that define traffic to each served entity.", - "required": false - } - } + "description": "", + "values": [ + "CanManage", + "CanQuery", + "CanView" + ] + }, + "serving.ServingEndpointPermissionLevel": { + "name": "ServingEndpointPermissionLevel", + "package": "serving", + "description": "Permission level", + "values": [ + "CanManage", + "CanQuery", + "CanView" + ] + }, + "serving.ServingModelWorkloadType": { + "name": "ServingModelWorkloadType", + "package": "serving", + "description": 
"Please keep this in sync with with workload types in\nInferenceEndpointEntities.scala", + "values": [ + "Cpu", + "GpuLarge", + "GpuMedium", + "GpuSmall", + "MultigpuMedium" + ] }, "sql.Aggregation": { "name": "Aggregation", "package": "sql", - "description": "aggregation configuration.", - "fields": {} + "description": "", + "values": [ + "Avg", + "Count", + "CountDistinct", + "Max", + "Median", + "Min", + "Stddev", + "Sum" + ] }, "sql.AlertEvaluationState": { "name": "AlertEvaluationState", "package": "sql", - "description": "UNSPECIFIED - default unspecify value for proto enum, do not use it in the code\nUNKNOWN - alert not yet evaluated\nTRIGGERED - alert is triggered\nOK - alert is not triggered\nERROR - alert evaluation failed", - "fields": {} + "description": "UNSPECIFIED - default unspecify value for proto enum, do not use it in the\ncode UNKNOWN - alert not yet evaluated TRIGGERED - alert is triggered OK -\nalert is not triggered ERROR - alert evaluation failed", + "values": [ + "Error", + "Ok", + "Triggered", + "Unknown" + ] }, "sql.AlertLifecycleState": { "name": "AlertLifecycleState", "package": "sql", - "description": "alert lifecycle state configuration.", - "fields": {} + "description": "", + "values": [ + "Active", + "Deleted" + ] }, - "sql.AlertV2Evaluation": { - "name": "AlertV2Evaluation", + "sql.AlertOperator": { + "name": "AlertOperator", "package": "sql", - "description": "alert v2 evaluation configuration.", - "fields": { - "comparison_operator": { - "name": "comparison_operator", - "type": "any", - "description": "Operator used for comparison in alert evaluation.", - "required": false - }, - "empty_result_state": { - "name": "empty_result_state", - "type": "any", - "description": "Alert state if result is empty. Please avoid setting this field to be `UNKNOWN` because `UNKNOWN` state is planned to be deprecated.", - "required": false - }, - "last_evaluated_at": { - "name": "last_evaluated_at", - "type": "string (timestamp)", - "description": "Timestamp of the last evaluation.", - "required": false, - "output_only": true - }, - "notification": { - "name": "notification", - "type": "any", - "description": "User or Notification Destination to notify when alert is triggered.", - "required": false - }, - "source": { - "name": "source", - "type": "any", - "description": "Source column from result to use to evaluate alert", - "required": false - }, - "state": { - "name": "state", - "type": "any", - "description": "Latest state of alert evaluation.", - "required": false, - "output_only": true - }, - "threshold": { - "name": "threshold", - "type": "any", - "description": "Threshold to user for alert evaluation, can be a column or a value.", - "required": false - } - } + "description": "", + "values": [ + "Equal", + "GreaterThan", + "GreaterThanOrEqual", + "IsNull", + "LessThan", + "LessThanOrEqual", + "NotEqual" + ] }, - "sql.AlertV2Notification": { - "name": "AlertV2Notification", + "sql.AlertOptionsEmptyResultState": { + "name": "AlertOptionsEmptyResultState", "package": "sql", - "description": "alert v2 notification configuration.", - "fields": { - "notify_on_ok": { - "name": "notify_on_ok", - "type": "any", - "description": "Whether to notify alert subscribers when alert returns back to normal.", - "required": false - }, - "retrigger_seconds": { - "name": "retrigger_seconds", - "type": "int", - "description": "Number of seconds an alert waits after being triggered before it is allowed to send another notification.\nIf set to 0 or omitted, the alert will not send any 
further notifications after the first trigger\nSetting this value to 1 allows the alert to send a notification on every evaluation where the condition is met, effectively making it always retrigger for notification purposes.", - "required": false - }, - "subscriptions": { - "name": "subscriptions", - "type": "any", - "description": "", - "required": false - } - } + "description": "State that alert evaluates to when query result is empty.", + "values": [ + "Ok", + "Triggered", + "Unknown" + ] }, - "sql.AlertV2Operand": { - "name": "AlertV2Operand", + "sql.AlertState": { + "name": "AlertState", "package": "sql", - "description": "alert v2 operand configuration.", - "fields": { - "column": { - "name": "column", - "type": "any", - "description": "", - "required": false - }, - "value": { - "name": "value", - "type": "any", - "description": "", - "required": false - } - } + "description": "", + "values": [ + "Ok", + "Triggered", + "Unknown" + ] }, - "sql.AlertV2OperandColumn": { - "name": "AlertV2OperandColumn", + "sql.ChannelName": { + "name": "ChannelName", "package": "sql", - "description": "alert v2 operand column configuration.", - "fields": { - "aggregation": { - "name": "aggregation", - "type": "any", - "description": "If not set, the behavior is equivalent to using `First row` in the UI.", - "required": false - }, - "display": { - "name": "display", - "type": "any", - "description": "", - "required": false - }, - "name": { - "name": "name", - "type": "any", - "description": "", - "required": false - } - } + "description": "", + "values": [ + "ChannelNameCurrent", + "ChannelNameCustom", + "ChannelNamePreview", + "ChannelNamePrevious" + ] + }, + "sql.ColumnInfoTypeName": { + "name": "ColumnInfoTypeName", + "package": "sql", + "description": "The name of the base data type. 
This doesn't include details for complex\ntypes such as STRUCT, MAP or ARRAY.", + "values": [ + "Array", + "Binary", + "Boolean", + "Byte", + "Char", + "Date", + "Decimal", + "Double", + "Float", + "Int", + "Interval", + "Long", + "Map", + "Null", + "Short", + "String", + "Struct", + "Timestamp", + "UserDefinedType" + ] + }, + "sql.ComparisonOperator": { + "name": "ComparisonOperator", + "package": "sql", + "description": "", + "values": [ + "Equal", + "GreaterThan", + "GreaterThanOrEqual", + "IsNotNull", + "IsNull", + "LessThan", + "LessThanOrEqual", + "NotEqual" + ] + }, + "sql.CreateWarehouseRequestWarehouseType": { + "name": "CreateWarehouseRequestWarehouseType", + "package": "sql", + "description": "", + "values": [ + "Classic", + "Pro", + "TypeUnspecified" + ] + }, + "sql.DatePrecision": { + "name": "DatePrecision", + "package": "sql", + "description": "", + "values": [ + "DayPrecision", + "MinutePrecision", + "SecondPrecision" + ] + }, + "sql.DateRangeValueDynamicDateRange": { + "name": "DateRangeValueDynamicDateRange", + "package": "sql", + "description": "", + "values": [ + "Last12Months", + "Last14Days", + "Last24Hours", + "Last30Days", + "Last60Days", + "Last7Days", + "Last8Hours", + "Last90Days", + "LastHour", + "LastMonth", + "LastWeek", + "LastYear", + "ThisMonth", + "ThisWeek", + "ThisYear", + "Today", + "Yesterday" + ] + }, + "sql.DateValueDynamicDate": { + "name": "DateValueDynamicDate", + "package": "sql", + "description": "", + "values": [ + "Now", + "Yesterday" + ] + }, + "sql.Disposition": { + "name": "Disposition", + "package": "sql", + "description": "", + "values": [ + "ExternalLinks", + "Inline" + ] + }, + "sql.EditWarehouseRequestWarehouseType": { + "name": "EditWarehouseRequestWarehouseType", + "package": "sql", + "description": "", + "values": [ + "Classic", + "Pro", + "TypeUnspecified" + ] + }, + "sql.EndpointInfoWarehouseType": { + "name": "EndpointInfoWarehouseType", + "package": "sql", + "description": "", + "values": [ + "Classic", + "Pro", + "TypeUnspecified" + ] + }, + "sql.ExecuteStatementRequestOnWaitTimeout": { + "name": "ExecuteStatementRequestOnWaitTimeout", + "package": "sql", + "description": "When `wait_timeout \u003e 0s`, the call will block up to the specified time. If\nthe statement execution doesn't finish within this time, `on_wait_timeout`\ndetermines whether the execution should continue or be canceled. When set to\n`CONTINUE`, the statement execution continues asynchronously and the call\nreturns a statement ID which can be used for polling with\n:method:statementexecution/getStatement. 
When set to `CANCEL`, the statement\nexecution is canceled and the call returns with a `CANCELED` ...", + "values": [ + "Cancel", + "Continue" + ] + }, + "sql.Format": { + "name": "Format", + "package": "sql", + "description": "", + "values": [ + "ArrowStream", + "Csv", + "JsonArray" + ] + }, + "sql.GetWarehouseResponseWarehouseType": { + "name": "GetWarehouseResponseWarehouseType", + "package": "sql", + "description": "", + "values": [ + "Classic", + "Pro", + "TypeUnspecified" + ] + }, + "sql.GetWorkspaceWarehouseConfigResponseSecurityPolicy": { + "name": "GetWorkspaceWarehouseConfigResponseSecurityPolicy", + "package": "sql", + "description": "Security policy to be used for warehouses", + "values": [ + "DataAccessControl", + "None", + "Passthrough" + ] + }, + "sql.LegacyAlertState": { + "name": "LegacyAlertState", + "package": "sql", + "description": "", + "values": [ + "Ok", + "Triggered", + "Unknown" + ] }, - "sql.AlertV2OperandValue": { - "name": "AlertV2OperandValue", + "sql.LifecycleState": { + "name": "LifecycleState", "package": "sql", - "description": "alert v2 operand value configuration.", - "fields": { - "bool_value": { - "name": "bool_value", - "type": "any", - "description": "", - "required": false - }, - "double_value": { - "name": "double_value", - "type": "any", - "description": "", - "required": false - }, - "string_value": { - "name": "string_value", - "type": "any", - "description": "", - "required": false - } - } + "description": "", + "values": [ + "Active", + "Trashed" + ] }, - "sql.AlertV2RunAs": { - "name": "AlertV2RunAs", + "sql.ListOrder": { + "name": "ListOrder", "package": "sql", - "description": "alert v2 run as configuration.", - "fields": { - "service_principal_name": { - "name": "service_principal_name", - "type": "string", - "description": "Application ID of an active service principal. Setting this field requires the `servicePrincipal/user` role.", - "required": false - }, - "user_name": { - "name": "user_name", - "type": "string", - "description": "The email of an active workspace user. Can only set this field to their own email.", - "required": false - } - } + "description": "", + "values": [ + "CreatedAt", + "Name" + ] }, - "sql.AlertV2Subscription": { - "name": "AlertV2Subscription", + "sql.ObjectType": { + "name": "ObjectType", "package": "sql", - "description": "alert v2 subscription configuration.", - "fields": { - "destination_id": { - "name": "destination_id", - "type": "string", - "description": "", - "required": false - }, - "user_email": { - "name": "user_email", - "type": "any", - "description": "", - "required": false - } - } + "description": "A singular noun object type.", + "values": [ + "Alert", + "Dashboard", + "DataSource", + "Query" + ] }, - "sql.Channel": { - "name": "Channel", + "sql.ObjectTypePlural": { + "name": "ObjectTypePlural", "package": "sql", - "description": "Configures the channel name and DBSQL version of the warehouse. 
CHANNEL_NAME_CUSTOM should be chosen only when `dbsql_version` is specified.", - "fields": { - "dbsql_version": { - "name": "dbsql_version", - "type": "any", - "description": "", - "required": false - }, - "name": { - "name": "name", - "type": "any", - "description": "", - "required": false - } - } + "description": "Always a plural of the object type.", + "values": [ + "Alerts", + "Dashboards", + "DataSources", + "Queries" + ] }, - "sql.ChannelName": { - "name": "ChannelName", + "sql.OwnableObjectType": { + "name": "OwnableObjectType", "package": "sql", - "description": "channel name configuration.", - "fields": {} + "description": "", + "values": [ + "Alert", + "Dashboard", + "Query" + ] }, - "sql.ComparisonOperator": { - "name": "ComparisonOperator", + "sql.ParameterType": { + "name": "ParameterType", "package": "sql", - "description": "comparison operator configuration.", - "fields": {} + "description": "", + "values": [ + "Datetime", + "Enum", + "Number", + "Query", + "Text" + ] }, - "sql.CreateWarehouseRequestWarehouseType": { - "name": "CreateWarehouseRequestWarehouseType", + "sql.PermissionLevel": { + "name": "PermissionLevel", "package": "sql", - "description": "create warehouse request warehouse type configuration.", - "fields": {} + "description": "* `CAN_VIEW`: Can view the query * `CAN_RUN`: Can run the query * `CAN_EDIT`:\nCan edit the query * `CAN_MANAGE`: Can manage the query", + "values": [ + "CanEdit", + "CanManage", + "CanRun", + "CanView" + ] }, - "sql.CronSchedule": { - "name": "CronSchedule", + "sql.PlansState": { + "name": "PlansState", "package": "sql", - "description": "cron schedule configuration.", - "fields": { - "pause_status": { - "name": "pause_status", - "type": "any", - "description": "Indicate whether this schedule is paused or not.", - "required": false - }, - "quartz_cron_schedule": { - "name": "quartz_cron_schedule", - "type": "any", - "description": "A cron expression using quartz syntax that specifies the schedule for this pipeline.\nShould use the quartz format described here: http://www.quartz-scheduler.org/documentation/quartz-2.1.7/tutorials/tutorial-lesson-06.html", - "required": false - }, - "timezone_id": { - "name": "timezone_id", - "type": "string", - "description": "A Java timezone id. 
The schedule will be resolved using this timezone.\nThis will be combined with the quartz_cron_schedule to determine the schedule.\nSee https://docs.databricks.com/sql/language-manual/sql-ref-syntax-aux-conf-mgmt-set-timezone.html for details.", - "required": false - } - } + "description": "Possible Reasons for which we have not saved plans in the database", + "values": [ + "Empty", + "Exists", + "IgnoredLargePlansSize", + "IgnoredSmallDuration", + "IgnoredSparkPlanType", + "Unknown" + ] }, - "sql.EndpointTagPair": { - "name": "EndpointTagPair", + "sql.QueryStatementType": { + "name": "QueryStatementType", "package": "sql", - "description": "endpoint tag pair configuration.", - "fields": { - "key": { - "name": "key", - "type": "any", - "description": "", - "required": false - }, - "value": { - "name": "value", - "type": "any", - "description": "", - "required": false - } - } + "description": "", + "values": [ + "Alter", + "Analyze", + "Copy", + "Create", + "Delete", + "Describe", + "Drop", + "Explain", + "Grant", + "Insert", + "Merge", + "Optimize", + "Other", + "Refresh", + "Replace", + "Revoke", + "Select", + "Set", + "Show", + "Truncate", + "Update", + "Use" + ] }, - "sql.EndpointTags": { - "name": "EndpointTags", + "sql.QueryStatus": { + "name": "QueryStatus", "package": "sql", - "description": "endpoint tags configuration.", - "fields": { - "custom_tags": { - "name": "custom_tags", - "type": "map[string]string", - "description": "", - "required": false - } - } + "description": "Statuses which are also used by OperationStatus in runtime", + "values": [ + "Canceled", + "Compiled", + "Compiling", + "Failed", + "Finished", + "Queued", + "Running", + "Started" + ] + }, + "sql.RunAsMode": { + "name": "RunAsMode", + "package": "sql", + "description": "", + "values": [ + "Owner", + "Viewer" + ] + }, + "sql.RunAsRole": { + "name": "RunAsRole", + "package": "sql", + "description": "", + "values": [ + "Owner", + "Viewer" + ] }, "sql.SchedulePauseStatus": { "name": "SchedulePauseStatus", "package": "sql", - "description": "schedule pause status configuration.", - "fields": {} + "description": "", + "values": [ + "Paused", + "Unpaused" + ] + }, + "sql.ServiceErrorCode": { + "name": "ServiceErrorCode", + "package": "sql", + "description": "", + "values": [ + "Aborted", + "AlreadyExists", + "BadRequest", + "Cancelled", + "DeadlineExceeded", + "InternalError", + "IoError", + "NotFound", + "ResourceExhausted", + "ServiceUnderMaintenance", + "TemporarilyUnavailable", + "Unauthenticated", + "Unknown", + "WorkspaceTemporarilyUnavailable" + ] + }, + "sql.SetWorkspaceWarehouseConfigRequestSecurityPolicy": { + "name": "SetWorkspaceWarehouseConfigRequestSecurityPolicy", + "package": "sql", + "description": "Security policy to be used for warehouses", + "values": [ + "DataAccessControl", + "None", + "Passthrough" + ] }, "sql.SpotInstancePolicy": { "name": "SpotInstancePolicy", "package": "sql", - "description": "EndpointSpotInstancePolicy configures whether the endpoint should use spot\ninstances.\n\nThe breakdown of how the EndpointSpotInstancePolicy converts to per cloud\nconfigurations is:\n\n+-------+--------------------------------------+--------------------------------+\n| Cloud | COST_OPTIMIZED | RELIABILITY_OPTIMIZED |\n+-------+--------------------------------------+--------------------------------+\n| AWS | On Demand Driver with Spot Executors | On Demand Driver and\nExecutors | | AZURE | On Demand Driver and Executors | On Demand Driver\nand Executors 
|\n+-------+--------------------------------------+--------------------------------+\n\nWhile including \"spot\" in the enum name may limit the the future\nextensibility of this field because it limits this enum to denoting \"spot or\nnot\", this is the field that PM recommends after discussion with customers\nper SC-48783.", - "fields": {} + "description": "EndpointSpotInstancePolicy configures whether the endpoint should use spot\ninstances.\n\nThe breakdown of how the EndpointSpotInstancePolicy converts to per cloud\nconfigurations is:\n\n+-------+--------------------------------------+--------------------------------+\n| Cloud | COST_OPTIMIZED | RELIABILITY_OPTIMIZED |\n+-------+--------------------------------------+--------------------------------+\n| AWS | On Demand Driver with Spot Executors | On Demand Driver and Executors\n| | AZURE | On Demand D...", + "values": [ + "CostOptimized", + "PolicyUnspecified", + "ReliabilityOptimized" + ] }, - "workspace.AzureKeyVaultSecretScopeMetadata": { - "name": "AzureKeyVaultSecretScopeMetadata", + "sql.State": { + "name": "State", + "package": "sql", + "description": "* State of a warehouse.", + "values": [ + "Deleted", + "Deleting", + "Running", + "Starting", + "Stopped", + "Stopping" + ] + }, + "sql.StatementState": { + "name": "StatementState", + "package": "sql", + "description": "", + "values": [ + "Canceled", + "Closed", + "Failed", + "Pending", + "Running", + "Succeeded" + ] + }, + "sql.Status": { + "name": "Status", + "package": "sql", + "description": "", + "values": [ + "Degraded", + "Failed", + "Healthy" + ] + }, + "sql.SuccessMessage": { + "name": "SuccessMessage", + "package": "sql", + "description": "", + "values": [ + "Success" + ] + }, + "sql.TerminationReasonCode": { + "name": "TerminationReasonCode", + "package": "sql", + "description": "The status code indicating why the cluster was terminated", + "values": [ + "AbuseDetected", + "AccessTokenFailure", + "AllocationTimeout", + "AllocationTimeoutNoHealthyAndWarmedUpClusters", + "AllocationTimeoutNoHealthyClusters", + "AllocationTimeoutNoMatchedClusters", + "AllocationTimeoutNoReadyClusters", + "AllocationTimeoutNoUnallocatedClusters", + "AllocationTimeoutNoWarmedUpClusters", + "AllocationTimeoutNodeDaemonNotReady", + "AttachProjectFailure", + "AwsAuthorizationFailure", + "AwsInaccessibleKmsKeyFailure", + "AwsInstanceProfileUpdateFailure", + "AwsInsufficientFreeAddressesInSubnetFailure", + "AwsInsufficientInstanceCapacityFailure", + "AwsInvalidKeyPair", + "AwsInvalidKmsKeyState", + "AwsMaxSpotInstanceCountExceededFailure", + "AwsRequestLimitExceeded", + "AwsResourceQuotaExceeded", + "AwsUnsupportedFailure", + "AzureByokKeyPermissionFailure", + "AzureEphemeralDiskFailure", + "AzureInvalidDeploymentTemplate", + "AzureOperationNotAllowedException", + "AzurePackedDeploymentPartialFailure", + "AzureQuotaExceededException", + "AzureResourceManagerThrottling", + "AzureResourceProviderThrottling", + "AzureUnexpectedDeploymentTemplateFailure", + "AzureVmExtensionFailure", + "AzureVnetConfigurationFailure", + "BootstrapTimeout", + "BootstrapTimeoutCloudProviderException", + "BootstrapTimeoutDueToMisconfig", + "BudgetPolicyLimitEnforcementActivated", + "BudgetPolicyResolutionFailure", + "CloudAccountPodQuotaExceeded", + "CloudAccountSetupFailure", + "CloudOperationCancelled", + "CloudProviderDiskSetupFailure", + "CloudProviderInstanceNotLaunched", + "CloudProviderLaunchFailure", + "CloudProviderLaunchFailureDueToMisconfig", + "CloudProviderResourceStockout", + 
"CloudProviderResourceStockoutDueToMisconfig", + "CloudProviderShutdown", + "ClusterOperationThrottled", + "ClusterOperationTimeout", + "CommunicationLost", + "ContainerLaunchFailure", + "ControlPlaneConnectionFailure", + "ControlPlaneConnectionFailureDueToMisconfig", + "ControlPlaneRequestFailure", + "ControlPlaneRequestFailureDueToMisconfig", + "DataAccessConfigChanged", + "DatabaseConnectionFailure", + "DbfsComponentUnhealthy", + "DbrImageResolutionFailure", + "DisasterRecoveryReplication", + "DnsResolutionError", + "DockerContainerCreationException", + "DockerImagePullFailure", + "DockerImageTooLargeForInstanceException", + "DockerInvalidOsException", + "DriverEviction", + "DriverLaunchTimeout", + "DriverNodeUnreachable", + "DriverOutOfDisk", + "DriverOutOfMemory", + "DriverPodCreationFailure", + "DriverUnexpectedFailure", + "DriverUnhealthy", + "DriverUnreachable", + "DriverUnresponsive", + "DynamicSparkConfSizeExceeded", + "EosSparkImage", + "ExecutionComponentUnhealthy", + "ExecutorPodUnscheduled", + "GcpApiRateQuotaExceeded", + "GcpDeniedByOrgPolicy", + "GcpForbidden", + "GcpIamTimeout", + "GcpInaccessibleKmsKeyFailure", + "GcpInsufficientCapacity", + "GcpIpSpaceExhausted", + "GcpKmsKeyPermissionDenied", + "GcpNotFound", + "GcpQuotaExceeded", + "GcpResourceQuotaExceeded", + "GcpServiceAccountAccessDenied", + "GcpServiceAccountDeleted", + "GcpServiceAccountNotFound", + "GcpSubnetNotReady", + "GcpTrustedImageProjectsViolated", + "GkeBasedClusterTermination", + "GlobalInitScriptFailure", + "HiveMetastoreProvisioningFailure", + "ImagePullPermissionDenied", + "InPenaltyBox", + "Inactivity", + "InitContainerNotFinished", + "InitScriptFailure", + "InstancePoolClusterFailure", + "InstancePoolMaxCapacityReached", + "InstancePoolNotFound", + "InstanceUnreachable", + "InstanceUnreachableDueToMisconfig", + "InternalCapacityFailure", + "InternalError", + "InvalidArgument", + "InvalidAwsParameter", + "InvalidInstancePlacementProtocol", + "InvalidSparkImage", + "InvalidWorkerImageFailure", + "IpExhaustionFailure", + "JobFinished", + "K8sActivePodQuotaExceeded", + "K8sAutoscalingFailure", + "K8sDbrClusterLaunchTimeout", + "LazyAllocationTimeout", + "MaintenanceMode", + "MetastoreComponentUnhealthy", + "NephosResourceManagement", + "NetvisorSetupTimeout", + "NetworkCheckControlPlaneFailure", + "NetworkCheckControlPlaneFailureDueToMisconfig", + "NetworkCheckDnsServerFailure", + "NetworkCheckDnsServerFailureDueToMisconfig", + "NetworkCheckMetadataEndpointFailure", + "NetworkCheckMetadataEndpointFailureDueToMisconfig", + "NetworkCheckMultipleComponentsFailure", + "NetworkCheckMultipleComponentsFailureDueToMisconfig", + "NetworkCheckNicFailure", + "NetworkCheckNicFailureDueToMisconfig", + "NetworkCheckStorageFailure", + "NetworkCheckStorageFailureDueToMisconfig", + "NetworkConfigurationFailure", + "NfsMountFailure", + "NoMatchedK8s", + "NoMatchedK8sTestingTag", + "NpipTunnelSetupFailure", + "NpipTunnelTokenFailure", + "PodAssignmentFailure", + "PodSchedulingFailure", + "RequestRejected", + "RequestThrottled", + "ResourceUsageBlocked", + "SecretCreationFailure", + "SecretPermissionDenied", + "SecretResolutionError", + "SecurityDaemonRegistrationException", + "SelfBootstrapFailure", + "ServerlessLongRunningTerminated", + "SkippedSlowNodes", + "SlowImageDownload", + "SparkError", + "SparkImageDownloadFailure", + "SparkImageDownloadThrottled", + "SparkImageNotFound", + "SparkStartupFailure", + "SpotInstanceTermination", + "SshBootstrapFailure", + "StorageDownloadFailure", + 
"StorageDownloadFailureDueToMisconfig", + "StorageDownloadFailureSlow", + "StorageDownloadFailureThrottled", + "StsClientSetupFailure", + "SubnetExhaustedFailure", + "TemporarilyUnavailable", + "TrialExpired", + "UnexpectedLaunchFailure", + "UnexpectedPodRecreation", + "Unknown", + "UnsupportedInstanceType", + "UpdateInstanceProfileFailure", + "UsagePolicyEntitlementDenied", + "UserInitiatedVmTermination", + "UserRequest", + "WorkerSetupFailure", + "WorkspaceCancelledError", + "WorkspaceConfigurationError", + "WorkspaceUpdate" + ] + }, + "sql.TerminationReasonType": { + "name": "TerminationReasonType", + "package": "sql", + "description": "type of the termination", + "values": [ + "ClientError", + "CloudFailure", + "ServiceFault", + "Success" + ] + }, + "sql.WarehousePermissionLevel": { + "name": "WarehousePermissionLevel", + "package": "sql", + "description": "Permission level", + "values": [ + "CanManage", + "CanMonitor", + "CanUse", + "CanView", + "IsOwner" + ] + }, + "sql.WarehouseTypePairWarehouseType": { + "name": "WarehouseTypePairWarehouseType", + "package": "sql", + "description": "", + "values": [ + "Classic", + "Pro", + "TypeUnspecified" + ] + }, + "workspace.AclPermission": { + "name": "AclPermission", "package": "workspace", - "description": "The metadata of the Azure KeyVault for a secret scope of type `AZURE_KEYVAULT`", - "fields": { - "dns_name": { - "name": "dns_name", - "type": "string", - "description": "The DNS of the KeyVault", - "required": false - }, - "resource_id": { - "name": "resource_id", - "type": "string", - "description": "The resource id of the azure KeyVault that user wants to associate the scope with.", - "required": false - } - } + "description": "The ACL permission levels for Secret ACLs applied to secret scopes.", + "values": [ + "Manage", + "Read", + "Write" + ] }, - "workspace.ScopeBackendType": { - "name": "ScopeBackendType", + "workspace.ExportFormat": { + "name": "ExportFormat", "package": "workspace", - "description": "The types of secret scope backends in the Secret Manager. 
Azure KeyVault backed secret scopes\nwill be supported in a later release.", - "fields": {} - } - }, - "enums": { - "compute.State": { - "name": "State", - "package": "compute", - "description": "The state of a cluster.", + "description": "The format for workspace import and export.", "values": [ - "ERROR", - "PENDING", - "RESIZING", - "RESTARTING", - "RUNNING", - "TERMINATED", - "TERMINATING", - "UNKNOWN" + "Auto", + "Dbc", + "Html", + "Jupyter", + "RMarkdown", + "Raw", + "Source" ] }, - "jobs.RunLifeCycleState": { - "name": "RunLifeCycleState", - "package": "jobs", - "description": "The current state of the run lifecycle.", + "workspace.ImportFormat": { + "name": "ImportFormat", + "package": "workspace", + "description": "The format for workspace import and export.", "values": [ - "INTERNAL_ERROR", - "PENDING", - "RUNNING", - "SKIPPED", - "TERMINATED", - "TERMINATING" + "Auto", + "Dbc", + "Html", + "Jupyter", + "RMarkdown", + "Raw", + "Source" ] }, - "pipelines.PipelineState": { - "name": "PipelineState", - "package": "pipelines", - "description": "The state of a pipeline.", - "values": [ - "DELETED", - "FAILED", - "IDLE", - "RECOVERING", - "RESETTING", - "RUNNING", - "STARTING", - "STOPPING" + "workspace.Language": { + "name": "Language", + "package": "workspace", + "description": "The language of notebook.", + "values": [ + "Python", + "R", + "Scala", + "Sql" + ] + }, + "workspace.ObjectType": { + "name": "ObjectType", + "package": "workspace", + "description": "The type of the object in workspace.", + "values": [ + "Dashboard", + "Directory", + "File", + "Library", + "Notebook", + "Repo" + ] + }, + "workspace.RepoPermissionLevel": { + "name": "RepoPermissionLevel", + "package": "workspace", + "description": "Permission level", + "values": [ + "CanEdit", + "CanManage", + "CanRead", + "CanRun" + ] + }, + "workspace.ScopeBackendType": { + "name": "ScopeBackendType", + "package": "workspace", + "description": "The types of secret scope backends in the Secret Manager. Azure KeyVault\nbacked secret scopes will be supported in a later release.", + "values": [ + "AzureKeyvault", + "Databricks" + ] + }, + "workspace.WorkspaceObjectPermissionLevel": { + "name": "WorkspaceObjectPermissionLevel", + "package": "workspace", + "description": "Permission level", + "values": [ + "CanEdit", + "CanManage", + "CanRead", + "CanRun" ] } } diff --git a/tools/gen_sdk_docs_index.go b/tools/gen_sdk_docs_index.go index 8bd7bb2ace..988b438de0 100644 --- a/tools/gen_sdk_docs_index.go +++ b/tools/gen_sdk_docs_index.go @@ -4,15 +4,23 @@ // // go run tools/gen_sdk_docs_index.go -output experimental/aitools/lib/providers/sdkdocs/ // -// This tool parses the annotations_openapi.yml file and Go SDK interfaces to generate -// a comprehensive SDK documentation index that is embedded into the CLI binary. +// This tool parses the Go SDK source code to generate a comprehensive SDK documentation +// index that is embedded into the CLI binary. It extracts service interfaces, method +// signatures, type definitions, and enums directly from the SDK. 
package main import ( + "bytes" "encoding/json" "flag" "fmt" + "go/ast" + "go/doc" + "go/parser" + "go/printer" + "go/token" "os" + "os/exec" "path/filepath" "regexp" "sort" @@ -26,6 +34,7 @@ import ( type SDKDocsIndex struct { Version string `json:"version"` GeneratedAt string `json:"generated_at"` + SDKVersion string `json:"sdk_version"` Services map[string]*ServiceDoc `json:"services"` Types map[string]*TypeDoc `json:"types"` Enums map[string]*EnumDoc `json:"enums"` @@ -46,9 +55,6 @@ type MethodDoc struct { Signature string `json:"signature"` Parameters []ParamDoc `json:"parameters"` Returns *ReturnDoc `json:"returns,omitempty"` - Example string `json:"example,omitempty"` - HTTPMethod string `json:"http_method,omitempty"` - HTTPPath string `json:"http_path,omitempty"` } // ParamDoc represents documentation for a method parameter. @@ -79,8 +85,6 @@ type FieldDoc struct { Type string `json:"type"` Description string `json:"description"` Required bool `json:"required"` - OutputOnly bool `json:"output_only,omitempty"` - Deprecated bool `json:"deprecated,omitempty"` } // EnumDoc represents documentation for an enum type. @@ -96,9 +100,22 @@ type AnnotationsFile map[string]map[string]FieldAnnotation // FieldAnnotation represents annotations for a single field type FieldAnnotation struct { - Description string `yaml:"description"` - OutputOnly string `yaml:"x-databricks-field-behaviors_output_only"` - DeprecationMessage string `yaml:"deprecation_message"` + Description string `yaml:"description"` +} + +// Priority services to include (most commonly used) +var priorityServices = map[string]bool{ + "apps": true, + "catalog": true, + "compute": true, + "files": true, + "iam": true, + "jobs": true, + "ml": true, + "pipelines": true, + "serving": true, + "sql": true, + "workspace": true, } func main() { @@ -113,18 +130,35 @@ func main() { os.Exit(1) } - // Load annotations - annotations, err := loadAnnotations(filepath.Join(projectRoot, *annotationsPath)) + // Find SDK path + sdkPath, sdkVersion, err := findSDKPath() if err != nil { - fmt.Fprintf(os.Stderr, "Error loading annotations: %v\n", err) + fmt.Fprintf(os.Stderr, "Error finding SDK path: %v\n", err) os.Exit(1) } + fmt.Printf("Found SDK at: %s (version %s)\n", sdkPath, sdkVersion) - // Generate index - index := generateIndex(annotations) + // Load annotations for additional type descriptions + annotations, err := loadAnnotations(filepath.Join(projectRoot, *annotationsPath)) + if err != nil { + fmt.Fprintf(os.Stderr, "Warning: Could not load annotations: %v\n", err) + annotations = make(AnnotationsFile) + } + + // Generate index from SDK + index, err := generateIndexFromSDK(sdkPath, sdkVersion, annotations) + if err != nil { + fmt.Fprintf(os.Stderr, "Error generating index: %v\n", err) + os.Exit(1) + } // Write output - outputPath := filepath.Join(projectRoot, *outputDir, "sdk_docs_index.json") + var outputPath string + if filepath.IsAbs(*outputDir) { + outputPath = filepath.Join(*outputDir, "sdk_docs_index.json") + } else { + outputPath = filepath.Join(projectRoot, *outputDir, "sdk_docs_index.json") + } if err := writeIndex(index, outputPath); err != nil { fmt.Fprintf(os.Stderr, "Error writing index: %v\n", err) os.Exit(1) @@ -154,6 +188,25 @@ func findProjectRoot() (string, error) { } } +func findSDKPath() (string, string, error) { + // Use go list to find the SDK module path + cmd := exec.Command("go", "list", "-m", "-json", "github.com/databricks/databricks-sdk-go") + output, err := cmd.Output() + if err != nil { + return "", "", 
fmt.Errorf("failed to find SDK: %w", err) + } + + var modInfo struct { + Dir string `json:"Dir"` + Version string `json:"Version"` + } + if err := json.Unmarshal(output, &modInfo); err != nil { + return "", "", fmt.Errorf("failed to parse module info: %w", err) + } + + return modInfo.Dir, modInfo.Version, nil +} + func loadAnnotations(path string) (AnnotationsFile, error) { data, err := os.ReadFile(path) if err != nil { @@ -168,64 +221,449 @@ func loadAnnotations(path string) (AnnotationsFile, error) { return annotations, nil } -func generateIndex(annotations AnnotationsFile) *SDKDocsIndex { +func generateIndexFromSDK(sdkPath, sdkVersion string, annotations AnnotationsFile) (*SDKDocsIndex, error) { index := &SDKDocsIndex{ Version: "1.0", GeneratedAt: time.Now().UTC().Format(time.RFC3339), + SDKVersion: sdkVersion, Services: make(map[string]*ServiceDoc), Types: make(map[string]*TypeDoc), Enums: make(map[string]*EnumDoc), } - // Extract types from annotations - for fullTypeName, fields := range annotations { - typeName := extractTypeName(fullTypeName) - packageName := extractPackageName(fullTypeName) + servicePath := filepath.Join(sdkPath, "service") + entries, err := os.ReadDir(servicePath) + if err != nil { + return nil, fmt.Errorf("failed to read service directory: %w", err) + } - if typeName == "" { + for _, entry := range entries { + if !entry.IsDir() { continue } - typeDoc := &TypeDoc{ - Name: typeName, - Package: packageName, - Description: inferTypeDescription(typeName), - Fields: make(map[string]*FieldDoc), + pkgName := entry.Name() + + // Skip non-priority services to keep index manageable + if !priorityServices[pkgName] { + continue } - for fieldName, annotation := range fields { - if fieldName == "_" { - // Type-level description - if annotation.Description != "" { - typeDoc.Description = annotation.Description + pkgPath := filepath.Join(servicePath, pkgName) + + // Parse the package + serviceDoc, types, enums, err := parseServicePackage(pkgPath, pkgName) + if err != nil { + fmt.Fprintf(os.Stderr, "Warning: Failed to parse %s: %v\n", pkgName, err) + continue + } + + if serviceDoc != nil && len(serviceDoc.Methods) > 0 { + index.Services[pkgName] = serviceDoc + } + + // Add types + for typeName, typeDoc := range types { + index.Types[pkgName+"."+typeName] = typeDoc + } + + // Add enums + for enumName, enumDoc := range enums { + index.Enums[pkgName+"."+enumName] = enumDoc + } + } + + // Enrich with annotations + enrichWithAnnotations(index, annotations) + + return index, nil +} + +func parseServicePackage(pkgPath, pkgName string) (*ServiceDoc, map[string]*TypeDoc, map[string]*EnumDoc, error) { + fset := token.NewFileSet() + + // Parse all Go files in the package + pkgs, err := parser.ParseDir(fset, pkgPath, func(fi os.FileInfo) bool { + // Skip test files + return !strings.HasSuffix(fi.Name(), "_test.go") + }, parser.ParseComments) + if err != nil { + return nil, nil, nil, fmt.Errorf("failed to parse package: %w", err) + } + + var serviceDoc *ServiceDoc + types := make(map[string]*TypeDoc) + enums := make(map[string]*EnumDoc) + + for _, pkg := range pkgs { + // Create doc package for better comment extraction + docPkg := doc.New(pkg, pkgPath, doc.AllDecls) + + // Find service interface + for _, typ := range docPkg.Types { + if strings.HasSuffix(typ.Name, "Service") && !strings.HasSuffix(typ.Name, "Interface") { + // This is a service interface + if serviceDoc == nil { + serviceDoc = parseServiceInterface(typ, pkgName, fset, pkg) + } + } else if isEnumType(typ, pkg) { + // This is an 
enum + enumDoc := parseEnumType(typ, pkgName, pkg) + if enumDoc != nil && len(enumDoc.Values) > 0 { + enums[typ.Name] = enumDoc + } + } else if isStructType(typ, pkg) { + // This is a struct type + typeDoc := parseStructType(typ, pkgName, fset, pkg) + if typeDoc != nil { + types[typ.Name] = typeDoc } + } + } + } + + return serviceDoc, types, enums, nil +} + +func parseServiceInterface(typ *doc.Type, pkgName string, fset *token.FileSet, pkg *ast.Package) *ServiceDoc { + serviceDoc := &ServiceDoc{ + Name: strings.TrimSuffix(typ.Name, "Service"), + Description: cleanDescription(typ.Doc), + Package: fmt.Sprintf("github.com/databricks/databricks-sdk-go/service/%s", pkgName), + Methods: make(map[string]*MethodDoc), + } + + // Find the interface declaration + for _, file := range pkg.Files { + for _, decl := range file.Decls { + genDecl, ok := decl.(*ast.GenDecl) + if !ok || genDecl.Tok != token.TYPE { continue } - fieldDoc := &FieldDoc{ - Name: fieldName, - Type: inferFieldType(fieldName), - Description: annotation.Description, - OutputOnly: annotation.OutputOnly == "true", - Deprecated: annotation.DeprecationMessage != "", + for _, spec := range genDecl.Specs { + typeSpec, ok := spec.(*ast.TypeSpec) + if !ok || typeSpec.Name.Name != typ.Name { + continue + } + + ifaceType, ok := typeSpec.Type.(*ast.InterfaceType) + if !ok { + continue + } + + // Parse methods + for _, method := range ifaceType.Methods.List { + if len(method.Names) == 0 { + continue + } + + methodName := method.Names[0].Name + funcType, ok := method.Type.(*ast.FuncType) + if !ok { + continue + } + + methodDoc := parseMethod(methodName, funcType, method.Doc, fset) + if methodDoc != nil { + serviceDoc.Methods[methodName] = methodDoc + } + } + } + } + } + + return serviceDoc +} + +func parseMethod(name string, funcType *ast.FuncType, doc *ast.CommentGroup, fset *token.FileSet) *MethodDoc { + methodDoc := &MethodDoc{ + Name: name, + Description: cleanDescription(extractCommentText(doc)), + Parameters: []ParamDoc{}, + } + + // Build signature + var sig bytes.Buffer + sig.WriteString(name) + sig.WriteString("(") + + // Parse parameters + if funcType.Params != nil { + params := []string{} + for _, field := range funcType.Params.List { + typeStr := typeToString(field.Type, fset) + + for _, name := range field.Names { + params = append(params, fmt.Sprintf("%s %s", name.Name, typeStr)) + + // Skip context parameter + if name.Name == "ctx" { + continue + } + + methodDoc.Parameters = append(methodDoc.Parameters, ParamDoc{ + Name: name.Name, + Type: typeStr, + Required: true, + }) + } + + // Handle unnamed parameters + if len(field.Names) == 0 { + params = append(params, typeStr) } - typeDoc.Fields[fieldName] = fieldDoc + } + sig.WriteString(strings.Join(params, ", ")) + } + sig.WriteString(")") + + // Parse return type + if funcType.Results != nil && len(funcType.Results.List) > 0 { + returns := []string{} + for _, field := range funcType.Results.List { + typeStr := typeToString(field.Type, fset) + returns = append(returns, typeStr) + } + + if len(returns) == 1 { + sig.WriteString(" ") + sig.WriteString(returns[0]) + if returns[0] != "error" { + methodDoc.Returns = &ReturnDoc{Type: returns[0]} + } + } else { + sig.WriteString(" (") + sig.WriteString(strings.Join(returns, ", ")) + sig.WriteString(")") + // Find non-error return type + for _, ret := range returns { + if ret != "error" { + methodDoc.Returns = &ReturnDoc{Type: ret} + break + } + } + } + } + + methodDoc.Signature = sig.String() + return methodDoc +} + +func isEnumType(typ 
*doc.Type, pkg *ast.Package) bool { + // Check if type has constants defined (enum pattern) + return len(typ.Consts) > 0 +} + +func isStructType(typ *doc.Type, pkg *ast.Package) bool { + // Check if this is a struct type + for _, file := range pkg.Files { + for _, decl := range file.Decls { + genDecl, ok := decl.(*ast.GenDecl) + if !ok || genDecl.Tok != token.TYPE { + continue + } + + for _, spec := range genDecl.Specs { + typeSpec, ok := spec.(*ast.TypeSpec) + if !ok || typeSpec.Name.Name != typ.Name { + continue + } + + _, isStruct := typeSpec.Type.(*ast.StructType) + return isStruct + } + } + } + return false +} + +func parseEnumType(typ *doc.Type, pkgName string, pkg *ast.Package) *EnumDoc { + enumDoc := &EnumDoc{ + Name: typ.Name, + Package: pkgName, + Description: cleanDescription(typ.Doc), + Values: []string{}, + } + + // Extract enum values from constants + for _, c := range typ.Consts { + for _, name := range c.Names { + // Extract the value part after the type prefix + value := strings.TrimPrefix(name, typ.Name) + if value != name && value != "" { + enumDoc.Values = append(enumDoc.Values, value) + } + } + } + + return enumDoc +} + +func parseStructType(typ *doc.Type, pkgName string, fset *token.FileSet, pkg *ast.Package) *TypeDoc { + typeDoc := &TypeDoc{ + Name: typ.Name, + Package: pkgName, + Description: cleanDescription(typ.Doc), + Fields: make(map[string]*FieldDoc), + } + + // Find the struct declaration + for _, file := range pkg.Files { + for _, decl := range file.Decls { + genDecl, ok := decl.(*ast.GenDecl) + if !ok || genDecl.Tok != token.TYPE { + continue + } + + for _, spec := range genDecl.Specs { + typeSpec, ok := spec.(*ast.TypeSpec) + if !ok || typeSpec.Name.Name != typ.Name { + continue + } + + structType, ok := typeSpec.Type.(*ast.StructType) + if !ok { + continue + } + + // Parse fields + for _, field := range structType.Fields.List { + if len(field.Names) == 0 { + continue + } + + fieldName := field.Names[0].Name + + // Skip internal fields + if fieldName == "ForceSendFields" { + continue + } + + // Get JSON name from tag + jsonName := fieldName + if field.Tag != nil { + jsonName = extractJSONName(field.Tag.Value) + if jsonName == "" || jsonName == "-" { + continue + } + } + + fieldDoc := &FieldDoc{ + Name: jsonName, + Type: typeToString(field.Type, fset), + Description: cleanDescription(extractCommentText(field.Doc)), + } + + typeDoc.Fields[jsonName] = fieldDoc + } + } + } + } + + // Only return if we have fields + if len(typeDoc.Fields) == 0 { + return nil + } + + return typeDoc +} + +func typeToString(expr ast.Expr, fset *token.FileSet) string { + var buf bytes.Buffer + printer.Fprint(&buf, fset, expr) + return buf.String() +} + +func extractCommentText(cg *ast.CommentGroup) string { + if cg == nil { + return "" + } + return cg.Text() +} + +func extractJSONName(tag string) string { + // Parse struct tag to get JSON name + // Tag format: `json:"name,omitempty"` + re := regexp.MustCompile(`json:"([^",]+)`) + matches := re.FindStringSubmatch(tag) + if len(matches) > 1 { + return matches[1] + } + return "" +} + +func cleanDescription(s string) string { + // Clean up description text + s = strings.TrimSpace(s) + + // Remove "Deprecated:" notices for cleaner output + if idx := strings.Index(s, "\n\nDeprecated:"); idx > 0 { + s = s[:idx] + } + + // Truncate very long descriptions + if len(s) > 500 { + s = s[:497] + "..." 
+ } + + return s +} + +func enrichWithAnnotations(index *SDKDocsIndex, annotations AnnotationsFile) { + // Add type descriptions from annotations + for fullTypeName, fields := range annotations { + typeName := extractTypeName(fullTypeName) + packageName := extractPackageName(fullTypeName) + + if typeName == "" { + continue } - // Determine the service this type belongs to service := inferServiceFromPackage(packageName) typePath := service + "." + typeName - index.Types[typePath] = typeDoc - } - // Add well-known services with common methods - addCoreServices(index) + // Check if type already exists + if existing, ok := index.Types[typePath]; ok { + // Enrich existing type with annotation descriptions + for fieldName, annotation := range fields { + if fieldName == "_" && annotation.Description != "" { + existing.Description = annotation.Description + continue + } + if field, ok := existing.Fields[fieldName]; ok && field.Description == "" { + field.Description = annotation.Description + } + } + } else { + // Create new type from annotations + typeDoc := &TypeDoc{ + Name: typeName, + Package: packageName, + Description: "", + Fields: make(map[string]*FieldDoc), + } - return index + for fieldName, annotation := range fields { + if fieldName == "_" { + typeDoc.Description = annotation.Description + continue + } + typeDoc.Fields[fieldName] = &FieldDoc{ + Name: fieldName, + Type: "any", + Description: annotation.Description, + } + } + + if len(typeDoc.Fields) > 0 || typeDoc.Description != "" { + index.Types[typePath] = typeDoc + } + } + } } func extractTypeName(fullPath string) string { - // Extract type name from paths like "github.com/databricks/cli/bundle/config/resources.Alert" parts := strings.Split(fullPath, ".") if len(parts) > 0 { return parts[len(parts)-1] @@ -234,7 +672,6 @@ func extractTypeName(fullPath string) string { } func extractPackageName(fullPath string) string { - // Extract package from paths like "github.com/databricks/cli/bundle/config/resources.Alert" parts := strings.Split(fullPath, "/") if len(parts) > 0 { lastPart := parts[len(parts)-1] @@ -247,7 +684,6 @@ func extractPackageName(fullPath string) string { } func inferServiceFromPackage(packageName string) string { - // Map package names to service names serviceMap := map[string]string{ "resources": "bundle", "jobs": "jobs", @@ -261,9 +697,7 @@ func inferServiceFromPackage(packageName string) string { "ml": "ml", "workspace": "workspace", "iam": "iam", - "settings": "settings", "files": "files", - "sharing": "sharing", } if service, ok := serviceMap[packageName]; ok { @@ -272,365 +706,14 @@ func inferServiceFromPackage(packageName string) string { return packageName } -func inferTypeDescription(typeName string) string { - // Generate reasonable descriptions based on type name patterns - if strings.HasSuffix(typeName, "Request") { - base := strings.TrimSuffix(typeName, "Request") - return fmt.Sprintf("Request parameters for %s operation.", toSentenceCase(base)) - } - if strings.HasSuffix(typeName, "Response") { - base := strings.TrimSuffix(typeName, "Response") - return fmt.Sprintf("Response from %s operation.", toSentenceCase(base)) - } - if strings.HasSuffix(typeName, "Settings") { - base := strings.TrimSuffix(typeName, "Settings") - return fmt.Sprintf("Configuration settings for %s.", toSentenceCase(base)) - } - if strings.HasSuffix(typeName, "Spec") { - base := strings.TrimSuffix(typeName, "Spec") - return fmt.Sprintf("Specification for %s.", toSentenceCase(base)) - } - return fmt.Sprintf("%s configuration.", 
toSentenceCase(typeName)) -} - -func inferFieldType(fieldName string) string { - // Infer type from common field name patterns - patterns := map[*regexp.Regexp]string{ - regexp.MustCompile(`(?i)_id$`): "string", - regexp.MustCompile(`(?i)_ids$`): "[]string", - regexp.MustCompile(`(?i)_time$`): "string (timestamp)", - regexp.MustCompile(`(?i)_at$`): "string (timestamp)", - regexp.MustCompile(`(?i)^is_`): "bool", - regexp.MustCompile(`(?i)^has_`): "bool", - regexp.MustCompile(`(?i)^enable`): "bool", - regexp.MustCompile(`(?i)_enabled$`): "bool", - regexp.MustCompile(`(?i)_count$`): "int", - regexp.MustCompile(`(?i)_size$`): "int", - regexp.MustCompile(`(?i)_minutes$`): "int", - regexp.MustCompile(`(?i)_seconds$`): "int", - regexp.MustCompile(`(?i)_name$`): "string", - regexp.MustCompile(`(?i)_path$`): "string", - regexp.MustCompile(`(?i)_url$`): "string", - regexp.MustCompile(`(?i)description`): "string", - regexp.MustCompile(`(?i)tags$`): "map[string]string", - } - - for pattern, typeName := range patterns { - if pattern.MatchString(fieldName) { - return typeName - } - } - - return "any" -} - -func toSentenceCase(s string) string { - // Convert CamelCase to sentence case - var result strings.Builder - for i, r := range s { - if i > 0 && r >= 'A' && r <= 'Z' { - result.WriteRune(' ') - } - result.WriteRune(r) - } - return strings.ToLower(result.String()) -} - -func addCoreServices(index *SDKDocsIndex) { - // Jobs service - index.Services["jobs"] = &ServiceDoc{ - Name: "Jobs", - Description: "The Jobs API allows you to create, edit, and delete jobs. Jobs are the primary unit of scheduled execution in Databricks.", - Package: "github.com/databricks/databricks-sdk-go/service/jobs", - Methods: map[string]*MethodDoc{ - "Create": { - Name: "Create", - Description: "Create a new job.", - Signature: "Create(ctx context.Context, request CreateJob) (*CreateResponse, error)", - Parameters: []ParamDoc{ - {Name: "request", Type: "CreateJob", Description: "Job creation parameters including name, tasks, and schedule", Required: true}, - }, - Returns: &ReturnDoc{Type: "*CreateResponse", Description: "Contains the job_id of the created job"}, - Example: "resp, err := w.Jobs.Create(ctx, jobs.CreateJob{\n Name: \"my-job\",\n Tasks: []jobs.Task{{TaskKey: \"main\", ...}},\n})", - }, - "List": { - Name: "List", - Description: "Retrieves a list of jobs.", - Signature: "List(ctx context.Context, request ListJobsRequest) listing.Iterator[BaseJob]", - Parameters: []ParamDoc{ - {Name: "request", Type: "ListJobsRequest", Description: "Filter and pagination parameters", Required: false}, - }, - Returns: &ReturnDoc{Type: "listing.Iterator[BaseJob]", Description: "Iterator over jobs matching the filter"}, - }, - "Get": { - Name: "Get", - Description: "Retrieves the details for a single job.", - Signature: "Get(ctx context.Context, request GetJobRequest) (*Job, error)", - Parameters: []ParamDoc{ - {Name: "request", Type: "GetJobRequest", Description: "Contains job_id to retrieve", Required: true}, - }, - Returns: &ReturnDoc{Type: "*Job", Description: "Full job details including settings and run history"}, - }, - "Delete": { - Name: "Delete", - Description: "Deletes a job.", - Signature: "Delete(ctx context.Context, request DeleteJob) error", - Parameters: []ParamDoc{ - {Name: "request", Type: "DeleteJob", Description: "Contains job_id to delete", Required: true}, - }, - }, - "RunNow": { - Name: "RunNow", - Description: "Triggers an immediate run of a job.", - Signature: "RunNow(ctx context.Context, request RunNow) 
(*RunNowResponse, error)",
-				Parameters: []ParamDoc{
-					{Name: "request", Type: "RunNow", Description: "Job ID and optional parameters for the run", Required: true},
-				},
-				Returns: &ReturnDoc{Type: "*RunNowResponse", Description: "Contains run_id of the triggered run"},
-			},
-		},
-	}
-
-	// Clusters/Compute service
-	index.Services["compute"] = &ServiceDoc{
-		Name: "Clusters",
-		Description: "The Clusters API allows you to create, start, edit, and terminate clusters. Clusters are managed cloud resources for running Spark workloads.",
-		Package: "github.com/databricks/databricks-sdk-go/service/compute",
-		Methods: map[string]*MethodDoc{
-			"Create": {
-				Name: "Create",
-				Description: "Create a new Spark cluster.",
-				Signature: "Create(ctx context.Context, request CreateCluster) (*CreateClusterResponse, error)",
-				Parameters: []ParamDoc{
-					{Name: "request", Type: "CreateCluster", Description: "Cluster configuration including node types, autoscaling, and Spark version", Required: true},
-				},
-				Returns: &ReturnDoc{Type: "*CreateClusterResponse", Description: "Contains cluster_id of the created cluster"},
-			},
-			"List": {
-				Name: "List",
-				Description: "Returns information about all clusters.",
-				Signature: "List(ctx context.Context, request ListClustersRequest) listing.Iterator[ClusterDetails]",
-				Returns: &ReturnDoc{Type: "listing.Iterator[ClusterDetails]", Description: "Iterator over cluster details"},
-			},
-			"Get": {
-				Name: "Get",
-				Description: "Retrieves the information for a cluster given its identifier.",
-				Signature: "Get(ctx context.Context, request GetClusterRequest) (*ClusterDetails, error)",
-				Parameters: []ParamDoc{
-					{Name: "request", Type: "GetClusterRequest", Description: "Contains cluster_id", Required: true},
-				},
-				Returns: &ReturnDoc{Type: "*ClusterDetails", Description: "Full cluster configuration and state"},
-			},
-			"Start": {
-				Name: "Start",
-				Description: "Starts a terminated cluster.",
-				Signature: "Start(ctx context.Context, request StartCluster) error",
-				Parameters: []ParamDoc{
-					{Name: "request", Type: "StartCluster", Description: "Contains cluster_id to start", Required: true},
-				},
-			},
-			"Delete": {
-				Name: "Delete",
-				Description: "Permanently deletes a Spark cluster.",
-				Signature: "Delete(ctx context.Context, request DeleteCluster) error",
-				Parameters: []ParamDoc{
-					{Name: "request", Type: "DeleteCluster", Description: "Contains cluster_id to delete", Required: true},
-				},
-			},
-		},
-	}
-
-	// Pipelines service
-	index.Services["pipelines"] = &ServiceDoc{
-		Name: "Pipelines",
-		Description: "The Delta Live Tables API allows you to create, edit, and run pipelines for data transformation and ingestion.",
-		Package: "github.com/databricks/databricks-sdk-go/service/pipelines",
-		Methods: map[string]*MethodDoc{
-			"Create": {
-				Name: "Create",
-				Description: "Creates a new data processing pipeline.",
-				Signature: "Create(ctx context.Context, request CreatePipeline) (*CreatePipelineResponse, error)",
-				Parameters: []ParamDoc{
-					{Name: "request", Type: "CreatePipeline", Description: "Pipeline configuration including clusters, libraries, and target", Required: true},
-				},
-				Returns: &ReturnDoc{Type: "*CreatePipelineResponse", Description: "Contains pipeline_id of the created pipeline"},
-			},
-			"List": {
-				Name: "List",
-				Description: "Lists pipelines defined in the workspace.",
-				Signature: "List(ctx context.Context, request ListPipelinesRequest) listing.Iterator[PipelineStateInfo]",
-				Returns: &ReturnDoc{Type: "listing.Iterator[PipelineStateInfo]", Description: "Iterator over pipeline info"},
-			},
-			"StartUpdate": {
-				Name: "StartUpdate",
-				Description: "Starts a new update for the pipeline.",
-				Signature: "StartUpdate(ctx context.Context, request StartUpdate) (*StartUpdateResponse, error)",
-				Parameters: []ParamDoc{
-					{Name: "request", Type: "StartUpdate", Description: "Pipeline ID and update options", Required: true},
-				},
-				Returns: &ReturnDoc{Type: "*StartUpdateResponse", Description: "Contains update_id of the started update"},
-			},
-		},
-	}
-
-	// Catalog service
-	index.Services["catalog"] = &ServiceDoc{
-		Name: "Catalog",
-		Description: "Unity Catalog APIs for managing catalogs, schemas, tables, and other data assets.",
-		Package: "github.com/databricks/databricks-sdk-go/service/catalog",
-		Methods: map[string]*MethodDoc{
-			"ListCatalogs": {
-				Name: "ListCatalogs",
-				Description: "Lists all catalogs in the metastore.",
-				Signature: "List(ctx context.Context, request ListCatalogsRequest) listing.Iterator[CatalogInfo]",
-				Returns: &ReturnDoc{Type: "listing.Iterator[CatalogInfo]", Description: "Iterator over catalog information"},
-			},
-			"ListSchemas": {
-				Name: "ListSchemas",
-				Description: "Lists all schemas in a catalog.",
-				Signature: "List(ctx context.Context, request ListSchemasRequest) listing.Iterator[SchemaInfo]",
-				Parameters: []ParamDoc{
-					{Name: "request", Type: "ListSchemasRequest", Description: "Contains catalog_name to list schemas from", Required: true},
-				},
-				Returns: &ReturnDoc{Type: "listing.Iterator[SchemaInfo]", Description: "Iterator over schema information"},
-			},
-			"ListTables": {
-				Name: "ListTables",
-				Description: "Lists all tables in a schema.",
-				Signature: "List(ctx context.Context, request ListTablesRequest) listing.Iterator[TableInfo]",
-				Parameters: []ParamDoc{
-					{Name: "request", Type: "ListTablesRequest", Description: "Contains catalog_name and schema_name", Required: true},
-				},
-				Returns: &ReturnDoc{Type: "listing.Iterator[TableInfo]", Description: "Iterator over table information"},
-			},
-		},
-	}
-
-	// Apps service
-	index.Services["apps"] = &ServiceDoc{
-		Name: "Apps",
-		Description: "Databricks Apps API for deploying and managing web applications on Databricks.",
-		Package: "github.com/databricks/databricks-sdk-go/service/apps",
-		Methods: map[string]*MethodDoc{
-			"Create": {
-				Name: "Create",
-				Description: "Creates a new app.",
-				Signature: "Create(ctx context.Context, request CreateAppRequest) (*App, error)",
-				Parameters: []ParamDoc{
-					{Name: "request", Type: "CreateAppRequest", Description: "App configuration including name and description", Required: true},
-				},
-				Returns: &ReturnDoc{Type: "*App", Description: "The created app details"},
-			},
-			"Deploy": {
-				Name: "Deploy",
-				Description: "Deploys an app to Databricks Apps.",
-				Signature: "Deploy(ctx context.Context, request CreateAppDeploymentRequest) (*AppDeployment, error)",
-				Parameters: []ParamDoc{
-					{Name: "request", Type: "CreateAppDeploymentRequest", Description: "Deployment configuration", Required: true},
-				},
-				Returns: &ReturnDoc{Type: "*AppDeployment", Description: "Deployment status and details"},
-			},
-			"List": {
-				Name: "List",
-				Description: "Lists all apps in the workspace.",
-				Signature: "List(ctx context.Context, request ListAppsRequest) listing.Iterator[App]",
-				Returns: &ReturnDoc{Type: "listing.Iterator[App]", Description: "Iterator over apps"},
-			},
-		},
-	}
-
-	// SQL service
-	index.Services["sql"] = &ServiceDoc{
-		Name: "SQL",
-		Description: "Databricks SQL APIs for managing warehouses, queries, and dashboards.",
-		Package: "github.com/databricks/databricks-sdk-go/service/sql",
-		Methods: map[string]*MethodDoc{
-			"ExecuteStatement": {
-				Name: "ExecuteStatement",
-				Description: "Execute a SQL statement and return results.",
-				Signature: "ExecuteStatement(ctx context.Context, request ExecuteStatementRequest) (*ExecuteStatementResponse, error)",
-				Parameters: []ParamDoc{
-					{Name: "request", Type: "ExecuteStatementRequest", Description: "SQL statement, warehouse ID, and execution options", Required: true},
-				},
-				Returns: &ReturnDoc{Type: "*ExecuteStatementResponse", Description: "Query results or statement ID for async execution"},
-			},
-			"ListWarehouses": {
-				Name: "ListWarehouses",
-				Description: "Lists all SQL warehouses.",
-				Signature: "List(ctx context.Context, request ListWarehousesRequest) listing.Iterator[EndpointInfo]",
-				Returns: &ReturnDoc{Type: "listing.Iterator[EndpointInfo]", Description: "Iterator over warehouse information"},
-			},
-		},
-	}
-
-	// Workspace service
-	index.Services["workspace"] = &ServiceDoc{
-		Name: "Workspace",
-		Description: "Workspace API for managing notebooks, folders, and other workspace objects.",
-		Package: "github.com/databricks/databricks-sdk-go/service/workspace",
-		Methods: map[string]*MethodDoc{
-			"List": {
-				Name: "List",
-				Description: "Lists the contents of a directory.",
-				Signature: "List(ctx context.Context, request ListWorkspaceRequest) listing.Iterator[ObjectInfo]",
-				Parameters: []ParamDoc{
-					{Name: "request", Type: "ListWorkspaceRequest", Description: "Contains path to list", Required: true},
-				},
-				Returns: &ReturnDoc{Type: "listing.Iterator[ObjectInfo]", Description: "Iterator over workspace objects"},
-			},
-			"GetStatus": {
-				Name: "GetStatus",
-				Description: "Gets the status of a workspace object.",
-				Signature: "GetStatus(ctx context.Context, request GetStatusRequest) (*ObjectInfo, error)",
-				Parameters: []ParamDoc{
-					{Name: "request", Type: "GetStatusRequest", Description: "Contains path to get status for", Required: true},
-				},
-				Returns: &ReturnDoc{Type: "*ObjectInfo", Description: "Object information including type and path"},
-			},
-			"Import": {
-				Name: "Import",
-				Description: "Imports a notebook or file into the workspace.",
-				Signature: "Import(ctx context.Context, request Import) error",
-				Parameters: []ParamDoc{
-					{Name: "request", Type: "Import", Description: "Path, content, and format of the object to import", Required: true},
-				},
-			},
-		},
-	}
-
-	// Add some common enums
-	index.Enums["jobs.RunLifeCycleState"] = &EnumDoc{
-		Name: "RunLifeCycleState",
-		Package: "jobs",
-		Description: "The current state of the run lifecycle.",
-		Values: []string{"PENDING", "RUNNING", "TERMINATING", "TERMINATED", "SKIPPED", "INTERNAL_ERROR"},
-	}
-
-	index.Enums["compute.State"] = &EnumDoc{
-		Name: "State",
-		Package: "compute",
-		Description: "The state of a cluster.",
-		Values: []string{"PENDING", "RUNNING", "RESTARTING", "RESIZING", "TERMINATING", "TERMINATED", "ERROR", "UNKNOWN"},
-	}
-
-	index.Enums["pipelines.PipelineState"] = &EnumDoc{
-		Name: "PipelineState",
-		Package: "pipelines",
-		Description: "The state of a pipeline.",
-		Values: []string{"IDLE", "RUNNING", "STARTING", "STOPPING", "DELETED", "RECOVERING", "FAILED", "RESETTING"},
-	}
-}
-
 func writeIndex(index *SDKDocsIndex, path string) error {
-	// Ensure directory exists
 	if err := os.MkdirAll(filepath.Dir(path), 0755); err != nil {
 		return fmt.Errorf("failed to create output directory: %w", err)
 	}
 
-	// Sort maps for deterministic output
+	// Sort for deterministic output
 	sortIndex(index)
 
-	// Marshal with indentation for readability
 	data, err := json.MarshalIndent(index, "", "  ")
 	if err != nil {
 		return fmt.Errorf("failed to marshal index: %w", err)
@@ -644,20 +727,6 @@ func writeIndex(index *SDKDocsIndex, path string) error {
 }
 
 func sortIndex(index *SDKDocsIndex) {
-	// Sort service methods
-	for _, service := range index.Services {
-		// Methods are already in a map, which will be sorted by JSON marshaling
-		_ = service
-	}
-
-	// Sort type fields
-	for _, typeDoc := range index.Types {
-		// Sort fields by converting to sorted slice would require changing structure
-		// For now, rely on JSON marshaling order
-		_ = typeDoc
-	}
-
-	// Sort enum values
 	for _, enumDoc := range index.Enums {
 		sort.Strings(enumDoc.Values)
 	}
diff --git a/tools/verify_sdk_docs_index.py b/tools/verify_sdk_docs_index.py
new file mode 100755
index 0000000000..d8eec32b97
--- /dev/null
+++ b/tools/verify_sdk_docs_index.py
@@ -0,0 +1,105 @@
+#!/usr/bin/env python3
+"""
+Verify that sdk_docs_index.json is up to date.
+
+Usage:
+    python3 tools/verify_sdk_docs_index.py
+
+This script regenerates the SDK docs index and compares it with the committed
+version to detect if someone forgot to run `make sdk-docs-index` after
+changing the SDK version.
+
+Exit codes:
+    0 - Index is up to date
+    1 - Index is out of date (needs regeneration)
+    2 - Error during verification
+"""
+
+import json
+import subprocess
+import sys
+import tempfile
+from pathlib import Path
+
+
+def main():
+    script_dir = Path(__file__).parent
+    project_root = script_dir.parent
+    index_path = project_root / "experimental/aitools/lib/providers/sdkdocs/sdk_docs_index.json"
+
+    if not index_path.exists():
+        print(f"ERROR: SDK docs index not found at {index_path}")
+        sys.exit(2)
+
+    # Read current index
+    with open(index_path) as f:
+        current_index = json.load(f)
+
+    # Regenerate index to temp directory
+    with tempfile.TemporaryDirectory() as tmp_dir:
+        tmp_path = Path(tmp_dir)
+
+        result = subprocess.run(
+            ["go", "run", "./tools/gen_sdk_docs_index.go", "-output", str(tmp_path) + "/"],
+            cwd=project_root,
+            capture_output=True,
+            text=True,
+        )
+
+        if result.returncode != 0:
+            print("ERROR: Failed to regenerate SDK docs index")
+            print(result.stderr)
+            sys.exit(2)
+
+        # The generator creates the file with a fixed name
+        generated_path = tmp_path / "sdk_docs_index.json"
+        if not generated_path.exists():
+            print(f"ERROR: Generated index not found at {generated_path}")
+            print(f"Generator output: {result.stdout}")
+            sys.exit(2)
+
+        with open(generated_path) as f:
+            new_index = json.load(f)
+
+    # Compare indexes (ignoring generated_at timestamp)
+    current_comparable = {k: v for k, v in current_index.items() if k != "generated_at"}
+    new_comparable = {k: v for k, v in new_index.items() if k != "generated_at"}
+
+    if current_comparable == new_comparable:
+        print("SDK docs index is up to date.")
+        sys.exit(0)
+
+    # Find differences
+    print("SDK docs index is OUT OF DATE!")
+    print("")
+
+    if current_index.get("sdk_version") != new_index.get("sdk_version"):
+        print(f" SDK version changed: {current_index.get('sdk_version')} -> {new_index.get('sdk_version')}")
+
+    current_services = set(current_index.get("services", {}).keys())
+    new_services = set(new_index.get("services", {}).keys())
+    if current_services != new_services:
+        added = new_services - current_services
+        removed = current_services - new_services
+        if added:
+            print(f" Services added: {added}")
+        if removed:
+            print(f" Services removed: {removed}")
+
+    current_types = len(current_index.get("types", {}))
+    new_types = len(new_index.get("types", {}))
+    if current_types != new_types:
+        print(f" Types count changed: {current_types} -> {new_types}")
+
+    current_enums = len(current_index.get("enums", {}))
+    new_enums = len(new_index.get("enums", {}))
+    if current_enums != new_enums:
+        print(f" Enums count changed: {current_enums} -> {new_enums}")
+
+    print("")
+    print("Run `make sdk-docs-index` to update the index.")
+    sys.exit(1)
+
+
+if __name__ == "__main__":
+    main()

From 2851e3791593f62e9f110819e6dbf69e1276b4f3 Mon Sep 17 00:00:00 2001
From: Evgenii Kniazev
Date: Tue, 20 Jan 2026 13:53:12 +0000
Subject: [PATCH 4/4] Add CI/CD automation for SDK docs index

This commit adds GitHub Actions workflows to automatically manage the
SDK documentation index:

1. check.yml: Added verification step to fail PRs with stale index
   - Runs `make verify-sdk-docs-index` on every PR

2. update-sdk-docs.yml: New workflow for automatic updates
   - Triggers on: manual dispatch, daily schedule, go.mod changes
   - Auto-commits to main when SDK version changes via push
   - Creates PR for scheduled/manual triggers if changes detected
   - Includes SDK version in commit messages

This ensures the SDK docs index stays up to date when:
- Dependabot bumps the SDK version
- Manual SDK updates are made
- Daily scheduled checks detect drift

Co-Authored-By: Claude Opus 4.5
---
 .github/workflows/check.yml           |   3 +
 .github/workflows/update-sdk-docs.yml | 114 ++++++++++++++++++++++++++
 2 files changed, 117 insertions(+)
 create mode 100644 .github/workflows/update-sdk-docs.yml

diff --git a/.github/workflows/check.yml b/.github/workflows/check.yml
index 200de3ea3b..3e34043a28 100644
--- a/.github/workflows/check.yml
+++ b/.github/workflows/check.yml
@@ -55,3 +55,6 @@ jobs:
         run: |-
           make checks
           git diff --exit-code
+
+      - name: Verify SDK docs index is up to date
+        run: make verify-sdk-docs-index
diff --git a/.github/workflows/update-sdk-docs.yml b/.github/workflows/update-sdk-docs.yml
new file mode 100644
index 0000000000..58c27b7bd2
--- /dev/null
+++ b/.github/workflows/update-sdk-docs.yml
@@ -0,0 +1,114 @@
+name: update-sdk-docs
+
+# This workflow automatically updates the SDK docs index when:
+# 1. Manually triggered
+# 2. On a schedule (daily) to catch SDK updates
+# 3. When go.mod changes (SDK version bump)
+
+on:
+  workflow_dispatch:
+    inputs:
+      create_pr:
+        description: 'Create a PR with the changes'
+        required: false
+        default: 'true'
+        type: boolean
+
+  schedule:
+    # Run daily at 6 AM UTC to check for SDK updates
+    - cron: '0 6 * * *'
+
+  push:
+    branches:
+      - main
+    paths:
+      - 'go.mod'
+      - 'go.sum'
+
+jobs:
+  update-sdk-docs:
+    runs-on: ubuntu-latest
+
+    # Don't run on forks
+    if: github.repository == 'databricks/cli'
+
+    permissions:
+      contents: write
+      pull-requests: write
+
+    steps:
+      - name: Checkout repository
+        uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
+        with:
+          # Use a token that can trigger workflows
+          token: ${{ secrets.GITHUB_TOKEN }}
+
+      - name: Setup Go
+        uses: actions/setup-go@4dc6199c7b1a012772edbd06daecab0f50c9053c # v6.1.0
+        with:
+          go-version-file: go.mod
+
+      - name: Get current SDK version
+        id: sdk-version
+        run: |
+          SDK_VERSION=$(go list -m -json github.com/databricks/databricks-sdk-go | jq -r '.Version')
+          echo "version=$SDK_VERSION" >> $GITHUB_OUTPUT
+          echo "Current SDK version: $SDK_VERSION"
+
+      - name: Regenerate SDK docs index
+        run: make sdk-docs-index
+
+      - name: Check for changes
+        id: changes
+        run: |
+          if git diff --quiet experimental/aitools/lib/providers/sdkdocs/sdk_docs_index.json; then
+            echo "changed=false" >> $GITHUB_OUTPUT
+            echo "No changes detected in SDK docs index"
+          else
+            echo "changed=true" >> $GITHUB_OUTPUT
+            echo "Changes detected in SDK docs index"
+            git diff --stat experimental/aitools/lib/providers/sdkdocs/sdk_docs_index.json
+          fi
+
+      - name: Commit and push changes (direct to main)
+        if: steps.changes.outputs.changed == 'true' && github.event_name == 'push'
+        run: |
+          git config user.name "github-actions[bot]"
+          git config user.email "github-actions[bot]@users.noreply.github.com"
+          git add experimental/aitools/lib/providers/sdkdocs/sdk_docs_index.json
+          git commit -m "Auto-update SDK docs index for ${{ steps.sdk-version.outputs.version }}"
+          git push
+
+      - name: Create Pull Request
+        if: steps.changes.outputs.changed == 'true' && (github.event_name == 'schedule' || github.event_name == 'workflow_dispatch')
+        uses: peter-evans/create-pull-request@v7
+        with:
+          token: ${{ secrets.GITHUB_TOKEN }}
+          commit-message: "Auto-update SDK docs index for ${{ steps.sdk-version.outputs.version }}"
+          title: "Auto-update SDK docs index for SDK ${{ steps.sdk-version.outputs.version }}"
+          body: |
+            This PR was automatically generated to update the SDK documentation index.
+
+            **SDK Version:** ${{ steps.sdk-version.outputs.version }}
+
+            The index was regenerated because:
+            - ${{ github.event_name == 'schedule' && 'Scheduled daily check detected changes' || 'Manual workflow trigger' }}
+
+            ## What changed
+            The SDK docs index (`experimental/aitools/lib/providers/sdkdocs/sdk_docs_index.json`)
+            was regenerated from the current Go SDK to include any new or updated:
+            - Service methods and signatures
+            - Type definitions
+            - Enum values
+
+            ## Verification
+            - [ ] CI checks pass
+            - [ ] Index contains expected services
+
+            ---
+            *This PR was auto-generated by the update-sdk-docs workflow.*
+          branch: auto-update-sdk-docs
+          delete-branch: true
+          labels: |
+            automated
+            sdk-docs
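
The sortIndex hunk above drops the comments that explained why only enum values need explicit sorting. The standalone Go sketch below is not part of the patch (the sample data is illustrative); it shows the underlying reason: encoding/json emits map keys in sorted order, while slices keep their insertion order, so only slice-valued fields such as enum Values must be sorted for the committed index to stay byte-for-byte reproducible.

```go
package main

import (
	"encoding/json"
	"fmt"
	"sort"
)

func main() {
	// Map-valued sections of the index (services, types, enums) need no
	// manual sorting: encoding/json always writes map keys in sorted order.
	services := map[string]string{"sql": "SQL", "apps": "Apps", "jobs": "Jobs"}
	out, err := json.MarshalIndent(services, "", "  ")
	if err != nil {
		panic(err)
	}
	fmt.Println(string(out)) // keys appear as apps, jobs, sql

	// Slice-valued fields keep their order, so sortIndex must sort them
	// explicitly to keep the generated JSON deterministic across runs.
	values := []string{"TERMINATED", "PENDING", "RUNNING"}
	sort.Strings(values)
	out, err = json.Marshal(values)
	if err != nil {
		panic(err)
	}
	fmt.Println(string(out)) // ["PENDING","RUNNING","TERMINATED"]
}
```

Determinism is what makes both verify_sdk_docs_index.py and the workflow's `git diff --quiet` check meaningful: a regenerated index differs from the committed one only when the documented SDK surface actually changed.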
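For readers following the Go code rather than the Python verifier, here is a rough Go restatement of the core check verify_sdk_docs_index.py performs: load the committed and the freshly generated index, drop the volatile generated_at field, and compare the rest. This is only a sketch; the file paths and the indexesMatch helper are placeholders and not part of the patch.

```go
package main

import (
	"encoding/json"
	"fmt"
	"os"
	"reflect"
)

// indexesMatch reports whether two index files are identical once the
// generated_at timestamp is ignored, mirroring the Python verifier's logic.
func indexesMatch(committedPath, generatedPath string) (bool, error) {
	load := func(path string) (map[string]any, error) {
		data, err := os.ReadFile(path)
		if err != nil {
			return nil, err
		}
		var m map[string]any
		if err := json.Unmarshal(data, &m); err != nil {
			return nil, err
		}
		delete(m, "generated_at") // ignore the timestamp, as the verifier does
		return m, nil
	}

	committed, err := load(committedPath)
	if err != nil {
		return false, err
	}
	generated, err := load(generatedPath)
	if err != nil {
		return false, err
	}
	return reflect.DeepEqual(committed, generated), nil
}

func main() {
	// Placeholder paths: the committed index and a freshly generated copy.
	ok, err := indexesMatch("sdk_docs_index.json", "new_index.json")
	if err != nil {
		fmt.Fprintln(os.Stderr, err)
		os.Exit(2)
	}
	if !ok {
		fmt.Println("SDK docs index is OUT OF DATE!")
		os.Exit(1)
	}
	fmt.Println("SDK docs index is up to date.")
}
```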