From c7848222eff47072e4ab085f68ca9b41999c8cc6 Mon Sep 17 00:00:00 2001 From: simon Date: Thu, 19 Feb 2026 08:36:25 +0100 Subject: [PATCH 1/7] Upgrade Go SDK to v0.109.0 Update the CLI dependency and checksums so this branch validates against the latest SDK release. Co-authored-by: Cursor --- go.mod | 2 +- go.sum | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/go.mod b/go.mod index 36b941dc87..250323afe1 100644 --- a/go.mod +++ b/go.mod @@ -12,7 +12,7 @@ require ( github.com/charmbracelet/bubbletea v1.3.10 // MIT github.com/charmbracelet/huh v0.8.0 github.com/charmbracelet/lipgloss v1.1.0 // MIT - github.com/databricks/databricks-sdk-go v0.106.0 // Apache 2.0 + github.com/databricks/databricks-sdk-go v0.109.0 // Apache 2.0 github.com/fatih/color v1.18.0 // MIT github.com/google/uuid v1.6.0 // BSD-3-Clause github.com/gorilla/mux v1.8.1 // BSD 3-Clause diff --git a/go.sum b/go.sum index 072ec32235..7a5ef71b63 100644 --- a/go.sum +++ b/go.sum @@ -75,8 +75,8 @@ github.com/creack/pty v1.1.24 h1:bJrF4RRfyJnbTJqzRLHzcGaZK1NeM5kTC9jGgovnR1s= github.com/creack/pty v1.1.24/go.mod h1:08sCNb52WyoAwi2QDyzUCTgcvVFhUzewun7wtTfvcwE= github.com/cyphar/filepath-securejoin v0.4.1 h1:JyxxyPEaktOD+GAnqIqTf9A8tHyAG22rowi7HkoSU1s= github.com/cyphar/filepath-securejoin v0.4.1/go.mod h1:Sdj7gXlvMcPZsbhwhQ33GguGLDGQL7h7bg04C/+u9jI= -github.com/databricks/databricks-sdk-go v0.106.0 h1:hSignqC1MWuC3w3VsXZpkOki5yfRCufZOESv79XMGxo= -github.com/databricks/databricks-sdk-go v0.106.0/go.mod h1:hWoHnHbNLjPKiTm5K/7bcIv3J3Pkgo5x9pPzh8K3RVE= +github.com/databricks/databricks-sdk-go v0.109.0 h1:e2Xx0igIpHHrtn5hO69Rrv4yEG58fiztOCHbpvzLe8c= +github.com/databricks/databricks-sdk-go v0.109.0/go.mod h1:hWoHnHbNLjPKiTm5K/7bcIv3J3Pkgo5x9pPzh8K3RVE= github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= From bbddf21cf07ece29b7eb72f33e02ce94104f10ee Mon Sep 17 00:00:00 2001 From: simon Date: Thu, 19 Feb 2026 11:12:26 +0100 Subject: [PATCH 2/7] Upgrade Go SDK to v0.110.0 Move the CLI SDK dependency from v0.109.0 to v0.110.0 and refresh module checksums accordingly. 
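As a quick smoke test of the new surface, the SparseCheckout type this
release introduces (see the regenerated schema in the next patch) can be
exercised directly. A minimal Go sketch, with illustrative repo values:

    import "github.com/databricks/databricks-sdk-go/service/jobs"

    // Illustrative values; SparseCheckout is the field new in v0.110.0.
    src := jobs.GitSource{
        GitUrl:      "https://github.com/org/repo", // placeholder URL
        GitProvider: jobs.GitProviderGitHub,
        SparseCheckout: &jobs.SparseCheckout{
            Patterns: []string{"notebooks/*"},
        },
    }
    _ = src // compiles against v0.110.0; the field does not exist in earlier SDKs
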
Co-authored-by: Cursor --- go.mod | 2 +- go.sum | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/go.mod b/go.mod index 250323afe1..1283bb3018 100644 --- a/go.mod +++ b/go.mod @@ -12,7 +12,7 @@ require ( github.com/charmbracelet/bubbletea v1.3.10 // MIT github.com/charmbracelet/huh v0.8.0 github.com/charmbracelet/lipgloss v1.1.0 // MIT - github.com/databricks/databricks-sdk-go v0.109.0 // Apache 2.0 + github.com/databricks/databricks-sdk-go v0.110.0 // Apache 2.0 github.com/fatih/color v1.18.0 // MIT github.com/google/uuid v1.6.0 // BSD-3-Clause github.com/gorilla/mux v1.8.1 // BSD 3-Clause diff --git a/go.sum b/go.sum index 7a5ef71b63..1dae205541 100644 --- a/go.sum +++ b/go.sum @@ -75,8 +75,8 @@ github.com/creack/pty v1.1.24 h1:bJrF4RRfyJnbTJqzRLHzcGaZK1NeM5kTC9jGgovnR1s= github.com/creack/pty v1.1.24/go.mod h1:08sCNb52WyoAwi2QDyzUCTgcvVFhUzewun7wtTfvcwE= github.com/cyphar/filepath-securejoin v0.4.1 h1:JyxxyPEaktOD+GAnqIqTf9A8tHyAG22rowi7HkoSU1s= github.com/cyphar/filepath-securejoin v0.4.1/go.mod h1:Sdj7gXlvMcPZsbhwhQ33GguGLDGQL7h7bg04C/+u9jI= -github.com/databricks/databricks-sdk-go v0.109.0 h1:e2Xx0igIpHHrtn5hO69Rrv4yEG58fiztOCHbpvzLe8c= -github.com/databricks/databricks-sdk-go v0.109.0/go.mod h1:hWoHnHbNLjPKiTm5K/7bcIv3J3Pkgo5x9pPzh8K3RVE= +github.com/databricks/databricks-sdk-go v0.110.0 h1:npiIFyXkRfRrgLBoUVwP9ZgePmjtPuwVQmMt3Bd72M0= +github.com/databricks/databricks-sdk-go v0.110.0/go.mod h1:hWoHnHbNLjPKiTm5K/7bcIv3J3Pkgo5x9pPzh8K3RVE= github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= From b48d33d2ffd36ac0aebb5b09b754de9084f196b5 Mon Sep 17 00:00:00 2001 From: simon Date: Thu, 19 Feb 2026 12:11:24 +0100 Subject: [PATCH 3/7] Regenerate schema artifacts for SDK v0.110.0 Update the OpenAPI SHA and regenerate schema annotations/jsonschema for new SDK fields, and extend postgres direct-resource known remote gaps for newly added flattened project fields. 
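The extended known-gap list in type_test.go whitelists bundle fields that
the remote SDK type does not echo back; roughly, the coverage test walks
every field of the bundle struct and fails unless the field either exists
remotely or is whitelisted. A hedged sketch of that shape
(localFields/remoteFields are hypothetical stand-ins, not the repo's
actual helpers):

    // Hypothetical sketch; the real test uses reflection over the
    // dresources structs rather than these named helpers.
    for _, f := range localFields("postgres_projects") {
        if remoteFields("postgres_projects")[f] {
            continue // field exists on the remote type
        }
        if !slices.Contains(knownMissingInRemoteType["postgres_projects"], f) {
            t.Errorf("%s: not on remote type and not in knownMissingInRemoteType", f)
        }
    }
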
Co-authored-by: Cursor --- .codegen/_openapi_sha | 2 +- bundle/direct/dresources/type_test.go | 2 + bundle/internal/schema/annotations.yml | 6 + .../internal/schema/annotations_openapi.yml | 264 +++++++++++++----- .../schema/annotations_openapi_overrides.yml | 182 ++++++------ bundle/schema/jsonschema.json | 125 ++++++++- 6 files changed, 414 insertions(+), 167 deletions(-) diff --git a/.codegen/_openapi_sha b/.codegen/_openapi_sha index 3cb3b7da03..d1f13600ac 100644 --- a/.codegen/_openapi_sha +++ b/.codegen/_openapi_sha @@ -1 +1 @@ -69a174b6c47c5e1039a5f14271440c10e33998ce \ No newline at end of file +281b4455821119945fcc4c850cf2cfad03e23c6c \ No newline at end of file diff --git a/bundle/direct/dresources/type_test.go b/bundle/direct/dresources/type_test.go index 643f27423e..e0806f1233 100644 --- a/bundle/direct/dresources/type_test.go +++ b/bundle/direct/dresources/type_test.go @@ -64,6 +64,8 @@ var knownMissingInRemoteType = map[string][]string{ "suspend_timeout_duration", }, "postgres_projects": { + "budget_policy_id", + "custom_tags", "default_endpoint_settings", "display_name", "history_retention_duration", diff --git a/bundle/internal/schema/annotations.yml b/bundle/internal/schema/annotations.yml index 7c8e98d337..a407be9260 100644 --- a/bundle/internal/schema/annotations.yml +++ b/bundle/internal/schema/annotations.yml @@ -924,9 +924,15 @@ github.com/databricks/cli/bundle/config/resources.PostgresEndpoint: "description": |- PLACEHOLDER github.com/databricks/cli/bundle/config/resources.PostgresProject: + "budget_policy_id": + "description": |- + PLACEHOLDER "create_time": "description": |- PLACEHOLDER + "custom_tags": + "description": |- + PLACEHOLDER "default_endpoint_settings": "description": |- PLACEHOLDER diff --git a/bundle/internal/schema/annotations_openapi.yml b/bundle/internal/schema/annotations_openapi.yml index 231b18d8fd..aa80afab1f 100644 --- a/bundle/internal/schema/annotations_openapi.yml +++ b/bundle/internal/schema/annotations_openapi.yml @@ -150,6 +150,11 @@ github.com/databricks/cli/bundle/config/resources.App: "service_principal_name": "x-databricks-field-behaviors_output_only": |- true + "space": + "description": |- + Name of the space this app belongs to. + "x-databricks-preview": |- + PRIVATE "update_time": "description": |- The update time of the app. Formatted timestamp in ISO 6801. @@ -548,6 +553,43 @@ github.com/databricks/cli/bundle/config/resources.DatabaseInstance: "usage_policy_id": "description": |- The desired usage policy to associate with the instance. +github.com/databricks/cli/bundle/config/resources.ExternalLocation: + "comment": + "description": |- + User-provided free-form text description. + "credential_name": + "description": |- + Name of the storage credential used with this location. + "effective_enable_file_events": + "description": |- + The effective value of `enable_file_events` after applying server-side defaults. + "x-databricks-field-behaviors_output_only": |- + true + "enable_file_events": + "description": |- + Whether to enable file events on this external location. Default to `true`. Set to `false` to disable file events. + The actual applied value may differ due to server-side defaults; check `effective_enable_file_events` for the effective state. + "encryption_details": + "description": |- + Encryption options that apply to clients connecting to cloud storage. + "fallback": + "description": |- + Indicates whether fallback mode is enabled for this external location. 
When fallback mode is enabled, the access to the location falls back to cluster credentials if UC credentials are not sufficient. + "file_event_queue": + "description": |- + File event queue settings. If `enable_file_events` is not `false`, must be defined and have exactly one of the documented properties. + "name": + "description": |- + Name of the external location. + "read_only": + "description": |- + Indicates whether the external location is read-only. + "skip_validation": + "description": |- + Skips validation of the storage credential associated with the external location. + "url": + "description": |- + Path URL of the external location. github.com/databricks/cli/bundle/config/resources.Job: "budget_policy_id": "description": |- @@ -815,71 +857,6 @@ github.com/databricks/cli/bundle/config/resources.Pipeline: Usage policy of this pipeline. "x-databricks-preview": |- PRIVATE -github.com/databricks/cli/bundle/config/resources.PostgresBranch: - "expire_time": - "description": |- - Absolute expiration timestamp. When set, the branch will expire at this time. - "is_protected": - "description": |- - When set to true, protects the branch from deletion and reset. Associated compute endpoints and the project cannot be deleted while the branch is protected. - "no_expiry": - "description": |- - Explicitly disable expiration. When set to true, the branch will not expire. - If set to false, the request is invalid; provide either ttl or expire_time instead. - "source_branch": - "description": |- - The name of the source branch from which this branch was created (data lineage for point-in-time recovery). - If not specified, defaults to the project's default branch. - Format: projects/{project_id}/branches/{branch_id} - "source_branch_lsn": - "description": |- - The Log Sequence Number (LSN) on the source branch from which this branch was created. - "source_branch_time": - "description": |- - The point in time on the source branch from which this branch was created. - "ttl": - "description": |- - Relative time-to-live duration. When set, the branch will expire at creation_time + ttl. -github.com/databricks/cli/bundle/config/resources.PostgresEndpoint: - "autoscaling_limit_max_cu": - "description": |- - The maximum number of Compute Units. Minimum value is 0.5. - "autoscaling_limit_min_cu": - "description": |- - The minimum number of Compute Units. Minimum value is 0.5. - "disabled": - "description": |- - Whether to restrict connections to the compute endpoint. - Enabling this option schedules a suspend compute operation. - A disabled compute endpoint cannot be enabled by a connection or - console action. - "endpoint_type": - "description": |- - The endpoint type. A branch can only have one READ_WRITE endpoint. - "no_suspension": - "description": |- - When set to true, explicitly disables automatic suspension (never suspend). - Should be set to true when provided. - "settings": - "description": |- - A collection of settings for a compute endpoint. - "suspend_timeout_duration": - "description": |- - Duration of inactivity after which the compute endpoint is automatically suspended. - If specified should be between 60s and 604800s (1 minute to 1 week). -github.com/databricks/cli/bundle/config/resources.PostgresProject: - "default_endpoint_settings": - "description": |- - A collection of settings for a compute endpoint. - "display_name": - "description": |- - Human-readable project name. Length should be between 1 and 256 characters. 
- "history_retention_duration": - "description": |- - The number of seconds to retain the shared history for point in time recovery for all branches in this project. Value should be between 0s and 2592000s (up to 30 days). - "pg_version": - "description": |- - The major Postgres version number. Supported versions are 16 and 17. github.com/databricks/cli/bundle/config/resources.QualityMonitor: "assets_dir": "description": |- @@ -1025,6 +1002,7 @@ github.com/databricks/cli/bundle/config/resources.SqlWarehouse: - 2X-Large - 3X-Large - 4X-Large + - 5X-Large "creator_name": "description": |- warehouse creator name @@ -1382,6 +1360,8 @@ github.com/databricks/databricks-sdk-go/service/apps.AppResourceUcSecurableUcSec EXECUTE - |- USE_CONNECTION + - |- + MODIFY github.com/databricks/databricks-sdk-go/service/apps.AppResourceUcSecurableUcSecurableType: "_": "enum": @@ -1422,6 +1402,8 @@ github.com/databricks/databricks-sdk-go/service/apps.ComputeSize: MEDIUM - |- LARGE + - |- + LIQUID github.com/databricks/databricks-sdk-go/service/apps.ComputeState: "_": "enum": @@ -1512,6 +1494,60 @@ github.com/databricks/databricks-sdk-go/service/apps.GitSource: "tag": "description": |- Git tag to checkout. +github.com/databricks/databricks-sdk-go/service/catalog.AwsSqsQueue: + "managed_resource_id": + "description": |- + Unique identifier included in the name of file events managed cloud resources. + "x-databricks-field-behaviors_output_only": |- + true + "queue_url": + "description": |- + The AQS queue url in the format https://sqs.{region}.amazonaws.com/{account id}/{queue name}. + Only required for provided_sqs. +github.com/databricks/databricks-sdk-go/service/catalog.AzureQueueStorage: + "managed_resource_id": + "description": |- + Unique identifier included in the name of file events managed cloud resources. + "x-databricks-field-behaviors_output_only": |- + true + "queue_url": + "description": |- + The AQS queue url in the format https://{storage account}.queue.core.windows.net/{queue name} + Only required for provided_aqs. + "resource_group": + "description": |- + Optional resource group for the queue, event grid subscription, and external location storage + account. + Only required for locations with a service principal storage credential + "subscription_id": + "description": |- + Optional subscription id for the queue, event grid subscription, and external location storage + account. + Required for locations with a service principal storage credential +github.com/databricks/databricks-sdk-go/service/catalog.EncryptionDetails: + "_": + "description": |- + Encryption options that apply to clients connecting to cloud storage. + "sse_encryption_details": + "description": |- + Server-Side Encryption properties for clients communicating with AWS s3. +github.com/databricks/databricks-sdk-go/service/catalog.FileEventQueue: + "managed_aqs": {} + "managed_pubsub": {} + "managed_sqs": {} + "provided_aqs": {} + "provided_pubsub": {} + "provided_sqs": {} +github.com/databricks/databricks-sdk-go/service/catalog.GcpPubsub: + "managed_resource_id": + "description": |- + Unique identifier included in the name of file events managed cloud resources. + "x-databricks-field-behaviors_output_only": |- + true + "subscription_name": + "description": |- + The Pub/Sub subscription name in the format projects/{project}/subscriptions/{subscription name}. + Only required for provided_pubsub. 
github.com/databricks/databricks-sdk-go/service/catalog.MonitorCronSchedule: "pause_status": "description": |- @@ -1661,6 +1697,24 @@ github.com/databricks/databricks-sdk-go/service/catalog.RegisteredModelAlias: "version_num": "description": |- Integer version number of the model version to which this alias points. +github.com/databricks/databricks-sdk-go/service/catalog.SseEncryptionDetails: + "_": + "description": |- + Server-Side Encryption properties for clients communicating with AWS s3. + "algorithm": + "description": |- + Sets the value of the 'x-amz-server-side-encryption' header in S3 request. + "aws_kms_key_arn": + "description": |- + Optional. The ARN of the SSE-KMS key used with the S3 location, when algorithm = "SSE-KMS". + Sets the value of the 'x-amz-server-side-encryption-aws-kms-key-id' header. +github.com/databricks/databricks-sdk-go/service/catalog.SseEncryptionDetailsAlgorithm: + "_": + "enum": + - |- + AWS_SSE_S3 + - |- + AWS_SSE_KMS github.com/databricks/databricks-sdk-go/service/catalog.VolumeType: "_": "enum": @@ -3061,6 +3115,9 @@ github.com/databricks/databricks-sdk-go/service/jobs.GitSource: This field is deprecated "x-databricks-preview": |- PRIVATE + "sparse_checkout": + "x-databricks-preview": |- + PRIVATE github.com/databricks/databricks-sdk-go/service/jobs.JobCluster: "job_cluster_key": "description": |- @@ -3591,6 +3648,10 @@ github.com/databricks/databricks-sdk-go/service/jobs.SparkSubmitTask: Command-line parameters passed to spark submit. Use [Task parameter variables](https://docs.databricks.com/jobs.html#parameter-variables) to set parameters containing information about job runs. +github.com/databricks/databricks-sdk-go/service/jobs.SparseCheckout: + "patterns": + "description": |- + List of patterns to include for sparse checkout. github.com/databricks/databricks-sdk-go/service/jobs.SqlTask: "alert": "description": |- @@ -4184,6 +4245,10 @@ github.com/databricks/databricks-sdk-go/service/pipelines.IngestionSourceType: MYSQL - |- POSTGRESQL + - |- + REDSHIFT + - |- + SQLDW - |- SQLSERVER - |- @@ -4208,6 +4273,68 @@ github.com/databricks/databricks-sdk-go/service/pipelines.IngestionSourceType: SHAREPOINT - |- DYNAMICS365 + - |- + CONFLUENCE + - |- + META_MARKETING + - |- + GOOGLE_ADS + - |- + TIKTOK_ADS + - |- + SALESFORCE_MARKETING_CLOUD + - |- + HUBSPOT + - |- + WORKDAY_HCM + - |- + GUIDEWIRE + - |- + ZENDESK + - |- + SLACK_AUDIT_LOGS + - |- + CROWDSTRIKE_EVENT_STREAM + - |- + WORKDAY_ACTIVITY_LOGGING + - |- + AKAMAI_WAF + - |- + VEEVA + - |- + VEEVA_VAULT + - |- + M365_AUDIT_LOGS + - |- + OKTA_SYSTEM_LOGS + - |- + ONE_PASSWORD_EVENT_LOGS + - |- + PROOFPOINT_SIEM + - |- + WIZ_AUDIT_LOGS + - |- + GITHUB + - |- + OUTLOOK + - |- + SMARTSHEET + - |- + MICROSOFT_TEAMS + - |- + ADOBE_CAMPAIGNS + - |- + LINKEDIN_ADS + - |- + X_ADS + - |- + BING_ADS + - |- + GOOGLE_SEARCH_CONSOLE + - |- + PINTEREST_ADS + - |- + REDDIT_ADS - |- FOREIGN_CATALOG github.com/databricks/databricks-sdk-go/service/pipelines.ManualTrigger: {} @@ -4617,6 +4744,13 @@ github.com/databricks/databricks-sdk-go/service/postgres.EndpointType: ENDPOINT_TYPE_READ_WRITE - |- ENDPOINT_TYPE_READ_ONLY +github.com/databricks/databricks-sdk-go/service/postgres.ProjectCustomTag: + "key": + "description": |- + The key of the custom tag. + "value": + "description": |- + The value of the custom tag. 
github.com/databricks/databricks-sdk-go/service/postgres.ProjectDefaultEndpointSettings: "_": "description": |- diff --git a/bundle/internal/schema/annotations_openapi_overrides.yml b/bundle/internal/schema/annotations_openapi_overrides.yml index 9febc35111..4a2b7e8e67 100644 --- a/bundle/internal/schema/annotations_openapi_overrides.yml +++ b/bundle/internal/schema/annotations_openapi_overrides.yml @@ -260,6 +260,30 @@ github.com/databricks/cli/bundle/config/resources.DatabaseInstancePermissionLeve CAN_USE - |- CAN_MANAGE +github.com/databricks/cli/bundle/config/resources.ExternalLocation: +github.com/databricks/cli/bundle/config/resources.ExternalLocationGrantPrivilege: + "_": + "description": |- + Privilege to grant on an external location + "enum": + - |- + ALL_PRIVILEGES + - |- + CREATE_EXTERNAL_TABLE + - |- + CREATE_EXTERNAL_VOLUME + - |- + CREATE_MANAGED_STORAGE + - |- + CREATE_TABLE + - |- + CREATE_VOLUME + - |- + MANAGE + - |- + READ_FILES + - |- + WRITE_FILES github.com/databricks/cli/bundle/config/resources.Job: "_": "markdown_description": |- @@ -855,6 +879,56 @@ github.com/databricks/databricks-sdk-go/service/apps.ComputeStatus: "description": |- PLACEHOLDER "state": {} +github.com/databricks/databricks-sdk-go/service/catalog.AwsSqsQueue: + "managed_resource_id": + "description": |- + PLACEHOLDER + "queue_url": + "description": |- + PLACEHOLDER +github.com/databricks/databricks-sdk-go/service/catalog.AzureQueueStorage: + "managed_resource_id": + "description": |- + PLACEHOLDER + "queue_url": + "description": |- + PLACEHOLDER + "resource_group": + "description": |- + PLACEHOLDER + "subscription_id": + "description": |- + PLACEHOLDER +github.com/databricks/databricks-sdk-go/service/catalog.EncryptionDetails: + "sse_encryption_details": + "description": |- + PLACEHOLDER +github.com/databricks/databricks-sdk-go/service/catalog.FileEventQueue: + "managed_aqs": + "description": |- + PLACEHOLDER + "managed_pubsub": + "description": |- + PLACEHOLDER + "managed_sqs": + "description": |- + PLACEHOLDER + "provided_aqs": + "description": |- + PLACEHOLDER + "provided_pubsub": + "description": |- + PLACEHOLDER + "provided_sqs": + "description": |- + PLACEHOLDER +github.com/databricks/databricks-sdk-go/service/catalog.GcpPubsub: + "managed_resource_id": + "description": |- + PLACEHOLDER + "subscription_name": + "description": |- + PLACEHOLDER github.com/databricks/databricks-sdk-go/service/catalog.MonitorInferenceLog: "granularities": "description": |- @@ -876,6 +950,22 @@ github.com/databricks/databricks-sdk-go/service/catalog.RegisteredModelAlias: "schema_name": "description": |- PLACEHOLDER +github.com/databricks/databricks-sdk-go/service/catalog.SseEncryptionDetails: + "algorithm": + "description": |- + PLACEHOLDER + "aws_kms_key_arn": + "description": |- + PLACEHOLDER +github.com/databricks/databricks-sdk-go/service/catalog.SseEncryptionDetailsAlgorithm: + "_": + "description": |- + SSE algorithm to use for encrypting S3 objects + "enum": + - |- + AWS_SSE_KMS + - |- + AWS_SSE_S3 github.com/databricks/databricks-sdk-go/service/compute.AwsAttributes: "availability": "description": |- @@ -953,6 +1043,9 @@ github.com/databricks/databricks-sdk-go/service/jobs.GitSource: "git_snapshot": "description": |- PLACEHOLDER + "sparse_checkout": + "description": |- + PLACEHOLDER github.com/databricks/databricks-sdk-go/service/jobs.JobEnvironment: "spec": "description": |- @@ -1101,92 +1194,3 @@ github.com/databricks/databricks-sdk-go/service/sql.EndpointTags: "custom_tags": "description": |- 
PLACEHOLDER -github.com/databricks/databricks-sdk-go/service/catalog.AwsSqsQueue: - "managed_resource_id": - "description": |- - PLACEHOLDER - "queue_url": - "description": |- - PLACEHOLDER -github.com/databricks/databricks-sdk-go/service/catalog.AzureQueueStorage: - "managed_resource_id": - "description": |- - PLACEHOLDER - "queue_url": - "description": |- - PLACEHOLDER - "resource_group": - "description": |- - PLACEHOLDER - "subscription_id": - "description": |- - PLACEHOLDER -github.com/databricks/databricks-sdk-go/service/catalog.EncryptionDetails: - "sse_encryption_details": - "description": |- - PLACEHOLDER -github.com/databricks/databricks-sdk-go/service/catalog.FileEventQueue: - "managed_aqs": - "description": |- - PLACEHOLDER - "managed_pubsub": - "description": |- - PLACEHOLDER - "managed_sqs": - "description": |- - PLACEHOLDER - "provided_aqs": - "description": |- - PLACEHOLDER - "provided_pubsub": - "description": |- - PLACEHOLDER - "provided_sqs": - "description": |- - PLACEHOLDER -github.com/databricks/databricks-sdk-go/service/catalog.GcpPubsub: - "managed_resource_id": - "description": |- - PLACEHOLDER - "subscription_name": - "description": |- - PLACEHOLDER -github.com/databricks/databricks-sdk-go/service/catalog.SseEncryptionDetails: - "algorithm": - "description": |- - PLACEHOLDER - "aws_kms_key_arn": - "description": |- - PLACEHOLDER -github.com/databricks/databricks-sdk-go/service/catalog.SseEncryptionDetailsAlgorithm: - "_": - "description": |- - SSE algorithm to use for encrypting S3 objects - "enum": - - |- - AWS_SSE_KMS - - |- - AWS_SSE_S3 -github.com/databricks/cli/bundle/config/resources.ExternalLocationGrantPrivilege: - "_": - "description": |- - Privilege to grant on an external location - "enum": - - |- - ALL_PRIVILEGES - - |- - CREATE_EXTERNAL_TABLE - - |- - CREATE_EXTERNAL_VOLUME - - |- - CREATE_MANAGED_STORAGE - - |- - CREATE_TABLE - - |- - CREATE_VOLUME - - |- - MANAGE - - |- - READ_FILES - - |- - WRITE_FILES diff --git a/bundle/schema/jsonschema.json b/bundle/schema/jsonschema.json index 0d547a3b2d..98e973c78c 100644 --- a/bundle/schema/jsonschema.json +++ b/bundle/schema/jsonschema.json @@ -209,6 +209,12 @@ "source_code_path": { "$ref": "#/$defs/string" }, + "space": { + "description": "Name of the space this app belongs to.", + "$ref": "#/$defs/string", + "x-databricks-preview": "PRIVATE", + "doNotSuggest": true + }, "usage_policy_id": { "$ref": "#/$defs/string" }, @@ -1799,6 +1805,12 @@ { "type": "object", "properties": { + "budget_policy_id": { + "$ref": "#/$defs/string" + }, + "custom_tags": { + "$ref": "#/$defs/slice/github.com/databricks/databricks-sdk-go/service/postgres.ProjectCustomTag" + }, "default_endpoint_settings": { "$ref": "#/$defs/github.com/databricks/databricks-sdk-go/service/postgres.ProjectDefaultEndpointSettings" }, @@ -2181,7 +2193,7 @@ "$ref": "#/$defs/github.com/databricks/databricks-sdk-go/service/sql.Channel" }, "cluster_size": { - "description": "Size of the clusters allocated for this warehouse.\nIncreasing the size of a spark cluster allows you to run larger queries on\nit. If you want to increase the number of concurrent queries, please tune\nmax_num_clusters.\n\nSupported values:\n- 2X-Small\n- X-Small\n- Small\n- Medium\n- Large\n- X-Large\n- 2X-Large\n- 3X-Large\n- 4X-Large", + "description": "Size of the clusters allocated for this warehouse.\nIncreasing the size of a spark cluster allows you to run larger queries on\nit. 
If you want to increase the number of concurrent queries, please tune\nmax_num_clusters.\n\nSupported values:\n- 2X-Small\n- X-Small\n- Small\n- Medium\n- Large\n- X-Large\n- 2X-Large\n- 3X-Large\n- 4X-Large\n- 5X-Large", "$ref": "#/$defs/string" }, "creator_name": { @@ -3667,7 +3679,8 @@ "WRITE_VOLUME", "SELECT", "EXECUTE", - "USE_CONNECTION" + "USE_CONNECTION", + "MODIFY" ] }, { @@ -3728,7 +3741,8 @@ "type": "string", "enum": [ "MEDIUM", - "LARGE" + "LARGE", + "LIQUID" ] }, { @@ -3858,9 +3872,6 @@ { "type": "object", "properties": { - "managed_resource_id": { - "$ref": "#/$defs/string" - }, "queue_url": { "$ref": "#/$defs/string" } @@ -3878,9 +3889,6 @@ { "type": "object", "properties": { - "managed_resource_id": { - "$ref": "#/$defs/string" - }, "queue_url": { "$ref": "#/$defs/string" }, @@ -3903,6 +3911,7 @@ "oneOf": [ { "type": "object", + "description": "Encryption options that apply to clients connecting to cloud storage.", "properties": { "sse_encryption_details": { "$ref": "#/$defs/github.com/databricks/databricks-sdk-go/service/catalog.SseEncryptionDetails" @@ -3953,9 +3962,6 @@ { "type": "object", "properties": { - "managed_resource_id": { - "$ref": "#/$defs/string" - }, "subscription_name": { "$ref": "#/$defs/string" } @@ -4277,6 +4283,7 @@ "oneOf": [ { "type": "object", + "description": "Server-Side Encryption properties for clients communicating with AWS s3.", "properties": { "algorithm": { "$ref": "#/$defs/github.com/databricks/databricks-sdk-go/service/catalog.SseEncryptionDetailsAlgorithm" @@ -4299,6 +4306,8 @@ "type": "string", "description": "SSE algorithm to use for encrypting S3 objects", "enum": [ + "AWS_SSE_S3", + "AWS_SSE_KMS", "AWS_SSE_KMS", "AWS_SSE_S3" ] @@ -6164,6 +6173,11 @@ "git_url": { "description": "URL of the repository to be cloned by this job.", "$ref": "#/$defs/string" + }, + "sparse_checkout": { + "$ref": "#/$defs/github.com/databricks/databricks-sdk-go/service/jobs.SparseCheckout", + "x-databricks-preview": "PRIVATE", + "doNotSuggest": true } }, "additionalProperties": false, @@ -7086,6 +7100,24 @@ } ] }, + "jobs.SparseCheckout": { + "oneOf": [ + { + "type": "object", + "properties": { + "patterns": { + "description": "List of patterns to include for sparse checkout.", + "$ref": "#/$defs/slice/string" + } + }, + "additionalProperties": false + }, + { + "type": "string", + "pattern": "\\$\\{(var(\\.[a-zA-Z]+([-_]?[a-zA-Z0-9]+)*(\\[[0-9]+\\])*)+)\\}" + } + ] + }, "jobs.SqlTask": { "oneOf": [ { @@ -8149,6 +8181,8 @@ "enum": [ "MYSQL", "POSTGRESQL", + "REDSHIFT", + "SQLDW", "SQLSERVER", "SALESFORCE", "BIGQUERY", @@ -8161,6 +8195,37 @@ "TERADATA", "SHAREPOINT", "DYNAMICS365", + "CONFLUENCE", + "META_MARKETING", + "GOOGLE_ADS", + "TIKTOK_ADS", + "SALESFORCE_MARKETING_CLOUD", + "HUBSPOT", + "WORKDAY_HCM", + "GUIDEWIRE", + "ZENDESK", + "SLACK_AUDIT_LOGS", + "CROWDSTRIKE_EVENT_STREAM", + "WORKDAY_ACTIVITY_LOGGING", + "AKAMAI_WAF", + "VEEVA", + "VEEVA_VAULT", + "M365_AUDIT_LOGS", + "OKTA_SYSTEM_LOGS", + "ONE_PASSWORD_EVENT_LOGS", + "PROOFPOINT_SIEM", + "WIZ_AUDIT_LOGS", + "GITHUB", + "OUTLOOK", + "SMARTSHEET", + "MICROSOFT_TEAMS", + "ADOBE_CAMPAIGNS", + "LINKEDIN_ADS", + "X_ADS", + "BING_ADS", + "GOOGLE_SEARCH_CONSOLE", + "PINTEREST_ADS", + "REDDIT_ADS", "FOREIGN_CATALOG" ] }, @@ -8889,6 +8954,28 @@ } ] }, + "postgres.ProjectCustomTag": { + "oneOf": [ + { + "type": "object", + "properties": { + "key": { + "description": "The key of the custom tag.", + "$ref": "#/$defs/string" + }, + "value": { + "description": "The value of the custom tag.", + "$ref": 
"#/$defs/string" + } + }, + "additionalProperties": false + }, + { + "type": "string", + "pattern": "\\$\\{(var(\\.[a-zA-Z]+([-_]?[a-zA-Z0-9]+)*(\\[[0-9]+\\])*)+)\\}" + } + ] + }, "postgres.ProjectDefaultEndpointSettings": { "oneOf": [ { @@ -11689,6 +11776,20 @@ } ] }, + "postgres.ProjectCustomTag": { + "oneOf": [ + { + "type": "array", + "items": { + "$ref": "#/$defs/github.com/databricks/databricks-sdk-go/service/postgres.ProjectCustomTag" + } + }, + { + "type": "string", + "pattern": "\\$\\{(var(\\.[a-zA-Z]+([-_]?[a-zA-Z0-9]+)*(\\[[0-9]+\\])*)+)\\}" + } + ] + }, "serving.AiGatewayRateLimit": { "oneOf": [ { From 2dca3dba72f8c0cdedccaa290969fa9dffd2fed9 Mon Sep 17 00:00:00 2001 From: simon Date: Thu, 19 Feb 2026 12:22:43 +0100 Subject: [PATCH 4/7] Fix lint and generated artifacts for SDK v0.110.0 Address new SDK field/lint fallout by updating external location and postgres project struct literals, suppressing legacy quality monitor API deprecation warnings, and syncing acceptance refschema and Python bundle codegen outputs. Co-authored-by: Cursor --- acceptance/bundle/refschema/out.fields.txt | 20 +++++ bundle/config/resources/quality_monitor.go | 1 + bundle/direct/dresources/external_location.go | 85 ++++++++++--------- bundle/direct/dresources/postgres_project.go | 2 + bundle/direct/dresources/quality_monitor.go | 4 + python/databricks/bundles/jobs/__init__.py | 8 ++ .../bundles/jobs/_models/dashboard_task.py | 5 +- .../bundles/jobs/_models/git_source.py | 14 +++ .../bundles/jobs/_models/sparse_checkout.py | 40 +++++++++ 9 files changed, 134 insertions(+), 45 deletions(-) create mode 100644 python/databricks/bundles/jobs/_models/sparse_checkout.py diff --git a/acceptance/bundle/refschema/out.fields.txt b/acceptance/bundle/refschema/out.fields.txt index 03f183ad0a..701d218202 100644 --- a/acceptance/bundle/refschema/out.fields.txt +++ b/acceptance/bundle/refschema/out.fields.txt @@ -196,6 +196,7 @@ resources.apps.*.service_principal_client_id string ALL resources.apps.*.service_principal_id int64 ALL resources.apps.*.service_principal_name string ALL resources.apps.*.source_code_path string INPUT +resources.apps.*.space string ALL resources.apps.*.update_time string ALL resources.apps.*.updater string ALL resources.apps.*.url string ALL @@ -668,6 +669,7 @@ resources.external_locations.*.created_at int64 REMOTE resources.external_locations.*.created_by string REMOTE resources.external_locations.*.credential_id string REMOTE resources.external_locations.*.credential_name string ALL +resources.external_locations.*.effective_enable_file_events bool ALL resources.external_locations.*.enable_file_events bool ALL resources.external_locations.*.encryption_details *catalog.EncryptionDetails ALL resources.external_locations.*.encryption_details.sse_encryption_details *catalog.SseEncryptionDetails ALL @@ -771,6 +773,9 @@ resources.jobs.*.git_source.job_source *jobs.JobSource ALL resources.jobs.*.git_source.job_source.dirty_state jobs.JobSourceDirtyState ALL resources.jobs.*.git_source.job_source.import_from_git_branch string ALL resources.jobs.*.git_source.job_source.job_config_path string ALL +resources.jobs.*.git_source.sparse_checkout *jobs.SparseCheckout ALL +resources.jobs.*.git_source.sparse_checkout.patterns []string ALL +resources.jobs.*.git_source.sparse_checkout.patterns[*] string ALL resources.jobs.*.health *jobs.JobsHealthRules ALL resources.jobs.*.health.rules []jobs.JobsHealthRule ALL resources.jobs.*.health.rules[*] jobs.JobsHealthRule ALL @@ -2573,7 +2578,12 @@ 
resources.postgres_endpoints.*.suspend_timeout_duration *duration.Duration INPUT resources.postgres_endpoints.*.uid string REMOTE resources.postgres_endpoints.*.update_time *time.Time REMOTE resources.postgres_endpoints.*.url string INPUT +resources.postgres_projects.*.budget_policy_id string INPUT STATE resources.postgres_projects.*.create_time *time.Time REMOTE +resources.postgres_projects.*.custom_tags []postgres.ProjectCustomTag INPUT STATE +resources.postgres_projects.*.custom_tags[*] postgres.ProjectCustomTag INPUT STATE +resources.postgres_projects.*.custom_tags[*].key string INPUT STATE +resources.postgres_projects.*.custom_tags[*].value string INPUT STATE resources.postgres_projects.*.default_endpoint_settings *postgres.ProjectDefaultEndpointSettings INPUT STATE resources.postgres_projects.*.default_endpoint_settings.autoscaling_limit_max_cu float64 INPUT STATE resources.postgres_projects.*.default_endpoint_settings.autoscaling_limit_min_cu float64 INPUT STATE @@ -2591,6 +2601,11 @@ resources.postgres_projects.*.name string REMOTE resources.postgres_projects.*.pg_version int INPUT STATE resources.postgres_projects.*.project_id string INPUT STATE resources.postgres_projects.*.spec *postgres.ProjectSpec REMOTE +resources.postgres_projects.*.spec.budget_policy_id string REMOTE +resources.postgres_projects.*.spec.custom_tags []postgres.ProjectCustomTag REMOTE +resources.postgres_projects.*.spec.custom_tags[*] postgres.ProjectCustomTag REMOTE +resources.postgres_projects.*.spec.custom_tags[*].key string REMOTE +resources.postgres_projects.*.spec.custom_tags[*].value string REMOTE resources.postgres_projects.*.spec.default_endpoint_settings *postgres.ProjectDefaultEndpointSettings REMOTE resources.postgres_projects.*.spec.default_endpoint_settings.autoscaling_limit_max_cu float64 REMOTE resources.postgres_projects.*.spec.default_endpoint_settings.autoscaling_limit_min_cu float64 REMOTE @@ -2603,6 +2618,11 @@ resources.postgres_projects.*.spec.history_retention_duration *duration.Duration resources.postgres_projects.*.spec.pg_version int REMOTE resources.postgres_projects.*.status *postgres.ProjectStatus REMOTE resources.postgres_projects.*.status.branch_logical_size_limit_bytes int64 REMOTE +resources.postgres_projects.*.status.budget_policy_id string REMOTE +resources.postgres_projects.*.status.custom_tags []postgres.ProjectCustomTag REMOTE +resources.postgres_projects.*.status.custom_tags[*] postgres.ProjectCustomTag REMOTE +resources.postgres_projects.*.status.custom_tags[*].key string REMOTE +resources.postgres_projects.*.status.custom_tags[*].value string REMOTE resources.postgres_projects.*.status.default_endpoint_settings *postgres.ProjectDefaultEndpointSettings REMOTE resources.postgres_projects.*.status.default_endpoint_settings.autoscaling_limit_max_cu float64 REMOTE resources.postgres_projects.*.status.default_endpoint_settings.autoscaling_limit_min_cu float64 REMOTE diff --git a/bundle/config/resources/quality_monitor.go b/bundle/config/resources/quality_monitor.go index f373676d45..da871ed148 100644 --- a/bundle/config/resources/quality_monitor.go +++ b/bundle/config/resources/quality_monitor.go @@ -30,6 +30,7 @@ func (s QualityMonitor) MarshalJSON() ([]byte, error) { } func (s *QualityMonitor) Exists(ctx context.Context, w *databricks.WorkspaceClient, id string) (bool, error) { + //nolint:staticcheck // Bundle resource still uses legacy QualityMonitors API until direct migration is complete. 
_, err := w.QualityMonitors.Get(ctx, catalog.GetQualityMonitorRequest{ TableName: id, }) diff --git a/bundle/direct/dresources/external_location.go b/bundle/direct/dresources/external_location.go index 2d0591e180..61ff286c32 100644 --- a/bundle/direct/dresources/external_location.go +++ b/bundle/direct/dresources/external_location.go @@ -23,17 +23,18 @@ func (*ResourceExternalLocation) PrepareState(input *resources.ExternalLocation) func (*ResourceExternalLocation) RemapState(info *catalog.ExternalLocationInfo) *catalog.CreateExternalLocation { return &catalog.CreateExternalLocation{ - Comment: info.Comment, - CredentialName: info.CredentialName, - EnableFileEvents: info.EnableFileEvents, - EncryptionDetails: info.EncryptionDetails, - Fallback: info.Fallback, - FileEventQueue: info.FileEventQueue, - Name: info.Name, - ReadOnly: info.ReadOnly, - SkipValidation: false, // This is an input-only parameter, never returned by API - Url: info.Url, - ForceSendFields: utils.FilterFields[catalog.CreateExternalLocation](info.ForceSendFields), + Comment: info.Comment, + CredentialName: info.CredentialName, + EffectiveEnableFileEvents: info.EffectiveEnableFileEvents, + EnableFileEvents: info.EnableFileEvents, + EncryptionDetails: info.EncryptionDetails, + Fallback: info.Fallback, + FileEventQueue: info.FileEventQueue, + Name: info.Name, + ReadOnly: info.ReadOnly, + SkipValidation: false, // This is an input-only parameter, never returned by API + Url: info.Url, + ForceSendFields: utils.FilterFields[catalog.CreateExternalLocation](info.ForceSendFields), } } @@ -52,21 +53,22 @@ func (r *ResourceExternalLocation) DoCreate(ctx context.Context, config *catalog // DoUpdate updates the external location in place and returns remote state. func (r *ResourceExternalLocation) DoUpdate(ctx context.Context, id string, config *catalog.CreateExternalLocation, _ Changes) (*catalog.ExternalLocationInfo, error) { updateRequest := catalog.UpdateExternalLocation{ - Comment: config.Comment, - CredentialName: config.CredentialName, - EnableFileEvents: config.EnableFileEvents, - EncryptionDetails: config.EncryptionDetails, - Fallback: config.Fallback, - FileEventQueue: config.FileEventQueue, - Force: false, - IsolationMode: "", // Not supported by DABs - Name: id, - NewName: "", // Only set if name actually changes (see DoUpdateWithID) - Owner: "", // Not supported by DABs - ReadOnly: config.ReadOnly, - SkipValidation: config.SkipValidation, - Url: config.Url, - ForceSendFields: utils.FilterFields[catalog.UpdateExternalLocation](config.ForceSendFields, "IsolationMode", "Owner"), + Comment: config.Comment, + CredentialName: config.CredentialName, + EffectiveEnableFileEvents: config.EffectiveEnableFileEvents, + EnableFileEvents: config.EnableFileEvents, + EncryptionDetails: config.EncryptionDetails, + Fallback: config.Fallback, + FileEventQueue: config.FileEventQueue, + Force: false, + IsolationMode: "", // Not supported by DABs + Name: id, + NewName: "", // Only set if name actually changes (see DoUpdateWithID) + Owner: "", // Not supported by DABs + ReadOnly: config.ReadOnly, + SkipValidation: config.SkipValidation, + Url: config.Url, + ForceSendFields: utils.FilterFields[catalog.UpdateExternalLocation](config.ForceSendFields, "IsolationMode", "Owner"), } return r.client.ExternalLocations.Update(ctx, updateRequest) @@ -75,21 +77,22 @@ func (r *ResourceExternalLocation) DoUpdate(ctx context.Context, id string, conf // DoUpdateWithID updates the external location and returns the new ID if the name changes. 
func (r *ResourceExternalLocation) DoUpdateWithID(ctx context.Context, id string, config *catalog.CreateExternalLocation) (string, *catalog.ExternalLocationInfo, error) { updateRequest := catalog.UpdateExternalLocation{ - Comment: config.Comment, - CredentialName: config.CredentialName, - EnableFileEvents: config.EnableFileEvents, - EncryptionDetails: config.EncryptionDetails, - Fallback: config.Fallback, - FileEventQueue: config.FileEventQueue, - Force: false, - IsolationMode: "", // Not supported by DABs - Name: id, - NewName: "", // Initialized below if needed - Owner: "", // Not supported by DABs - ReadOnly: config.ReadOnly, - SkipValidation: config.SkipValidation, - Url: config.Url, - ForceSendFields: utils.FilterFields[catalog.UpdateExternalLocation](config.ForceSendFields, "IsolationMode", "Owner"), + Comment: config.Comment, + CredentialName: config.CredentialName, + EffectiveEnableFileEvents: config.EffectiveEnableFileEvents, + EnableFileEvents: config.EnableFileEvents, + EncryptionDetails: config.EncryptionDetails, + Fallback: config.Fallback, + FileEventQueue: config.FileEventQueue, + Force: false, + IsolationMode: "", // Not supported by DABs + Name: id, + NewName: "", // Initialized below if needed + Owner: "", // Not supported by DABs + ReadOnly: config.ReadOnly, + SkipValidation: config.SkipValidation, + Url: config.Url, + ForceSendFields: utils.FilterFields[catalog.UpdateExternalLocation](config.ForceSendFields, "IsolationMode", "Owner"), } if config.Name != id { diff --git a/bundle/direct/dresources/postgres_project.go b/bundle/direct/dresources/postgres_project.go index e08ba9e603..bdfaf40595 100644 --- a/bundle/direct/dresources/postgres_project.go +++ b/bundle/direct/dresources/postgres_project.go @@ -38,6 +38,8 @@ func (*ResourcePostgresProject) RemapState(remote *postgres.Project) *PostgresPr // This means we cannot detect remote drift for spec fields. // Use an empty struct (not nil) so field-level diffing works correctly. ProjectSpec: postgres.ProjectSpec{ + BudgetPolicyId: "", + CustomTags: nil, DefaultEndpointSettings: nil, DisplayName: "", HistoryRetentionDuration: nil, diff --git a/bundle/direct/dresources/quality_monitor.go b/bundle/direct/dresources/quality_monitor.go index e9e85d779e..022fd87077 100644 --- a/bundle/direct/dresources/quality_monitor.go +++ b/bundle/direct/dresources/quality_monitor.go @@ -66,6 +66,7 @@ func (*ResourceQualityMonitor) RemapState(info *catalog.MonitorInfo) *QualityMon } func (r *ResourceQualityMonitor) DoRead(ctx context.Context, id string) (*catalog.MonitorInfo, error) { + //nolint:staticcheck // Direct resource still uses legacy QualityMonitors API until migration to data-quality API. return r.client.QualityMonitors.Get(ctx, catalog.GetQualityMonitorRequest{ TableName: id, }) @@ -74,6 +75,7 @@ func (r *ResourceQualityMonitor) DoRead(ctx context.Context, id string) (*catalo func (r *ResourceQualityMonitor) DoCreate(ctx context.Context, config *QualityMonitorState) (string, *catalog.MonitorInfo, error) { req := config.CreateMonitor req.TableName = config.TableName + //nolint:staticcheck // Direct resource still uses legacy QualityMonitors API until migration to data-quality API. 
response, err := r.client.QualityMonitors.Create(ctx, req) if err != nil || response == nil { return "", nil, err @@ -99,6 +101,7 @@ func (r *ResourceQualityMonitor) DoUpdate(ctx context.Context, id string, config ForceSendFields: utils.FilterFields[catalog.UpdateMonitor](config.ForceSendFields), } + //nolint:staticcheck // Direct resource still uses legacy QualityMonitors API until migration to data-quality API. response, err := r.client.QualityMonitors.Update(ctx, updateRequest) if err != nil { return nil, err @@ -108,6 +111,7 @@ func (r *ResourceQualityMonitor) DoUpdate(ctx context.Context, id string, config } func (r *ResourceQualityMonitor) DoDelete(ctx context.Context, id string) error { + //nolint:staticcheck // Direct resource still uses legacy QualityMonitors API until migration to data-quality API. _, err := r.client.QualityMonitors.Delete(ctx, catalog.DeleteQualityMonitorRequest{ TableName: id, }) diff --git a/python/databricks/bundles/jobs/__init__.py b/python/databricks/bundles/jobs/__init__.py index bbe0fe85fa..3e98d2acd6 100644 --- a/python/databricks/bundles/jobs/__init__.py +++ b/python/databricks/bundles/jobs/__init__.py @@ -221,6 +221,9 @@ "SparkSubmitTask", "SparkSubmitTaskDict", "SparkSubmitTaskParam", + "SparseCheckout", + "SparseCheckoutDict", + "SparseCheckoutParam", "SqlTask", "SqlTaskAlert", "SqlTaskAlertDict", @@ -635,6 +638,11 @@ SparkSubmitTaskDict, SparkSubmitTaskParam, ) +from databricks.bundles.jobs._models.sparse_checkout import ( + SparseCheckout, + SparseCheckoutDict, + SparseCheckoutParam, +) from databricks.bundles.jobs._models.sql_task import SqlTask, SqlTaskDict, SqlTaskParam from databricks.bundles.jobs._models.sql_task_alert import ( SqlTaskAlert, diff --git a/python/databricks/bundles/jobs/_models/dashboard_task.py b/python/databricks/bundles/jobs/_models/dashboard_task.py index 98e171359c..4f9cd829a1 100644 --- a/python/databricks/bundles/jobs/_models/dashboard_task.py +++ b/python/databricks/bundles/jobs/_models/dashboard_task.py @@ -4,10 +4,7 @@ from databricks.bundles.core._transform import _transform from databricks.bundles.core._transform_to_json import _transform_to_json_value from databricks.bundles.core._variable import VariableOrDict, VariableOrOptional -from databricks.bundles.jobs._models.subscription import ( - Subscription, - SubscriptionParam, -) +from databricks.bundles.jobs._models.subscription import Subscription, SubscriptionParam if TYPE_CHECKING: from typing_extensions import Self diff --git a/python/databricks/bundles/jobs/_models/git_source.py b/python/databricks/bundles/jobs/_models/git_source.py index 76fa000f66..539a192088 100644 --- a/python/databricks/bundles/jobs/_models/git_source.py +++ b/python/databricks/bundles/jobs/_models/git_source.py @@ -5,6 +5,10 @@ from databricks.bundles.core._transform_to_json import _transform_to_json_value from databricks.bundles.core._variable import VariableOr, VariableOrOptional from databricks.bundles.jobs._models.git_provider import GitProvider, GitProviderParam +from databricks.bundles.jobs._models.sparse_checkout import ( + SparseCheckout, + SparseCheckoutParam, +) if TYPE_CHECKING: from typing_extensions import Self @@ -45,6 +49,11 @@ class GitSource: Name of the tag to be checked out and used by this job. This field cannot be specified in conjunction with git_branch or git_commit. 
""" + sparse_checkout: VariableOrOptional[SparseCheckout] = None + """ + :meta private: [EXPERIMENTAL] + """ + @classmethod def from_dict(cls, value: "GitSourceDict") -> "Self": return _transform(cls, value) @@ -81,5 +90,10 @@ class GitSourceDict(TypedDict, total=False): Name of the tag to be checked out and used by this job. This field cannot be specified in conjunction with git_branch or git_commit. """ + sparse_checkout: VariableOrOptional[SparseCheckoutParam] + """ + :meta private: [EXPERIMENTAL] + """ + GitSourceParam = GitSourceDict | GitSource diff --git a/python/databricks/bundles/jobs/_models/sparse_checkout.py b/python/databricks/bundles/jobs/_models/sparse_checkout.py new file mode 100644 index 0000000000..af68734ad0 --- /dev/null +++ b/python/databricks/bundles/jobs/_models/sparse_checkout.py @@ -0,0 +1,40 @@ +from dataclasses import dataclass, field +from typing import TYPE_CHECKING, TypedDict + +from databricks.bundles.core._transform import _transform +from databricks.bundles.core._transform_to_json import _transform_to_json_value +from databricks.bundles.core._variable import VariableOrList + +if TYPE_CHECKING: + from typing_extensions import Self + + +@dataclass(kw_only=True) +class SparseCheckout: + """ + :meta private: [EXPERIMENTAL] + """ + + patterns: VariableOrList[str] = field(default_factory=list) + """ + List of patterns to include for sparse checkout. + """ + + @classmethod + def from_dict(cls, value: "SparseCheckoutDict") -> "Self": + return _transform(cls, value) + + def as_dict(self) -> "SparseCheckoutDict": + return _transform_to_json_value(self) # type:ignore + + +class SparseCheckoutDict(TypedDict, total=False): + """""" + + patterns: VariableOrList[str] + """ + List of patterns to include for sparse checkout. + """ + + +SparseCheckoutParam = SparseCheckoutDict | SparseCheckout From b0e86020222a30031469b34c815f5d448f7d88c0 Mon Sep 17 00:00:00 2001 From: simon Date: Thu, 19 Feb 2026 13:08:50 +0100 Subject: [PATCH 5/7] Revert "Fix lint and generated artifacts for SDK v0.110.0" This reverts commit 2dca3dba72f8c0cdedccaa290969fa9dffd2fed9. 
--- acceptance/bundle/refschema/out.fields.txt | 20 ----- bundle/config/resources/quality_monitor.go | 1 - bundle/direct/dresources/external_location.go | 85 +++++++++---------- bundle/direct/dresources/postgres_project.go | 2 - bundle/direct/dresources/quality_monitor.go | 4 - python/databricks/bundles/jobs/__init__.py | 8 -- .../bundles/jobs/_models/dashboard_task.py | 5 +- .../bundles/jobs/_models/git_source.py | 14 --- .../bundles/jobs/_models/sparse_checkout.py | 40 --------- 9 files changed, 45 insertions(+), 134 deletions(-) delete mode 100644 python/databricks/bundles/jobs/_models/sparse_checkout.py diff --git a/acceptance/bundle/refschema/out.fields.txt b/acceptance/bundle/refschema/out.fields.txt index 701d218202..03f183ad0a 100644 --- a/acceptance/bundle/refschema/out.fields.txt +++ b/acceptance/bundle/refschema/out.fields.txt @@ -196,7 +196,6 @@ resources.apps.*.service_principal_client_id string ALL resources.apps.*.service_principal_id int64 ALL resources.apps.*.service_principal_name string ALL resources.apps.*.source_code_path string INPUT -resources.apps.*.space string ALL resources.apps.*.update_time string ALL resources.apps.*.updater string ALL resources.apps.*.url string ALL @@ -669,7 +668,6 @@ resources.external_locations.*.created_at int64 REMOTE resources.external_locations.*.created_by string REMOTE resources.external_locations.*.credential_id string REMOTE resources.external_locations.*.credential_name string ALL -resources.external_locations.*.effective_enable_file_events bool ALL resources.external_locations.*.enable_file_events bool ALL resources.external_locations.*.encryption_details *catalog.EncryptionDetails ALL resources.external_locations.*.encryption_details.sse_encryption_details *catalog.SseEncryptionDetails ALL @@ -773,9 +771,6 @@ resources.jobs.*.git_source.job_source *jobs.JobSource ALL resources.jobs.*.git_source.job_source.dirty_state jobs.JobSourceDirtyState ALL resources.jobs.*.git_source.job_source.import_from_git_branch string ALL resources.jobs.*.git_source.job_source.job_config_path string ALL -resources.jobs.*.git_source.sparse_checkout *jobs.SparseCheckout ALL -resources.jobs.*.git_source.sparse_checkout.patterns []string ALL -resources.jobs.*.git_source.sparse_checkout.patterns[*] string ALL resources.jobs.*.health *jobs.JobsHealthRules ALL resources.jobs.*.health.rules []jobs.JobsHealthRule ALL resources.jobs.*.health.rules[*] jobs.JobsHealthRule ALL @@ -2578,12 +2573,7 @@ resources.postgres_endpoints.*.suspend_timeout_duration *duration.Duration INPUT resources.postgres_endpoints.*.uid string REMOTE resources.postgres_endpoints.*.update_time *time.Time REMOTE resources.postgres_endpoints.*.url string INPUT -resources.postgres_projects.*.budget_policy_id string INPUT STATE resources.postgres_projects.*.create_time *time.Time REMOTE -resources.postgres_projects.*.custom_tags []postgres.ProjectCustomTag INPUT STATE -resources.postgres_projects.*.custom_tags[*] postgres.ProjectCustomTag INPUT STATE -resources.postgres_projects.*.custom_tags[*].key string INPUT STATE -resources.postgres_projects.*.custom_tags[*].value string INPUT STATE resources.postgres_projects.*.default_endpoint_settings *postgres.ProjectDefaultEndpointSettings INPUT STATE resources.postgres_projects.*.default_endpoint_settings.autoscaling_limit_max_cu float64 INPUT STATE resources.postgres_projects.*.default_endpoint_settings.autoscaling_limit_min_cu float64 INPUT STATE @@ -2601,11 +2591,6 @@ resources.postgres_projects.*.name string REMOTE 
resources.postgres_projects.*.pg_version int INPUT STATE resources.postgres_projects.*.project_id string INPUT STATE resources.postgres_projects.*.spec *postgres.ProjectSpec REMOTE -resources.postgres_projects.*.spec.budget_policy_id string REMOTE -resources.postgres_projects.*.spec.custom_tags []postgres.ProjectCustomTag REMOTE -resources.postgres_projects.*.spec.custom_tags[*] postgres.ProjectCustomTag REMOTE -resources.postgres_projects.*.spec.custom_tags[*].key string REMOTE -resources.postgres_projects.*.spec.custom_tags[*].value string REMOTE resources.postgres_projects.*.spec.default_endpoint_settings *postgres.ProjectDefaultEndpointSettings REMOTE resources.postgres_projects.*.spec.default_endpoint_settings.autoscaling_limit_max_cu float64 REMOTE resources.postgres_projects.*.spec.default_endpoint_settings.autoscaling_limit_min_cu float64 REMOTE @@ -2618,11 +2603,6 @@ resources.postgres_projects.*.spec.history_retention_duration *duration.Duration resources.postgres_projects.*.spec.pg_version int REMOTE resources.postgres_projects.*.status *postgres.ProjectStatus REMOTE resources.postgres_projects.*.status.branch_logical_size_limit_bytes int64 REMOTE -resources.postgres_projects.*.status.budget_policy_id string REMOTE -resources.postgres_projects.*.status.custom_tags []postgres.ProjectCustomTag REMOTE -resources.postgres_projects.*.status.custom_tags[*] postgres.ProjectCustomTag REMOTE -resources.postgres_projects.*.status.custom_tags[*].key string REMOTE -resources.postgres_projects.*.status.custom_tags[*].value string REMOTE resources.postgres_projects.*.status.default_endpoint_settings *postgres.ProjectDefaultEndpointSettings REMOTE resources.postgres_projects.*.status.default_endpoint_settings.autoscaling_limit_max_cu float64 REMOTE resources.postgres_projects.*.status.default_endpoint_settings.autoscaling_limit_min_cu float64 REMOTE diff --git a/bundle/config/resources/quality_monitor.go b/bundle/config/resources/quality_monitor.go index da871ed148..f373676d45 100644 --- a/bundle/config/resources/quality_monitor.go +++ b/bundle/config/resources/quality_monitor.go @@ -30,7 +30,6 @@ func (s QualityMonitor) MarshalJSON() ([]byte, error) { } func (s *QualityMonitor) Exists(ctx context.Context, w *databricks.WorkspaceClient, id string) (bool, error) { - //nolint:staticcheck // Bundle resource still uses legacy QualityMonitors API until direct migration is complete. 
_, err := w.QualityMonitors.Get(ctx, catalog.GetQualityMonitorRequest{ TableName: id, }) diff --git a/bundle/direct/dresources/external_location.go b/bundle/direct/dresources/external_location.go index 61ff286c32..2d0591e180 100644 --- a/bundle/direct/dresources/external_location.go +++ b/bundle/direct/dresources/external_location.go @@ -23,18 +23,17 @@ func (*ResourceExternalLocation) PrepareState(input *resources.ExternalLocation) func (*ResourceExternalLocation) RemapState(info *catalog.ExternalLocationInfo) *catalog.CreateExternalLocation { return &catalog.CreateExternalLocation{ - Comment: info.Comment, - CredentialName: info.CredentialName, - EffectiveEnableFileEvents: info.EffectiveEnableFileEvents, - EnableFileEvents: info.EnableFileEvents, - EncryptionDetails: info.EncryptionDetails, - Fallback: info.Fallback, - FileEventQueue: info.FileEventQueue, - Name: info.Name, - ReadOnly: info.ReadOnly, - SkipValidation: false, // This is an input-only parameter, never returned by API - Url: info.Url, - ForceSendFields: utils.FilterFields[catalog.CreateExternalLocation](info.ForceSendFields), + Comment: info.Comment, + CredentialName: info.CredentialName, + EnableFileEvents: info.EnableFileEvents, + EncryptionDetails: info.EncryptionDetails, + Fallback: info.Fallback, + FileEventQueue: info.FileEventQueue, + Name: info.Name, + ReadOnly: info.ReadOnly, + SkipValidation: false, // This is an input-only parameter, never returned by API + Url: info.Url, + ForceSendFields: utils.FilterFields[catalog.CreateExternalLocation](info.ForceSendFields), } } @@ -53,22 +52,21 @@ func (r *ResourceExternalLocation) DoCreate(ctx context.Context, config *catalog // DoUpdate updates the external location in place and returns remote state. func (r *ResourceExternalLocation) DoUpdate(ctx context.Context, id string, config *catalog.CreateExternalLocation, _ Changes) (*catalog.ExternalLocationInfo, error) { updateRequest := catalog.UpdateExternalLocation{ - Comment: config.Comment, - CredentialName: config.CredentialName, - EffectiveEnableFileEvents: config.EffectiveEnableFileEvents, - EnableFileEvents: config.EnableFileEvents, - EncryptionDetails: config.EncryptionDetails, - Fallback: config.Fallback, - FileEventQueue: config.FileEventQueue, - Force: false, - IsolationMode: "", // Not supported by DABs - Name: id, - NewName: "", // Only set if name actually changes (see DoUpdateWithID) - Owner: "", // Not supported by DABs - ReadOnly: config.ReadOnly, - SkipValidation: config.SkipValidation, - Url: config.Url, - ForceSendFields: utils.FilterFields[catalog.UpdateExternalLocation](config.ForceSendFields, "IsolationMode", "Owner"), + Comment: config.Comment, + CredentialName: config.CredentialName, + EnableFileEvents: config.EnableFileEvents, + EncryptionDetails: config.EncryptionDetails, + Fallback: config.Fallback, + FileEventQueue: config.FileEventQueue, + Force: false, + IsolationMode: "", // Not supported by DABs + Name: id, + NewName: "", // Only set if name actually changes (see DoUpdateWithID) + Owner: "", // Not supported by DABs + ReadOnly: config.ReadOnly, + SkipValidation: config.SkipValidation, + Url: config.Url, + ForceSendFields: utils.FilterFields[catalog.UpdateExternalLocation](config.ForceSendFields, "IsolationMode", "Owner"), } return r.client.ExternalLocations.Update(ctx, updateRequest) @@ -77,22 +75,21 @@ func (r *ResourceExternalLocation) DoUpdate(ctx context.Context, id string, conf // DoUpdateWithID updates the external location and returns the new ID if the name changes. 
func (r *ResourceExternalLocation) DoUpdateWithID(ctx context.Context, id string, config *catalog.CreateExternalLocation) (string, *catalog.ExternalLocationInfo, error) { updateRequest := catalog.UpdateExternalLocation{ - Comment: config.Comment, - CredentialName: config.CredentialName, - EffectiveEnableFileEvents: config.EffectiveEnableFileEvents, - EnableFileEvents: config.EnableFileEvents, - EncryptionDetails: config.EncryptionDetails, - Fallback: config.Fallback, - FileEventQueue: config.FileEventQueue, - Force: false, - IsolationMode: "", // Not supported by DABs - Name: id, - NewName: "", // Initialized below if needed - Owner: "", // Not supported by DABs - ReadOnly: config.ReadOnly, - SkipValidation: config.SkipValidation, - Url: config.Url, - ForceSendFields: utils.FilterFields[catalog.UpdateExternalLocation](config.ForceSendFields, "IsolationMode", "Owner"), + Comment: config.Comment, + CredentialName: config.CredentialName, + EnableFileEvents: config.EnableFileEvents, + EncryptionDetails: config.EncryptionDetails, + Fallback: config.Fallback, + FileEventQueue: config.FileEventQueue, + Force: false, + IsolationMode: "", // Not supported by DABs + Name: id, + NewName: "", // Initialized below if needed + Owner: "", // Not supported by DABs + ReadOnly: config.ReadOnly, + SkipValidation: config.SkipValidation, + Url: config.Url, + ForceSendFields: utils.FilterFields[catalog.UpdateExternalLocation](config.ForceSendFields, "IsolationMode", "Owner"), } if config.Name != id { diff --git a/bundle/direct/dresources/postgres_project.go b/bundle/direct/dresources/postgres_project.go index bdfaf40595..e08ba9e603 100644 --- a/bundle/direct/dresources/postgres_project.go +++ b/bundle/direct/dresources/postgres_project.go @@ -38,8 +38,6 @@ func (*ResourcePostgresProject) RemapState(remote *postgres.Project) *PostgresPr // This means we cannot detect remote drift for spec fields. // Use an empty struct (not nil) so field-level diffing works correctly. ProjectSpec: postgres.ProjectSpec{ - BudgetPolicyId: "", - CustomTags: nil, DefaultEndpointSettings: nil, DisplayName: "", HistoryRetentionDuration: nil, diff --git a/bundle/direct/dresources/quality_monitor.go b/bundle/direct/dresources/quality_monitor.go index 022fd87077..e9e85d779e 100644 --- a/bundle/direct/dresources/quality_monitor.go +++ b/bundle/direct/dresources/quality_monitor.go @@ -66,7 +66,6 @@ func (*ResourceQualityMonitor) RemapState(info *catalog.MonitorInfo) *QualityMon } func (r *ResourceQualityMonitor) DoRead(ctx context.Context, id string) (*catalog.MonitorInfo, error) { - //nolint:staticcheck // Direct resource still uses legacy QualityMonitors API until migration to data-quality API. return r.client.QualityMonitors.Get(ctx, catalog.GetQualityMonitorRequest{ TableName: id, }) @@ -75,7 +74,6 @@ func (r *ResourceQualityMonitor) DoRead(ctx context.Context, id string) (*catalo func (r *ResourceQualityMonitor) DoCreate(ctx context.Context, config *QualityMonitorState) (string, *catalog.MonitorInfo, error) { req := config.CreateMonitor req.TableName = config.TableName - //nolint:staticcheck // Direct resource still uses legacy QualityMonitors API until migration to data-quality API. 
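The external_location.go hunks above route every ForceSendFields list through utils.FilterFields so that names missing from the target request type, or explicitly excluded like IsolationMode and Owner, never reach the serialized payload. A minimal reflection-based sketch of that idea; the helper below is an assumed stand-in for illustration, not the CLI's actual implementation:

package main

import (
	"fmt"
	"reflect"
	"slices"
)

// filterFields keeps only names that exist as struct fields on T, minus an
// explicit exclude list, so stale ForceSendFields entries are never sent.
func filterFields[T any](fields []string, exclude ...string) []string {
	t := reflect.TypeOf(*new(T))
	var out []string
	for _, name := range fields {
		if slices.Contains(exclude, name) {
			continue
		}
		if _, ok := t.FieldByName(name); ok {
			out = append(out, name)
		}
	}
	return out
}

type updateRequest struct {
	Name string
	Url  string
}

func main() {
	// "Comment" is dropped (not a field on updateRequest); "Owner" is excluded.
	fmt.Println(filterFields[updateRequest]([]string{"Name", "Comment", "Url", "Owner"}, "Owner"))
	// Output: [Name Url]
}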
response, err := r.client.QualityMonitors.Create(ctx, req) if err != nil || response == nil { return "", nil, err @@ -101,7 +99,6 @@ func (r *ResourceQualityMonitor) DoUpdate(ctx context.Context, id string, config ForceSendFields: utils.FilterFields[catalog.UpdateMonitor](config.ForceSendFields), } - //nolint:staticcheck // Direct resource still uses legacy QualityMonitors API until migration to data-quality API. response, err := r.client.QualityMonitors.Update(ctx, updateRequest) if err != nil { return nil, err @@ -111,7 +108,6 @@ func (r *ResourceQualityMonitor) DoUpdate(ctx context.Context, id string, config } func (r *ResourceQualityMonitor) DoDelete(ctx context.Context, id string) error { - //nolint:staticcheck // Direct resource still uses legacy QualityMonitors API until migration to data-quality API. _, err := r.client.QualityMonitors.Delete(ctx, catalog.DeleteQualityMonitorRequest{ TableName: id, }) diff --git a/python/databricks/bundles/jobs/__init__.py b/python/databricks/bundles/jobs/__init__.py index 3e98d2acd6..bbe0fe85fa 100644 --- a/python/databricks/bundles/jobs/__init__.py +++ b/python/databricks/bundles/jobs/__init__.py @@ -221,9 +221,6 @@ "SparkSubmitTask", "SparkSubmitTaskDict", "SparkSubmitTaskParam", - "SparseCheckout", - "SparseCheckoutDict", - "SparseCheckoutParam", "SqlTask", "SqlTaskAlert", "SqlTaskAlertDict", @@ -638,11 +635,6 @@ SparkSubmitTaskDict, SparkSubmitTaskParam, ) -from databricks.bundles.jobs._models.sparse_checkout import ( - SparseCheckout, - SparseCheckoutDict, - SparseCheckoutParam, -) from databricks.bundles.jobs._models.sql_task import SqlTask, SqlTaskDict, SqlTaskParam from databricks.bundles.jobs._models.sql_task_alert import ( SqlTaskAlert, diff --git a/python/databricks/bundles/jobs/_models/dashboard_task.py b/python/databricks/bundles/jobs/_models/dashboard_task.py index 4f9cd829a1..98e171359c 100644 --- a/python/databricks/bundles/jobs/_models/dashboard_task.py +++ b/python/databricks/bundles/jobs/_models/dashboard_task.py @@ -4,7 +4,10 @@ from databricks.bundles.core._transform import _transform from databricks.bundles.core._transform_to_json import _transform_to_json_value from databricks.bundles.core._variable import VariableOrDict, VariableOrOptional -from databricks.bundles.jobs._models.subscription import Subscription, SubscriptionParam +from databricks.bundles.jobs._models.subscription import ( + Subscription, + SubscriptionParam, +) if TYPE_CHECKING: from typing_extensions import Self diff --git a/python/databricks/bundles/jobs/_models/git_source.py b/python/databricks/bundles/jobs/_models/git_source.py index 539a192088..76fa000f66 100644 --- a/python/databricks/bundles/jobs/_models/git_source.py +++ b/python/databricks/bundles/jobs/_models/git_source.py @@ -5,10 +5,6 @@ from databricks.bundles.core._transform_to_json import _transform_to_json_value from databricks.bundles.core._variable import VariableOr, VariableOrOptional from databricks.bundles.jobs._models.git_provider import GitProvider, GitProviderParam -from databricks.bundles.jobs._models.sparse_checkout import ( - SparseCheckout, - SparseCheckoutParam, -) if TYPE_CHECKING: from typing_extensions import Self @@ -49,11 +45,6 @@ class GitSource: Name of the tag to be checked out and used by this job. This field cannot be specified in conjunction with git_branch or git_commit. 
""" - sparse_checkout: VariableOrOptional[SparseCheckout] = None - """ - :meta private: [EXPERIMENTAL] - """ - @classmethod def from_dict(cls, value: "GitSourceDict") -> "Self": return _transform(cls, value) @@ -90,10 +81,5 @@ class GitSourceDict(TypedDict, total=False): Name of the tag to be checked out and used by this job. This field cannot be specified in conjunction with git_branch or git_commit. """ - sparse_checkout: VariableOrOptional[SparseCheckoutParam] - """ - :meta private: [EXPERIMENTAL] - """ - GitSourceParam = GitSourceDict | GitSource diff --git a/python/databricks/bundles/jobs/_models/sparse_checkout.py b/python/databricks/bundles/jobs/_models/sparse_checkout.py deleted file mode 100644 index af68734ad0..0000000000 --- a/python/databricks/bundles/jobs/_models/sparse_checkout.py +++ /dev/null @@ -1,40 +0,0 @@ -from dataclasses import dataclass, field -from typing import TYPE_CHECKING, TypedDict - -from databricks.bundles.core._transform import _transform -from databricks.bundles.core._transform_to_json import _transform_to_json_value -from databricks.bundles.core._variable import VariableOrList - -if TYPE_CHECKING: - from typing_extensions import Self - - -@dataclass(kw_only=True) -class SparseCheckout: - """ - :meta private: [EXPERIMENTAL] - """ - - patterns: VariableOrList[str] = field(default_factory=list) - """ - List of patterns to include for sparse checkout. - """ - - @classmethod - def from_dict(cls, value: "SparseCheckoutDict") -> "Self": - return _transform(cls, value) - - def as_dict(self) -> "SparseCheckoutDict": - return _transform_to_json_value(self) # type:ignore - - -class SparseCheckoutDict(TypedDict, total=False): - """""" - - patterns: VariableOrList[str] - """ - List of patterns to include for sparse checkout. - """ - - -SparseCheckoutParam = SparseCheckoutDict | SparseCheckout From 1033cb70d447811dac5ab2caeb0492769b986d9c Mon Sep 17 00:00:00 2001 From: simon Date: Thu, 19 Feb 2026 13:08:50 +0100 Subject: [PATCH 6/7] Revert "Regenerate schema artifacts for SDK v0.110.0" This reverts commit b48d33d2ffd36ac0aebb5b09b754de9084f196b5. 
--- .codegen/_openapi_sha | 2 +- bundle/direct/dresources/type_test.go | 2 - bundle/internal/schema/annotations.yml | 6 - .../internal/schema/annotations_openapi.yml | 264 +++++------------- .../schema/annotations_openapi_overrides.yml | 182 ++++++------ bundle/schema/jsonschema.json | 125 +-------- 6 files changed, 167 insertions(+), 414 deletions(-) diff --git a/.codegen/_openapi_sha b/.codegen/_openapi_sha index d1f13600ac..3cb3b7da03 100644 --- a/.codegen/_openapi_sha +++ b/.codegen/_openapi_sha @@ -1 +1 @@ -281b4455821119945fcc4c850cf2cfad03e23c6c \ No newline at end of file +69a174b6c47c5e1039a5f14271440c10e33998ce \ No newline at end of file diff --git a/bundle/direct/dresources/type_test.go b/bundle/direct/dresources/type_test.go index e0806f1233..643f27423e 100644 --- a/bundle/direct/dresources/type_test.go +++ b/bundle/direct/dresources/type_test.go @@ -64,8 +64,6 @@ var knownMissingInRemoteType = map[string][]string{ "suspend_timeout_duration", }, "postgres_projects": { - "budget_policy_id", - "custom_tags", "default_endpoint_settings", "display_name", "history_retention_duration", diff --git a/bundle/internal/schema/annotations.yml b/bundle/internal/schema/annotations.yml index a407be9260..7c8e98d337 100644 --- a/bundle/internal/schema/annotations.yml +++ b/bundle/internal/schema/annotations.yml @@ -924,15 +924,9 @@ github.com/databricks/cli/bundle/config/resources.PostgresEndpoint: "description": |- PLACEHOLDER github.com/databricks/cli/bundle/config/resources.PostgresProject: - "budget_policy_id": - "description": |- - PLACEHOLDER "create_time": "description": |- PLACEHOLDER - "custom_tags": - "description": |- - PLACEHOLDER "default_endpoint_settings": "description": |- PLACEHOLDER diff --git a/bundle/internal/schema/annotations_openapi.yml b/bundle/internal/schema/annotations_openapi.yml index aa80afab1f..231b18d8fd 100644 --- a/bundle/internal/schema/annotations_openapi.yml +++ b/bundle/internal/schema/annotations_openapi.yml @@ -150,11 +150,6 @@ github.com/databricks/cli/bundle/config/resources.App: "service_principal_name": "x-databricks-field-behaviors_output_only": |- true - "space": - "description": |- - Name of the space this app belongs to. - "x-databricks-preview": |- - PRIVATE "update_time": "description": |- The update time of the app. Formatted timestamp in ISO 6801. @@ -553,43 +548,6 @@ github.com/databricks/cli/bundle/config/resources.DatabaseInstance: "usage_policy_id": "description": |- The desired usage policy to associate with the instance. -github.com/databricks/cli/bundle/config/resources.ExternalLocation: - "comment": - "description": |- - User-provided free-form text description. - "credential_name": - "description": |- - Name of the storage credential used with this location. - "effective_enable_file_events": - "description": |- - The effective value of `enable_file_events` after applying server-side defaults. - "x-databricks-field-behaviors_output_only": |- - true - "enable_file_events": - "description": |- - Whether to enable file events on this external location. Default to `true`. Set to `false` to disable file events. - The actual applied value may differ due to server-side defaults; check `effective_enable_file_events` for the effective state. - "encryption_details": - "description": |- - Encryption options that apply to clients connecting to cloud storage. - "fallback": - "description": |- - Indicates whether fallback mode is enabled for this external location. 
When fallback mode is enabled, the access to the location falls back to cluster credentials if UC credentials are not sufficient. - "file_event_queue": - "description": |- - File event queue settings. If `enable_file_events` is not `false`, must be defined and have exactly one of the documented properties. - "name": - "description": |- - Name of the external location. - "read_only": - "description": |- - Indicates whether the external location is read-only. - "skip_validation": - "description": |- - Skips validation of the storage credential associated with the external location. - "url": - "description": |- - Path URL of the external location. github.com/databricks/cli/bundle/config/resources.Job: "budget_policy_id": "description": |- @@ -857,6 +815,71 @@ github.com/databricks/cli/bundle/config/resources.Pipeline: Usage policy of this pipeline. "x-databricks-preview": |- PRIVATE +github.com/databricks/cli/bundle/config/resources.PostgresBranch: + "expire_time": + "description": |- + Absolute expiration timestamp. When set, the branch will expire at this time. + "is_protected": + "description": |- + When set to true, protects the branch from deletion and reset. Associated compute endpoints and the project cannot be deleted while the branch is protected. + "no_expiry": + "description": |- + Explicitly disable expiration. When set to true, the branch will not expire. + If set to false, the request is invalid; provide either ttl or expire_time instead. + "source_branch": + "description": |- + The name of the source branch from which this branch was created (data lineage for point-in-time recovery). + If not specified, defaults to the project's default branch. + Format: projects/{project_id}/branches/{branch_id} + "source_branch_lsn": + "description": |- + The Log Sequence Number (LSN) on the source branch from which this branch was created. + "source_branch_time": + "description": |- + The point in time on the source branch from which this branch was created. + "ttl": + "description": |- + Relative time-to-live duration. When set, the branch will expire at creation_time + ttl. +github.com/databricks/cli/bundle/config/resources.PostgresEndpoint: + "autoscaling_limit_max_cu": + "description": |- + The maximum number of Compute Units. Minimum value is 0.5. + "autoscaling_limit_min_cu": + "description": |- + The minimum number of Compute Units. Minimum value is 0.5. + "disabled": + "description": |- + Whether to restrict connections to the compute endpoint. + Enabling this option schedules a suspend compute operation. + A disabled compute endpoint cannot be enabled by a connection or + console action. + "endpoint_type": + "description": |- + The endpoint type. A branch can only have one READ_WRITE endpoint. + "no_suspension": + "description": |- + When set to true, explicitly disables automatic suspension (never suspend). + Should be set to true when provided. + "settings": + "description": |- + A collection of settings for a compute endpoint. + "suspend_timeout_duration": + "description": |- + Duration of inactivity after which the compute endpoint is automatically suspended. + If specified should be between 60s and 604800s (1 minute to 1 week). +github.com/databricks/cli/bundle/config/resources.PostgresProject: + "default_endpoint_settings": + "description": |- + A collection of settings for a compute endpoint. + "display_name": + "description": |- + Human-readable project name. Length should be between 1 and 256 characters. 
+ "history_retention_duration": + "description": |- + The number of seconds to retain the shared history for point in time recovery for all branches in this project. Value should be between 0s and 2592000s (up to 30 days). + "pg_version": + "description": |- + The major Postgres version number. Supported versions are 16 and 17. github.com/databricks/cli/bundle/config/resources.QualityMonitor: "assets_dir": "description": |- @@ -1002,7 +1025,6 @@ github.com/databricks/cli/bundle/config/resources.SqlWarehouse: - 2X-Large - 3X-Large - 4X-Large - - 5X-Large "creator_name": "description": |- warehouse creator name @@ -1360,8 +1382,6 @@ github.com/databricks/databricks-sdk-go/service/apps.AppResourceUcSecurableUcSec EXECUTE - |- USE_CONNECTION - - |- - MODIFY github.com/databricks/databricks-sdk-go/service/apps.AppResourceUcSecurableUcSecurableType: "_": "enum": @@ -1402,8 +1422,6 @@ github.com/databricks/databricks-sdk-go/service/apps.ComputeSize: MEDIUM - |- LARGE - - |- - LIQUID github.com/databricks/databricks-sdk-go/service/apps.ComputeState: "_": "enum": @@ -1494,60 +1512,6 @@ github.com/databricks/databricks-sdk-go/service/apps.GitSource: "tag": "description": |- Git tag to checkout. -github.com/databricks/databricks-sdk-go/service/catalog.AwsSqsQueue: - "managed_resource_id": - "description": |- - Unique identifier included in the name of file events managed cloud resources. - "x-databricks-field-behaviors_output_only": |- - true - "queue_url": - "description": |- - The AQS queue url in the format https://sqs.{region}.amazonaws.com/{account id}/{queue name}. - Only required for provided_sqs. -github.com/databricks/databricks-sdk-go/service/catalog.AzureQueueStorage: - "managed_resource_id": - "description": |- - Unique identifier included in the name of file events managed cloud resources. - "x-databricks-field-behaviors_output_only": |- - true - "queue_url": - "description": |- - The AQS queue url in the format https://{storage account}.queue.core.windows.net/{queue name} - Only required for provided_aqs. - "resource_group": - "description": |- - Optional resource group for the queue, event grid subscription, and external location storage - account. - Only required for locations with a service principal storage credential - "subscription_id": - "description": |- - Optional subscription id for the queue, event grid subscription, and external location storage - account. - Required for locations with a service principal storage credential -github.com/databricks/databricks-sdk-go/service/catalog.EncryptionDetails: - "_": - "description": |- - Encryption options that apply to clients connecting to cloud storage. - "sse_encryption_details": - "description": |- - Server-Side Encryption properties for clients communicating with AWS s3. -github.com/databricks/databricks-sdk-go/service/catalog.FileEventQueue: - "managed_aqs": {} - "managed_pubsub": {} - "managed_sqs": {} - "provided_aqs": {} - "provided_pubsub": {} - "provided_sqs": {} -github.com/databricks/databricks-sdk-go/service/catalog.GcpPubsub: - "managed_resource_id": - "description": |- - Unique identifier included in the name of file events managed cloud resources. - "x-databricks-field-behaviors_output_only": |- - true - "subscription_name": - "description": |- - The Pub/Sub subscription name in the format projects/{project}/subscriptions/{subscription name}. - Only required for provided_pubsub. 
github.com/databricks/databricks-sdk-go/service/catalog.MonitorCronSchedule: "pause_status": "description": |- @@ -1697,24 +1661,6 @@ github.com/databricks/databricks-sdk-go/service/catalog.RegisteredModelAlias: "version_num": "description": |- Integer version number of the model version to which this alias points. -github.com/databricks/databricks-sdk-go/service/catalog.SseEncryptionDetails: - "_": - "description": |- - Server-Side Encryption properties for clients communicating with AWS s3. - "algorithm": - "description": |- - Sets the value of the 'x-amz-server-side-encryption' header in S3 request. - "aws_kms_key_arn": - "description": |- - Optional. The ARN of the SSE-KMS key used with the S3 location, when algorithm = "SSE-KMS". - Sets the value of the 'x-amz-server-side-encryption-aws-kms-key-id' header. -github.com/databricks/databricks-sdk-go/service/catalog.SseEncryptionDetailsAlgorithm: - "_": - "enum": - - |- - AWS_SSE_S3 - - |- - AWS_SSE_KMS github.com/databricks/databricks-sdk-go/service/catalog.VolumeType: "_": "enum": @@ -3115,9 +3061,6 @@ github.com/databricks/databricks-sdk-go/service/jobs.GitSource: This field is deprecated "x-databricks-preview": |- PRIVATE - "sparse_checkout": - "x-databricks-preview": |- - PRIVATE github.com/databricks/databricks-sdk-go/service/jobs.JobCluster: "job_cluster_key": "description": |- @@ -3648,10 +3591,6 @@ github.com/databricks/databricks-sdk-go/service/jobs.SparkSubmitTask: Command-line parameters passed to spark submit. Use [Task parameter variables](https://docs.databricks.com/jobs.html#parameter-variables) to set parameters containing information about job runs. -github.com/databricks/databricks-sdk-go/service/jobs.SparseCheckout: - "patterns": - "description": |- - List of patterns to include for sparse checkout. github.com/databricks/databricks-sdk-go/service/jobs.SqlTask: "alert": "description": |- @@ -4245,10 +4184,6 @@ github.com/databricks/databricks-sdk-go/service/pipelines.IngestionSourceType: MYSQL - |- POSTGRESQL - - |- - REDSHIFT - - |- - SQLDW - |- SQLSERVER - |- @@ -4273,68 +4208,6 @@ github.com/databricks/databricks-sdk-go/service/pipelines.IngestionSourceType: SHAREPOINT - |- DYNAMICS365 - - |- - CONFLUENCE - - |- - META_MARKETING - - |- - GOOGLE_ADS - - |- - TIKTOK_ADS - - |- - SALESFORCE_MARKETING_CLOUD - - |- - HUBSPOT - - |- - WORKDAY_HCM - - |- - GUIDEWIRE - - |- - ZENDESK - - |- - SLACK_AUDIT_LOGS - - |- - CROWDSTRIKE_EVENT_STREAM - - |- - WORKDAY_ACTIVITY_LOGGING - - |- - AKAMAI_WAF - - |- - VEEVA - - |- - VEEVA_VAULT - - |- - M365_AUDIT_LOGS - - |- - OKTA_SYSTEM_LOGS - - |- - ONE_PASSWORD_EVENT_LOGS - - |- - PROOFPOINT_SIEM - - |- - WIZ_AUDIT_LOGS - - |- - GITHUB - - |- - OUTLOOK - - |- - SMARTSHEET - - |- - MICROSOFT_TEAMS - - |- - ADOBE_CAMPAIGNS - - |- - LINKEDIN_ADS - - |- - X_ADS - - |- - BING_ADS - - |- - GOOGLE_SEARCH_CONSOLE - - |- - PINTEREST_ADS - - |- - REDDIT_ADS - |- FOREIGN_CATALOG github.com/databricks/databricks-sdk-go/service/pipelines.ManualTrigger: {} @@ -4744,13 +4617,6 @@ github.com/databricks/databricks-sdk-go/service/postgres.EndpointType: ENDPOINT_TYPE_READ_WRITE - |- ENDPOINT_TYPE_READ_ONLY -github.com/databricks/databricks-sdk-go/service/postgres.ProjectCustomTag: - "key": - "description": |- - The key of the custom tag. - "value": - "description": |- - The value of the custom tag. 
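The ProjectCustomTag annotations removed just above, together with the budget_policy_id entries elsewhere in this revert, correspond to the flattened fields on postgres.ProjectSpec that patch 7 reintroduces. A sketch of what the regenerated spec accepts; field names are taken from the refschema and postgres_project.go hunks in this series, and all values are placeholders:

package main

import (
	"fmt"

	"github.com/databricks/databricks-sdk-go/service/postgres"
)

func main() {
	spec := postgres.ProjectSpec{
		DisplayName:    "analytics",  // placeholder project name
		PgVersion:      17,           // supported major versions per the annotations: 16 and 17
		BudgetPolicyId: "policy-123", // placeholder budget policy id
		CustomTags: []postgres.ProjectCustomTag{
			{Key: "team", Value: "data-platform"}, // placeholder tag
		},
	}
	fmt.Printf("%+v\n", spec)
}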
github.com/databricks/databricks-sdk-go/service/postgres.ProjectDefaultEndpointSettings: "_": "description": |- diff --git a/bundle/internal/schema/annotations_openapi_overrides.yml b/bundle/internal/schema/annotations_openapi_overrides.yml index 4a2b7e8e67..9febc35111 100644 --- a/bundle/internal/schema/annotations_openapi_overrides.yml +++ b/bundle/internal/schema/annotations_openapi_overrides.yml @@ -260,30 +260,6 @@ github.com/databricks/cli/bundle/config/resources.DatabaseInstancePermissionLeve CAN_USE - |- CAN_MANAGE -github.com/databricks/cli/bundle/config/resources.ExternalLocation: -github.com/databricks/cli/bundle/config/resources.ExternalLocationGrantPrivilege: - "_": - "description": |- - Privilege to grant on an external location - "enum": - - |- - ALL_PRIVILEGES - - |- - CREATE_EXTERNAL_TABLE - - |- - CREATE_EXTERNAL_VOLUME - - |- - CREATE_MANAGED_STORAGE - - |- - CREATE_TABLE - - |- - CREATE_VOLUME - - |- - MANAGE - - |- - READ_FILES - - |- - WRITE_FILES github.com/databricks/cli/bundle/config/resources.Job: "_": "markdown_description": |- @@ -879,56 +855,6 @@ github.com/databricks/databricks-sdk-go/service/apps.ComputeStatus: "description": |- PLACEHOLDER "state": {} -github.com/databricks/databricks-sdk-go/service/catalog.AwsSqsQueue: - "managed_resource_id": - "description": |- - PLACEHOLDER - "queue_url": - "description": |- - PLACEHOLDER -github.com/databricks/databricks-sdk-go/service/catalog.AzureQueueStorage: - "managed_resource_id": - "description": |- - PLACEHOLDER - "queue_url": - "description": |- - PLACEHOLDER - "resource_group": - "description": |- - PLACEHOLDER - "subscription_id": - "description": |- - PLACEHOLDER -github.com/databricks/databricks-sdk-go/service/catalog.EncryptionDetails: - "sse_encryption_details": - "description": |- - PLACEHOLDER -github.com/databricks/databricks-sdk-go/service/catalog.FileEventQueue: - "managed_aqs": - "description": |- - PLACEHOLDER - "managed_pubsub": - "description": |- - PLACEHOLDER - "managed_sqs": - "description": |- - PLACEHOLDER - "provided_aqs": - "description": |- - PLACEHOLDER - "provided_pubsub": - "description": |- - PLACEHOLDER - "provided_sqs": - "description": |- - PLACEHOLDER -github.com/databricks/databricks-sdk-go/service/catalog.GcpPubsub: - "managed_resource_id": - "description": |- - PLACEHOLDER - "subscription_name": - "description": |- - PLACEHOLDER github.com/databricks/databricks-sdk-go/service/catalog.MonitorInferenceLog: "granularities": "description": |- @@ -950,22 +876,6 @@ github.com/databricks/databricks-sdk-go/service/catalog.RegisteredModelAlias: "schema_name": "description": |- PLACEHOLDER -github.com/databricks/databricks-sdk-go/service/catalog.SseEncryptionDetails: - "algorithm": - "description": |- - PLACEHOLDER - "aws_kms_key_arn": - "description": |- - PLACEHOLDER -github.com/databricks/databricks-sdk-go/service/catalog.SseEncryptionDetailsAlgorithm: - "_": - "description": |- - SSE algorithm to use for encrypting S3 objects - "enum": - - |- - AWS_SSE_KMS - - |- - AWS_SSE_S3 github.com/databricks/databricks-sdk-go/service/compute.AwsAttributes: "availability": "description": |- @@ -1043,9 +953,6 @@ github.com/databricks/databricks-sdk-go/service/jobs.GitSource: "git_snapshot": "description": |- PLACEHOLDER - "sparse_checkout": - "description": |- - PLACEHOLDER github.com/databricks/databricks-sdk-go/service/jobs.JobEnvironment: "spec": "description": |- @@ -1194,3 +1101,92 @@ github.com/databricks/databricks-sdk-go/service/sql.EndpointTags: "custom_tags": "description": |- 
PLACEHOLDER +github.com/databricks/databricks-sdk-go/service/catalog.AwsSqsQueue: + "managed_resource_id": + "description": |- + PLACEHOLDER + "queue_url": + "description": |- + PLACEHOLDER +github.com/databricks/databricks-sdk-go/service/catalog.AzureQueueStorage: + "managed_resource_id": + "description": |- + PLACEHOLDER + "queue_url": + "description": |- + PLACEHOLDER + "resource_group": + "description": |- + PLACEHOLDER + "subscription_id": + "description": |- + PLACEHOLDER +github.com/databricks/databricks-sdk-go/service/catalog.EncryptionDetails: + "sse_encryption_details": + "description": |- + PLACEHOLDER +github.com/databricks/databricks-sdk-go/service/catalog.FileEventQueue: + "managed_aqs": + "description": |- + PLACEHOLDER + "managed_pubsub": + "description": |- + PLACEHOLDER + "managed_sqs": + "description": |- + PLACEHOLDER + "provided_aqs": + "description": |- + PLACEHOLDER + "provided_pubsub": + "description": |- + PLACEHOLDER + "provided_sqs": + "description": |- + PLACEHOLDER +github.com/databricks/databricks-sdk-go/service/catalog.GcpPubsub: + "managed_resource_id": + "description": |- + PLACEHOLDER + "subscription_name": + "description": |- + PLACEHOLDER +github.com/databricks/databricks-sdk-go/service/catalog.SseEncryptionDetails: + "algorithm": + "description": |- + PLACEHOLDER + "aws_kms_key_arn": + "description": |- + PLACEHOLDER +github.com/databricks/databricks-sdk-go/service/catalog.SseEncryptionDetailsAlgorithm: + "_": + "description": |- + SSE algorithm to use for encrypting S3 objects + "enum": + - |- + AWS_SSE_KMS + - |- + AWS_SSE_S3 +github.com/databricks/cli/bundle/config/resources.ExternalLocationGrantPrivilege: + "_": + "description": |- + Privilege to grant on an external location + "enum": + - |- + ALL_PRIVILEGES + - |- + CREATE_EXTERNAL_TABLE + - |- + CREATE_EXTERNAL_VOLUME + - |- + CREATE_MANAGED_STORAGE + - |- + CREATE_TABLE + - |- + CREATE_VOLUME + - |- + MANAGE + - |- + READ_FILES + - |- + WRITE_FILES diff --git a/bundle/schema/jsonschema.json b/bundle/schema/jsonschema.json index 98e973c78c..0d547a3b2d 100644 --- a/bundle/schema/jsonschema.json +++ b/bundle/schema/jsonschema.json @@ -209,12 +209,6 @@ "source_code_path": { "$ref": "#/$defs/string" }, - "space": { - "description": "Name of the space this app belongs to.", - "$ref": "#/$defs/string", - "x-databricks-preview": "PRIVATE", - "doNotSuggest": true - }, "usage_policy_id": { "$ref": "#/$defs/string" }, @@ -1805,12 +1799,6 @@ { "type": "object", "properties": { - "budget_policy_id": { - "$ref": "#/$defs/string" - }, - "custom_tags": { - "$ref": "#/$defs/slice/github.com/databricks/databricks-sdk-go/service/postgres.ProjectCustomTag" - }, "default_endpoint_settings": { "$ref": "#/$defs/github.com/databricks/databricks-sdk-go/service/postgres.ProjectDefaultEndpointSettings" }, @@ -2193,7 +2181,7 @@ "$ref": "#/$defs/github.com/databricks/databricks-sdk-go/service/sql.Channel" }, "cluster_size": { - "description": "Size of the clusters allocated for this warehouse.\nIncreasing the size of a spark cluster allows you to run larger queries on\nit. If you want to increase the number of concurrent queries, please tune\nmax_num_clusters.\n\nSupported values:\n- 2X-Small\n- X-Small\n- Small\n- Medium\n- Large\n- X-Large\n- 2X-Large\n- 3X-Large\n- 4X-Large\n- 5X-Large", + "description": "Size of the clusters allocated for this warehouse.\nIncreasing the size of a spark cluster allows you to run larger queries on\nit. 
If you want to increase the number of concurrent queries, please tune\nmax_num_clusters.\n\nSupported values:\n- 2X-Small\n- X-Small\n- Small\n- Medium\n- Large\n- X-Large\n- 2X-Large\n- 3X-Large\n- 4X-Large", "$ref": "#/$defs/string" }, "creator_name": { @@ -3679,8 +3667,7 @@ "WRITE_VOLUME", "SELECT", "EXECUTE", - "USE_CONNECTION", - "MODIFY" + "USE_CONNECTION" ] }, { @@ -3741,8 +3728,7 @@ "type": "string", "enum": [ "MEDIUM", - "LARGE", - "LIQUID" + "LARGE" ] }, { @@ -3872,6 +3858,9 @@ { "type": "object", "properties": { + "managed_resource_id": { + "$ref": "#/$defs/string" + }, "queue_url": { "$ref": "#/$defs/string" } @@ -3889,6 +3878,9 @@ { "type": "object", "properties": { + "managed_resource_id": { + "$ref": "#/$defs/string" + }, "queue_url": { "$ref": "#/$defs/string" }, @@ -3911,7 +3903,6 @@ "oneOf": [ { "type": "object", - "description": "Encryption options that apply to clients connecting to cloud storage.", "properties": { "sse_encryption_details": { "$ref": "#/$defs/github.com/databricks/databricks-sdk-go/service/catalog.SseEncryptionDetails" @@ -3962,6 +3953,9 @@ { "type": "object", "properties": { + "managed_resource_id": { + "$ref": "#/$defs/string" + }, "subscription_name": { "$ref": "#/$defs/string" } @@ -4283,7 +4277,6 @@ "oneOf": [ { "type": "object", - "description": "Server-Side Encryption properties for clients communicating with AWS s3.", "properties": { "algorithm": { "$ref": "#/$defs/github.com/databricks/databricks-sdk-go/service/catalog.SseEncryptionDetailsAlgorithm" @@ -4306,8 +4299,6 @@ "type": "string", "description": "SSE algorithm to use for encrypting S3 objects", "enum": [ - "AWS_SSE_S3", - "AWS_SSE_KMS", "AWS_SSE_KMS", "AWS_SSE_S3" ] @@ -6173,11 +6164,6 @@ "git_url": { "description": "URL of the repository to be cloned by this job.", "$ref": "#/$defs/string" - }, - "sparse_checkout": { - "$ref": "#/$defs/github.com/databricks/databricks-sdk-go/service/jobs.SparseCheckout", - "x-databricks-preview": "PRIVATE", - "doNotSuggest": true } }, "additionalProperties": false, @@ -7100,24 +7086,6 @@ } ] }, - "jobs.SparseCheckout": { - "oneOf": [ - { - "type": "object", - "properties": { - "patterns": { - "description": "List of patterns to include for sparse checkout.", - "$ref": "#/$defs/slice/string" - } - }, - "additionalProperties": false - }, - { - "type": "string", - "pattern": "\\$\\{(var(\\.[a-zA-Z]+([-_]?[a-zA-Z0-9]+)*(\\[[0-9]+\\])*)+)\\}" - } - ] - }, "jobs.SqlTask": { "oneOf": [ { @@ -8181,8 +8149,6 @@ "enum": [ "MYSQL", "POSTGRESQL", - "REDSHIFT", - "SQLDW", "SQLSERVER", "SALESFORCE", "BIGQUERY", @@ -8195,37 +8161,6 @@ "TERADATA", "SHAREPOINT", "DYNAMICS365", - "CONFLUENCE", - "META_MARKETING", - "GOOGLE_ADS", - "TIKTOK_ADS", - "SALESFORCE_MARKETING_CLOUD", - "HUBSPOT", - "WORKDAY_HCM", - "GUIDEWIRE", - "ZENDESK", - "SLACK_AUDIT_LOGS", - "CROWDSTRIKE_EVENT_STREAM", - "WORKDAY_ACTIVITY_LOGGING", - "AKAMAI_WAF", - "VEEVA", - "VEEVA_VAULT", - "M365_AUDIT_LOGS", - "OKTA_SYSTEM_LOGS", - "ONE_PASSWORD_EVENT_LOGS", - "PROOFPOINT_SIEM", - "WIZ_AUDIT_LOGS", - "GITHUB", - "OUTLOOK", - "SMARTSHEET", - "MICROSOFT_TEAMS", - "ADOBE_CAMPAIGNS", - "LINKEDIN_ADS", - "X_ADS", - "BING_ADS", - "GOOGLE_SEARCH_CONSOLE", - "PINTEREST_ADS", - "REDDIT_ADS", "FOREIGN_CATALOG" ] }, @@ -8954,28 +8889,6 @@ } ] }, - "postgres.ProjectCustomTag": { - "oneOf": [ - { - "type": "object", - "properties": { - "key": { - "description": "The key of the custom tag.", - "$ref": "#/$defs/string" - }, - "value": { - "description": "The value of the custom tag.", - "$ref": "#/$defs/string" - } 
- }, - "additionalProperties": false - }, - { - "type": "string", - "pattern": "\\$\\{(var(\\.[a-zA-Z]+([-_]?[a-zA-Z0-9]+)*(\\[[0-9]+\\])*)+)\\}" - } - ] - }, "postgres.ProjectDefaultEndpointSettings": { "oneOf": [ { @@ -11776,20 +11689,6 @@ } ] }, - "postgres.ProjectCustomTag": { - "oneOf": [ - { - "type": "array", - "items": { - "$ref": "#/$defs/github.com/databricks/databricks-sdk-go/service/postgres.ProjectCustomTag" - } - }, - { - "type": "string", - "pattern": "\\$\\{(var(\\.[a-zA-Z]+([-_]?[a-zA-Z0-9]+)*(\\[[0-9]+\\])*)+)\\}" - } - ] - }, "serving.AiGatewayRateLimit": { "oneOf": [ { From 892ce4f4c20b55198ce2ee4d2590e881fbb7fee3 Mon Sep 17 00:00:00 2001 From: simon Date: Thu, 19 Feb 2026 13:17:23 +0100 Subject: [PATCH 7/7] Regenerate SDK bump changes via make generate Revert the prior manual regeneration path and regenerate from the canonical `make generate` flow for SDK v0.110.0, then apply minimal follow-up fixes for lint/schema/refschema parity and generated Python artifacts. Co-authored-by: Cursor --- .codegen/_openapi_sha | 2 +- .gitattributes | 1 + NEXT_CHANGELOG.md | 2 + acceptance/bundle/refschema/out.fields.txt | 20 + bundle/config/resources/quality_monitor.go | 1 + bundle/direct/dresources/external_location.go | 85 +-- bundle/direct/dresources/postgres_project.go | 2 + bundle/direct/dresources/quality_monitor.go | 4 + bundle/direct/dresources/type_test.go | 2 + bundle/internal/schema/annotations.yml | 6 + .../internal/schema/annotations_openapi.yml | 194 +++--- .../schema/annotations_openapi_overrides.yml | 182 +++--- bundle/schema/jsonschema.json | 86 ++- bundle/schema/jsonschema_for_docs.json | 285 ++++++++- cmd/account/cmd.go | 2 + cmd/account/endpoints/endpoints.go | 306 ++++++++++ cmd/account/workspaces/workspaces.go | 2 +- cmd/workspace/apps/apps.go | 550 ++++++++++++++++++ cmd/workspace/lakeview/lakeview.go | 2 + .../quality-monitor-v2/quality-monitor-v2.go | 29 +- .../quality-monitors/quality-monitors.go | 52 +- python/databricks/bundles/jobs/__init__.py | 8 + .../bundles/jobs/_models/dashboard_task.py | 5 +- .../bundles/jobs/_models/git_source.py | 14 + .../bundles/jobs/_models/sparse_checkout.py | 40 ++ 25 files changed, 1625 insertions(+), 257 deletions(-) create mode 100755 cmd/account/endpoints/endpoints.go create mode 100644 python/databricks/bundles/jobs/_models/sparse_checkout.py diff --git a/.codegen/_openapi_sha b/.codegen/_openapi_sha index 3cb3b7da03..d1f13600ac 100644 --- a/.codegen/_openapi_sha +++ b/.codegen/_openapi_sha @@ -1 +1 @@ -69a174b6c47c5e1039a5f14271440c10e33998ce \ No newline at end of file +281b4455821119945fcc4c850cf2cfad03e23c6c \ No newline at end of file diff --git a/.gitattributes b/.gitattributes index f7b511de6c..e9f412073d 100755 --- a/.gitattributes +++ b/.gitattributes @@ -9,6 +9,7 @@ cmd/account/custom-app-integration/custom-app-integration.go linguist-generated= cmd/account/disable-legacy-features/disable-legacy-features.go linguist-generated=true cmd/account/enable-ip-access-lists/enable-ip-access-lists.go linguist-generated=true cmd/account/encryption-keys/encryption-keys.go linguist-generated=true +cmd/account/endpoints/endpoints.go linguist-generated=true cmd/account/esm-enablement-account/esm-enablement-account.go linguist-generated=true cmd/account/federation-policy/federation-policy.go linguist-generated=true cmd/account/groups-v2/groups-v2.go linguist-generated=true diff --git a/NEXT_CHANGELOG.md b/NEXT_CHANGELOG.md index 7ba75974b9..9be70db18e 100644 --- a/NEXT_CHANGELOG.md +++ b/NEXT_CHANGELOG.md @@ -11,3 +11,5 @@ ### 
Dependency updates * Upgrade Go SDK to v0.106.0 (([#4486](https://github.com/databricks/cli/pull/4486))) * Upgrade Terraform provider to v1.106.0 (([#4542](https://github.com/databricks/cli/pull/4542))) + +### API Changes diff --git a/acceptance/bundle/refschema/out.fields.txt b/acceptance/bundle/refschema/out.fields.txt index 03f183ad0a..701d218202 100644 --- a/acceptance/bundle/refschema/out.fields.txt +++ b/acceptance/bundle/refschema/out.fields.txt @@ -196,6 +196,7 @@ resources.apps.*.service_principal_client_id string ALL resources.apps.*.service_principal_id int64 ALL resources.apps.*.service_principal_name string ALL resources.apps.*.source_code_path string INPUT +resources.apps.*.space string ALL resources.apps.*.update_time string ALL resources.apps.*.updater string ALL resources.apps.*.url string ALL @@ -668,6 +669,7 @@ resources.external_locations.*.created_at int64 REMOTE resources.external_locations.*.created_by string REMOTE resources.external_locations.*.credential_id string REMOTE resources.external_locations.*.credential_name string ALL +resources.external_locations.*.effective_enable_file_events bool ALL resources.external_locations.*.enable_file_events bool ALL resources.external_locations.*.encryption_details *catalog.EncryptionDetails ALL resources.external_locations.*.encryption_details.sse_encryption_details *catalog.SseEncryptionDetails ALL @@ -771,6 +773,9 @@ resources.jobs.*.git_source.job_source *jobs.JobSource ALL resources.jobs.*.git_source.job_source.dirty_state jobs.JobSourceDirtyState ALL resources.jobs.*.git_source.job_source.import_from_git_branch string ALL resources.jobs.*.git_source.job_source.job_config_path string ALL +resources.jobs.*.git_source.sparse_checkout *jobs.SparseCheckout ALL +resources.jobs.*.git_source.sparse_checkout.patterns []string ALL +resources.jobs.*.git_source.sparse_checkout.patterns[*] string ALL resources.jobs.*.health *jobs.JobsHealthRules ALL resources.jobs.*.health.rules []jobs.JobsHealthRule ALL resources.jobs.*.health.rules[*] jobs.JobsHealthRule ALL @@ -2573,7 +2578,12 @@ resources.postgres_endpoints.*.suspend_timeout_duration *duration.Duration INPUT resources.postgres_endpoints.*.uid string REMOTE resources.postgres_endpoints.*.update_time *time.Time REMOTE resources.postgres_endpoints.*.url string INPUT +resources.postgres_projects.*.budget_policy_id string INPUT STATE resources.postgres_projects.*.create_time *time.Time REMOTE +resources.postgres_projects.*.custom_tags []postgres.ProjectCustomTag INPUT STATE +resources.postgres_projects.*.custom_tags[*] postgres.ProjectCustomTag INPUT STATE +resources.postgres_projects.*.custom_tags[*].key string INPUT STATE +resources.postgres_projects.*.custom_tags[*].value string INPUT STATE resources.postgres_projects.*.default_endpoint_settings *postgres.ProjectDefaultEndpointSettings INPUT STATE resources.postgres_projects.*.default_endpoint_settings.autoscaling_limit_max_cu float64 INPUT STATE resources.postgres_projects.*.default_endpoint_settings.autoscaling_limit_min_cu float64 INPUT STATE @@ -2591,6 +2601,11 @@ resources.postgres_projects.*.name string REMOTE resources.postgres_projects.*.pg_version int INPUT STATE resources.postgres_projects.*.project_id string INPUT STATE resources.postgres_projects.*.spec *postgres.ProjectSpec REMOTE +resources.postgres_projects.*.spec.budget_policy_id string REMOTE +resources.postgres_projects.*.spec.custom_tags []postgres.ProjectCustomTag REMOTE +resources.postgres_projects.*.spec.custom_tags[*] postgres.ProjectCustomTag REMOTE 
+resources.postgres_projects.*.spec.custom_tags[*].key string REMOTE +resources.postgres_projects.*.spec.custom_tags[*].value string REMOTE resources.postgres_projects.*.spec.default_endpoint_settings *postgres.ProjectDefaultEndpointSettings REMOTE resources.postgres_projects.*.spec.default_endpoint_settings.autoscaling_limit_max_cu float64 REMOTE resources.postgres_projects.*.spec.default_endpoint_settings.autoscaling_limit_min_cu float64 REMOTE @@ -2603,6 +2618,11 @@ resources.postgres_projects.*.spec.history_retention_duration *duration.Duration resources.postgres_projects.*.spec.pg_version int REMOTE resources.postgres_projects.*.status *postgres.ProjectStatus REMOTE resources.postgres_projects.*.status.branch_logical_size_limit_bytes int64 REMOTE +resources.postgres_projects.*.status.budget_policy_id string REMOTE +resources.postgres_projects.*.status.custom_tags []postgres.ProjectCustomTag REMOTE +resources.postgres_projects.*.status.custom_tags[*] postgres.ProjectCustomTag REMOTE +resources.postgres_projects.*.status.custom_tags[*].key string REMOTE +resources.postgres_projects.*.status.custom_tags[*].value string REMOTE resources.postgres_projects.*.status.default_endpoint_settings *postgres.ProjectDefaultEndpointSettings REMOTE resources.postgres_projects.*.status.default_endpoint_settings.autoscaling_limit_max_cu float64 REMOTE resources.postgres_projects.*.status.default_endpoint_settings.autoscaling_limit_min_cu float64 REMOTE diff --git a/bundle/config/resources/quality_monitor.go b/bundle/config/resources/quality_monitor.go index f373676d45..da871ed148 100644 --- a/bundle/config/resources/quality_monitor.go +++ b/bundle/config/resources/quality_monitor.go @@ -30,6 +30,7 @@ func (s QualityMonitor) MarshalJSON() ([]byte, error) { } func (s *QualityMonitor) Exists(ctx context.Context, w *databricks.WorkspaceClient, id string) (bool, error) { + //nolint:staticcheck // Bundle resource still uses legacy QualityMonitors API until direct migration is complete. _, err := w.QualityMonitors.Get(ctx, catalog.GetQualityMonitorRequest{ TableName: id, }) diff --git a/bundle/direct/dresources/external_location.go b/bundle/direct/dresources/external_location.go index 2d0591e180..9d082d76aa 100644 --- a/bundle/direct/dresources/external_location.go +++ b/bundle/direct/dresources/external_location.go @@ -23,17 +23,18 @@ func (*ResourceExternalLocation) PrepareState(input *resources.ExternalLocation) func (*ResourceExternalLocation) RemapState(info *catalog.ExternalLocationInfo) *catalog.CreateExternalLocation { return &catalog.CreateExternalLocation{ - Comment: info.Comment, - CredentialName: info.CredentialName, - EnableFileEvents: info.EnableFileEvents, - EncryptionDetails: info.EncryptionDetails, - Fallback: info.Fallback, - FileEventQueue: info.FileEventQueue, - Name: info.Name, - ReadOnly: info.ReadOnly, - SkipValidation: false, // This is an input-only parameter, never returned by API - Url: info.Url, - ForceSendFields: utils.FilterFields[catalog.CreateExternalLocation](info.ForceSendFields), + Comment: info.Comment, + CredentialName: info.CredentialName, + EffectiveEnableFileEvents: false, // Output-only; do not persist to desired state. 
+ EnableFileEvents: info.EnableFileEvents, + EncryptionDetails: info.EncryptionDetails, + Fallback: info.Fallback, + FileEventQueue: info.FileEventQueue, + Name: info.Name, + ReadOnly: info.ReadOnly, + SkipValidation: false, // This is an input-only parameter, never returned by API + Url: info.Url, + ForceSendFields: utils.FilterFields[catalog.CreateExternalLocation](info.ForceSendFields, "EffectiveEnableFileEvents"), } } @@ -52,21 +53,22 @@ func (r *ResourceExternalLocation) DoCreate(ctx context.Context, config *catalog // DoUpdate updates the external location in place and returns remote state. func (r *ResourceExternalLocation) DoUpdate(ctx context.Context, id string, config *catalog.CreateExternalLocation, _ Changes) (*catalog.ExternalLocationInfo, error) { updateRequest := catalog.UpdateExternalLocation{ - Comment: config.Comment, - CredentialName: config.CredentialName, - EnableFileEvents: config.EnableFileEvents, - EncryptionDetails: config.EncryptionDetails, - Fallback: config.Fallback, - FileEventQueue: config.FileEventQueue, - Force: false, - IsolationMode: "", // Not supported by DABs - Name: id, - NewName: "", // Only set if name actually changes (see DoUpdateWithID) - Owner: "", // Not supported by DABs - ReadOnly: config.ReadOnly, - SkipValidation: config.SkipValidation, - Url: config.Url, - ForceSendFields: utils.FilterFields[catalog.UpdateExternalLocation](config.ForceSendFields, "IsolationMode", "Owner"), + Comment: config.Comment, + CredentialName: config.CredentialName, + EffectiveEnableFileEvents: false, // Output-only field, should never be sent in update payload. + EnableFileEvents: config.EnableFileEvents, + EncryptionDetails: config.EncryptionDetails, + Fallback: config.Fallback, + FileEventQueue: config.FileEventQueue, + Force: false, + IsolationMode: "", // Not supported by DABs + Name: id, + NewName: "", // Only set if name actually changes (see DoUpdateWithID) + Owner: "", // Not supported by DABs + ReadOnly: config.ReadOnly, + SkipValidation: config.SkipValidation, + Url: config.Url, + ForceSendFields: utils.FilterFields[catalog.UpdateExternalLocation](config.ForceSendFields, "IsolationMode", "Owner", "EffectiveEnableFileEvents"), } return r.client.ExternalLocations.Update(ctx, updateRequest) @@ -75,21 +77,22 @@ func (r *ResourceExternalLocation) DoUpdate(ctx context.Context, id string, conf // DoUpdateWithID updates the external location and returns the new ID if the name changes. func (r *ResourceExternalLocation) DoUpdateWithID(ctx context.Context, id string, config *catalog.CreateExternalLocation) (string, *catalog.ExternalLocationInfo, error) { updateRequest := catalog.UpdateExternalLocation{ - Comment: config.Comment, - CredentialName: config.CredentialName, - EnableFileEvents: config.EnableFileEvents, - EncryptionDetails: config.EncryptionDetails, - Fallback: config.Fallback, - FileEventQueue: config.FileEventQueue, - Force: false, - IsolationMode: "", // Not supported by DABs - Name: id, - NewName: "", // Initialized below if needed - Owner: "", // Not supported by DABs - ReadOnly: config.ReadOnly, - SkipValidation: config.SkipValidation, - Url: config.Url, - ForceSendFields: utils.FilterFields[catalog.UpdateExternalLocation](config.ForceSendFields, "IsolationMode", "Owner"), + Comment: config.Comment, + CredentialName: config.CredentialName, + EffectiveEnableFileEvents: false, // Output-only field, should never be sent in update payload. 
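Both update paths pin EffectiveEnableFileEvents to false and filter it out of ForceSendFields because the server computes that value; if it leaked into the desired state, every plan would report phantom drift. A toy illustration of the reasoning, with names simplified from the diff rather than the CLI's real state types:

package main

import (
	"fmt"
	"reflect"
)

type locationState struct {
	Url                       string
	EnableFileEvents          bool
	EffectiveEnableFileEvents bool // output-only: computed server-side
}

// normalize zeroes output-only fields so desired and remote states
// compare cleanly during drift detection.
func normalize(s locationState) locationState {
	s.EffectiveEnableFileEvents = false
	return s
}

func main() {
	desired := locationState{Url: "s3://bucket/data", EnableFileEvents: true}
	remote := locationState{Url: "s3://bucket/data", EnableFileEvents: true, EffectiveEnableFileEvents: true}
	fmt.Println(reflect.DeepEqual(normalize(desired), normalize(remote))) // true: no phantom drift
}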
+ EnableFileEvents: config.EnableFileEvents, + EncryptionDetails: config.EncryptionDetails, + Fallback: config.Fallback, + FileEventQueue: config.FileEventQueue, + Force: false, + IsolationMode: "", // Not supported by DABs + Name: id, + NewName: "", // Initialized below if needed + Owner: "", // Not supported by DABs + ReadOnly: config.ReadOnly, + SkipValidation: config.SkipValidation, + Url: config.Url, + ForceSendFields: utils.FilterFields[catalog.UpdateExternalLocation](config.ForceSendFields, "IsolationMode", "Owner", "EffectiveEnableFileEvents"), } if config.Name != id { diff --git a/bundle/direct/dresources/postgres_project.go b/bundle/direct/dresources/postgres_project.go index e08ba9e603..bdfaf40595 100644 --- a/bundle/direct/dresources/postgres_project.go +++ b/bundle/direct/dresources/postgres_project.go @@ -38,6 +38,8 @@ func (*ResourcePostgresProject) RemapState(remote *postgres.Project) *PostgresPr // This means we cannot detect remote drift for spec fields. // Use an empty struct (not nil) so field-level diffing works correctly. ProjectSpec: postgres.ProjectSpec{ + BudgetPolicyId: "", + CustomTags: nil, DefaultEndpointSettings: nil, DisplayName: "", HistoryRetentionDuration: nil, diff --git a/bundle/direct/dresources/quality_monitor.go b/bundle/direct/dresources/quality_monitor.go index e9e85d779e..022fd87077 100644 --- a/bundle/direct/dresources/quality_monitor.go +++ b/bundle/direct/dresources/quality_monitor.go @@ -66,6 +66,7 @@ func (*ResourceQualityMonitor) RemapState(info *catalog.MonitorInfo) *QualityMon } func (r *ResourceQualityMonitor) DoRead(ctx context.Context, id string) (*catalog.MonitorInfo, error) { + //nolint:staticcheck // Direct resource still uses legacy QualityMonitors API until migration to data-quality API. return r.client.QualityMonitors.Get(ctx, catalog.GetQualityMonitorRequest{ TableName: id, }) @@ -74,6 +75,7 @@ func (r *ResourceQualityMonitor) DoRead(ctx context.Context, id string) (*catalo func (r *ResourceQualityMonitor) DoCreate(ctx context.Context, config *QualityMonitorState) (string, *catalog.MonitorInfo, error) { req := config.CreateMonitor req.TableName = config.TableName + //nolint:staticcheck // Direct resource still uses legacy QualityMonitors API until migration to data-quality API. response, err := r.client.QualityMonitors.Create(ctx, req) if err != nil || response == nil { return "", nil, err @@ -99,6 +101,7 @@ func (r *ResourceQualityMonitor) DoUpdate(ctx context.Context, id string, config ForceSendFields: utils.FilterFields[catalog.UpdateMonitor](config.ForceSendFields), } + //nolint:staticcheck // Direct resource still uses legacy QualityMonitors API until migration to data-quality API. response, err := r.client.QualityMonitors.Update(ctx, updateRequest) if err != nil { return nil, err @@ -108,6 +111,7 @@ func (r *ResourceQualityMonitor) DoUpdate(ctx context.Context, id string, config } func (r *ResourceQualityMonitor) DoDelete(ctx context.Context, id string) error { + //nolint:staticcheck // Direct resource still uses legacy QualityMonitors API until migration to data-quality API. 
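The postgres_project.go hunk above restores BudgetPolicyId and CustomTags to the empty ProjectSpec that RemapState returns; per the surrounding comment, the spec must be an empty struct rather than nil so field-level diffing works. A toy sketch of why: a concrete zero value lets each field diff individually against the desired spec, while a nil spec could only be reported as changed wholesale. The types below are simplified stand-ins, not the CLI's diff engine:

package main

import "fmt"

type spec struct {
	DisplayName string
	PgVersion   int
}

// diffFields reports which individual fields differ between desired and
// remote. With an empty (zero-valued) remote spec this still yields a
// per-field answer; a nil remote would force an all-or-nothing comparison.
func diffFields(desired, remote spec) []string {
	var changed []string
	if desired.DisplayName != remote.DisplayName {
		changed = append(changed, "display_name")
	}
	if desired.PgVersion != remote.PgVersion {
		changed = append(changed, "pg_version")
	}
	return changed
}

func main() {
	desired := spec{DisplayName: "analytics", PgVersion: 17}
	fmt.Println(diffFields(desired, spec{})) // [display_name pg_version]
}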
_, err := r.client.QualityMonitors.Delete(ctx, catalog.DeleteQualityMonitorRequest{ TableName: id, }) diff --git a/bundle/direct/dresources/type_test.go b/bundle/direct/dresources/type_test.go index 643f27423e..e0806f1233 100644 --- a/bundle/direct/dresources/type_test.go +++ b/bundle/direct/dresources/type_test.go @@ -64,6 +64,8 @@ var knownMissingInRemoteType = map[string][]string{ "suspend_timeout_duration", }, "postgres_projects": { + "budget_policy_id", + "custom_tags", "default_endpoint_settings", "display_name", "history_retention_duration", diff --git a/bundle/internal/schema/annotations.yml b/bundle/internal/schema/annotations.yml index 7c8e98d337..a407be9260 100644 --- a/bundle/internal/schema/annotations.yml +++ b/bundle/internal/schema/annotations.yml @@ -924,9 +924,15 @@ github.com/databricks/cli/bundle/config/resources.PostgresEndpoint: "description": |- PLACEHOLDER github.com/databricks/cli/bundle/config/resources.PostgresProject: + "budget_policy_id": + "description": |- + PLACEHOLDER "create_time": "description": |- PLACEHOLDER + "custom_tags": + "description": |- + PLACEHOLDER "default_endpoint_settings": "description": |- PLACEHOLDER diff --git a/bundle/internal/schema/annotations_openapi.yml b/bundle/internal/schema/annotations_openapi.yml index 231b18d8fd..bd95cd1c4a 100644 --- a/bundle/internal/schema/annotations_openapi.yml +++ b/bundle/internal/schema/annotations_openapi.yml @@ -150,6 +150,11 @@ github.com/databricks/cli/bundle/config/resources.App: "service_principal_name": "x-databricks-field-behaviors_output_only": |- true + "space": + "description": |- + Name of the space this app belongs to. + "x-databricks-preview": |- + PRIVATE "update_time": "description": |- The update time of the app. Formatted timestamp in ISO 6801. @@ -548,6 +553,43 @@ github.com/databricks/cli/bundle/config/resources.DatabaseInstance: "usage_policy_id": "description": |- The desired usage policy to associate with the instance. +github.com/databricks/cli/bundle/config/resources.ExternalLocation: + "comment": + "description": |- + User-provided free-form text description. + "credential_name": + "description": |- + Name of the storage credential used with this location. + "effective_enable_file_events": + "description": |- + The effective value of `enable_file_events` after applying server-side defaults. + "x-databricks-field-behaviors_output_only": |- + true + "enable_file_events": + "description": |- + Whether to enable file events on this external location. Default to `true`. Set to `false` to disable file events. + The actual applied value may differ due to server-side defaults; check `effective_enable_file_events` for the effective state. + "encryption_details": + "description": |- + Encryption options that apply to clients connecting to cloud storage. + "fallback": + "description": |- + Indicates whether fallback mode is enabled for this external location. When fallback mode is enabled, the access to the location falls back to cluster credentials if UC credentials are not sufficient. + "file_event_queue": + "description": |- + File event queue settings. If `enable_file_events` is not `false`, must be defined and have exactly one of the documented properties. + "name": + "description": |- + Name of the external location. + "read_only": + "description": |- + Indicates whether the external location is read-only. + "skip_validation": + "description": |- + Skips validation of the storage credential associated with the external location. 
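The type_test.go allowlist above records bundle-side fields that have no counterpart on the remote API type, so the parity test can skip them deliberately instead of failing on every regeneration. A rough sketch of how such an allowlist can gate a reflection-based parity check; the map contents echo the diff, everything else is illustrative:

package main

import (
	"fmt"
	"reflect"
	"slices"
)

// knownMissing mirrors knownMissingInRemoteType: input fields that are
// expected to be absent from the remote type.
var knownMissing = map[string][]string{
	"postgres_projects": {"budget_policy_id", "custom_tags"},
}

type input struct {
	BudgetPolicyId string   `json:"budget_policy_id"`
	CustomTags     []string `json:"custom_tags"`
	PgVersion      int      `json:"pg_version"`
}

type remote struct {
	PgVersion int `json:"pg_version"`
}

func main() {
	it, rt := reflect.TypeOf(input{}), reflect.TypeOf(remote{})
	for i := 0; i < it.NumField(); i++ {
		field := it.Field(i)
		if _, ok := rt.FieldByName(field.Name); ok {
			continue // present on both sides
		}
		tag := field.Tag.Get("json")
		if slices.Contains(knownMissing["postgres_projects"], tag) {
			fmt.Println("known gap:", tag)
		} else {
			fmt.Println("unexpected gap, fail the test:", tag)
		}
	}
}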
+ "url": + "description": |- + Path URL of the external location. github.com/databricks/cli/bundle/config/resources.Job: "budget_policy_id": "description": |- @@ -815,71 +857,6 @@ github.com/databricks/cli/bundle/config/resources.Pipeline: Usage policy of this pipeline. "x-databricks-preview": |- PRIVATE -github.com/databricks/cli/bundle/config/resources.PostgresBranch: - "expire_time": - "description": |- - Absolute expiration timestamp. When set, the branch will expire at this time. - "is_protected": - "description": |- - When set to true, protects the branch from deletion and reset. Associated compute endpoints and the project cannot be deleted while the branch is protected. - "no_expiry": - "description": |- - Explicitly disable expiration. When set to true, the branch will not expire. - If set to false, the request is invalid; provide either ttl or expire_time instead. - "source_branch": - "description": |- - The name of the source branch from which this branch was created (data lineage for point-in-time recovery). - If not specified, defaults to the project's default branch. - Format: projects/{project_id}/branches/{branch_id} - "source_branch_lsn": - "description": |- - The Log Sequence Number (LSN) on the source branch from which this branch was created. - "source_branch_time": - "description": |- - The point in time on the source branch from which this branch was created. - "ttl": - "description": |- - Relative time-to-live duration. When set, the branch will expire at creation_time + ttl. -github.com/databricks/cli/bundle/config/resources.PostgresEndpoint: - "autoscaling_limit_max_cu": - "description": |- - The maximum number of Compute Units. Minimum value is 0.5. - "autoscaling_limit_min_cu": - "description": |- - The minimum number of Compute Units. Minimum value is 0.5. - "disabled": - "description": |- - Whether to restrict connections to the compute endpoint. - Enabling this option schedules a suspend compute operation. - A disabled compute endpoint cannot be enabled by a connection or - console action. - "endpoint_type": - "description": |- - The endpoint type. A branch can only have one READ_WRITE endpoint. - "no_suspension": - "description": |- - When set to true, explicitly disables automatic suspension (never suspend). - Should be set to true when provided. - "settings": - "description": |- - A collection of settings for a compute endpoint. - "suspend_timeout_duration": - "description": |- - Duration of inactivity after which the compute endpoint is automatically suspended. - If specified should be between 60s and 604800s (1 minute to 1 week). -github.com/databricks/cli/bundle/config/resources.PostgresProject: - "default_endpoint_settings": - "description": |- - A collection of settings for a compute endpoint. - "display_name": - "description": |- - Human-readable project name. Length should be between 1 and 256 characters. - "history_retention_duration": - "description": |- - The number of seconds to retain the shared history for point in time recovery for all branches in this project. Value should be between 0s and 2592000s (up to 30 days). - "pg_version": - "description": |- - The major Postgres version number. Supported versions are 16 and 17. 
github.com/databricks/cli/bundle/config/resources.QualityMonitor: "assets_dir": "description": |- @@ -1025,6 +1002,7 @@ github.com/databricks/cli/bundle/config/resources.SqlWarehouse: - 2X-Large - 3X-Large - 4X-Large + - 5X-Large "creator_name": "description": |- warehouse creator name @@ -1512,6 +1490,60 @@ github.com/databricks/databricks-sdk-go/service/apps.GitSource: "tag": "description": |- Git tag to checkout. +github.com/databricks/databricks-sdk-go/service/catalog.AwsSqsQueue: + "managed_resource_id": + "description": |- + Unique identifier included in the name of file events managed cloud resources. + "x-databricks-field-behaviors_output_only": |- + true + "queue_url": + "description": |- + The AQS queue url in the format https://sqs.{region}.amazonaws.com/{account id}/{queue name}. + Only required for provided_sqs. +github.com/databricks/databricks-sdk-go/service/catalog.AzureQueueStorage: + "managed_resource_id": + "description": |- + Unique identifier included in the name of file events managed cloud resources. + "x-databricks-field-behaviors_output_only": |- + true + "queue_url": + "description": |- + The AQS queue url in the format https://{storage account}.queue.core.windows.net/{queue name} + Only required for provided_aqs. + "resource_group": + "description": |- + Optional resource group for the queue, event grid subscription, and external location storage + account. + Only required for locations with a service principal storage credential + "subscription_id": + "description": |- + Optional subscription id for the queue, event grid subscription, and external location storage + account. + Required for locations with a service principal storage credential +github.com/databricks/databricks-sdk-go/service/catalog.EncryptionDetails: + "_": + "description": |- + Encryption options that apply to clients connecting to cloud storage. + "sse_encryption_details": + "description": |- + Server-Side Encryption properties for clients communicating with AWS s3. +github.com/databricks/databricks-sdk-go/service/catalog.FileEventQueue: + "managed_aqs": {} + "managed_pubsub": {} + "managed_sqs": {} + "provided_aqs": {} + "provided_pubsub": {} + "provided_sqs": {} +github.com/databricks/databricks-sdk-go/service/catalog.GcpPubsub: + "managed_resource_id": + "description": |- + Unique identifier included in the name of file events managed cloud resources. + "x-databricks-field-behaviors_output_only": |- + true + "subscription_name": + "description": |- + The Pub/Sub subscription name in the format projects/{project}/subscriptions/{subscription name}. + Only required for provided_pubsub. github.com/databricks/databricks-sdk-go/service/catalog.MonitorCronSchedule: "pause_status": "description": |- @@ -1661,6 +1693,24 @@ github.com/databricks/databricks-sdk-go/service/catalog.RegisteredModelAlias: "version_num": "description": |- Integer version number of the model version to which this alias points. +github.com/databricks/databricks-sdk-go/service/catalog.SseEncryptionDetails: + "_": + "description": |- + Server-Side Encryption properties for clients communicating with AWS s3. + "algorithm": + "description": |- + Sets the value of the 'x-amz-server-side-encryption' header in S3 request. + "aws_kms_key_arn": + "description": |- + Optional. The ARN of the SSE-KMS key used with the S3 location, when algorithm = "SSE-KMS". + Sets the value of the 'x-amz-server-side-encryption-aws-kms-key-id' header. 
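The SseEncryptionDetails annotations restored above document the encryption block that external locations accept. A hedged sketch of populating it through the Go SDK types; struct and field names follow the refschema entries earlier in this patch, the algorithm enum is written as its raw string value, and every value is a placeholder:

package main

import (
	"fmt"

	"github.com/databricks/databricks-sdk-go/service/catalog"
)

func main() {
	loc := catalog.CreateExternalLocation{
		Name:           "finance-data",             // placeholder location name
		Url:            "s3://finance-bucket/data", // placeholder path URL
		CredentialName: "finance-cred",             // placeholder storage credential
		EncryptionDetails: &catalog.EncryptionDetails{
			SseEncryptionDetails: &catalog.SseEncryptionDetails{
				Algorithm:    "AWS_SSE_KMS",
				AwsKmsKeyArn: "arn:aws:kms:us-east-1:123456789012:key/example", // placeholder ARN
			},
		},
	}
	fmt.Printf("%+v\n", loc)
}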
+github.com/databricks/databricks-sdk-go/service/catalog.SseEncryptionDetailsAlgorithm: + "_": + "enum": + - |- + AWS_SSE_S3 + - |- + AWS_SSE_KMS github.com/databricks/databricks-sdk-go/service/catalog.VolumeType: "_": "enum": @@ -3061,6 +3111,9 @@ github.com/databricks/databricks-sdk-go/service/jobs.GitSource: This field is deprecated "x-databricks-preview": |- PRIVATE + "sparse_checkout": + "x-databricks-preview": |- + PRIVATE github.com/databricks/databricks-sdk-go/service/jobs.JobCluster: "job_cluster_key": "description": |- @@ -3591,6 +3644,10 @@ github.com/databricks/databricks-sdk-go/service/jobs.SparkSubmitTask: Command-line parameters passed to spark submit. Use [Task parameter variables](https://docs.databricks.com/jobs.html#parameter-variables) to set parameters containing information about job runs. +github.com/databricks/databricks-sdk-go/service/jobs.SparseCheckout: + "patterns": + "description": |- + List of patterns to include for sparse checkout. github.com/databricks/databricks-sdk-go/service/jobs.SqlTask: "alert": "description": |- @@ -4617,6 +4674,13 @@ github.com/databricks/databricks-sdk-go/service/postgres.EndpointType: ENDPOINT_TYPE_READ_WRITE - |- ENDPOINT_TYPE_READ_ONLY +github.com/databricks/databricks-sdk-go/service/postgres.ProjectCustomTag: + "key": + "description": |- + The key of the custom tag. + "value": + "description": |- + The value of the custom tag. github.com/databricks/databricks-sdk-go/service/postgres.ProjectDefaultEndpointSettings: "_": "description": |- diff --git a/bundle/internal/schema/annotations_openapi_overrides.yml b/bundle/internal/schema/annotations_openapi_overrides.yml index 9febc35111..4a2b7e8e67 100644 --- a/bundle/internal/schema/annotations_openapi_overrides.yml +++ b/bundle/internal/schema/annotations_openapi_overrides.yml @@ -260,6 +260,30 @@ github.com/databricks/cli/bundle/config/resources.DatabaseInstancePermissionLeve CAN_USE - |- CAN_MANAGE +github.com/databricks/cli/bundle/config/resources.ExternalLocation: +github.com/databricks/cli/bundle/config/resources.ExternalLocationGrantPrivilege: + "_": + "description": |- + Privilege to grant on an external location + "enum": + - |- + ALL_PRIVILEGES + - |- + CREATE_EXTERNAL_TABLE + - |- + CREATE_EXTERNAL_VOLUME + - |- + CREATE_MANAGED_STORAGE + - |- + CREATE_TABLE + - |- + CREATE_VOLUME + - |- + MANAGE + - |- + READ_FILES + - |- + WRITE_FILES github.com/databricks/cli/bundle/config/resources.Job: "_": "markdown_description": |- @@ -855,6 +879,56 @@ github.com/databricks/databricks-sdk-go/service/apps.ComputeStatus: "description": |- PLACEHOLDER "state": {} +github.com/databricks/databricks-sdk-go/service/catalog.AwsSqsQueue: + "managed_resource_id": + "description": |- + PLACEHOLDER + "queue_url": + "description": |- + PLACEHOLDER +github.com/databricks/databricks-sdk-go/service/catalog.AzureQueueStorage: + "managed_resource_id": + "description": |- + PLACEHOLDER + "queue_url": + "description": |- + PLACEHOLDER + "resource_group": + "description": |- + PLACEHOLDER + "subscription_id": + "description": |- + PLACEHOLDER +github.com/databricks/databricks-sdk-go/service/catalog.EncryptionDetails: + "sse_encryption_details": + "description": |- + PLACEHOLDER +github.com/databricks/databricks-sdk-go/service/catalog.FileEventQueue: + "managed_aqs": + "description": |- + PLACEHOLDER + "managed_pubsub": + "description": |- + PLACEHOLDER + "managed_sqs": + "description": |- + PLACEHOLDER + "provided_aqs": + "description": |- + PLACEHOLDER + "provided_pubsub": + "description": |- + 
PLACEHOLDER + "provided_sqs": + "description": |- + PLACEHOLDER +github.com/databricks/databricks-sdk-go/service/catalog.GcpPubsub: + "managed_resource_id": + "description": |- + PLACEHOLDER + "subscription_name": + "description": |- + PLACEHOLDER github.com/databricks/databricks-sdk-go/service/catalog.MonitorInferenceLog: "granularities": "description": |- @@ -876,6 +950,22 @@ github.com/databricks/databricks-sdk-go/service/catalog.RegisteredModelAlias: "schema_name": "description": |- PLACEHOLDER +github.com/databricks/databricks-sdk-go/service/catalog.SseEncryptionDetails: + "algorithm": + "description": |- + PLACEHOLDER + "aws_kms_key_arn": + "description": |- + PLACEHOLDER +github.com/databricks/databricks-sdk-go/service/catalog.SseEncryptionDetailsAlgorithm: + "_": + "description": |- + SSE algorithm to use for encrypting S3 objects + "enum": + - |- + AWS_SSE_KMS + - |- + AWS_SSE_S3 github.com/databricks/databricks-sdk-go/service/compute.AwsAttributes: "availability": "description": |- @@ -953,6 +1043,9 @@ github.com/databricks/databricks-sdk-go/service/jobs.GitSource: "git_snapshot": "description": |- PLACEHOLDER + "sparse_checkout": + "description": |- + PLACEHOLDER github.com/databricks/databricks-sdk-go/service/jobs.JobEnvironment: "spec": "description": |- @@ -1101,92 +1194,3 @@ github.com/databricks/databricks-sdk-go/service/sql.EndpointTags: "custom_tags": "description": |- PLACEHOLDER -github.com/databricks/databricks-sdk-go/service/catalog.AwsSqsQueue: - "managed_resource_id": - "description": |- - PLACEHOLDER - "queue_url": - "description": |- - PLACEHOLDER -github.com/databricks/databricks-sdk-go/service/catalog.AzureQueueStorage: - "managed_resource_id": - "description": |- - PLACEHOLDER - "queue_url": - "description": |- - PLACEHOLDER - "resource_group": - "description": |- - PLACEHOLDER - "subscription_id": - "description": |- - PLACEHOLDER -github.com/databricks/databricks-sdk-go/service/catalog.EncryptionDetails: - "sse_encryption_details": - "description": |- - PLACEHOLDER -github.com/databricks/databricks-sdk-go/service/catalog.FileEventQueue: - "managed_aqs": - "description": |- - PLACEHOLDER - "managed_pubsub": - "description": |- - PLACEHOLDER - "managed_sqs": - "description": |- - PLACEHOLDER - "provided_aqs": - "description": |- - PLACEHOLDER - "provided_pubsub": - "description": |- - PLACEHOLDER - "provided_sqs": - "description": |- - PLACEHOLDER -github.com/databricks/databricks-sdk-go/service/catalog.GcpPubsub: - "managed_resource_id": - "description": |- - PLACEHOLDER - "subscription_name": - "description": |- - PLACEHOLDER -github.com/databricks/databricks-sdk-go/service/catalog.SseEncryptionDetails: - "algorithm": - "description": |- - PLACEHOLDER - "aws_kms_key_arn": - "description": |- - PLACEHOLDER -github.com/databricks/databricks-sdk-go/service/catalog.SseEncryptionDetailsAlgorithm: - "_": - "description": |- - SSE algorithm to use for encrypting S3 objects - "enum": - - |- - AWS_SSE_KMS - - |- - AWS_SSE_S3 -github.com/databricks/cli/bundle/config/resources.ExternalLocationGrantPrivilege: - "_": - "description": |- - Privilege to grant on an external location - "enum": - - |- - ALL_PRIVILEGES - - |- - CREATE_EXTERNAL_TABLE - - |- - CREATE_EXTERNAL_VOLUME - - |- - CREATE_MANAGED_STORAGE - - |- - CREATE_TABLE - - |- - CREATE_VOLUME - - |- - MANAGE - - |- - READ_FILES - - |- - WRITE_FILES diff --git a/bundle/schema/jsonschema.json b/bundle/schema/jsonschema.json index 0d547a3b2d..c1c96d7713 100644 --- a/bundle/schema/jsonschema.json +++ 
b/bundle/schema/jsonschema.json @@ -209,6 +209,12 @@ "source_code_path": { "$ref": "#/$defs/string" }, + "space": { + "description": "Name of the space this app belongs to.", + "$ref": "#/$defs/string", + "x-databricks-preview": "PRIVATE", + "doNotSuggest": true + }, "usage_policy_id": { "$ref": "#/$defs/string" }, @@ -1799,6 +1805,12 @@ { "type": "object", "properties": { + "budget_policy_id": { + "$ref": "#/$defs/string" + }, + "custom_tags": { + "$ref": "#/$defs/slice/github.com/databricks/databricks-sdk-go/service/postgres.ProjectCustomTag" + }, "default_endpoint_settings": { "$ref": "#/$defs/github.com/databricks/databricks-sdk-go/service/postgres.ProjectDefaultEndpointSettings" }, @@ -2181,7 +2193,7 @@ "$ref": "#/$defs/github.com/databricks/databricks-sdk-go/service/sql.Channel" }, "cluster_size": { - "description": "Size of the clusters allocated for this warehouse.\nIncreasing the size of a spark cluster allows you to run larger queries on\nit. If you want to increase the number of concurrent queries, please tune\nmax_num_clusters.\n\nSupported values:\n- 2X-Small\n- X-Small\n- Small\n- Medium\n- Large\n- X-Large\n- 2X-Large\n- 3X-Large\n- 4X-Large", + "description": "Size of the clusters allocated for this warehouse.\nIncreasing the size of a spark cluster allows you to run larger queries on\nit. If you want to increase the number of concurrent queries, please tune\nmax_num_clusters.\n\nSupported values:\n- 2X-Small\n- X-Small\n- Small\n- Medium\n- Large\n- X-Large\n- 2X-Large\n- 3X-Large\n- 4X-Large\n- 5X-Large", "$ref": "#/$defs/string" }, "creator_name": { @@ -3858,9 +3870,6 @@ { "type": "object", "properties": { - "managed_resource_id": { - "$ref": "#/$defs/string" - }, "queue_url": { "$ref": "#/$defs/string" } @@ -3878,9 +3887,6 @@ { "type": "object", "properties": { - "managed_resource_id": { - "$ref": "#/$defs/string" - }, "queue_url": { "$ref": "#/$defs/string" }, @@ -3903,6 +3909,7 @@ "oneOf": [ { "type": "object", + "description": "Encryption options that apply to clients connecting to cloud storage.", "properties": { "sse_encryption_details": { "$ref": "#/$defs/github.com/databricks/databricks-sdk-go/service/catalog.SseEncryptionDetails" @@ -3953,9 +3960,6 @@ { "type": "object", "properties": { - "managed_resource_id": { - "$ref": "#/$defs/string" - }, "subscription_name": { "$ref": "#/$defs/string" } @@ -4277,6 +4281,7 @@ "oneOf": [ { "type": "object", + "description": "Server-Side Encryption properties for clients communicating with AWS S3.", "properties": { "algorithm": { "$ref": "#/$defs/github.com/databricks/databricks-sdk-go/service/catalog.SseEncryptionDetailsAlgorithm" }, @@ -4299,6 +4304,6 @@ "type": "string", "description": "SSE algorithm to use for encrypting S3 objects", "enum": [ - "AWS_SSE_KMS", - "AWS_SSE_S3" + "AWS_SSE_S3", + "AWS_SSE_KMS" ] @@ -6164,6 +6171,11 @@ "git_url": { "description": "URL of the repository to be cloned by this job.", "$ref": "#/$defs/string" + }, + "sparse_checkout": { + "$ref": "#/$defs/github.com/databricks/databricks-sdk-go/service/jobs.SparseCheckout", + "x-databricks-preview": "PRIVATE", + "doNotSuggest": true } }, "additionalProperties": false, @@ -7086,6 +7098,24 @@ } ] }, + "jobs.SparseCheckout": { + "oneOf": [ + { + "type": "object", + "properties": { + "patterns": { + "description": "List of patterns to include for sparse checkout.", + "$ref": "#/$defs/slice/string" + } + }, + "additionalProperties": false + }, + { + "type": "string", + "pattern": "\\$\\{(var(\\.[a-zA-Z]+([-_]?[a-zA-Z0-9]+)*(\\[[0-9]+\\])*)+)\\}" + } + ] + },
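sparse_checkout enters jobs.GitSource above as a PRIVATE-preview object with a single patterns list. A sketch of how it would sit in a job definition, assuming a hypothetical repository and path patterns:

    resources:
      jobs:
        my_job:
          git_source:
            git_url: https://github.com/my-org/my-repo  # hypothetical repository
            git_provider: gitHub
            git_branch: main
            sparse_checkout:
              patterns:            # only matching paths are checked out
                - notebooks/
                - src/jobs/

Because the property is tagged x-databricks-preview: PRIVATE with doNotSuggest: true, schema-aware editors accept it but keep it out of completion.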
"jobs.SqlTask": { "oneOf": [ { @@ -8889,6 +8919,28 @@ } ] }, + "postgres.ProjectCustomTag": { + "oneOf": [ + { + "type": "object", + "properties": { + "key": { + "description": "The key of the custom tag.", + "$ref": "#/$defs/string" + }, + "value": { + "description": "The value of the custom tag.", + "$ref": "#/$defs/string" + } + }, + "additionalProperties": false + }, + { + "type": "string", + "pattern": "\\$\\{(var(\\.[a-zA-Z]+([-_]?[a-zA-Z0-9]+)*(\\[[0-9]+\\])*)+)\\}" + } + ] + }, "postgres.ProjectDefaultEndpointSettings": { "oneOf": [ { @@ -11689,6 +11741,20 @@ } ] }, + "postgres.ProjectCustomTag": { + "oneOf": [ + { + "type": "array", + "items": { + "$ref": "#/$defs/github.com/databricks/databricks-sdk-go/service/postgres.ProjectCustomTag" + } + }, + { + "type": "string", + "pattern": "\\$\\{(var(\\.[a-zA-Z]+([-_]?[a-zA-Z0-9]+)*(\\[[0-9]+\\])*)+)\\}" + } + ] + }, "serving.AiGatewayRateLimit": { "oneOf": [ { diff --git a/bundle/schema/jsonschema_for_docs.json b/bundle/schema/jsonschema_for_docs.json index 5d6ca3b1cd..66efb25787 100644 --- a/bundle/schema/jsonschema_for_docs.json +++ b/bundle/schema/jsonschema_for_docs.json @@ -162,6 +162,12 @@ "$ref": "#/$defs/string", "x-since-version": "v0.239.0" }, + "space": { + "description": "Name of the space this app belongs to.", + "$ref": "#/$defs/string", + "x-databricks-preview": "PRIVATE", + "doNotSuggest": true + }, "usage_policy_id": { "$ref": "#/$defs/string", "x-since-version": "v0.283.0" @@ -811,6 +817,98 @@ "CAN_MANAGE" ] }, + "resources.ExternalLocation": { + "type": "object", + "properties": { + "comment": { + "$ref": "#/$defs/string", + "x-since-version": "v0.289.0" + }, + "credential_name": { + "$ref": "#/$defs/string", + "x-since-version": "v0.289.0" + }, + "enable_file_events": { + "$ref": "#/$defs/bool", + "x-since-version": "v0.289.0" + }, + "encryption_details": { + "$ref": "#/$defs/github.com/databricks/databricks-sdk-go/service/catalog.EncryptionDetails", + "x-since-version": "v0.289.0" + }, + "fallback": { + "$ref": "#/$defs/bool", + "x-since-version": "v0.289.0" + }, + "file_event_queue": { + "$ref": "#/$defs/github.com/databricks/databricks-sdk-go/service/catalog.FileEventQueue", + "x-since-version": "v0.289.0" + }, + "grants": { + "$ref": "#/$defs/slice/github.com/databricks/cli/bundle/config/resources.ExternalLocationGrant", + "x-since-version": "v0.289.0" + }, + "lifecycle": { + "$ref": "#/$defs/github.com/databricks/cli/bundle/config/resources.Lifecycle", + "x-since-version": "v0.289.0" + }, + "name": { + "$ref": "#/$defs/string", + "x-since-version": "v0.289.0" + }, + "read_only": { + "$ref": "#/$defs/bool", + "x-since-version": "v0.289.0" + }, + "skip_validation": { + "$ref": "#/$defs/bool", + "x-since-version": "v0.289.0" + }, + "url": { + "$ref": "#/$defs/string", + "x-since-version": "v0.289.0" + } + }, + "additionalProperties": false, + "required": [ + "credential_name", + "name", + "url" + ] + }, + "resources.ExternalLocationGrant": { + "type": "object", + "properties": { + "principal": { + "$ref": "#/$defs/string", + "x-since-version": "v0.289.0" + }, + "privileges": { + "$ref": "#/$defs/slice/github.com/databricks/cli/bundle/config/resources.ExternalLocationGrantPrivilege", + "x-since-version": "v0.289.0" + } + }, + "additionalProperties": false, + "required": [ + "privileges", + "principal" + ] + }, + "resources.ExternalLocationGrantPrivilege": { + "type": "string", + "description": "Privilege to grant on an external location", + "enum": [ + "ALL_PRIVILEGES", + "CREATE_EXTERNAL_TABLE", + 
"CREATE_EXTERNAL_VOLUME", + "CREATE_MANAGED_STORAGE", + "CREATE_TABLE", + "CREATE_VOLUME", + "MANAGE", + "READ_FILES", + "WRITE_FILES" + ] + }, "resources.Grant": { "type": "object", "properties": { @@ -1573,6 +1671,12 @@ "resources.PostgresProject": { "type": "object", "properties": { + "budget_policy_id": { + "$ref": "#/$defs/string" + }, + "custom_tags": { + "$ref": "#/$defs/slice/github.com/databricks/databricks-sdk-go/service/postgres.ProjectCustomTag" + }, "default_endpoint_settings": { "$ref": "#/$defs/github.com/databricks/databricks-sdk-go/service/postgres.ProjectDefaultEndpointSettings", "x-since-version": "v0.287.0" @@ -1941,7 +2045,7 @@ "x-since-version": "v0.260.0" }, "cluster_size": { - "description": "Size of the clusters allocated for this warehouse.\nIncreasing the size of a spark cluster allows you to run larger queries on\nit. If you want to increase the number of concurrent queries, please tune\nmax_num_clusters.\n\nSupported values:\n- 2X-Small\n- X-Small\n- Small\n- Medium\n- Large\n- X-Large\n- 2X-Large\n- 3X-Large\n- 4X-Large", + "description": "Size of the clusters allocated for this warehouse.\nIncreasing the size of a spark cluster allows you to run larger queries on\nit. If you want to increase the number of concurrent queries, please tune\nmax_num_clusters.\n\nSupported values:\n- 2X-Small\n- X-Small\n- Small\n- Medium\n- Large\n- X-Large\n- 2X-Large\n- 3X-Large\n- 4X-Large\n- 5X-Large", "$ref": "#/$defs/string", "x-since-version": "v0.260.0" }, @@ -2591,6 +2695,10 @@ "markdownDescription": "The experiment definitions for the bundle, where each key is the name of the experiment. See [experiments](https://docs.databricks.com/dev-tools/bundles/resources.html#experiments).", "x-since-version": "v0.229.0" }, + "external_locations": { + "$ref": "#/$defs/map/github.com/databricks/cli/bundle/config/resources.ExternalLocation", + "x-since-version": "v0.289.0" + }, "jobs": { "description": "The job definitions for the bundle, where each key is the name of the job.", "$ref": "#/$defs/map/github.com/databricks/cli/bundle/config/resources.Job", @@ -3361,6 +3469,85 @@ }, "additionalProperties": false }, + "catalog.AwsSqsQueue": { + "type": "object", + "properties": { + "queue_url": { + "$ref": "#/$defs/string", + "x-since-version": "v0.289.0" + } + }, + "additionalProperties": false + }, + "catalog.AzureQueueStorage": { + "type": "object", + "properties": { + "queue_url": { + "$ref": "#/$defs/string", + "x-since-version": "v0.289.0" + }, + "resource_group": { + "$ref": "#/$defs/string", + "x-since-version": "v0.289.0" + }, + "subscription_id": { + "$ref": "#/$defs/string", + "x-since-version": "v0.289.0" + } + }, + "additionalProperties": false + }, + "catalog.EncryptionDetails": { + "type": "object", + "description": "Encryption options that apply to clients connecting to cloud storage.", + "properties": { + "sse_encryption_details": { + "$ref": "#/$defs/github.com/databricks/databricks-sdk-go/service/catalog.SseEncryptionDetails", + "x-since-version": "v0.289.0" + } + }, + "additionalProperties": false + }, + "catalog.FileEventQueue": { + "type": "object", + "properties": { + "managed_aqs": { + "$ref": "#/$defs/github.com/databricks/databricks-sdk-go/service/catalog.AzureQueueStorage", + "x-since-version": "v0.289.0" + }, + "managed_pubsub": { + "$ref": "#/$defs/github.com/databricks/databricks-sdk-go/service/catalog.GcpPubsub", + "x-since-version": "v0.289.0" + }, + "managed_sqs": { + "$ref": "#/$defs/github.com/databricks/databricks-sdk-go/service/catalog.AwsSqsQueue", 
+ "x-since-version": "v0.289.0" + }, + "provided_aqs": { + "$ref": "#/$defs/github.com/databricks/databricks-sdk-go/service/catalog.AzureQueueStorage", + "x-since-version": "v0.289.0" + }, + "provided_pubsub": { + "$ref": "#/$defs/github.com/databricks/databricks-sdk-go/service/catalog.GcpPubsub", + "x-since-version": "v0.289.0" + }, + "provided_sqs": { + "$ref": "#/$defs/github.com/databricks/databricks-sdk-go/service/catalog.AwsSqsQueue", + "x-since-version": "v0.289.0" + } + }, + "additionalProperties": false + }, + "catalog.GcpPubsub": { + "type": "object", + "properties": { + "subscription_name": { + "$ref": "#/$defs/string", + "x-since-version": "v0.289.0" + } + }, + "additionalProperties": false + }, "catalog.MonitorCronSchedule": { "type": "object", "properties": { @@ -3597,6 +3784,31 @@ }, "additionalProperties": false }, + "catalog.SseEncryptionDetails": { + "type": "object", + "description": "Server-Side Encryption properties for clients communicating with AWS s3.", + "properties": { + "algorithm": { + "$ref": "#/$defs/github.com/databricks/databricks-sdk-go/service/catalog.SseEncryptionDetailsAlgorithm", + "x-since-version": "v0.289.0" + }, + "aws_kms_key_arn": { + "$ref": "#/$defs/string", + "x-since-version": "v0.289.0" + } + }, + "additionalProperties": false + }, + "catalog.SseEncryptionDetailsAlgorithm": { + "type": "string", + "description": "SSE algorithm to use for encrypting S3 objects", + "enum": [ + "AWS_SSE_S3", + "AWS_SSE_KMS", + "AWS_SSE_KMS", + "AWS_SSE_S3" + ] + }, "catalog.VolumeType": { "type": "string", "enum": [ @@ -4020,7 +4232,8 @@ "properties": { "base_environment": { "description": "The `base_environment` key refers to an `env.yaml` file that specifies an environment version and a collection of dependencies required for the environment setup.\nThis `env.yaml` file may itself include a `base_environment` reference pointing to another `env_1.yaml` file. However, when used as a base environment, `env_1.yaml` (or further nested references) will not be processed or included in the final environment, meaning that the resolution of `base_environment` references is not recursive.", - "$ref": "#/$defs/string" + "$ref": "#/$defs/string", + "x-since-version": "v0.289.0" }, "client": { "description": "Use `environment_version` instead.", @@ -4683,7 +4896,8 @@ "properties": { "hardware_accelerator": { "description": "Hardware accelerator configuration for Serverless GPU workloads.", - "$ref": "#/$defs/github.com/databricks/databricks-sdk-go/service/compute.HardwareAcceleratorType" + "$ref": "#/$defs/github.com/databricks/databricks-sdk-go/service/compute.HardwareAcceleratorType", + "x-since-version": "v0.288.0" } }, "additionalProperties": false @@ -4810,7 +5024,8 @@ "description": "Dashboard task parameters. Used to apply dashboard filter values during dashboard task execution. Parameter values get applied to any dashboard filters that have a matching URL identifier as the parameter key.\nThe parameter value format is dependent on the filter type:\n- For text and single-select filters, provide a single value (e.g. `\"value\"`)\n- For date and datetime filters, provide the value in ISO 8601 format (e.g. `\"2000-01-01T00:00:00\"`)\n- For multi-select filters, provide a JSON array of values (e.g. `\"[\\\"value1\\\",\\\"value2\\\"]\"`)\n- For range and date range filters, provide a JSON object with `start` and `end` (e.g. 
`\"{\\\"start\\\":\\\"1\\\",\\\"end\\\":\\\"10\\\"}\"`)", "$ref": "#/$defs/map/string", "x-databricks-preview": "PRIVATE", - "doNotSuggest": true + "doNotSuggest": true, + "x-since-version": "v0.289.0" }, "subscription": { "$ref": "#/$defs/github.com/databricks/databricks-sdk-go/service/jobs.Subscription", @@ -5058,6 +5273,11 @@ "description": "URL of the repository to be cloned by this job.", "$ref": "#/$defs/string", "x-since-version": "v0.229.0" + }, + "sparse_checkout": { + "$ref": "#/$defs/github.com/databricks/databricks-sdk-go/service/jobs.SparseCheckout", + "x-databricks-preview": "PRIVATE", + "doNotSuggest": true } }, "additionalProperties": false, @@ -5771,6 +5991,16 @@ }, "additionalProperties": false }, + "jobs.SparseCheckout": { + "type": "object", + "properties": { + "patterns": { + "description": "List of patterns to include for sparse checkout.", + "$ref": "#/$defs/slice/string" + } + }, + "additionalProperties": false + }, "jobs.SqlTask": { "type": "object", "properties": { @@ -5993,7 +6223,8 @@ }, "compute": { "description": "Task level compute configuration.", - "$ref": "#/$defs/github.com/databricks/databricks-sdk-go/service/jobs.Compute" + "$ref": "#/$defs/github.com/databricks/databricks-sdk-go/service/jobs.Compute", + "x-since-version": "v0.288.0" }, "condition_task": { "description": "The task evaluates a condition that can be used to control the execution of other tasks when the `condition_task` field is present.\nThe condition task does not require a cluster to execute and does not support retries or notifications.", @@ -7298,6 +7529,20 @@ "ENDPOINT_TYPE_READ_ONLY" ] }, + "postgres.ProjectCustomTag": { + "type": "object", + "properties": { + "key": { + "description": "The key of the custom tag.", + "$ref": "#/$defs/string" + }, + "value": { + "description": "The value of the custom tag.", + "$ref": "#/$defs/string" + } + }, + "additionalProperties": false + }, "postgres.ProjectDefaultEndpointSettings": { "type": "object", "description": "A collection of settings for a compute endpoint.", @@ -8068,7 +8313,8 @@ "properties": { "burst_scaling_enabled": { "description": "Whether burst scaling is enabled. When enabled (default), the endpoint can automatically\nscale up beyond provisioned capacity to handle traffic spikes. When disabled, the endpoint\nmaintains fixed capacity at provisioned_model_units.", - "$ref": "#/$defs/bool" + "$ref": "#/$defs/bool", + "x-since-version": "v0.288.0" }, "entity_name": { "description": "The name of the entity to be served. The entity may be a model in the Databricks Model Registry, a model in the Unity Catalog (UC), or a function of type FEATURE_SPEC in the UC. If it is a UC object, the full name of the object should be given in the form of **catalog_name.schema_name.model_name**.", @@ -8147,7 +8393,8 @@ "properties": { "burst_scaling_enabled": { "description": "Whether burst scaling is enabled. When enabled (default), the endpoint can automatically\nscale up beyond provisioned capacity to handle traffic spikes. When disabled, the endpoint\nmaintains fixed capacity at provisioned_model_units.", - "$ref": "#/$defs/bool" + "$ref": "#/$defs/bool", + "x-since-version": "v0.288.0" }, "environment_vars": { "description": "An object containing a set of optional, user-specified environment variable key-value pairs used for serving this entity. Note: this is an experimental feature and subject to change. 
Example entity environment variables that refer to Databricks secrets: `{\"OPENAI_API_KEY\": \"{{secrets/my_scope/my_key}}\", \"DATABRICKS_TOKEN\": \"{{secrets/my_scope2/my_key2}}\"}`", @@ -8619,6 +8866,12 @@ "$ref": "#/$defs/github.com/databricks/cli/bundle/config/resources.DatabaseInstance" } }, + "resources.ExternalLocation": { + "type": "object", + "additionalProperties": { + "$ref": "#/$defs/github.com/databricks/cli/bundle/config/resources.ExternalLocation" + } + }, "resources.Job": { "type": "object", "additionalProperties": { @@ -8811,6 +9064,18 @@ "$ref": "#/$defs/github.com/databricks/cli/bundle/config/resources.DatabaseInstancePermission" } }, + "resources.ExternalLocationGrant": { + "type": "array", + "items": { + "$ref": "#/$defs/github.com/databricks/cli/bundle/config/resources.ExternalLocationGrant" + } + }, + "resources.ExternalLocationGrantPrivilege": { + "type": "array", + "items": { + "$ref": "#/$defs/github.com/databricks/cli/bundle/config/resources.ExternalLocationGrantPrivilege" + } + }, "resources.Grant": { "type": "array", "items": { @@ -9062,6 +9327,12 @@ "$ref": "#/$defs/github.com/databricks/databricks-sdk-go/service/pipelines.SourceConfig" } }, + "postgres.ProjectCustomTag": { + "type": "array", + "items": { + "$ref": "#/$defs/github.com/databricks/databricks-sdk-go/service/postgres.ProjectCustomTag" + } + }, "serving.AiGatewayRateLimit": { "type": "array", "items": { diff --git a/cmd/account/cmd.go b/cmd/account/cmd.go index 6d9b7300d2..22f8ba8c40 100644 --- a/cmd/account/cmd.go +++ b/cmd/account/cmd.go @@ -12,6 +12,7 @@ import ( credentials "github.com/databricks/cli/cmd/account/credentials" custom_app_integration "github.com/databricks/cli/cmd/account/custom-app-integration" encryption_keys "github.com/databricks/cli/cmd/account/encryption-keys" + endpoints "github.com/databricks/cli/cmd/account/endpoints" account_federation_policy "github.com/databricks/cli/cmd/account/federation-policy" account_groups_v2 "github.com/databricks/cli/cmd/account/groups-v2" account_iam_v2 "github.com/databricks/cli/cmd/account/iam-v2" @@ -57,6 +58,7 @@ func New() *cobra.Command { cmd.AddCommand(credentials.New()) cmd.AddCommand(custom_app_integration.New()) cmd.AddCommand(encryption_keys.New()) + cmd.AddCommand(endpoints.New()) cmd.AddCommand(account_federation_policy.New()) cmd.AddCommand(account_groups_v2.New()) cmd.AddCommand(account_iam_v2.New()) diff --git a/cmd/account/endpoints/endpoints.go b/cmd/account/endpoints/endpoints.go new file mode 100755 index 0000000000..c8e1d9803f --- /dev/null +++ b/cmd/account/endpoints/endpoints.go @@ -0,0 +1,306 @@ +// Code generated from OpenAPI specs by Databricks SDK Generator. DO NOT EDIT. + +package endpoints + +import ( + "fmt" + + "github.com/databricks/cli/cmd/root" + "github.com/databricks/cli/libs/cmdctx" + "github.com/databricks/cli/libs/cmdio" + "github.com/databricks/cli/libs/flags" + "github.com/databricks/databricks-sdk-go/service/networking" + "github.com/spf13/cobra" +) + +// Slice with functions to override default command behavior. +// Functions can be added from the `init()` function in manually curated files in this directory. +var cmdOverrides []func(*cobra.Command) + +func New() *cobra.Command { + cmd := &cobra.Command{ + Use: "endpoints", + Short: `These APIs manage endpoint configurations for this account.`, + Long: `These APIs manage endpoint configurations for this account.`, + GroupID: "provisioning", + + // This service is being previewed; hide from help output. 
+ Hidden: true, + RunE: root.ReportUnknownSubcommand, + } + + // Add methods + cmd.AddCommand(newCreateEndpoint()) + cmd.AddCommand(newDeleteEndpoint()) + cmd.AddCommand(newGetEndpoint()) + cmd.AddCommand(newListEndpoints()) + + // Apply optional overrides to this command. + for _, fn := range cmdOverrides { + fn(cmd) + } + + return cmd +} + +// start create-endpoint command + +// Slice with functions to override default command behavior. +// Functions can be added from the `init()` function in manually curated files in this directory. +var createEndpointOverrides []func( + *cobra.Command, + *networking.CreateEndpointRequest, +) + +func newCreateEndpoint() *cobra.Command { + cmd := &cobra.Command{} + + var createEndpointReq networking.CreateEndpointRequest + createEndpointReq.Endpoint = networking.Endpoint{} + var createEndpointJson flags.JsonFlag + + cmd.Flags().Var(&createEndpointJson, "json", `either inline JSON string or @path/to/file.json with request body`) + + // TODO: complex arg: azure_private_endpoint_info + + cmd.Use = "create-endpoint PARENT DISPLAY_NAME REGION" + cmd.Short = `Create a network endpoint.` + cmd.Long = `Create a network endpoint. + + Creates a new network connectivity endpoint that enables private connectivity + between your network resources and Databricks services. + + After creation, the endpoint is initially in the PENDING state. The Databricks + endpoint service automatically reviews and approves the endpoint within a few + minutes. Use the GET method to retrieve the latest endpoint state. + + An endpoint can be used only after it reaches the APPROVED state. + + Arguments: + PARENT: + DISPLAY_NAME: The human-readable display name of this endpoint. The input should conform + to RFC-1034, which restricts to letters, numbers, and hyphens, with the + first character a letter, the last a letter or a number, and a 63 + character maximum. + REGION: The cloud provider region where this endpoint is located.` + + cmd.Annotations = make(map[string]string) + + cmd.Args = func(cmd *cobra.Command, args []string) error { + if cmd.Flags().Changed("json") { + err := root.ExactArgs(1)(cmd, args) + if err != nil { + return fmt.Errorf("when --json flag is specified, provide only PARENT as positional arguments. Provide 'display_name', 'region' in your JSON input") + } + return nil + } + check := root.ExactArgs(3) + return check(cmd, args) + } + + cmd.PreRunE = root.MustAccountClient + cmd.RunE = func(cmd *cobra.Command, args []string) (err error) { + ctx := cmd.Context() + a := cmdctx.AccountClient(ctx) + + if cmd.Flags().Changed("json") { + diags := createEndpointJson.Unmarshal(&createEndpointReq.Endpoint) + if diags.HasError() { + return diags.Error() + } + if len(diags) > 0 { + err := cmdio.RenderDiagnostics(ctx, diags) + if err != nil { + return err + } + } + } + createEndpointReq.Parent = args[0] + if !cmd.Flags().Changed("json") { + createEndpointReq.Endpoint.DisplayName = args[1] + } + if !cmd.Flags().Changed("json") { + createEndpointReq.Endpoint.Region = args[2] + } + + response, err := a.Endpoints.CreateEndpoint(ctx, createEndpointReq) + if err != nil { + return err + } + return cmdio.Render(ctx, response) + } + + // Disable completions since they are not applicable. + // Can be overridden by manual implementation in `override.go`. + cmd.ValidArgsFunction = cobra.NoFileCompletions + + // Apply optional overrides to this command. 
+ for _, fn := range createEndpointOverrides { + fn(cmd, &createEndpointReq) + } + + return cmd +} + +// start delete-endpoint command + +// Slice with functions to override default command behavior. +// Functions can be added from the `init()` function in manually curated files in this directory. +var deleteEndpointOverrides []func( + *cobra.Command, + *networking.DeleteEndpointRequest, +) + +func newDeleteEndpoint() *cobra.Command { + cmd := &cobra.Command{} + + var deleteEndpointReq networking.DeleteEndpointRequest + + cmd.Use = "delete-endpoint NAME" + cmd.Short = `Delete a network endpoint.` + cmd.Long = `Delete a network endpoint. + + Deletes a network endpoint. This will remove the endpoint configuration from + Databricks. Depending on the endpoint type and use case, you may also need to + delete corresponding network resources in your cloud provider account.` + + cmd.Annotations = make(map[string]string) + + cmd.Args = func(cmd *cobra.Command, args []string) error { + check := root.ExactArgs(1) + return check(cmd, args) + } + + cmd.PreRunE = root.MustAccountClient + cmd.RunE = func(cmd *cobra.Command, args []string) (err error) { + ctx := cmd.Context() + a := cmdctx.AccountClient(ctx) + + deleteEndpointReq.Name = args[0] + + err = a.Endpoints.DeleteEndpoint(ctx, deleteEndpointReq) + if err != nil { + return err + } + return nil + } + + // Disable completions since they are not applicable. + // Can be overridden by manual implementation in `override.go`. + cmd.ValidArgsFunction = cobra.NoFileCompletions + + // Apply optional overrides to this command. + for _, fn := range deleteEndpointOverrides { + fn(cmd, &deleteEndpointReq) + } + + return cmd +} + +// start get-endpoint command + +// Slice with functions to override default command behavior. +// Functions can be added from the `init()` function in manually curated files in this directory. +var getEndpointOverrides []func( + *cobra.Command, + *networking.GetEndpointRequest, +) + +func newGetEndpoint() *cobra.Command { + cmd := &cobra.Command{} + + var getEndpointReq networking.GetEndpointRequest + + cmd.Use = "get-endpoint NAME" + cmd.Short = `Get a network endpoint.` + cmd.Long = `Get a network endpoint. + + Gets details of a specific network endpoint.` + + cmd.Annotations = make(map[string]string) + + cmd.Args = func(cmd *cobra.Command, args []string) error { + check := root.ExactArgs(1) + return check(cmd, args) + } + + cmd.PreRunE = root.MustAccountClient + cmd.RunE = func(cmd *cobra.Command, args []string) (err error) { + ctx := cmd.Context() + a := cmdctx.AccountClient(ctx) + + getEndpointReq.Name = args[0] + + response, err := a.Endpoints.GetEndpoint(ctx, getEndpointReq) + if err != nil { + return err + } + return cmdio.Render(ctx, response) + } + + // Disable completions since they are not applicable. + // Can be overridden by manual implementation in `override.go`. + cmd.ValidArgsFunction = cobra.NoFileCompletions + + // Apply optional overrides to this command. + for _, fn := range getEndpointOverrides { + fn(cmd, &getEndpointReq) + } + + return cmd +} + +// start list-endpoints command + +// Slice with functions to override default command behavior. +// Functions can be added from the `init()` function in manually curated files in this directory. 
+var listEndpointsOverrides []func( + *cobra.Command, + *networking.ListEndpointsRequest, +) + +func newListEndpoints() *cobra.Command { + cmd := &cobra.Command{} + + var listEndpointsReq networking.ListEndpointsRequest + + cmd.Flags().IntVar(&listEndpointsReq.PageSize, "page-size", listEndpointsReq.PageSize, ``) + cmd.Flags().StringVar(&listEndpointsReq.PageToken, "page-token", listEndpointsReq.PageToken, ``) + + cmd.Use = "list-endpoints PARENT" + cmd.Short = `List network endpoints.` + cmd.Long = `List network endpoints. + + Lists all network connectivity endpoints for the account.` + + cmd.Annotations = make(map[string]string) + + cmd.Args = func(cmd *cobra.Command, args []string) error { + check := root.ExactArgs(1) + return check(cmd, args) + } + + cmd.PreRunE = root.MustAccountClient + cmd.RunE = func(cmd *cobra.Command, args []string) (err error) { + ctx := cmd.Context() + a := cmdctx.AccountClient(ctx) + + listEndpointsReq.Parent = args[0] + + response := a.Endpoints.ListEndpoints(ctx, listEndpointsReq) + return cmdio.RenderIterator(ctx, response) + } + + // Disable completions since they are not applicable. + // Can be overridden by manual implementation in `override.go`. + cmd.ValidArgsFunction = cobra.NoFileCompletions + + // Apply optional overrides to this command. + for _, fn := range listEndpointsOverrides { + fn(cmd, &listEndpointsReq) + } + + return cmd +} + +// end service Endpoints diff --git a/cmd/account/workspaces/workspaces.go b/cmd/account/workspaces/workspaces.go index 8c4445f002..0a738dfe17 100755 --- a/cmd/account/workspaces/workspaces.go +++ b/cmd/account/workspaces/workspaces.go @@ -73,7 +73,7 @@ func newCreate() *cobra.Command { cmd.Flags().Var(&createJson, "json", `either inline JSON string or @path/to/file.json with request body`) cmd.Flags().StringVar(&createReq.AwsRegion, "aws-region", createReq.AwsRegion, ``) - cmd.Flags().StringVar(&createReq.Cloud, "cloud", createReq.Cloud, `The cloud name.`) + cmd.Flags().StringVar(&createReq.Cloud, "cloud", createReq.Cloud, `DEPRECATED: This field is being ignored by the server and will be removed in the future.`) // TODO: complex arg: cloud_resource_container cmd.Flags().Var(&createReq.ComputeMode, "compute-mode", `If the compute mode is SERVERLESS, a serverless workspace is created that comes pre-configured with serverless compute and default storage, providing a fully-managed, enterprise-ready SaaS experience. 
Supported values: [HYBRID, SERVERLESS]`) cmd.Flags().StringVar(&createReq.CredentialsId, "credentials-id", createReq.CredentialsId, `ID of the workspace's credential configuration object.`) diff --git a/cmd/workspace/apps/apps.go b/cmd/workspace/apps/apps.go index 174c5b5156..45c8d79631 100755 --- a/cmd/workspace/apps/apps.go +++ b/cmd/workspace/apps/apps.go @@ -4,12 +4,15 @@ package apps import ( "fmt" + "strings" "time" "github.com/databricks/cli/cmd/root" "github.com/databricks/cli/libs/cmdctx" "github.com/databricks/cli/libs/cmdio" "github.com/databricks/cli/libs/flags" + "github.com/databricks/databricks-sdk-go/common/types/fieldmask" + "github.com/databricks/databricks-sdk-go/experimental/api" "github.com/databricks/databricks-sdk-go/service/apps" "github.com/spf13/cobra" ) @@ -31,21 +34,27 @@ func New() *cobra.Command { // Add methods cmd.AddCommand(newCreate()) + cmd.AddCommand(newCreateSpace()) cmd.AddCommand(newCreateUpdate()) cmd.AddCommand(newDelete()) + cmd.AddCommand(newDeleteSpace()) cmd.AddCommand(newDeploy()) cmd.AddCommand(newGet()) cmd.AddCommand(newGetDeployment()) cmd.AddCommand(newGetPermissionLevels()) cmd.AddCommand(newGetPermissions()) + cmd.AddCommand(newGetSpace()) + cmd.AddCommand(newGetSpaceOperation()) cmd.AddCommand(newGetUpdate()) cmd.AddCommand(newList()) cmd.AddCommand(newListDeployments()) + cmd.AddCommand(newListSpaces()) cmd.AddCommand(newSetPermissions()) cmd.AddCommand(newStart()) cmd.AddCommand(newStop()) cmd.AddCommand(newUpdate()) cmd.AddCommand(newUpdatePermissions()) + cmd.AddCommand(newUpdateSpace()) // Apply optional overrides to this command. for _, fn := range cmdOverrides { @@ -90,6 +99,7 @@ func newCreate() *cobra.Command { // TODO: complex arg: git_repository // TODO: complex arg: pending_deployment // TODO: array: resources + cmd.Flags().StringVar(&createReq.App.Space, "space", createReq.App.Space, `Name of the space this app belongs to.`) cmd.Flags().StringVar(&createReq.App.UsagePolicyId, "usage-policy-id", createReq.App.UsagePolicyId, ``) // TODO: array: user_api_scopes @@ -176,6 +186,127 @@ func newCreate() *cobra.Command { return cmd } +// start create-space command + +// Slice with functions to override default command behavior. +// Functions can be added from the `init()` function in manually curated files in this directory. 
+var createSpaceOverrides []func( + *cobra.Command, + *apps.CreateSpaceRequest, +) + +func newCreateSpace() *cobra.Command { + cmd := &cobra.Command{} + + var createSpaceReq apps.CreateSpaceRequest + createSpaceReq.Space = apps.Space{} + var createSpaceJson flags.JsonFlag + + var createSpaceSkipWait bool + var createSpaceTimeout time.Duration + + cmd.Flags().BoolVar(&createSpaceSkipWait, "no-wait", createSpaceSkipWait, `do not wait to reach DONE state`) + cmd.Flags().DurationVar(&createSpaceTimeout, "timeout", 0, `maximum amount of time to reach DONE state`) + + cmd.Flags().Var(&createSpaceJson, "json", `either inline JSON string or @path/to/file.json with request body`) + + cmd.Flags().StringVar(&createSpaceReq.Space.Description, "description", createSpaceReq.Space.Description, `The description of the app space.`) + // TODO: array: effective_user_api_scopes + cmd.Flags().StringVar(&createSpaceReq.Space.Name, "name", createSpaceReq.Space.Name, `The name of the app space.`) + // TODO: array: resources + // TODO: complex arg: status + cmd.Flags().StringVar(&createSpaceReq.Space.UsagePolicyId, "usage-policy-id", createSpaceReq.Space.UsagePolicyId, `The usage policy ID for managing cost at the space level.`) + // TODO: array: user_api_scopes + + cmd.Use = "create-space" + cmd.Short = `Create an app space.` + cmd.Long = `Create an app space. + + Creates a new app space. + + This is a long-running operation. By default, the command waits for the + operation to complete. Use --no-wait to return immediately with the raw + operation details. The operation's 'name' field can then be used to poll for + completion using the get-space-operation command.` + + // This command is being previewed; hide from help output. + cmd.Hidden = true + + cmd.Annotations = make(map[string]string) + + cmd.Args = func(cmd *cobra.Command, args []string) error { + check := root.ExactArgs(0) + return check(cmd, args) + } + + cmd.PreRunE = root.MustWorkspaceClient + cmd.RunE = func(cmd *cobra.Command, args []string) (err error) { + ctx := cmd.Context() + w := cmdctx.WorkspaceClient(ctx) + + if cmd.Flags().Changed("json") { + diags := createSpaceJson.Unmarshal(&createSpaceReq.Space) + if diags.HasError() { + return diags.Error() + } + if len(diags) > 0 { + err := cmdio.RenderDiagnostics(ctx, diags) + if err != nil { + return err + } + } + } + + // Determine which mode to execute based on flags. + switch { + case createSpaceSkipWait: + wait, err := w.Apps.CreateSpace(ctx, createSpaceReq) + if err != nil { + return err + } + + // Return operation immediately without waiting. + operation, err := w.Apps.GetSpaceOperation(ctx, apps.GetOperationRequest{ + Name: wait.Name(), + }) + if err != nil { + return err + } + return cmdio.Render(ctx, operation) + + default: + wait, err := w.Apps.CreateSpace(ctx, createSpaceReq) + if err != nil { + return err + } + + // Show spinner while waiting for completion. + spinner := cmdio.Spinner(ctx) + spinner <- "Waiting for create-space to complete..." + + // Wait for completion. + opts := api.WithTimeout(createSpaceTimeout) + response, err := wait.Wait(ctx, opts) + if err != nil { + return err + } + close(spinner) + return cmdio.Render(ctx, response) + } + } + + // Disable completions since they are not applicable. + // Can be overridden by manual implementation in `override.go`. + cmd.ValidArgsFunction = cobra.NoFileCompletions + + // Apply optional overrides to this command. 
+ for _, fn := range createSpaceOverrides { + fn(cmd, &createSpaceReq) + } + + return cmd +} + // start create-update command // Slice with functions to override default command behavior. @@ -353,6 +484,108 @@ func newDelete() *cobra.Command { return cmd } +// start delete-space command + +// Slice with functions to override default command behavior. +// Functions can be added from the `init()` function in manually curated files in this directory. +var deleteSpaceOverrides []func( + *cobra.Command, + *apps.DeleteSpaceRequest, +) + +func newDeleteSpace() *cobra.Command { + cmd := &cobra.Command{} + + var deleteSpaceReq apps.DeleteSpaceRequest + + var deleteSpaceSkipWait bool + var deleteSpaceTimeout time.Duration + + cmd.Flags().BoolVar(&deleteSpaceSkipWait, "no-wait", deleteSpaceSkipWait, `do not wait to reach DONE state`) + cmd.Flags().DurationVar(&deleteSpaceTimeout, "timeout", 0, `maximum amount of time to reach DONE state`) + + cmd.Use = "delete-space NAME" + cmd.Short = `Delete an app space.` + cmd.Long = `Delete an app space. + + Deletes an app space. + + This is a long-running operation. By default, the command waits for the + operation to complete. Use --no-wait to return immediately with the raw + operation details. The operation's 'name' field can then be used to poll for + completion using the get-space-operation command. + + Arguments: + NAME: The name of the app space.` + + // This command is being previewed; hide from help output. + cmd.Hidden = true + + cmd.Annotations = make(map[string]string) + + cmd.Args = func(cmd *cobra.Command, args []string) error { + check := root.ExactArgs(1) + return check(cmd, args) + } + + cmd.PreRunE = root.MustWorkspaceClient + cmd.RunE = func(cmd *cobra.Command, args []string) (err error) { + ctx := cmd.Context() + w := cmdctx.WorkspaceClient(ctx) + + deleteSpaceReq.Name = args[0] + + // Determine which mode to execute based on flags. + switch { + case deleteSpaceSkipWait: + wait, err := w.Apps.DeleteSpace(ctx, deleteSpaceReq) + if err != nil { + return err + } + + // Return operation immediately without waiting. + operation, err := w.Apps.GetSpaceOperation(ctx, apps.GetOperationRequest{ + Name: wait.Name(), + }) + if err != nil { + return err + } + return cmdio.Render(ctx, operation) + + default: + wait, err := w.Apps.DeleteSpace(ctx, deleteSpaceReq) + if err != nil { + return err + } + + // Show spinner while waiting for completion. + spinner := cmdio.Spinner(ctx) + spinner <- "Waiting for delete-space to complete..." + + // Wait for completion. + opts := api.WithTimeout(deleteSpaceTimeout) + + err = wait.Wait(ctx, opts) + if err != nil { + return err + } + close(spinner) + return nil + } + } + + // Disable completions since they are not applicable. + // Can be overridden by manual implementation in `override.go`. + cmd.ValidArgsFunction = cobra.NoFileCompletions + + // Apply optional overrides to this command. + for _, fn := range deleteSpaceOverrides { + fn(cmd, &deleteSpaceReq) + } + + return cmd +} + // start deploy command // Slice with functions to override default command behavior. @@ -687,6 +920,124 @@ func newGetPermissions() *cobra.Command { return cmd } +// start get-space command + +// Slice with functions to override default command behavior. +// Functions can be added from the `init()` function in manually curated files in this directory. 
+var getSpaceOverrides []func( + *cobra.Command, + *apps.GetSpaceRequest, +) + +func newGetSpace() *cobra.Command { + cmd := &cobra.Command{} + + var getSpaceReq apps.GetSpaceRequest + + cmd.Use = "get-space NAME" + cmd.Short = `Get an app space.` + cmd.Long = `Get an app space. + + Retrieves information for the app space with the supplied name. + + Arguments: + NAME: The name of the app space.` + + // This command is being previewed; hide from help output. + cmd.Hidden = true + + cmd.Annotations = make(map[string]string) + + cmd.Args = func(cmd *cobra.Command, args []string) error { + check := root.ExactArgs(1) + return check(cmd, args) + } + + cmd.PreRunE = root.MustWorkspaceClient + cmd.RunE = func(cmd *cobra.Command, args []string) (err error) { + ctx := cmd.Context() + w := cmdctx.WorkspaceClient(ctx) + + getSpaceReq.Name = args[0] + + response, err := w.Apps.GetSpace(ctx, getSpaceReq) + if err != nil { + return err + } + return cmdio.Render(ctx, response) + } + + // Disable completions since they are not applicable. + // Can be overridden by manual implementation in `override.go`. + cmd.ValidArgsFunction = cobra.NoFileCompletions + + // Apply optional overrides to this command. + for _, fn := range getSpaceOverrides { + fn(cmd, &getSpaceReq) + } + + return cmd +} + +// start get-space-operation command + +// Slice with functions to override default command behavior. +// Functions can be added from the `init()` function in manually curated files in this directory. +var getSpaceOperationOverrides []func( + *cobra.Command, + *apps.GetOperationRequest, +) + +func newGetSpaceOperation() *cobra.Command { + cmd := &cobra.Command{} + + var getSpaceOperationReq apps.GetOperationRequest + + cmd.Use = "get-space-operation NAME" + cmd.Short = `Get the status of an app space operation.` + cmd.Long = `Get the status of an app space operation. + + Gets the status of an app space operation (create, update, or delete). + + Arguments: + NAME: The name of the operation resource.` + + // This command is being previewed; hide from help output. + cmd.Hidden = true + + cmd.Annotations = make(map[string]string) + + cmd.Args = func(cmd *cobra.Command, args []string) error { + check := root.ExactArgs(1) + return check(cmd, args) + } + + cmd.PreRunE = root.MustWorkspaceClient + cmd.RunE = func(cmd *cobra.Command, args []string) (err error) { + ctx := cmd.Context() + w := cmdctx.WorkspaceClient(ctx) + + getSpaceOperationReq.Name = args[0] + + response, err := w.Apps.GetSpaceOperation(ctx, getSpaceOperationReq) + if err != nil { + return err + } + return cmdio.Render(ctx, response) + } + + // Disable completions since they are not applicable. + // Can be overridden by manual implementation in `override.go`. + cmd.ValidArgsFunction = cobra.NoFileCompletions + + // Apply optional overrides to this command. + for _, fn := range getSpaceOperationOverrides { + fn(cmd, &getSpaceOperationReq) + } + + return cmd +} + // start get-update command // Slice with functions to override default command behavior.
@@ -759,6 +1110,7 @@ func newList() *cobra.Command { cmd.Flags().IntVar(&listReq.PageSize, "page-size", listReq.PageSize, `Upper bound for items returned.`) cmd.Flags().StringVar(&listReq.PageToken, "page-token", listReq.PageToken, `Pagination token to go to the next page of apps.`) + cmd.Flags().StringVar(&listReq.Space, "space", listReq.Space, `Filter apps by app space name.`) cmd.Use = "list" cmd.Short = `List apps.` @@ -850,6 +1202,60 @@ func newListDeployments() *cobra.Command { return cmd } +// start list-spaces command + +// Slice with functions to override default command behavior. +// Functions can be added from the `init()` function in manually curated files in this directory. +var listSpacesOverrides []func( + *cobra.Command, + *apps.ListSpacesRequest, +) + +func newListSpaces() *cobra.Command { + cmd := &cobra.Command{} + + var listSpacesReq apps.ListSpacesRequest + + cmd.Flags().IntVar(&listSpacesReq.PageSize, "page-size", listSpacesReq.PageSize, `Upper bound for items returned.`) + cmd.Flags().StringVar(&listSpacesReq.PageToken, "page-token", listSpacesReq.PageToken, `Pagination token to go to the next page of app spaces.`) + + cmd.Use = "list-spaces" + cmd.Short = `List app spaces.` + cmd.Long = `List app spaces. + + Lists all app spaces in the workspace.` + + // This command is being previewed; hide from help output. + cmd.Hidden = true + + cmd.Annotations = make(map[string]string) + + cmd.Args = func(cmd *cobra.Command, args []string) error { + check := root.ExactArgs(0) + return check(cmd, args) + } + + cmd.PreRunE = root.MustWorkspaceClient + cmd.RunE = func(cmd *cobra.Command, args []string) (err error) { + ctx := cmd.Context() + w := cmdctx.WorkspaceClient(ctx) + + response := w.Apps.ListSpaces(ctx, listSpacesReq) + return cmdio.RenderIterator(ctx, response) + } + + // Disable completions since they are not applicable. + // Can be overridden by manual implementation in `override.go`. + cmd.ValidArgsFunction = cobra.NoFileCompletions + + // Apply optional overrides to this command. + for _, fn := range listSpacesOverrides { + fn(cmd, &listSpacesReq) + } + + return cmd +} + // start set-permissions command // Slice with functions to override default command behavior. @@ -1115,6 +1521,7 @@ func newUpdate() *cobra.Command { // TODO: complex arg: git_repository // TODO: complex arg: pending_deployment // TODO: array: resources + cmd.Flags().StringVar(&updateReq.App.Space, "space", updateReq.App.Space, `Name of the space this app belongs to.`) cmd.Flags().StringVar(&updateReq.App.UsagePolicyId, "usage-policy-id", updateReq.App.UsagePolicyId, ``) // TODO: array: user_api_scopes @@ -1247,4 +1654,147 @@ func newUpdatePermissions() *cobra.Command { return cmd } +// start update-space command + +// Slice with functions to override default command behavior. +// Functions can be added from the `init()` function in manually curated files in this directory. 
+var updateSpaceOverrides []func( + *cobra.Command, + *apps.UpdateSpaceRequest, +) + +func newUpdateSpace() *cobra.Command { + cmd := &cobra.Command{} + + var updateSpaceReq apps.UpdateSpaceRequest + updateSpaceReq.Space = apps.Space{} + var updateSpaceJson flags.JsonFlag + + var updateSpaceSkipWait bool + var updateSpaceTimeout time.Duration + + cmd.Flags().BoolVar(&updateSpaceSkipWait, "no-wait", updateSpaceSkipWait, `do not wait to reach DONE state`) + cmd.Flags().DurationVar(&updateSpaceTimeout, "timeout", 0, `maximum amount of time to reach DONE state`) + + cmd.Flags().Var(&updateSpaceJson, "json", `either inline JSON string or @path/to/file.json with request body`) + + cmd.Flags().StringVar(&updateSpaceReq.Space.Description, "description", updateSpaceReq.Space.Description, `The description of the app space.`) + // TODO: array: effective_user_api_scopes + cmd.Flags().StringVar(&updateSpaceReq.Space.Name, "name", updateSpaceReq.Space.Name, `The name of the app space.`) + // TODO: array: resources + // TODO: complex arg: status + cmd.Flags().StringVar(&updateSpaceReq.Space.UsagePolicyId, "usage-policy-id", updateSpaceReq.Space.UsagePolicyId, `The usage policy ID for managing cost at the space level.`) + // TODO: array: user_api_scopes + + cmd.Use = "update-space NAME UPDATE_MASK" + cmd.Short = `Update an app space.` + cmd.Long = `Update an app space. + + Updates an app space. The update process is asynchronous and the status of the + update can be checked with the GetSpaceOperation method. + + This is a long-running operation. By default, the command waits for the + operation to complete. Use --no-wait to return immediately with the raw + operation details. The operation's 'name' field can then be used to poll for + completion using the get-space-operation command. + + Arguments: + NAME: The name of the app space. The name must contain only lowercase + alphanumeric characters and hyphens. It must be unique within the + workspace. + UPDATE_MASK: The field mask must be a single string, with multiple fields separated by + commas (no spaces). The field path is relative to the resource object, + using a dot (.) to navigate sub-fields (e.g., author.given_name). + Specification of elements in sequence or map fields is not allowed, as + only the entire collection field can be specified. Field names must + exactly match the resource field names. + + A field mask of * indicates full replacement. It’s recommended to + always explicitly list the fields being updated and avoid using * + wildcards, as it can lead to unintended results if the API changes in the + future.` + + // This command is being previewed; hide from help output. + cmd.Hidden = true + + cmd.Annotations = make(map[string]string) + + cmd.Args = func(cmd *cobra.Command, args []string) error { + check := root.ExactArgs(2) + return check(cmd, args) + } + + cmd.PreRunE = root.MustWorkspaceClient + cmd.RunE = func(cmd *cobra.Command, args []string) (err error) { + ctx := cmd.Context() + w := cmdctx.WorkspaceClient(ctx) + + if cmd.Flags().Changed("json") { + diags := updateSpaceJson.Unmarshal(&updateSpaceReq.Space) + if diags.HasError() { + return diags.Error() + } + if len(diags) > 0 { + err := cmdio.RenderDiagnostics(ctx, diags) + if err != nil { + return err + } + } + } + updateSpaceReq.Name = args[0] + if args[1] != "" { + updateMaskArray := strings.Split(args[1], ",") + updateSpaceReq.UpdateMask = *fieldmask.New(updateMaskArray) + } + + // Determine which mode to execute based on flags. 
+ switch { + case updateSpaceSkipWait: + wait, err := w.Apps.UpdateSpace(ctx, updateSpaceReq) + if err != nil { + return err + } + + // Return operation immediately without waiting. + operation, err := w.Apps.GetSpaceOperation(ctx, apps.GetOperationRequest{ + Name: wait.Name(), + }) + if err != nil { + return err + } + return cmdio.Render(ctx, operation) + + default: + wait, err := w.Apps.UpdateSpace(ctx, updateSpaceReq) + if err != nil { + return err + } + + // Show spinner while waiting for completion. + spinner := cmdio.Spinner(ctx) + spinner <- "Waiting for update-space to complete..." + + // Wait for completion. + opts := api.WithTimeout(updateSpaceTimeout) + response, err := wait.Wait(ctx, opts) + if err != nil { + return err + } + close(spinner) + return cmdio.Render(ctx, response) + } + } + + // Disable completions since they are not applicable. + // Can be overridden by manual implementation in `override.go`. + cmd.ValidArgsFunction = cobra.NoFileCompletions + + // Apply optional overrides to this command. + for _, fn := range updateSpaceOverrides { + fn(cmd, &updateSpaceReq) + } + + return cmd +} + // end service Apps diff --git a/cmd/workspace/lakeview/lakeview.go b/cmd/workspace/lakeview/lakeview.go index 13b55887fd..4dd071a903 100755 --- a/cmd/workspace/lakeview/lakeview.go +++ b/cmd/workspace/lakeview/lakeview.go @@ -238,6 +238,8 @@ func newCreateSubscription() *cobra.Command { cmd.Flags().Var(&createSubscriptionJson, "json", `either inline JSON string or @path/to/file.json with request body`) + cmd.Flags().BoolVar(&createSubscriptionReq.Subscription.SkipNotify, "skip-notify", createSubscriptionReq.Subscription.SkipNotify, `Controls whether notifications are sent to the subscriber for scheduled dashboard refreshes.`) + cmd.Use = "create-subscription DASHBOARD_ID SCHEDULE_ID SUBSCRIBER" cmd.Short = `Create schedule subscription.` cmd.Long = `Create schedule subscription. diff --git a/cmd/workspace/quality-monitor-v2/quality-monitor-v2.go b/cmd/workspace/quality-monitor-v2/quality-monitor-v2.go index 5031c4b64b..8c5e3547bd 100755 --- a/cmd/workspace/quality-monitor-v2/quality-monitor-v2.go +++ b/cmd/workspace/quality-monitor-v2/quality-monitor-v2.go @@ -20,10 +20,10 @@ var cmdOverrides []func(*cobra.Command) func New() *cobra.Command { cmd := &cobra.Command{ Use: "quality-monitor-v2", - Short: `[DEPRECATED] This API is deprecated.`, - Long: `[DEPRECATED] This API is deprecated. Please use the Data Quality Monitoring - API instead (REST: /api/data-quality/v1/monitors). Manage data quality of UC - objects (currently support schema).`, + Short: `Deprecated: Please use the Data Quality Monitoring API instead (REST: /api/data-quality/v1/monitors).`, + Long: `Deprecated: Please use the Data Quality Monitoring API instead (REST: + /api/data-quality/v1/monitors). Manage data quality of UC objects (currently + support schema).`, GroupID: "qualitymonitor", RunE: root.ReportUnknownSubcommand, } @@ -68,8 +68,8 @@ func newCreateQualityMonitor() *cobra.Command { cmd.Short = `Create a quality monitor.` cmd.Long = `Create a quality monitor. - [DEPRECATED] Create a quality monitor on UC object. Use Data Quality - Monitoring API instead. + Deprecated: Use Data Quality Monitoring API instead + (/api/data-quality/v1/monitors). Create a quality monitor on UC object. Arguments: OBJECT_TYPE: The type of the monitored object. Can be one of the following: schema. 
@@ -150,8 +150,8 @@ func newDeleteQualityMonitor() *cobra.Command {
 	cmd.Short = `Delete a quality monitor.`
 	cmd.Long = `Delete a quality monitor.
 
-  [DEPRECATED] Delete a quality monitor on UC object. Use Data Quality
-  Monitoring API instead.
+  Deprecated: Use Data Quality Monitoring API instead
+  (/api/data-quality/v1/monitors). Delete a quality monitor on UC object.
 
   Arguments:
     OBJECT_TYPE: The type of the monitored object. Can be one of the following: schema.
@@ -209,8 +209,8 @@ func newGetQualityMonitor() *cobra.Command {
 	cmd.Short = `Read a quality monitor.`
 	cmd.Long = `Read a quality monitor.
 
-  [DEPRECATED] Read a quality monitor on UC object. Use Data Quality Monitoring
-  API instead.
+  Deprecated: Use Data Quality Monitoring API instead
+  (/api/data-quality/v1/monitors). Read a quality monitor on UC object.
 
   Arguments:
     OBJECT_TYPE: The type of the monitored object. Can be one of the following: schema.
@@ -271,8 +271,8 @@ func newListQualityMonitor() *cobra.Command {
 	cmd.Short = `List quality monitors.`
 	cmd.Long = `List quality monitors.
 
-  [DEPRECATED] (Unimplemented) List quality monitors. Use Data Quality
-  Monitoring API instead.`
+  Deprecated: Use Data Quality Monitoring API instead
+  (/api/data-quality/v1/monitors). (Unimplemented) List quality monitors.`
 
 	cmd.Annotations = make(map[string]string)
 
@@ -327,8 +327,9 @@ func newUpdateQualityMonitor() *cobra.Command {
 	cmd.Short = `Update a quality monitor.`
 	cmd.Long = `Update a quality monitor.
 
-  [DEPRECATED] (Unimplemented) Update a quality monitor on UC object. Use Data
-  Quality Monitoring API instead.
+  Deprecated: Use Data Quality Monitoring API instead
+  (/api/data-quality/v1/monitors). (Unimplemented) Update a quality monitor on
+  UC object.
 
   Arguments:
     OBJECT_TYPE: The type of the monitored object. Can be one of the following: schema.
diff --git a/cmd/workspace/quality-monitors/quality-monitors.go b/cmd/workspace/quality-monitors/quality-monitors.go
index 7d6fffe54a..05f5f5f3c4 100755
--- a/cmd/workspace/quality-monitors/quality-monitors.go
+++ b/cmd/workspace/quality-monitors/quality-monitors.go
@@ -20,10 +20,10 @@ var cmdOverrides []func(*cobra.Command)
 func New() *cobra.Command {
 	cmd := &cobra.Command{
 		Use:   "quality-monitors",
-		Short: `[DEPRECATED] This API is deprecated.`,
-		Long: `[DEPRECATED] This API is deprecated. Please use the Data Quality Monitors API
-  instead (REST: /api/data-quality/v1/monitors), which manages both Data
-  Profiling and Anomaly Detection.
+		Short: `Deprecated: Please use the Data Quality Monitors API instead (REST: /api/data-quality/v1/monitors), which manages both Data Profiling and Anomaly Detection.`,
+		Long: `Deprecated: Please use the Data Quality Monitors API instead (REST:
+  /api/data-quality/v1/monitors), which manages both Data Profiling and Anomaly
+  Detection.
 
   A monitor computes and monitors data or model quality metrics for a table over
   time. It generates metrics tables and a dashboard that you can use to monitor
@@ -73,8 +73,8 @@ func newCancelRefresh() *cobra.Command {
 	cmd.Short = `Cancel refresh.`
 	cmd.Long = `Cancel refresh.
 
-  [DEPRECATED] Cancels an already-initiated refresh job. Use Data Quality
-  Monitors API instead (/api/data-quality/v1/monitors).
+  Deprecated: Use Data Quality Monitors API instead
+  (/api/data-quality/v1/monitors). Cancels an already-initiated refresh job.
 
   Arguments:
     TABLE_NAME: UC table name in format catalog.schema.table_name.
       table_name is case
@@ -155,8 +155,9 @@ func newCreate() *cobra.Command {
 	cmd.Short = `Create a table monitor.`
 	cmd.Long = `Create a table monitor.
 
-  [DEPRECATED] Creates a new monitor for the specified table. Use Data Quality
-  Monitors API instead (/api/data-quality/v1/monitors).
+  Deprecated: Use Data Quality Monitors API instead
+  (/api/data-quality/v1/monitors). Creates a new monitor for the specified
+  table.
 
   The caller must either: 1. be an owner of the table's parent catalog, have
   **USE_SCHEMA** on the table's parent schema, and have **SELECT** access on the
@@ -253,8 +254,8 @@ func newDelete() *cobra.Command {
 	cmd.Short = `Delete a table monitor.`
 	cmd.Long = `Delete a table monitor.
 
-  [DEPRECATED] Deletes a monitor for the specified table. Use Data Quality
-  Monitors API instead (/api/data-quality/v1/monitors).
+  Deprecated: Use Data Quality Monitors API instead
+  (/api/data-quality/v1/monitors). Deletes a monitor for the specified table.
 
   The caller must either: 1. be an owner of the table's parent catalog 2.
   have **USE_CATALOG** on the table's parent catalog and be an owner of the table's
@@ -323,8 +324,8 @@ func newGet() *cobra.Command {
 	cmd.Short = `Get a table monitor.`
 	cmd.Long = `Get a table monitor.
 
-  [DEPRECATED] Gets a monitor for the specified table. Use Data Quality Monitors
-  API instead (/api/data-quality/v1/monitors).
+  Deprecated: Use Data Quality Monitors API instead
+  (/api/data-quality/v1/monitors). Gets a monitor for the specified table.
 
   The caller must either: 1. be an owner of the table's parent catalog 2.
   have **USE_CATALOG** on the table's parent catalog and be an owner of the table's
@@ -392,9 +393,9 @@ func newGetRefresh() *cobra.Command {
 	cmd.Short = `Get refresh.`
 	cmd.Long = `Get refresh.
 
-  [DEPRECATED] Gets info about a specific monitor refresh using the given
-  refresh ID. Use Data Quality Monitors API instead
-  (/api/data-quality/v1/monitors).
+  Deprecated: Use Data Quality Monitors API instead
+  (/api/data-quality/v1/monitors). Gets info about a specific monitor refresh
+  using the given refresh ID.
 
   The caller must either: 1. be an owner of the table's parent catalog 2.
   have **USE_CATALOG** on the table's parent catalog and be an owner of the table's
@@ -464,9 +465,9 @@ func newListRefreshes() *cobra.Command {
 	cmd.Short = `List refreshes.`
 	cmd.Long = `List refreshes.
 
-  [DEPRECATED] Gets an array containing the history of the most recent refreshes
-  (up to 25) for this table. Use Data Quality Monitors API instead
-  (/api/data-quality/v1/monitors).
+  Deprecated: Use Data Quality Monitors API instead
+  (/api/data-quality/v1/monitors). Gets an array containing the history of the
+  most recent refreshes (up to 25) for this table.
 
   The caller must either: 1. be an owner of the table's parent catalog 2.
   have **USE_CATALOG** on the table's parent catalog and be an owner of the table's
@@ -537,8 +538,9 @@ func newRegenerateDashboard() *cobra.Command {
 	cmd.Short = `Regenerate a monitoring dashboard.`
 	cmd.Long = `Regenerate a monitoring dashboard.
 
-  [DEPRECATED] Regenerates the monitoring dashboard for the specified table. Use
-  Data Quality Monitors API instead (/api/data-quality/v1/monitors).
+  Deprecated: Use Data Quality Monitors API instead
+  (/api/data-quality/v1/monitors). Regenerates the monitoring dashboard for the
+  specified table.
 
   The caller must either: 1. be an owner of the table's parent catalog 2.
   have **USE_CATALOG** on the table's parent catalog and be an owner of the table's
@@ -620,9 +622,9 @@ func newRunRefresh() *cobra.Command {
 	cmd.Short = `Run refresh.`
 	cmd.Long = `Run refresh.
 
-  [DEPRECATED] Queues a metric refresh on the monitor for the specified table.
-  Use Data Quality Monitors API instead (/api/data-quality/v1/monitors). The
-  refresh will execute in the background.
+  Deprecated: Use Data Quality Monitors API instead
+  (/api/data-quality/v1/monitors). Queues a metric refresh on the monitor for
+  the specified table. The refresh will execute in the background.
 
   The caller must either: 1. be an owner of the table's parent catalog 2.
   have **USE_CATALOG** on the table's parent catalog and be an owner of the table's
@@ -703,8 +705,8 @@ func newUpdate() *cobra.Command {
 	cmd.Short = `Update a table monitor.`
 	cmd.Long = `Update a table monitor.
 
-  [DEPRECATED] Updates a monitor for the specified table. Use Data Quality
-  Monitors API instead (/api/data-quality/v1/monitors).
+  Deprecated: Use Data Quality Monitors API instead
+  (/api/data-quality/v1/monitors). Updates a monitor for the specified table.
 
   The caller must either: 1. be an owner of the table's parent catalog 2.
   have **USE_CATALOG** on the table's parent catalog and be an owner of the table's
diff --git a/python/databricks/bundles/jobs/__init__.py b/python/databricks/bundles/jobs/__init__.py
index bbe0fe85fa..3e98d2acd6 100644
--- a/python/databricks/bundles/jobs/__init__.py
+++ b/python/databricks/bundles/jobs/__init__.py
@@ -221,6 +221,9 @@
     "SparkSubmitTask",
     "SparkSubmitTaskDict",
     "SparkSubmitTaskParam",
+    "SparseCheckout",
+    "SparseCheckoutDict",
+    "SparseCheckoutParam",
     "SqlTask",
     "SqlTaskAlert",
     "SqlTaskAlertDict",
@@ -635,6 +638,11 @@
     SparkSubmitTaskDict,
     SparkSubmitTaskParam,
 )
+from databricks.bundles.jobs._models.sparse_checkout import (
+    SparseCheckout,
+    SparseCheckoutDict,
+    SparseCheckoutParam,
+)
 from databricks.bundles.jobs._models.sql_task import SqlTask, SqlTaskDict, SqlTaskParam
 from databricks.bundles.jobs._models.sql_task_alert import (
     SqlTaskAlert,
diff --git a/python/databricks/bundles/jobs/_models/dashboard_task.py b/python/databricks/bundles/jobs/_models/dashboard_task.py
index 98e171359c..4f9cd829a1 100644
--- a/python/databricks/bundles/jobs/_models/dashboard_task.py
+++ b/python/databricks/bundles/jobs/_models/dashboard_task.py
@@ -4,10 +4,7 @@
 from databricks.bundles.core._transform import _transform
 from databricks.bundles.core._transform_to_json import _transform_to_json_value
 from databricks.bundles.core._variable import VariableOrDict, VariableOrOptional
-from databricks.bundles.jobs._models.subscription import (
-    Subscription,
-    SubscriptionParam,
-)
+from databricks.bundles.jobs._models.subscription import Subscription, SubscriptionParam
 
 if TYPE_CHECKING:
     from typing_extensions import Self
diff --git a/python/databricks/bundles/jobs/_models/git_source.py b/python/databricks/bundles/jobs/_models/git_source.py
index 76fa000f66..539a192088 100644
--- a/python/databricks/bundles/jobs/_models/git_source.py
+++ b/python/databricks/bundles/jobs/_models/git_source.py
@@ -5,6 +5,10 @@
 from databricks.bundles.core._transform_to_json import _transform_to_json_value
 from databricks.bundles.core._variable import VariableOr, VariableOrOptional
 from databricks.bundles.jobs._models.git_provider import GitProvider, GitProviderParam
+from databricks.bundles.jobs._models.sparse_checkout import (
+    SparseCheckout,
+    SparseCheckoutParam,
+)
 
 if TYPE_CHECKING:
     from typing_extensions import Self
@@ -45,6 +49,11 @@ class GitSource:
     Name of the tag to be checked out and used by this job. This field cannot be
     specified in conjunction with git_branch or git_commit.
     """
 
+    sparse_checkout: VariableOrOptional[SparseCheckout] = None
+    """
+    :meta private: [EXPERIMENTAL]
+    """
+
     @classmethod
     def from_dict(cls, value: "GitSourceDict") -> "Self":
         return _transform(cls, value)
@@ -81,5 +90,10 @@ class GitSourceDict(TypedDict, total=False):
     Name of the tag to be checked out and used by this job. This field cannot be
     specified in conjunction with git_branch or git_commit.
     """
 
+    sparse_checkout: VariableOrOptional[SparseCheckoutParam]
+    """
+    :meta private: [EXPERIMENTAL]
+    """
+
 GitSourceParam = GitSourceDict | GitSource
diff --git a/python/databricks/bundles/jobs/_models/sparse_checkout.py b/python/databricks/bundles/jobs/_models/sparse_checkout.py
new file mode 100644
index 0000000000..af68734ad0
--- /dev/null
+++ b/python/databricks/bundles/jobs/_models/sparse_checkout.py
@@ -0,0 +1,40 @@
+from dataclasses import dataclass, field
+from typing import TYPE_CHECKING, TypedDict
+
+from databricks.bundles.core._transform import _transform
+from databricks.bundles.core._transform_to_json import _transform_to_json_value
+from databricks.bundles.core._variable import VariableOrList
+
+if TYPE_CHECKING:
+    from typing_extensions import Self
+
+
+@dataclass(kw_only=True)
+class SparseCheckout:
+    """
+    :meta private: [EXPERIMENTAL]
+    """
+
+    patterns: VariableOrList[str] = field(default_factory=list)
+    """
+    List of patterns to include for sparse checkout.
+    """
+
+    @classmethod
+    def from_dict(cls, value: "SparseCheckoutDict") -> "Self":
+        return _transform(cls, value)
+
+    def as_dict(self) -> "SparseCheckoutDict":
+        return _transform_to_json_value(self)  # type:ignore
+
+
+class SparseCheckoutDict(TypedDict, total=False):
+    """"""
+
+    patterns: VariableOrList[str]
+    """
+    List of patterns to include for sparse checkout.
+    """
+
+
+SparseCheckoutParam = SparseCheckoutDict | SparseCheckout
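For reference, a minimal sketch of how the new model composes with GitSource. All values below are placeholders (the repository URL, provider string, and patterns are assumptions), and the field is marked experimental above:

    from databricks.bundles.jobs import GitSource, SparseCheckout

    # Sketch: restrict the job checkout to selected paths.
    source = GitSource.from_dict(
        {
            "git_url": "https://github.com/org/repo",  # hypothetical repository
            "git_provider": "gitHub",  # assumed provider spelling
            "git_branch": "main",
            "sparse_checkout": {"patterns": ["jobs/", "libs/shared/"]},
        }
    )
    # source.sparse_checkout should now be a SparseCheckout instance.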