diff --git a/.codegen/_openapi_sha b/.codegen/_openapi_sha index e5a903791..07b6ad108 100755 --- a/.codegen/_openapi_sha +++ b/.codegen/_openapi_sha @@ -1 +1 @@ -d09dbd77f5a9560cbb816746773da43a8bdbde08 \ No newline at end of file +be0fc4f9ef0216589efada62b68de67c0c9913a1 \ No newline at end of file diff --git a/.gitattributes b/.gitattributes index 2a7d04a47..7129180a4 100755 --- a/.gitattributes +++ b/.gitattributes @@ -1801,6 +1801,7 @@ databricks-sdk-java/src/main/java/com/databricks/sdk/service/ml/BackfillSource.j databricks-sdk-java/src/main/java/com/databricks/sdk/service/ml/BatchCreateMaterializedFeaturesRequest.java linguist-generated=true databricks-sdk-java/src/main/java/com/databricks/sdk/service/ml/BatchCreateMaterializedFeaturesResponse.java linguist-generated=true databricks-sdk-java/src/main/java/com/databricks/sdk/service/ml/ColumnIdentifier.java linguist-generated=true +databricks-sdk-java/src/main/java/com/databricks/sdk/service/ml/ColumnSelection.java linguist-generated=true databricks-sdk-java/src/main/java/com/databricks/sdk/service/ml/CommentActivityAction.java linguist-generated=true databricks-sdk-java/src/main/java/com/databricks/sdk/service/ml/CommentObject.java linguist-generated=true databricks-sdk-java/src/main/java/com/databricks/sdk/service/ml/ContinuousWindow.java linguist-generated=true @@ -2249,8 +2250,14 @@ databricks-sdk-java/src/main/java/com/databricks/sdk/service/postgres/BranchOper databricks-sdk-java/src/main/java/com/databricks/sdk/service/postgres/BranchSpec.java linguist-generated=true databricks-sdk-java/src/main/java/com/databricks/sdk/service/postgres/BranchStatus.java linguist-generated=true databricks-sdk-java/src/main/java/com/databricks/sdk/service/postgres/BranchStatusState.java linguist-generated=true +databricks-sdk-java/src/main/java/com/databricks/sdk/service/postgres/Catalog.java linguist-generated=true +databricks-sdk-java/src/main/java/com/databricks/sdk/service/postgres/CatalogCatalogSpec.java 
linguist-generated=true +databricks-sdk-java/src/main/java/com/databricks/sdk/service/postgres/CatalogCatalogStatus.java linguist-generated=true +databricks-sdk-java/src/main/java/com/databricks/sdk/service/postgres/CatalogOperationMetadata.java linguist-generated=true databricks-sdk-java/src/main/java/com/databricks/sdk/service/postgres/CreateBranchOperation.java linguist-generated=true databricks-sdk-java/src/main/java/com/databricks/sdk/service/postgres/CreateBranchRequest.java linguist-generated=true +databricks-sdk-java/src/main/java/com/databricks/sdk/service/postgres/CreateCatalogOperation.java linguist-generated=true +databricks-sdk-java/src/main/java/com/databricks/sdk/service/postgres/CreateCatalogRequest.java linguist-generated=true databricks-sdk-java/src/main/java/com/databricks/sdk/service/postgres/CreateDatabaseOperation.java linguist-generated=true databricks-sdk-java/src/main/java/com/databricks/sdk/service/postgres/CreateDatabaseRequest.java linguist-generated=true databricks-sdk-java/src/main/java/com/databricks/sdk/service/postgres/CreateEndpointOperation.java linguist-generated=true @@ -2259,6 +2266,8 @@ databricks-sdk-java/src/main/java/com/databricks/sdk/service/postgres/CreateProj databricks-sdk-java/src/main/java/com/databricks/sdk/service/postgres/CreateProjectRequest.java linguist-generated=true databricks-sdk-java/src/main/java/com/databricks/sdk/service/postgres/CreateRoleOperation.java linguist-generated=true databricks-sdk-java/src/main/java/com/databricks/sdk/service/postgres/CreateRoleRequest.java linguist-generated=true +databricks-sdk-java/src/main/java/com/databricks/sdk/service/postgres/CreateSyncedTableOperation.java linguist-generated=true +databricks-sdk-java/src/main/java/com/databricks/sdk/service/postgres/CreateSyncedTableRequest.java linguist-generated=true databricks-sdk-java/src/main/java/com/databricks/sdk/service/postgres/Database.java linguist-generated=true 
databricks-sdk-java/src/main/java/com/databricks/sdk/service/postgres/DatabaseCredential.java linguist-generated=true databricks-sdk-java/src/main/java/com/databricks/sdk/service/postgres/DatabaseDatabaseSpec.java linguist-generated=true @@ -2267,6 +2276,8 @@ databricks-sdk-java/src/main/java/com/databricks/sdk/service/postgres/DatabaseOp databricks-sdk-java/src/main/java/com/databricks/sdk/service/postgres/DatabricksServiceExceptionWithDetailsProto.java linguist-generated=true databricks-sdk-java/src/main/java/com/databricks/sdk/service/postgres/DeleteBranchOperation.java linguist-generated=true databricks-sdk-java/src/main/java/com/databricks/sdk/service/postgres/DeleteBranchRequest.java linguist-generated=true +databricks-sdk-java/src/main/java/com/databricks/sdk/service/postgres/DeleteCatalogOperation.java linguist-generated=true +databricks-sdk-java/src/main/java/com/databricks/sdk/service/postgres/DeleteCatalogRequest.java linguist-generated=true databricks-sdk-java/src/main/java/com/databricks/sdk/service/postgres/DeleteDatabaseOperation.java linguist-generated=true databricks-sdk-java/src/main/java/com/databricks/sdk/service/postgres/DeleteDatabaseRequest.java linguist-generated=true databricks-sdk-java/src/main/java/com/databricks/sdk/service/postgres/DeleteEndpointOperation.java linguist-generated=true @@ -2275,6 +2286,9 @@ databricks-sdk-java/src/main/java/com/databricks/sdk/service/postgres/DeleteProj databricks-sdk-java/src/main/java/com/databricks/sdk/service/postgres/DeleteProjectRequest.java linguist-generated=true databricks-sdk-java/src/main/java/com/databricks/sdk/service/postgres/DeleteRoleOperation.java linguist-generated=true databricks-sdk-java/src/main/java/com/databricks/sdk/service/postgres/DeleteRoleRequest.java linguist-generated=true +databricks-sdk-java/src/main/java/com/databricks/sdk/service/postgres/DeleteSyncedTableOperation.java linguist-generated=true 
+databricks-sdk-java/src/main/java/com/databricks/sdk/service/postgres/DeleteSyncedTableRequest.java linguist-generated=true +databricks-sdk-java/src/main/java/com/databricks/sdk/service/postgres/DeltaTableSyncInfo.java linguist-generated=true databricks-sdk-java/src/main/java/com/databricks/sdk/service/postgres/Endpoint.java linguist-generated=true databricks-sdk-java/src/main/java/com/databricks/sdk/service/postgres/EndpointGroupSpec.java linguist-generated=true databricks-sdk-java/src/main/java/com/databricks/sdk/service/postgres/EndpointGroupStatus.java linguist-generated=true @@ -2288,11 +2302,13 @@ databricks-sdk-java/src/main/java/com/databricks/sdk/service/postgres/EndpointTy databricks-sdk-java/src/main/java/com/databricks/sdk/service/postgres/ErrorCode.java linguist-generated=true databricks-sdk-java/src/main/java/com/databricks/sdk/service/postgres/GenerateDatabaseCredentialRequest.java linguist-generated=true databricks-sdk-java/src/main/java/com/databricks/sdk/service/postgres/GetBranchRequest.java linguist-generated=true +databricks-sdk-java/src/main/java/com/databricks/sdk/service/postgres/GetCatalogRequest.java linguist-generated=true databricks-sdk-java/src/main/java/com/databricks/sdk/service/postgres/GetDatabaseRequest.java linguist-generated=true databricks-sdk-java/src/main/java/com/databricks/sdk/service/postgres/GetEndpointRequest.java linguist-generated=true databricks-sdk-java/src/main/java/com/databricks/sdk/service/postgres/GetOperationRequest.java linguist-generated=true databricks-sdk-java/src/main/java/com/databricks/sdk/service/postgres/GetProjectRequest.java linguist-generated=true databricks-sdk-java/src/main/java/com/databricks/sdk/service/postgres/GetRoleRequest.java linguist-generated=true +databricks-sdk-java/src/main/java/com/databricks/sdk/service/postgres/GetSyncedTableRequest.java linguist-generated=true databricks-sdk-java/src/main/java/com/databricks/sdk/service/postgres/InitialEndpointSpec.java linguist-generated=true 
databricks-sdk-java/src/main/java/com/databricks/sdk/service/postgres/ListBranchesRequest.java linguist-generated=true databricks-sdk-java/src/main/java/com/databricks/sdk/service/postgres/ListBranchesResponse.java linguist-generated=true @@ -2304,6 +2320,7 @@ databricks-sdk-java/src/main/java/com/databricks/sdk/service/postgres/ListProjec databricks-sdk-java/src/main/java/com/databricks/sdk/service/postgres/ListProjectsResponse.java linguist-generated=true databricks-sdk-java/src/main/java/com/databricks/sdk/service/postgres/ListRolesRequest.java linguist-generated=true databricks-sdk-java/src/main/java/com/databricks/sdk/service/postgres/ListRolesResponse.java linguist-generated=true +databricks-sdk-java/src/main/java/com/databricks/sdk/service/postgres/NewPipelineSpec.java linguist-generated=true databricks-sdk-java/src/main/java/com/databricks/sdk/service/postgres/Operation.java linguist-generated=true databricks-sdk-java/src/main/java/com/databricks/sdk/service/postgres/PostgresAPI.java linguist-generated=true databricks-sdk-java/src/main/java/com/databricks/sdk/service/postgres/PostgresImpl.java linguist-generated=true @@ -2314,6 +2331,8 @@ databricks-sdk-java/src/main/java/com/databricks/sdk/service/postgres/ProjectDef databricks-sdk-java/src/main/java/com/databricks/sdk/service/postgres/ProjectOperationMetadata.java linguist-generated=true databricks-sdk-java/src/main/java/com/databricks/sdk/service/postgres/ProjectSpec.java linguist-generated=true databricks-sdk-java/src/main/java/com/databricks/sdk/service/postgres/ProjectStatus.java linguist-generated=true +databricks-sdk-java/src/main/java/com/databricks/sdk/service/postgres/ProvisioningInfoState.java linguist-generated=true +databricks-sdk-java/src/main/java/com/databricks/sdk/service/postgres/ProvisioningPhase.java linguist-generated=true databricks-sdk-java/src/main/java/com/databricks/sdk/service/postgres/RequestedClaims.java linguist-generated=true 
databricks-sdk-java/src/main/java/com/databricks/sdk/service/postgres/RequestedClaimsPermissionSet.java linguist-generated=true databricks-sdk-java/src/main/java/com/databricks/sdk/service/postgres/RequestedResource.java linguist-generated=true @@ -2325,6 +2344,14 @@ databricks-sdk-java/src/main/java/com/databricks/sdk/service/postgres/RoleMember databricks-sdk-java/src/main/java/com/databricks/sdk/service/postgres/RoleOperationMetadata.java linguist-generated=true databricks-sdk-java/src/main/java/com/databricks/sdk/service/postgres/RoleRoleSpec.java linguist-generated=true databricks-sdk-java/src/main/java/com/databricks/sdk/service/postgres/RoleRoleStatus.java linguist-generated=true +databricks-sdk-java/src/main/java/com/databricks/sdk/service/postgres/SyncedTable.java linguist-generated=true +databricks-sdk-java/src/main/java/com/databricks/sdk/service/postgres/SyncedTableOperationMetadata.java linguist-generated=true +databricks-sdk-java/src/main/java/com/databricks/sdk/service/postgres/SyncedTablePipelineProgress.java linguist-generated=true +databricks-sdk-java/src/main/java/com/databricks/sdk/service/postgres/SyncedTablePosition.java linguist-generated=true +databricks-sdk-java/src/main/java/com/databricks/sdk/service/postgres/SyncedTableState.java linguist-generated=true +databricks-sdk-java/src/main/java/com/databricks/sdk/service/postgres/SyncedTableSyncedTableSpec.java linguist-generated=true +databricks-sdk-java/src/main/java/com/databricks/sdk/service/postgres/SyncedTableSyncedTableSpecSyncedTableSchedulingPolicy.java linguist-generated=true +databricks-sdk-java/src/main/java/com/databricks/sdk/service/postgres/SyncedTableSyncedTableStatus.java linguist-generated=true databricks-sdk-java/src/main/java/com/databricks/sdk/service/postgres/UpdateBranchOperation.java linguist-generated=true databricks-sdk-java/src/main/java/com/databricks/sdk/service/postgres/UpdateBranchRequest.java linguist-generated=true 
databricks-sdk-java/src/main/java/com/databricks/sdk/service/postgres/UpdateDatabaseOperation.java linguist-generated=true diff --git a/.github/workflows/tagging.yml b/.github/workflows/tagging.yml index 3fed55cba..df2f6253e 100755 --- a/.github/workflows/tagging.yml +++ b/.github/workflows/tagging.yml @@ -35,7 +35,7 @@ jobs: steps: - name: Generate GitHub App Token id: generate-token - uses: actions/create-github-app-token@v2 + uses: actions/create-github-app-token@v3 with: app-id: ${{ secrets.DECO_SDK_TAGGING_APP_ID }} private-key: ${{ secrets.DECO_SDK_TAGGING_PRIVATE_KEY }} diff --git a/NEXT_CHANGELOG.md b/NEXT_CHANGELOG.md old mode 100644 new mode 100755 index cdf6d63e5..199d70ebb --- a/NEXT_CHANGELOG.md +++ b/NEXT_CHANGELOG.md @@ -13,3 +13,11 @@ ### Internal Changes ### API Changes +* Add `createCatalog()`, `createSyncedTable()`, `deleteCatalog()`, `deleteSyncedTable()`, `getCatalog()` and `getSyncedTable()` methods for `workspaceClient.postgres()` service. +* Add `effectiveFileEventQueue` field for `com.databricks.sdk.service.catalog.CreateExternalLocation`. +* Add `effectiveFileEventQueue` field for `com.databricks.sdk.service.catalog.ExternalLocationInfo`. +* Add `effectiveFileEventQueue` field for `com.databricks.sdk.service.catalog.UpdateExternalLocation`. +* Add `columnSelection` field for `com.databricks.sdk.service.ml.Function`. +* Add `cascade` field for `com.databricks.sdk.service.pipelines.DeletePipelineRequest`. +* Add `defaultBranch` field for `com.databricks.sdk.service.postgres.ProjectSpec`. +* Add `defaultBranch` field for `com.databricks.sdk.service.postgres.ProjectStatus`. 
\ No newline at end of file diff --git a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/catalog/CreateExternalLocation.java b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/catalog/CreateExternalLocation.java index 7ff3d3ff9..f37810b3b 100755 --- a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/catalog/CreateExternalLocation.java +++ b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/catalog/CreateExternalLocation.java @@ -21,6 +21,15 @@ public class CreateExternalLocation { @JsonProperty("effective_enable_file_events") private Boolean effectiveEnableFileEvents; + /** + * The effective file event queue configuration after applying server-side defaults. Always + * populated when a queue is provisioned, regardless of whether the user explicitly set + * `enable_file_events`. Use this field instead of `file_event_queue` for reading the actual queue + * state. + */ + @JsonProperty("effective_file_event_queue") + private FileEventQueue effectiveFileEventQueue; + /** * Whether to enable file events on this external location. Default to `true`. Set to `false` to * disable file events. 
The actual applied value may differ due to server-side defaults; check @@ -91,6 +100,15 @@ public Boolean getEffectiveEnableFileEvents() { return effectiveEnableFileEvents; } + public CreateExternalLocation setEffectiveFileEventQueue(FileEventQueue effectiveFileEventQueue) { + this.effectiveFileEventQueue = effectiveFileEventQueue; + return this; + } + + public FileEventQueue getEffectiveFileEventQueue() { + return effectiveFileEventQueue; + } + public CreateExternalLocation setEnableFileEvents(Boolean enableFileEvents) { this.enableFileEvents = enableFileEvents; return this; @@ -171,6 +189,7 @@ public boolean equals(Object o) { return Objects.equals(comment, that.comment) && Objects.equals(credentialName, that.credentialName) && Objects.equals(effectiveEnableFileEvents, that.effectiveEnableFileEvents) + && Objects.equals(effectiveFileEventQueue, that.effectiveFileEventQueue) && Objects.equals(enableFileEvents, that.enableFileEvents) && Objects.equals(encryptionDetails, that.encryptionDetails) && Objects.equals(fallback, that.fallback) @@ -187,6 +206,7 @@ public int hashCode() { comment, credentialName, effectiveEnableFileEvents, + effectiveFileEventQueue, enableFileEvents, encryptionDetails, fallback, @@ -203,6 +223,7 @@ public String toString() { .add("comment", comment) .add("credentialName", credentialName) .add("effectiveEnableFileEvents", effectiveEnableFileEvents) + .add("effectiveFileEventQueue", effectiveFileEventQueue) .add("enableFileEvents", enableFileEvents) .add("encryptionDetails", encryptionDetails) .add("fallback", fallback) diff --git a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/catalog/ExternalLocationInfo.java b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/catalog/ExternalLocationInfo.java index 8e21bf477..a370e06da 100755 --- a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/catalog/ExternalLocationInfo.java +++ 
b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/catalog/ExternalLocationInfo.java @@ -40,6 +40,15 @@ public class ExternalLocationInfo { @JsonProperty("effective_enable_file_events") private Boolean effectiveEnableFileEvents; + /** + * The effective file event queue configuration after applying server-side defaults. Always + * populated when a queue is provisioned, regardless of whether the user explicitly set + * `enable_file_events`. Use this field instead of `file_event_queue` for reading the actual queue + * state. + */ + @JsonProperty("effective_file_event_queue") + private FileEventQueue effectiveFileEventQueue; + /** * Whether to enable file events on this external location. Default to `true`. Set to `false` to * disable file events. The actual applied value may differ due to server-side defaults; check @@ -162,6 +171,15 @@ public Boolean getEffectiveEnableFileEvents() { return effectiveEnableFileEvents; } + public ExternalLocationInfo setEffectiveFileEventQueue(FileEventQueue effectiveFileEventQueue) { + this.effectiveFileEventQueue = effectiveFileEventQueue; + return this; + } + + public FileEventQueue getEffectiveFileEventQueue() { + return effectiveFileEventQueue; + } + public ExternalLocationInfo setEnableFileEvents(Boolean enableFileEvents) { this.enableFileEvents = enableFileEvents; return this; @@ -282,6 +300,7 @@ public boolean equals(Object o) { && Objects.equals(credentialId, that.credentialId) && Objects.equals(credentialName, that.credentialName) && Objects.equals(effectiveEnableFileEvents, that.effectiveEnableFileEvents) + && Objects.equals(effectiveFileEventQueue, that.effectiveFileEventQueue) && Objects.equals(enableFileEvents, that.enableFileEvents) && Objects.equals(encryptionDetails, that.encryptionDetails) && Objects.equals(fallback, that.fallback) @@ -306,6 +325,7 @@ public int hashCode() { credentialId, credentialName, effectiveEnableFileEvents, + effectiveFileEventQueue, enableFileEvents, encryptionDetails, fallback, @@ 
-330,6 +350,7 @@ public String toString() { .add("credentialId", credentialId) .add("credentialName", credentialName) .add("effectiveEnableFileEvents", effectiveEnableFileEvents) + .add("effectiveFileEventQueue", effectiveFileEventQueue) .add("enableFileEvents", enableFileEvents) .add("encryptionDetails", encryptionDetails) .add("fallback", fallback) diff --git a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/catalog/UpdateExternalLocation.java b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/catalog/UpdateExternalLocation.java index b3893fd7d..30a957764 100755 --- a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/catalog/UpdateExternalLocation.java +++ b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/catalog/UpdateExternalLocation.java @@ -22,6 +22,15 @@ public class UpdateExternalLocation { @JsonProperty("effective_enable_file_events") private Boolean effectiveEnableFileEvents; + /** + * The effective file event queue configuration after applying server-side defaults. Always + * populated when a queue is provisioned, regardless of whether the user explicitly set + * `enable_file_events`. Use this field instead of `file_event_queue` for reading the actual queue + * state. + */ + @JsonProperty("effective_file_event_queue") + private FileEventQueue effectiveFileEventQueue; + /** * Whether to enable file events on this external location. Default to `true`. Set to `false` to * disable file events. 
The actual applied value may differ due to server-side defaults; check @@ -107,6 +116,15 @@ public Boolean getEffectiveEnableFileEvents() { return effectiveEnableFileEvents; } + public UpdateExternalLocation setEffectiveFileEventQueue(FileEventQueue effectiveFileEventQueue) { + this.effectiveFileEventQueue = effectiveFileEventQueue; + return this; + } + + public FileEventQueue getEffectiveFileEventQueue() { + return effectiveFileEventQueue; + } + public UpdateExternalLocation setEnableFileEvents(Boolean enableFileEvents) { this.enableFileEvents = enableFileEvents; return this; @@ -223,6 +241,7 @@ public boolean equals(Object o) { return Objects.equals(comment, that.comment) && Objects.equals(credentialName, that.credentialName) && Objects.equals(effectiveEnableFileEvents, that.effectiveEnableFileEvents) + && Objects.equals(effectiveFileEventQueue, that.effectiveFileEventQueue) && Objects.equals(enableFileEvents, that.enableFileEvents) && Objects.equals(encryptionDetails, that.encryptionDetails) && Objects.equals(fallback, that.fallback) @@ -243,6 +262,7 @@ public int hashCode() { comment, credentialName, effectiveEnableFileEvents, + effectiveFileEventQueue, enableFileEvents, encryptionDetails, fallback, @@ -263,6 +283,7 @@ public String toString() { .add("comment", comment) .add("credentialName", credentialName) .add("effectiveEnableFileEvents", effectiveEnableFileEvents) + .add("effectiveFileEventQueue", effectiveFileEventQueue) .add("enableFileEvents", enableFileEvents) .add("encryptionDetails", encryptionDetails) .add("fallback", fallback) diff --git a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/ml/ColumnSelection.java b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/ml/ColumnSelection.java new file mode 100755 index 000000000..5d132a1d2 --- /dev/null +++ b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/ml/ColumnSelection.java @@ -0,0 +1,46 @@ +// Code generated from OpenAPI specs by Databricks SDK Generator. 
DO NOT EDIT. + +package com.databricks.sdk.service.ml; + +import com.databricks.sdk.support.Generated; +import com.databricks.sdk.support.ToStringer; +import com.fasterxml.jackson.annotation.JsonProperty; +import java.util.Objects; + +/** + * A ColumnSelection function, equivalent to the LAST() record of an entity over a lifetime + * ContinuousWindow + */ +@Generated +public class ColumnSelection { + /** Column name from source to select as the feature value. */ + @JsonProperty("column") + private String column; + + public ColumnSelection setColumn(String column) { + this.column = column; + return this; + } + + public String getColumn() { + return column; + } + + @Override + public boolean equals(Object o) { + if (this == o) return true; + if (o == null || getClass() != o.getClass()) return false; + ColumnSelection that = (ColumnSelection) o; + return Objects.equals(column, that.column); + } + + @Override + public int hashCode() { + return Objects.hash(column); + } + + @Override + public String toString() { + return new ToStringer(ColumnSelection.class).add("column", column).toString(); + } +} diff --git a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/ml/Function.java b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/ml/Function.java index 46a667417..c896f7bea 100755 --- a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/ml/Function.java +++ b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/ml/Function.java @@ -14,6 +14,10 @@ public class Function { @JsonProperty("aggregation_function") private AggregationFunction aggregationFunction; + /** Selects the latest value of a single column in a data source */ + @JsonProperty("column_selection") + private ColumnSelection columnSelection; + /** * Deprecated: Use the function oneof with AggregationFunction instead. Kept for backwards * compatibility. Extra parameters for parameterized functions. 
@@ -37,6 +41,15 @@ public AggregationFunction getAggregationFunction() { return aggregationFunction; } + public Function setColumnSelection(ColumnSelection columnSelection) { + this.columnSelection = columnSelection; + return this; + } + + public ColumnSelection getColumnSelection() { + return columnSelection; + } + public Function setExtraParameters(Collection extraParameters) { this.extraParameters = extraParameters; return this; @@ -61,19 +74,21 @@ public boolean equals(Object o) { if (o == null || getClass() != o.getClass()) return false; Function that = (Function) o; return Objects.equals(aggregationFunction, that.aggregationFunction) + && Objects.equals(columnSelection, that.columnSelection) && Objects.equals(extraParameters, that.extraParameters) && Objects.equals(functionType, that.functionType); } @Override public int hashCode() { - return Objects.hash(aggregationFunction, extraParameters, functionType); + return Objects.hash(aggregationFunction, columnSelection, extraParameters, functionType); } @Override public String toString() { return new ToStringer(Function.class) .add("aggregationFunction", aggregationFunction) + .add("columnSelection", columnSelection) .add("extraParameters", extraParameters) .add("functionType", functionType) .toString(); diff --git a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/pipelines/DeletePipelineRequest.java b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/pipelines/DeletePipelineRequest.java index 41e5d3417..59948761f 100755 --- a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/pipelines/DeletePipelineRequest.java +++ b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/pipelines/DeletePipelineRequest.java @@ -10,6 +10,14 @@ @Generated public class DeletePipelineRequest { + /** + * If false, pipeline deletion will not cascade to its datasets (MVs, STs, Views). By default, + * this parameter will be true and all tables will be deleted with the pipeline. 
+ */ + @JsonIgnore + @QueryParam("cascade") + private Boolean cascade; + /** * If true, deletion will proceed even if resource cleanup fails. By default, deletion will fail * if resources cleanup is required but fails. @@ -21,6 +29,15 @@ public class DeletePipelineRequest { /** */ @JsonIgnore private String pipelineId; + public DeletePipelineRequest setCascade(Boolean cascade) { + this.cascade = cascade; + return this; + } + + public Boolean getCascade() { + return cascade; + } + public DeletePipelineRequest setForce(Boolean force) { this.force = force; return this; @@ -44,17 +61,20 @@ public boolean equals(Object o) { if (this == o) return true; if (o == null || getClass() != o.getClass()) return false; DeletePipelineRequest that = (DeletePipelineRequest) o; - return Objects.equals(force, that.force) && Objects.equals(pipelineId, that.pipelineId); + return Objects.equals(cascade, that.cascade) + && Objects.equals(force, that.force) + && Objects.equals(pipelineId, that.pipelineId); } @Override public int hashCode() { - return Objects.hash(force, pipelineId); + return Objects.hash(cascade, force, pipelineId); } @Override public String toString() { return new ToStringer(DeletePipelineRequest.class) + .add("cascade", cascade) .add("force", force) .add("pipelineId", pipelineId) .toString(); diff --git a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/postgres/Catalog.java b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/postgres/Catalog.java new file mode 100755 index 000000000..3b9114c01 --- /dev/null +++ b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/postgres/Catalog.java @@ -0,0 +1,124 @@ +// Code generated from OpenAPI specs by Databricks SDK Generator. DO NOT EDIT. 
+ +package com.databricks.sdk.service.postgres; + +import com.databricks.sdk.support.Generated; +import com.databricks.sdk.support.ToStringer; +import com.fasterxml.jackson.annotation.JsonProperty; +import com.google.protobuf.Timestamp; +import java.util.Objects; + +@Generated +public class Catalog { + /** A timestamp indicating when the catalog was created. */ + @JsonProperty("create_time") + private Timestamp createTime; + + /** + * Output only. The full resource path of the catalog. + * + *

Format: "catalogs/{catalog_id}". + */ + @JsonProperty("name") + private String name; + + /** The desired state of the Catalog. */ + @JsonProperty("spec") + private CatalogCatalogSpec spec; + + /** The observed state of the Catalog. */ + @JsonProperty("status") + private CatalogCatalogStatus status; + + /** System-generated unique identifier for the catalog. */ + @JsonProperty("uid") + private String uid; + + /** A timestamp indicating when the catalog was last updated. */ + @JsonProperty("update_time") + private Timestamp updateTime; + + public Catalog setCreateTime(Timestamp createTime) { + this.createTime = createTime; + return this; + } + + public Timestamp getCreateTime() { + return createTime; + } + + public Catalog setName(String name) { + this.name = name; + return this; + } + + public String getName() { + return name; + } + + public Catalog setSpec(CatalogCatalogSpec spec) { + this.spec = spec; + return this; + } + + public CatalogCatalogSpec getSpec() { + return spec; + } + + public Catalog setStatus(CatalogCatalogStatus status) { + this.status = status; + return this; + } + + public CatalogCatalogStatus getStatus() { + return status; + } + + public Catalog setUid(String uid) { + this.uid = uid; + return this; + } + + public String getUid() { + return uid; + } + + public Catalog setUpdateTime(Timestamp updateTime) { + this.updateTime = updateTime; + return this; + } + + public Timestamp getUpdateTime() { + return updateTime; + } + + @Override + public boolean equals(Object o) { + if (this == o) return true; + if (o == null || getClass() != o.getClass()) return false; + Catalog that = (Catalog) o; + return Objects.equals(createTime, that.createTime) + && Objects.equals(name, that.name) + && Objects.equals(spec, that.spec) + && Objects.equals(status, that.status) + && Objects.equals(uid, that.uid) + && Objects.equals(updateTime, that.updateTime); + } + + @Override + public int hashCode() { + return Objects.hash(createTime, name, spec, status, uid, 
updateTime); + } + + @Override + public String toString() { + return new ToStringer(Catalog.class) + .add("createTime", createTime) + .add("name", name) + .add("spec", spec) + .add("status", status) + .add("uid", uid) + .add("updateTime", updateTime) + .toString(); + } +} diff --git a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/postgres/CatalogCatalogSpec.java b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/postgres/CatalogCatalogSpec.java new file mode 100755 index 000000000..2a5d34e3e --- /dev/null +++ b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/postgres/CatalogCatalogSpec.java @@ -0,0 +1,103 @@ +// Code generated from OpenAPI specs by Databricks SDK Generator. DO NOT EDIT. + +package com.databricks.sdk.service.postgres; + +import com.databricks.sdk.support.Generated; +import com.databricks.sdk.support.ToStringer; +import com.fasterxml.jackson.annotation.JsonProperty; +import java.util.Objects; + +/** The desired state of the Catalog. */ +@Generated +public class CatalogCatalogSpec { + /** + * The resource path of the branch associated with the catalog. + * + *

Format: projects/{project_id}/branches/{branch_id}. + */ + @JsonProperty("branch") + private String branch; + + /** + * If set to true, the specified postgres_database is created on behalf of the calling user if it + * does not already exist. In this case, the calling user has a role created for them in Postgres + * if they do not already have one. + * + *

Defaults to false, meaning that the request fails if the specified postgres_database does + * not already exist. + */ + @JsonProperty("create_database_if_missing") + private Boolean createDatabaseIfMissing; + + /** + * The name of the Postgres database inside the specified Lakebase project and branch to be + * associated with the UC catalog. This database must already exist, unless + * create_database_if_missing is set to true on creation. + * + *

A database can only be registered with one UC catalog at a time. To re-register a database + * with a different catalog, the existing catalog must be deleted first. + * + *

A child branch inherits the fact of parent's registration. This means the same-named + * database in a child branch cannot be registered with a second catalog while the parent's + * registration exists. To allow registering the database of a child branch, drop and recreate the + * database on the child branch. This removes the fact of parent's registration from this branch + * only. + * + *

Doing Point In Time Restore (PITR) prior to the moment before the Postgres DB was registered + * in the Catalog drops the fact of registration of the database. So the user should avoid doing + * so. + */ + @JsonProperty("postgres_database") + private String postgresDatabase; + + public CatalogCatalogSpec setBranch(String branch) { + this.branch = branch; + return this; + } + + public String getBranch() { + return branch; + } + + public CatalogCatalogSpec setCreateDatabaseIfMissing(Boolean createDatabaseIfMissing) { + this.createDatabaseIfMissing = createDatabaseIfMissing; + return this; + } + + public Boolean getCreateDatabaseIfMissing() { + return createDatabaseIfMissing; + } + + public CatalogCatalogSpec setPostgresDatabase(String postgresDatabase) { + this.postgresDatabase = postgresDatabase; + return this; + } + + public String getPostgresDatabase() { + return postgresDatabase; + } + + @Override + public boolean equals(Object o) { + if (this == o) return true; + if (o == null || getClass() != o.getClass()) return false; + CatalogCatalogSpec that = (CatalogCatalogSpec) o; + return Objects.equals(branch, that.branch) + && Objects.equals(createDatabaseIfMissing, that.createDatabaseIfMissing) + && Objects.equals(postgresDatabase, that.postgresDatabase); + } + + @Override + public int hashCode() { + return Objects.hash(branch, createDatabaseIfMissing, postgresDatabase); + } + + @Override + public String toString() { + return new ToStringer(CatalogCatalogSpec.class) + .add("branch", branch) + .add("createDatabaseIfMissing", createDatabaseIfMissing) + .add("postgresDatabase", postgresDatabase) + .toString(); + } +} diff --git a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/postgres/CatalogCatalogStatus.java b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/postgres/CatalogCatalogStatus.java new file mode 100755 index 000000000..7e39a11b2 --- /dev/null +++ 
b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/postgres/CatalogCatalogStatus.java @@ -0,0 +1,83 @@ +// Code generated from OpenAPI specs by Databricks SDK Generator. DO NOT EDIT. + +package com.databricks.sdk.service.postgres; + +import com.databricks.sdk.support.Generated; +import com.databricks.sdk.support.ToStringer; +import com.fasterxml.jackson.annotation.JsonProperty; +import java.util.Objects; + +/** The observed state of the Catalog. */ +@Generated +public class CatalogCatalogStatus { + /** + * The resource path of the branch associated with the catalog. + * + *

Format: projects/{project_id}/branches/{branch_id}. + */ + @JsonProperty("branch") + private String branch; + + /** The name of the Postgres database associated with the catalog. */ + @JsonProperty("postgres_database") + private String postgresDatabase; + + /** + * The resource path of the project associated with the catalog. + * + *

Format: projects/{project_id}. + */ + @JsonProperty("project") + private String project; + + public CatalogCatalogStatus setBranch(String branch) { + this.branch = branch; + return this; + } + + public String getBranch() { + return branch; + } + + public CatalogCatalogStatus setPostgresDatabase(String postgresDatabase) { + this.postgresDatabase = postgresDatabase; + return this; + } + + public String getPostgresDatabase() { + return postgresDatabase; + } + + public CatalogCatalogStatus setProject(String project) { + this.project = project; + return this; + } + + public String getProject() { + return project; + } + + @Override + public boolean equals(Object o) { + if (this == o) return true; + if (o == null || getClass() != o.getClass()) return false; + CatalogCatalogStatus that = (CatalogCatalogStatus) o; + return Objects.equals(branch, that.branch) + && Objects.equals(postgresDatabase, that.postgresDatabase) + && Objects.equals(project, that.project); + } + + @Override + public int hashCode() { + return Objects.hash(branch, postgresDatabase, project); + } + + @Override + public String toString() { + return new ToStringer(CatalogCatalogStatus.class) + .add("branch", branch) + .add("postgresDatabase", postgresDatabase) + .add("project", project) + .toString(); + } +} diff --git a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/postgres/CatalogOperationMetadata.java b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/postgres/CatalogOperationMetadata.java new file mode 100755 index 000000000..3e262417d --- /dev/null +++ b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/postgres/CatalogOperationMetadata.java @@ -0,0 +1,28 @@ +// Code generated from OpenAPI specs by Databricks SDK Generator. DO NOT EDIT. 
+ +package com.databricks.sdk.service.postgres; + +import com.databricks.sdk.support.Generated; +import com.databricks.sdk.support.ToStringer; +import java.util.Objects; + +@Generated +public class CatalogOperationMetadata { + + @Override + public boolean equals(Object o) { + if (this == o) return true; + if (o == null || getClass() != o.getClass()) return false; + return true; + } + + @Override + public int hashCode() { + return Objects.hash(); + } + + @Override + public String toString() { + return new ToStringer(CatalogOperationMetadata.class).toString(); + } +} diff --git a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/postgres/CreateCatalogOperation.java b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/postgres/CreateCatalogOperation.java new file mode 100755 index 000000000..ddbe2780d --- /dev/null +++ b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/postgres/CreateCatalogOperation.java @@ -0,0 +1,162 @@ +// Code generated from OpenAPI specs by Databricks SDK Generator. DO NOT EDIT. +package com.databricks.sdk.service.postgres; + +import com.databricks.sdk.core.DatabricksException; +import com.databricks.sdk.core.utils.SerDeUtils; +import com.databricks.sdk.service.common.lro.LroOptions; +import com.databricks.sdk.support.Generated; +import com.fasterxml.jackson.core.JsonProcessingException; +import com.fasterxml.jackson.databind.JsonNode; +import com.fasterxml.jackson.databind.ObjectMapper; +import java.time.Duration; +import java.util.Optional; +import java.util.concurrent.TimeoutException; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +/** + * Wrapper for interacting with a long-running createCatalog operation. Provides methods to wait for + * completion, check status, cancel, and access metadata. 
+ */ +@Generated +public class CreateCatalogOperation { + private static final Logger LOG = LoggerFactory.getLogger(CreateCatalogOperation.class); + + private final PostgresService impl; + private Operation operation; + private final ObjectMapper objectMapper; + + public CreateCatalogOperation(PostgresService impl, Operation operation) { + this.impl = impl; + this.operation = operation; + this.objectMapper = SerDeUtils.createMapper(); + } + + /** + * Wait for the operation to complete and return the resulting Catalog. Waits indefinitely if no + * timeout is specified. + * + * @return the created Catalog + * @throws TimeoutException if the operation doesn't complete within the timeout + * @throws DatabricksException if the operation fails + */ + public Catalog waitForCompletion() throws TimeoutException { + return waitForCompletion(Optional.empty()); + } + + /** + * Wait for the operation to complete and return the resulting Catalog. + * + * @param options the options for configuring the wait behavior, can be empty for defaults + * @return the created Catalog + * @throws TimeoutException if the operation doesn't complete within the timeout + * @throws DatabricksException if the operation fails + */ + public Catalog waitForCompletion(Optional options) throws TimeoutException { + Optional timeout = options.flatMap(LroOptions::getTimeout); + long deadline = + timeout.isPresent() + ? 
System.currentTimeMillis() + timeout.get().toMillis() + : Long.MAX_VALUE; + String statusMessage = "polling operation..."; + int attempt = 1; + + while (System.currentTimeMillis() < deadline) { + // Refresh the operation state + refreshOperation(); + + if (operation.getDone() != null && operation.getDone()) { + // Operation completed, check for success or failure + if (operation.getError() != null) { + String errorMsg = "unknown error"; + if (operation.getError().getMessage() != null + && !operation.getError().getMessage().isEmpty()) { + errorMsg = operation.getError().getMessage(); + } + + if (operation.getError().getErrorCode() != null) { + errorMsg = String.format("[%s] %s", operation.getError().getErrorCode(), errorMsg); + } + + throw new DatabricksException("Operation failed: " + errorMsg); + } + + // Operation completed successfully, unmarshal response + if (operation.getResponse() == null) { + throw new DatabricksException("Operation completed but no response available"); + } + + try { + JsonNode responseJson = objectMapper.valueToTree(operation.getResponse()); + return objectMapper.treeToValue(responseJson, Catalog.class); + } catch (JsonProcessingException e) { + throw new DatabricksException( + "Failed to unmarshal catalog response: " + e.getMessage(), e); + } + } + + // Operation still in progress, wait before polling again + String prefix = String.format("operation=%s", operation.getName()); + int sleep = Math.min(attempt, 10); // sleep 10s max per attempt + LOG.info("{}: operation in progress (sleeping ~{}s)", prefix, sleep); + + try { + Thread.sleep((long) (sleep * 1000L + Math.random() * 1000)); + } catch (InterruptedException e) { + Thread.currentThread().interrupt(); + throw new DatabricksException("Current thread was interrupted", e); + } + attempt++; + } + + String timeoutMessage = + timeout.isPresent() + ? 
String.format("Operation timed out after %s: %s", timeout.get(), statusMessage) + : String.format("Operation timed out: %s", statusMessage); + throw new TimeoutException(timeoutMessage); + } + + /** + * Get the operation name. + * + * @return the operation name + */ + public String getName() { + return operation.getName(); + } + + /** + * Get the operation metadata. + * + * @return the operation metadata, or null if not available + * @throws DatabricksException if the metadata cannot be deserialized + */ + public CatalogOperationMetadata getMetadata() { + if (operation.getMetadata() == null) { + return null; + } + + try { + JsonNode metadataJson = objectMapper.valueToTree(operation.getMetadata()); + return objectMapper.treeToValue(metadataJson, CatalogOperationMetadata.class); + } catch (JsonProcessingException e) { + throw new DatabricksException("Failed to unmarshal operation metadata: " + e.getMessage(), e); + } + } + + /** + * Check if the operation is done. This method refreshes the operation state before checking. + * + * @return true if the operation is complete, false otherwise + * @throws DatabricksException if the status check fails + */ + public boolean isDone() { + refreshOperation(); + return operation.getDone() != null && operation.getDone(); + } + + /** Refresh the operation state by polling the server. */ + private void refreshOperation() { + operation = impl.getOperation(new GetOperationRequest().setName(operation.getName())); + } +} diff --git a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/postgres/CreateCatalogRequest.java b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/postgres/CreateCatalogRequest.java new file mode 100755 index 000000000..dac7846d2 --- /dev/null +++ b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/postgres/CreateCatalogRequest.java @@ -0,0 +1,64 @@ +// Code generated from OpenAPI specs by Databricks SDK Generator. DO NOT EDIT. 
+ +package com.databricks.sdk.service.postgres; + +import com.databricks.sdk.support.Generated; +import com.databricks.sdk.support.QueryParam; +import com.databricks.sdk.support.ToStringer; +import com.fasterxml.jackson.annotation.JsonIgnore; +import com.fasterxml.jackson.annotation.JsonProperty; +import java.util.Objects; + +@Generated +public class CreateCatalogRequest { + /** */ + @JsonProperty("catalog") + private Catalog catalog; + + /** + * The ID in the Unity Catalog. It becomes the full resource name, for example "my_catalog" + * becomes "catalogs/my_catalog". + */ + @JsonIgnore + @QueryParam("catalog_id") + private String catalogId; + + public CreateCatalogRequest setCatalog(Catalog catalog) { + this.catalog = catalog; + return this; + } + + public Catalog getCatalog() { + return catalog; + } + + public CreateCatalogRequest setCatalogId(String catalogId) { + this.catalogId = catalogId; + return this; + } + + public String getCatalogId() { + return catalogId; + } + + @Override + public boolean equals(Object o) { + if (this == o) return true; + if (o == null || getClass() != o.getClass()) return false; + CreateCatalogRequest that = (CreateCatalogRequest) o; + return Objects.equals(catalog, that.catalog) && Objects.equals(catalogId, that.catalogId); + } + + @Override + public int hashCode() { + return Objects.hash(catalog, catalogId); + } + + @Override + public String toString() { + return new ToStringer(CreateCatalogRequest.class) + .add("catalog", catalog) + .add("catalogId", catalogId) + .toString(); + } +} diff --git a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/postgres/CreateSyncedTableOperation.java b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/postgres/CreateSyncedTableOperation.java new file mode 100755 index 000000000..657c538f0 --- /dev/null +++ b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/postgres/CreateSyncedTableOperation.java @@ -0,0 +1,162 @@ +// Code generated from OpenAPI specs by 
Databricks SDK Generator. DO NOT EDIT. +package com.databricks.sdk.service.postgres; + +import com.databricks.sdk.core.DatabricksException; +import com.databricks.sdk.core.utils.SerDeUtils; +import com.databricks.sdk.service.common.lro.LroOptions; +import com.databricks.sdk.support.Generated; +import com.fasterxml.jackson.core.JsonProcessingException; +import com.fasterxml.jackson.databind.JsonNode; +import com.fasterxml.jackson.databind.ObjectMapper; +import java.time.Duration; +import java.util.Optional; +import java.util.concurrent.TimeoutException; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +/** + * Wrapper for interacting with a long-running createSyncedTable operation. Provides methods to wait + * for completion, check status, cancel, and access metadata. + */ +@Generated +public class CreateSyncedTableOperation { + private static final Logger LOG = LoggerFactory.getLogger(CreateSyncedTableOperation.class); + + private final PostgresService impl; + private Operation operation; + private final ObjectMapper objectMapper; + + public CreateSyncedTableOperation(PostgresService impl, Operation operation) { + this.impl = impl; + this.operation = operation; + this.objectMapper = SerDeUtils.createMapper(); + } + + /** + * Wait for the operation to complete and return the resulting SyncedTable. Waits indefinitely if + * no timeout is specified. + * + * @return the created SyncedTable + * @throws TimeoutException if the operation doesn't complete within the timeout + * @throws DatabricksException if the operation fails + */ + public SyncedTable waitForCompletion() throws TimeoutException { + return waitForCompletion(Optional.empty()); + } + + /** + * Wait for the operation to complete and return the resulting SyncedTable. 
+ * + * @param options the options for configuring the wait behavior, can be empty for defaults + * @return the created SyncedTable + * @throws TimeoutException if the operation doesn't complete within the timeout + * @throws DatabricksException if the operation fails + */ + public SyncedTable waitForCompletion(Optional options) throws TimeoutException { + Optional timeout = options.flatMap(LroOptions::getTimeout); + long deadline = + timeout.isPresent() + ? System.currentTimeMillis() + timeout.get().toMillis() + : Long.MAX_VALUE; + String statusMessage = "polling operation..."; + int attempt = 1; + + while (System.currentTimeMillis() < deadline) { + // Refresh the operation state + refreshOperation(); + + if (operation.getDone() != null && operation.getDone()) { + // Operation completed, check for success or failure + if (operation.getError() != null) { + String errorMsg = "unknown error"; + if (operation.getError().getMessage() != null + && !operation.getError().getMessage().isEmpty()) { + errorMsg = operation.getError().getMessage(); + } + + if (operation.getError().getErrorCode() != null) { + errorMsg = String.format("[%s] %s", operation.getError().getErrorCode(), errorMsg); + } + + throw new DatabricksException("Operation failed: " + errorMsg); + } + + // Operation completed successfully, unmarshal response + if (operation.getResponse() == null) { + throw new DatabricksException("Operation completed but no response available"); + } + + try { + JsonNode responseJson = objectMapper.valueToTree(operation.getResponse()); + return objectMapper.treeToValue(responseJson, SyncedTable.class); + } catch (JsonProcessingException e) { + throw new DatabricksException( + "Failed to unmarshal syncedTable response: " + e.getMessage(), e); + } + } + + // Operation still in progress, wait before polling again + String prefix = String.format("operation=%s", operation.getName()); + int sleep = Math.min(attempt, 10); // sleep 10s max per attempt + LOG.info("{}: operation in 
progress (sleeping ~{}s)", prefix, sleep); + + try { + Thread.sleep((long) (sleep * 1000L + Math.random() * 1000)); + } catch (InterruptedException e) { + Thread.currentThread().interrupt(); + throw new DatabricksException("Current thread was interrupted", e); + } + attempt++; + } + + String timeoutMessage = + timeout.isPresent() + ? String.format("Operation timed out after %s: %s", timeout.get(), statusMessage) + : String.format("Operation timed out: %s", statusMessage); + throw new TimeoutException(timeoutMessage); + } + + /** + * Get the operation name. + * + * @return the operation name + */ + public String getName() { + return operation.getName(); + } + + /** + * Get the operation metadata. + * + * @return the operation metadata, or null if not available + * @throws DatabricksException if the metadata cannot be deserialized + */ + public SyncedTableOperationMetadata getMetadata() { + if (operation.getMetadata() == null) { + return null; + } + + try { + JsonNode metadataJson = objectMapper.valueToTree(operation.getMetadata()); + return objectMapper.treeToValue(metadataJson, SyncedTableOperationMetadata.class); + } catch (JsonProcessingException e) { + throw new DatabricksException("Failed to unmarshal operation metadata: " + e.getMessage(), e); + } + } + + /** + * Check if the operation is done. This method refreshes the operation state before checking. + * + * @return true if the operation is complete, false otherwise + * @throws DatabricksException if the status check fails + */ + public boolean isDone() { + refreshOperation(); + return operation.getDone() != null && operation.getDone(); + } + + /** Refresh the operation state by polling the server. 
*/ + private void refreshOperation() { + operation = impl.getOperation(new GetOperationRequest().setName(operation.getName())); + } +} diff --git a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/postgres/CreateSyncedTableRequest.java b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/postgres/CreateSyncedTableRequest.java new file mode 100755 index 000000000..a37bd66da --- /dev/null +++ b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/postgres/CreateSyncedTableRequest.java @@ -0,0 +1,74 @@ +// Code generated from OpenAPI specs by Databricks SDK Generator. DO NOT EDIT. + +package com.databricks.sdk.service.postgres; + +import com.databricks.sdk.support.Generated; +import com.databricks.sdk.support.QueryParam; +import com.databricks.sdk.support.ToStringer; +import com.fasterxml.jackson.annotation.JsonIgnore; +import com.fasterxml.jackson.annotation.JsonProperty; +import java.util.Objects; + +@Generated +public class CreateSyncedTableRequest { + /** */ + @JsonProperty("synced_table") + private SyncedTable syncedTable; + + /** + * The ID to use for the Synced Table. This becomes the final component of the SyncedTable's + * resource name. ID is required and is the synced table name, containing (catalog, schema, table) + * tuple. Elements of the tuple are the UC entity names. + * + *

Example: "{catalog}.{schema}.{table}" + * + *

synced_table_id represents both of the following: + * + *

1. An online VIEW virtual table in the Unity Catalog accessible via the Lakehouse + * Federation. 2. Postgres table named "{table}" in schema "{schema}" in the connected Postgres + * database + */ + @JsonIgnore + @QueryParam("synced_table_id") + private String syncedTableId; + + public CreateSyncedTableRequest setSyncedTable(SyncedTable syncedTable) { + this.syncedTable = syncedTable; + return this; + } + + public SyncedTable getSyncedTable() { + return syncedTable; + } + + public CreateSyncedTableRequest setSyncedTableId(String syncedTableId) { + this.syncedTableId = syncedTableId; + return this; + } + + public String getSyncedTableId() { + return syncedTableId; + } + + @Override + public boolean equals(Object o) { + if (this == o) return true; + if (o == null || getClass() != o.getClass()) return false; + CreateSyncedTableRequest that = (CreateSyncedTableRequest) o; + return Objects.equals(syncedTable, that.syncedTable) + && Objects.equals(syncedTableId, that.syncedTableId); + } + + @Override + public int hashCode() { + return Objects.hash(syncedTable, syncedTableId); + } + + @Override + public String toString() { + return new ToStringer(CreateSyncedTableRequest.class) + .add("syncedTable", syncedTable) + .add("syncedTableId", syncedTableId) + .toString(); + } +} diff --git a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/postgres/DeleteCatalogOperation.java b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/postgres/DeleteCatalogOperation.java new file mode 100755 index 000000000..460b87d67 --- /dev/null +++ b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/postgres/DeleteCatalogOperation.java @@ -0,0 +1,161 @@ +// Code generated from OpenAPI specs by Databricks SDK Generator. DO NOT EDIT. 
+package com.databricks.sdk.service.postgres; + +import com.databricks.sdk.core.DatabricksException; +import com.databricks.sdk.core.utils.SerDeUtils; +import com.databricks.sdk.service.common.lro.LroOptions; +import com.databricks.sdk.support.Generated; +import com.fasterxml.jackson.core.JsonProcessingException; +import com.fasterxml.jackson.databind.JsonNode; +import com.fasterxml.jackson.databind.ObjectMapper; +import java.time.Duration; +import java.util.Optional; +import java.util.concurrent.TimeoutException; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +/** + * Wrapper for interacting with a long-running deleteCatalog operation. Provides methods to wait for + * completion, check status, cancel, and access metadata. + */ +@Generated +public class DeleteCatalogOperation { + private static final Logger LOG = LoggerFactory.getLogger(DeleteCatalogOperation.class); + + private final PostgresService impl; + private Operation operation; + private final ObjectMapper objectMapper; + + public DeleteCatalogOperation(PostgresService impl, Operation operation) { + this.impl = impl; + this.operation = operation; + this.objectMapper = SerDeUtils.createMapper(); + } + + /** + * Wait for the operation to complete and return the resulting . Waits indefinitely if no timeout + * is specified. + * + * @return the created + * @throws TimeoutException if the operation doesn't complete within the timeout + * @throws DatabricksException if the operation fails + */ + public void waitForCompletion() throws TimeoutException { + waitForCompletion(Optional.empty()); + } + + /** + * Wait for the operation to complete and return the resulting . 
+ * + * @param options the options for configuring the wait behavior, can be empty for defaults + * @return the created + * @throws TimeoutException if the operation doesn't complete within the timeout + * @throws DatabricksException if the operation fails + */ + public void waitForCompletion(Optional options) throws TimeoutException { + Optional timeout = options.flatMap(LroOptions::getTimeout); + long deadline = + timeout.isPresent() + ? System.currentTimeMillis() + timeout.get().toMillis() + : Long.MAX_VALUE; + String statusMessage = "polling operation..."; + int attempt = 1; + + while (System.currentTimeMillis() < deadline) { + // Refresh the operation state + refreshOperation(); + + if (operation.getDone() != null && operation.getDone()) { + // Operation completed, check for success or failure + if (operation.getError() != null) { + String errorMsg = "unknown error"; + if (operation.getError().getMessage() != null + && !operation.getError().getMessage().isEmpty()) { + errorMsg = operation.getError().getMessage(); + } + + if (operation.getError().getErrorCode() != null) { + errorMsg = String.format("[%s] %s", operation.getError().getErrorCode(), errorMsg); + } + + throw new DatabricksException("Operation failed: " + errorMsg); + } + + // Operation completed successfully, unmarshal response + if (operation.getResponse() == null) { + throw new DatabricksException("Operation completed but no response available"); + } + + try { + JsonNode responseJson = objectMapper.valueToTree(operation.getResponse()); + objectMapper.treeToValue(responseJson, Void.class); + } catch (JsonProcessingException e) { + throw new DatabricksException("Failed to unmarshal response: " + e.getMessage(), e); + } + } + + // Operation still in progress, wait before polling again + String prefix = String.format("operation=%s", operation.getName()); + int sleep = Math.min(attempt, 10); // sleep 10s max per attempt + LOG.info("{}: operation in progress (sleeping ~{}s)", prefix, sleep); + + try { + 
Thread.sleep((long) (sleep * 1000L + Math.random() * 1000)); + } catch (InterruptedException e) { + Thread.currentThread().interrupt(); + throw new DatabricksException("Current thread was interrupted", e); + } + attempt++; + } + + String timeoutMessage = + timeout.isPresent() + ? String.format("Operation timed out after %s: %s", timeout.get(), statusMessage) + : String.format("Operation timed out: %s", statusMessage); + throw new TimeoutException(timeoutMessage); + } + + /** + * Get the operation name. + * + * @return the operation name + */ + public String getName() { + return operation.getName(); + } + + /** + * Get the operation metadata. + * + * @return the operation metadata, or null if not available + * @throws DatabricksException if the metadata cannot be deserialized + */ + public CatalogOperationMetadata getMetadata() { + if (operation.getMetadata() == null) { + return null; + } + + try { + JsonNode metadataJson = objectMapper.valueToTree(operation.getMetadata()); + return objectMapper.treeToValue(metadataJson, CatalogOperationMetadata.class); + } catch (JsonProcessingException e) { + throw new DatabricksException("Failed to unmarshal operation metadata: " + e.getMessage(), e); + } + } + + /** + * Check if the operation is done. This method refreshes the operation state before checking. + * + * @return true if the operation is complete, false otherwise + * @throws DatabricksException if the status check fails + */ + public boolean isDone() { + refreshOperation(); + return operation.getDone() != null && operation.getDone(); + } + + /** Refresh the operation state by polling the server. 
*/ + private void refreshOperation() { + operation = impl.getOperation(new GetOperationRequest().setName(operation.getName())); + } +} diff --git a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/postgres/DeleteCatalogRequest.java b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/postgres/DeleteCatalogRequest.java new file mode 100755 index 000000000..9ccfa3839 --- /dev/null +++ b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/postgres/DeleteCatalogRequest.java @@ -0,0 +1,45 @@ +// Code generated from OpenAPI specs by Databricks SDK Generator. DO NOT EDIT. + +package com.databricks.sdk.service.postgres; + +import com.databricks.sdk.support.Generated; +import com.databricks.sdk.support.ToStringer; +import com.fasterxml.jackson.annotation.JsonIgnore; +import java.util.Objects; + +@Generated +public class DeleteCatalogRequest { + /** + * The full resource path of the catalog to delete. + * + *

Format: "catalogs/{catalog_id}". + */ + @JsonIgnore private String name; + + public DeleteCatalogRequest setName(String name) { + this.name = name; + return this; + } + + public String getName() { + return name; + } + + @Override + public boolean equals(Object o) { + if (this == o) return true; + if (o == null || getClass() != o.getClass()) return false; + DeleteCatalogRequest that = (DeleteCatalogRequest) o; + return Objects.equals(name, that.name); + } + + @Override + public int hashCode() { + return Objects.hash(name); + } + + @Override + public String toString() { + return new ToStringer(DeleteCatalogRequest.class).add("name", name).toString(); + } +} diff --git a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/postgres/DeleteSyncedTableOperation.java b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/postgres/DeleteSyncedTableOperation.java new file mode 100755 index 000000000..7d0bf28d0 --- /dev/null +++ b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/postgres/DeleteSyncedTableOperation.java @@ -0,0 +1,161 @@ +// Code generated from OpenAPI specs by Databricks SDK Generator. DO NOT EDIT. +package com.databricks.sdk.service.postgres; + +import com.databricks.sdk.core.DatabricksException; +import com.databricks.sdk.core.utils.SerDeUtils; +import com.databricks.sdk.service.common.lro.LroOptions; +import com.databricks.sdk.support.Generated; +import com.fasterxml.jackson.core.JsonProcessingException; +import com.fasterxml.jackson.databind.JsonNode; +import com.fasterxml.jackson.databind.ObjectMapper; +import java.time.Duration; +import java.util.Optional; +import java.util.concurrent.TimeoutException; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +/** + * Wrapper for interacting with a long-running deleteSyncedTable operation. Provides methods to wait + * for completion, check status, cancel, and access metadata. 
+ */ +@Generated +public class DeleteSyncedTableOperation { + private static final Logger LOG = LoggerFactory.getLogger(DeleteSyncedTableOperation.class); + + private final PostgresService impl; + private Operation operation; + private final ObjectMapper objectMapper; + + public DeleteSyncedTableOperation(PostgresService impl, Operation operation) { + this.impl = impl; + this.operation = operation; + this.objectMapper = SerDeUtils.createMapper(); + } + + /** + * Wait for the operation to complete and return the resulting . Waits indefinitely if no timeout + * is specified. + * + * @return the created + * @throws TimeoutException if the operation doesn't complete within the timeout + * @throws DatabricksException if the operation fails + */ + public void waitForCompletion() throws TimeoutException { + waitForCompletion(Optional.empty()); + } + + /** + * Wait for the operation to complete and return the resulting . + * + * @param options the options for configuring the wait behavior, can be empty for defaults + * @return the created + * @throws TimeoutException if the operation doesn't complete within the timeout + * @throws DatabricksException if the operation fails + */ + public void waitForCompletion(Optional options) throws TimeoutException { + Optional timeout = options.flatMap(LroOptions::getTimeout); + long deadline = + timeout.isPresent() + ? 
System.currentTimeMillis() + timeout.get().toMillis() + : Long.MAX_VALUE; + String statusMessage = "polling operation..."; + int attempt = 1; + + while (System.currentTimeMillis() < deadline) { + // Refresh the operation state + refreshOperation(); + + if (operation.getDone() != null && operation.getDone()) { + // Operation completed, check for success or failure + if (operation.getError() != null) { + String errorMsg = "unknown error"; + if (operation.getError().getMessage() != null + && !operation.getError().getMessage().isEmpty()) { + errorMsg = operation.getError().getMessage(); + } + + if (operation.getError().getErrorCode() != null) { + errorMsg = String.format("[%s] %s", operation.getError().getErrorCode(), errorMsg); + } + + throw new DatabricksException("Operation failed: " + errorMsg); + } + + // Operation completed successfully, unmarshal response + if (operation.getResponse() == null) { + throw new DatabricksException("Operation completed but no response available"); + } + + try { + JsonNode responseJson = objectMapper.valueToTree(operation.getResponse()); + objectMapper.treeToValue(responseJson, Void.class); + } catch (JsonProcessingException e) { + throw new DatabricksException("Failed to unmarshal response: " + e.getMessage(), e); + } + } + + // Operation still in progress, wait before polling again + String prefix = String.format("operation=%s", operation.getName()); + int sleep = Math.min(attempt, 10); // sleep 10s max per attempt + LOG.info("{}: operation in progress (sleeping ~{}s)", prefix, sleep); + + try { + Thread.sleep((long) (sleep * 1000L + Math.random() * 1000)); + } catch (InterruptedException e) { + Thread.currentThread().interrupt(); + throw new DatabricksException("Current thread was interrupted", e); + } + attempt++; + } + + String timeoutMessage = + timeout.isPresent() + ? 
String.format("Operation timed out after %s: %s", timeout.get(), statusMessage) + : String.format("Operation timed out: %s", statusMessage); + throw new TimeoutException(timeoutMessage); + } + + /** + * Get the operation name. + * + * @return the operation name + */ + public String getName() { + return operation.getName(); + } + + /** + * Get the operation metadata. + * + * @return the operation metadata, or null if not available + * @throws DatabricksException if the metadata cannot be deserialized + */ + public SyncedTableOperationMetadata getMetadata() { + if (operation.getMetadata() == null) { + return null; + } + + try { + JsonNode metadataJson = objectMapper.valueToTree(operation.getMetadata()); + return objectMapper.treeToValue(metadataJson, SyncedTableOperationMetadata.class); + } catch (JsonProcessingException e) { + throw new DatabricksException("Failed to unmarshal operation metadata: " + e.getMessage(), e); + } + } + + /** + * Check if the operation is done. This method refreshes the operation state before checking. + * + * @return true if the operation is complete, false otherwise + * @throws DatabricksException if the status check fails + */ + public boolean isDone() { + refreshOperation(); + return operation.getDone() != null && operation.getDone(); + } + + /** Refresh the operation state by polling the server. */ + private void refreshOperation() { + operation = impl.getOperation(new GetOperationRequest().setName(operation.getName())); + } +} diff --git a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/postgres/DeleteSyncedTableRequest.java b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/postgres/DeleteSyncedTableRequest.java new file mode 100755 index 000000000..d5da726de --- /dev/null +++ b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/postgres/DeleteSyncedTableRequest.java @@ -0,0 +1,45 @@ +// Code generated from OpenAPI specs by Databricks SDK Generator. DO NOT EDIT. 
package com.databricks.sdk.service.postgres;

import com.databricks.sdk.support.Generated;
import com.databricks.sdk.support.ToStringer;
import com.fasterxml.jackson.annotation.JsonIgnore;
import java.util.Objects;

/** Request to delete a Synced Table. */
@Generated
public class DeleteSyncedTableRequest {
  /**
   * The full resource name of the synced table, of the format
   * "synced_tables/{catalog}.{schema}.{table}", where (catalog, schema, table) are the UC entity
   * names.
   */
  @JsonIgnore private String name;

  public DeleteSyncedTableRequest setName(String name) {
    this.name = name;
    return this;
  }

  public String getName() {
    return name;
  }

  @Override
  public boolean equals(Object o) {
    if (this == o) return true;
    if (o == null || getClass() != o.getClass()) return false;
    DeleteSyncedTableRequest that = (DeleteSyncedTableRequest) o;
    return Objects.equals(name, that.name);
  }

  @Override
  public int hashCode() {
    return Objects.hash(name);
  }

  @Override
  public String toString() {
    return new ToStringer(DeleteSyncedTableRequest.class).add("name", name).toString();
  }
}
/** Sync state of a synced table relative to its source Delta table. */
@Generated
public class DeltaTableSyncInfo {
  /**
   * The timestamp when the above Delta version was committed in the source Delta table. Note: This
   * is the Delta commit time, not the time the data was written to the synced table.
   */
  // NOTE(review): com.google.protobuf.Timestamp used as a Jackson-mapped field — presumably the
  // SDK's object mapper registers a protobuf serializer for it; confirm.
  @JsonProperty("delta_commit_time")
  private Timestamp deltaCommitTime;

  /** The Delta Lake commit version that was last successfully synced. */
  @JsonProperty("delta_commit_version")
  private Long deltaCommitVersion;

  public DeltaTableSyncInfo setDeltaCommitTime(Timestamp deltaCommitTime) {
    this.deltaCommitTime = deltaCommitTime;
    return this;
  }

  public Timestamp getDeltaCommitTime() {
    return deltaCommitTime;
  }

  public DeltaTableSyncInfo setDeltaCommitVersion(Long deltaCommitVersion) {
    this.deltaCommitVersion = deltaCommitVersion;
    return this;
  }

  public Long getDeltaCommitVersion() {
    return deltaCommitVersion;
  }

  @Override
  public boolean equals(Object o) {
    if (this == o) return true;
    if (o == null || getClass() != o.getClass()) return false;
    DeltaTableSyncInfo that = (DeltaTableSyncInfo) o;
    return Objects.equals(deltaCommitTime, that.deltaCommitTime)
        && Objects.equals(deltaCommitVersion, that.deltaCommitVersion);
  }

  @Override
  public int hashCode() {
    return Objects.hash(deltaCommitTime, deltaCommitVersion);
  }

  @Override
  public String toString() {
    return new ToStringer(DeltaTableSyncInfo.class)
        .add("deltaCommitTime", deltaCommitTime)
        .add("deltaCommitVersion", deltaCommitVersion)
        .toString();
  }
}
package com.databricks.sdk.service.postgres;

import com.databricks.sdk.support.Generated;
import com.databricks.sdk.support.ToStringer;
import com.fasterxml.jackson.annotation.JsonIgnore;
import java.util.Objects;

/** Request to get a Database Catalog. */
@Generated
public class GetCatalogRequest {
  /**
   * The full resource path of the catalog to retrieve.
   *
   * <p>Format: "catalogs/{catalog_id}".
   */
  @JsonIgnore private String name;

  public GetCatalogRequest setName(String name) {
    this.name = name;
    return this;
  }

  public String getName() {
    return name;
  }

  @Override
  public boolean equals(Object o) {
    if (this == o) return true;
    if (o == null || getClass() != o.getClass()) return false;
    GetCatalogRequest that = (GetCatalogRequest) o;
    return Objects.equals(name, that.name);
  }

  @Override
  public int hashCode() {
    return Objects.hash(name);
  }

  @Override
  public String toString() {
    return new ToStringer(GetCatalogRequest.class).add("name", name).toString();
  }
}
/** Request to get a Synced Table. */
@Generated
public class GetSyncedTableRequest {
  /**
   * Format: "synced_tables/{catalog}.{schema}.{table}", where (catalog, schema, table) are the
   * entity names in the Unity Catalog.
   */
  @JsonIgnore private String name;

  public GetSyncedTableRequest setName(String name) {
    this.name = name;
    return this;
  }

  public String getName() {
    return name;
  }

  @Override
  public boolean equals(Object o) {
    if (this == o) return true;
    if (o == null || getClass() != o.getClass()) return false;
    GetSyncedTableRequest that = (GetSyncedTableRequest) o;
    return Objects.equals(name, that.name);
  }

  @Override
  public int hashCode() {
    return Objects.hash(name);
  }

  @Override
  public String toString() {
    return new ToStringer(GetSyncedTableRequest.class).add("name", name).toString();
  }
}
/** Specification of a new pipeline: budget policy and intermediate-file storage location. */
@Generated
public class NewPipelineSpec {
  /** Budget policy to set on the newly created pipeline. */
  @JsonProperty("budget_policy_id")
  private String budgetPolicyId;

  /**
   * UC catalog for the pipeline to store intermediate files (checkpoints, event logs etc). This
   * needs to be a standard catalog where the user has permissions to create Delta tables.
   */
  @JsonProperty("storage_catalog")
  private String storageCatalog;

  /**
   * UC schema for the pipeline to store intermediate files (checkpoints, event logs etc). This
   * needs to be in the standard catalog where the user has permissions to create Delta tables.
   */
  @JsonProperty("storage_schema")
  private String storageSchema;

  public NewPipelineSpec setBudgetPolicyId(String budgetPolicyId) {
    this.budgetPolicyId = budgetPolicyId;
    return this;
  }

  public String getBudgetPolicyId() {
    return budgetPolicyId;
  }

  public NewPipelineSpec setStorageCatalog(String storageCatalog) {
    this.storageCatalog = storageCatalog;
    return this;
  }

  public String getStorageCatalog() {
    return storageCatalog;
  }

  public NewPipelineSpec setStorageSchema(String storageSchema) {
    this.storageSchema = storageSchema;
    return this;
  }

  public String getStorageSchema() {
    return storageSchema;
  }

  @Override
  public boolean equals(Object o) {
    if (this == o) return true;
    if (o == null || getClass() != o.getClass()) return false;
    NewPipelineSpec that = (NewPipelineSpec) o;
    return Objects.equals(budgetPolicyId, that.budgetPolicyId)
        && Objects.equals(storageCatalog, that.storageCatalog)
        && Objects.equals(storageSchema, that.storageSchema);
  }

  @Override
  public int hashCode() {
    return Objects.hash(budgetPolicyId, storageCatalog, storageSchema);
  }

  @Override
  public String toString() {
    return new ToStringer(NewPipelineSpec.class)
        .add("budgetPolicyId", budgetPolicyId)
        .add("storageCatalog", storageCatalog)
        .add("storageSchema", storageSchema)
        .toString();
  }
}
*/ + public CreateCatalogOperation createCatalog(CreateCatalogRequest request) { + Operation operation = impl.createCatalog(request); + return new CreateCatalogOperation(impl, operation); + } + /** * Create a Database. * @@ -74,6 +80,12 @@ public CreateRoleOperation createRole(CreateRoleRequest request) { return new CreateRoleOperation(impl, operation); } + /** Create a Synced Table. */ + public CreateSyncedTableOperation createSyncedTable(CreateSyncedTableRequest request) { + Operation operation = impl.createSyncedTable(request); + return new CreateSyncedTableOperation(impl, operation); + } + public DeleteBranchOperation deleteBranch(String name) { return deleteBranch(new DeleteBranchRequest().setName(name)); } @@ -84,6 +96,16 @@ public DeleteBranchOperation deleteBranch(DeleteBranchRequest request) { return new DeleteBranchOperation(impl, operation); } + public DeleteCatalogOperation deleteCatalog(String name) { + return deleteCatalog(new DeleteCatalogRequest().setName(name)); + } + + /** Delete a Database Catalog. */ + public DeleteCatalogOperation deleteCatalog(DeleteCatalogRequest request) { + Operation operation = impl.deleteCatalog(request); + return new DeleteCatalogOperation(impl, operation); + } + public DeleteDatabaseOperation deleteDatabase(String name) { return deleteDatabase(new DeleteDatabaseRequest().setName(name)); } @@ -124,6 +146,16 @@ public DeleteRoleOperation deleteRole(DeleteRoleRequest request) { return new DeleteRoleOperation(impl, operation); } + public DeleteSyncedTableOperation deleteSyncedTable(String name) { + return deleteSyncedTable(new DeleteSyncedTableRequest().setName(name)); + } + + /** Delete a Synced Table. */ + public DeleteSyncedTableOperation deleteSyncedTable(DeleteSyncedTableRequest request) { + Operation operation = impl.deleteSyncedTable(request); + return new DeleteSyncedTableOperation(impl, operation); + } + /** Generate OAuth credentials for a Postgres database. 
*/ public DatabaseCredential generateDatabaseCredential(GenerateDatabaseCredentialRequest request) { return impl.generateDatabaseCredential(request); @@ -138,6 +170,15 @@ public Branch getBranch(GetBranchRequest request) { return impl.getBranch(request); } + public Catalog getCatalog(String name) { + return getCatalog(new GetCatalogRequest().setName(name)); + } + + /** Get a Database Catalog. */ + public Catalog getCatalog(GetCatalogRequest request) { + return impl.getCatalog(request); + } + public Database getDatabase(String name) { return getDatabase(new GetDatabaseRequest().setName(name)); } @@ -189,6 +230,15 @@ public Role getRole(GetRoleRequest request) { return impl.getRole(request); } + public SyncedTable getSyncedTable(String name) { + return getSyncedTable(new GetSyncedTableRequest().setName(name)); + } + + /** Get a Synced Table. */ + public SyncedTable getSyncedTable(GetSyncedTableRequest request) { + return impl.getSyncedTable(request); + } + public Iterable listBranches(String parent) { return listBranches(new ListBranchesRequest().setParent(parent)); } diff --git a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/postgres/PostgresImpl.java b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/postgres/PostgresImpl.java index d0acc22af..26e5230d5 100755 --- a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/postgres/PostgresImpl.java +++ b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/postgres/PostgresImpl.java @@ -34,6 +34,24 @@ public Operation createBranch(CreateBranchRequest request) { } } + @Override + public Operation createCatalog(CreateCatalogRequest request) { + String path = "/api/2.0/postgres/catalogs"; + try { + Request req = new Request("POST", path, apiClient.serialize(request.getCatalog())); + + ApiClient.setQuery(req, request); + req.withHeader("Accept", "application/json"); + req.withHeader("Content-Type", "application/json"); + if (apiClient.workspaceId() != null) { + 
req.withHeader("X-Databricks-Org-Id", apiClient.workspaceId()); + } + return apiClient.execute(req, Operation.class); + } catch (IOException e) { + throw new DatabricksException("IO error: " + e.getMessage(), e); + } + } + @Override public Operation createDatabase(CreateDatabaseRequest request) { String path = String.format("/api/2.0/postgres/%s/databases", request.getParent()); @@ -106,6 +124,24 @@ public Operation createRole(CreateRoleRequest request) { } } + @Override + public Operation createSyncedTable(CreateSyncedTableRequest request) { + String path = "/api/2.0/postgres/synced_tables"; + try { + Request req = new Request("POST", path, apiClient.serialize(request.getSyncedTable())); + + ApiClient.setQuery(req, request); + req.withHeader("Accept", "application/json"); + req.withHeader("Content-Type", "application/json"); + if (apiClient.workspaceId() != null) { + req.withHeader("X-Databricks-Org-Id", apiClient.workspaceId()); + } + return apiClient.execute(req, Operation.class); + } catch (IOException e) { + throw new DatabricksException("IO error: " + e.getMessage(), e); + } + } + @Override public Operation deleteBranch(DeleteBranchRequest request) { String path = String.format("/api/2.0/postgres/%s", request.getName()); @@ -123,6 +159,23 @@ public Operation deleteBranch(DeleteBranchRequest request) { } } + @Override + public Operation deleteCatalog(DeleteCatalogRequest request) { + String path = String.format("/api/2.0/postgres/%s", request.getName()); + try { + Request req = new Request("DELETE", path); + + ApiClient.setQuery(req, request); + req.withHeader("Accept", "application/json"); + if (apiClient.workspaceId() != null) { + req.withHeader("X-Databricks-Org-Id", apiClient.workspaceId()); + } + return apiClient.execute(req, Operation.class); + } catch (IOException e) { + throw new DatabricksException("IO error: " + e.getMessage(), e); + } + } + @Override public Operation deleteDatabase(DeleteDatabaseRequest request) { String path = 
String.format("/api/2.0/postgres/%s", request.getName()); @@ -191,6 +244,23 @@ public Operation deleteRole(DeleteRoleRequest request) { } } + @Override + public Operation deleteSyncedTable(DeleteSyncedTableRequest request) { + String path = String.format("/api/2.0/postgres/%s", request.getName()); + try { + Request req = new Request("DELETE", path); + + ApiClient.setQuery(req, request); + req.withHeader("Accept", "application/json"); + if (apiClient.workspaceId() != null) { + req.withHeader("X-Databricks-Org-Id", apiClient.workspaceId()); + } + return apiClient.execute(req, Operation.class); + } catch (IOException e) { + throw new DatabricksException("IO error: " + e.getMessage(), e); + } + } + @Override public DatabaseCredential generateDatabaseCredential(GenerateDatabaseCredentialRequest request) { String path = "/api/2.0/postgres/credentials"; @@ -226,6 +296,23 @@ public Branch getBranch(GetBranchRequest request) { } } + @Override + public Catalog getCatalog(GetCatalogRequest request) { + String path = String.format("/api/2.0/postgres/%s", request.getName()); + try { + Request req = new Request("GET", path); + + ApiClient.setQuery(req, request); + req.withHeader("Accept", "application/json"); + if (apiClient.workspaceId() != null) { + req.withHeader("X-Databricks-Org-Id", apiClient.workspaceId()); + } + return apiClient.execute(req, Catalog.class); + } catch (IOException e) { + throw new DatabricksException("IO error: " + e.getMessage(), e); + } + } + @Override public Database getDatabase(GetDatabaseRequest request) { String path = String.format("/api/2.0/postgres/%s", request.getName()); @@ -311,6 +398,23 @@ public Role getRole(GetRoleRequest request) { } } + @Override + public SyncedTable getSyncedTable(GetSyncedTableRequest request) { + String path = String.format("/api/2.0/postgres/%s", request.getName()); + try { + Request req = new Request("GET", path); + + ApiClient.setQuery(req, request); + req.withHeader("Accept", "application/json"); + if 
(apiClient.workspaceId() != null) { + req.withHeader("X-Databricks-Org-Id", apiClient.workspaceId()); + } + return apiClient.execute(req, SyncedTable.class); + } catch (IOException e) { + throw new DatabricksException("IO error: " + e.getMessage(), e); + } + } + @Override public ListBranchesResponse listBranches(ListBranchesRequest request) { String path = String.format("/api/2.0/postgres/%s/branches", request.getParent()); diff --git a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/postgres/PostgresService.java b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/postgres/PostgresService.java index 6de73ad6d..7c3889ab1 100755 --- a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/postgres/PostgresService.java +++ b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/postgres/PostgresService.java @@ -26,6 +26,9 @@ public interface PostgresService { /** Creates a new database branch in the project. */ Operation createBranch(CreateBranchRequest createBranchRequest); + /** Register a Postgres database in the Unity Catalog. */ + Operation createCatalog(CreateCatalogRequest createCatalogRequest); + /** * Create a Database. * @@ -45,9 +48,15 @@ public interface PostgresService { /** Creates a new Postgres role in the branch. */ Operation createRole(CreateRoleRequest createRoleRequest); + /** Create a Synced Table. */ + Operation createSyncedTable(CreateSyncedTableRequest createSyncedTableRequest); + /** Deletes the specified database branch. */ Operation deleteBranch(DeleteBranchRequest deleteBranchRequest); + /** Delete a Database Catalog. */ + Operation deleteCatalog(DeleteCatalogRequest deleteCatalogRequest); + /** Delete a Database. */ Operation deleteDatabase(DeleteDatabaseRequest deleteDatabaseRequest); @@ -60,6 +69,9 @@ public interface PostgresService { /** Deletes the specified Postgres role. */ Operation deleteRole(DeleteRoleRequest deleteRoleRequest); + /** Delete a Synced Table. 
*/ + Operation deleteSyncedTable(DeleteSyncedTableRequest deleteSyncedTableRequest); + /** Generate OAuth credentials for a Postgres database. */ DatabaseCredential generateDatabaseCredential( GenerateDatabaseCredentialRequest generateDatabaseCredentialRequest); @@ -67,6 +79,9 @@ DatabaseCredential generateDatabaseCredential( /** Retrieves information about the specified database branch. */ Branch getBranch(GetBranchRequest getBranchRequest); + /** Get a Database Catalog. */ + Catalog getCatalog(GetCatalogRequest getCatalogRequest); + /** Get a Database. */ Database getDatabase(GetDatabaseRequest getDatabaseRequest); @@ -88,6 +103,9 @@ DatabaseCredential generateDatabaseCredential( */ Role getRole(GetRoleRequest getRoleRequest); + /** Get a Synced Table. */ + SyncedTable getSyncedTable(GetSyncedTableRequest getSyncedTableRequest); + /** Returns a paginated list of database branches in the project. */ ListBranchesResponse listBranches(ListBranchesRequest listBranchesRequest); diff --git a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/postgres/ProjectSpec.java b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/postgres/ProjectSpec.java index 8ac2166dc..dbda32f08 100755 --- a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/postgres/ProjectSpec.java +++ b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/postgres/ProjectSpec.java @@ -28,6 +28,13 @@ public class ProjectSpec { @JsonProperty("custom_tags") private Collection customTags; + /** + * The full resource path for the default branch of the project Format: + * projects/{project_id}/branches/{branch_id} + */ + @JsonProperty("default_branch") + private String defaultBranch; + /** */ @JsonProperty("default_endpoint_settings") private ProjectDefaultEndpointSettings defaultEndpointSettings; @@ -71,6 +78,15 @@ public Collection getCustomTags() { return customTags; } + public ProjectSpec setDefaultBranch(String defaultBranch) { + this.defaultBranch = defaultBranch; + 
return this; + } + + public String getDefaultBranch() { + return defaultBranch; + } + public ProjectSpec setDefaultEndpointSettings( ProjectDefaultEndpointSettings defaultEndpointSettings) { this.defaultEndpointSettings = defaultEndpointSettings; @@ -124,6 +140,7 @@ public boolean equals(Object o) { ProjectSpec that = (ProjectSpec) o; return Objects.equals(budgetPolicyId, that.budgetPolicyId) && Objects.equals(customTags, that.customTags) + && Objects.equals(defaultBranch, that.defaultBranch) && Objects.equals(defaultEndpointSettings, that.defaultEndpointSettings) && Objects.equals(displayName, that.displayName) && Objects.equals(enablePgNativeLogin, that.enablePgNativeLogin) @@ -136,6 +153,7 @@ public int hashCode() { return Objects.hash( budgetPolicyId, customTags, + defaultBranch, defaultEndpointSettings, displayName, enablePgNativeLogin, @@ -148,6 +166,7 @@ public String toString() { return new ToStringer(ProjectSpec.class) .add("budgetPolicyId", budgetPolicyId) .add("customTags", customTags) + .add("defaultBranch", defaultBranch) .add("defaultEndpointSettings", defaultEndpointSettings) .add("displayName", displayName) .add("enablePgNativeLogin", enablePgNativeLogin) diff --git a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/postgres/ProjectStatus.java b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/postgres/ProjectStatus.java index fb0890412..c01a7b285 100755 --- a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/postgres/ProjectStatus.java +++ b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/postgres/ProjectStatus.java @@ -23,6 +23,10 @@ public class ProjectStatus { @JsonProperty("custom_tags") private Collection customTags; + /** The full resource path of the default branch of the project */ + @JsonProperty("default_branch") + private String defaultBranch; + /** The effective default endpoint settings. 
*/ @JsonProperty("default_endpoint_settings") private ProjectDefaultEndpointSettings defaultEndpointSettings; @@ -78,6 +82,15 @@ public Collection getCustomTags() { return customTags; } + public ProjectStatus setDefaultBranch(String defaultBranch) { + this.defaultBranch = defaultBranch; + return this; + } + + public String getDefaultBranch() { + return defaultBranch; + } + public ProjectStatus setDefaultEndpointSettings( ProjectDefaultEndpointSettings defaultEndpointSettings) { this.defaultEndpointSettings = defaultEndpointSettings; @@ -150,6 +163,7 @@ public boolean equals(Object o) { return Objects.equals(branchLogicalSizeLimitBytes, that.branchLogicalSizeLimitBytes) && Objects.equals(budgetPolicyId, that.budgetPolicyId) && Objects.equals(customTags, that.customTags) + && Objects.equals(defaultBranch, that.defaultBranch) && Objects.equals(defaultEndpointSettings, that.defaultEndpointSettings) && Objects.equals(displayName, that.displayName) && Objects.equals(enablePgNativeLogin, that.enablePgNativeLogin) @@ -165,6 +179,7 @@ public int hashCode() { branchLogicalSizeLimitBytes, budgetPolicyId, customTags, + defaultBranch, defaultEndpointSettings, displayName, enablePgNativeLogin, @@ -180,6 +195,7 @@ public String toString() { .add("branchLogicalSizeLimitBytes", branchLogicalSizeLimitBytes) .add("budgetPolicyId", budgetPolicyId) .add("customTags", customTags) + .add("defaultBranch", defaultBranch) .add("defaultEndpointSettings", defaultEndpointSettings) .add("displayName", displayName) .add("enablePgNativeLogin", enablePgNativeLogin) diff --git a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/postgres/ProvisioningInfoState.java b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/postgres/ProvisioningInfoState.java new file mode 100755 index 000000000..9c99b7de8 --- /dev/null +++ b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/postgres/ProvisioningInfoState.java @@ -0,0 +1,15 @@ +// Code generated from OpenAPI specs by 
Databricks SDK Generator. DO NOT EDIT. + +package com.databricks.sdk.service.postgres; + +import com.databricks.sdk.support.Generated; + +@Generated +public enum ProvisioningInfoState { + ACTIVE, + DEGRADED, + DELETING, + FAILED, + PROVISIONING, + UPDATING, +} diff --git a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/postgres/ProvisioningPhase.java b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/postgres/ProvisioningPhase.java new file mode 100755 index 000000000..520039dac --- /dev/null +++ b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/postgres/ProvisioningPhase.java @@ -0,0 +1,13 @@ +// Code generated from OpenAPI specs by Databricks SDK Generator. DO NOT EDIT. + +package com.databricks.sdk.service.postgres; + +import com.databricks.sdk.support.Generated; + +/** Copied from database_table_statuses.proto to decouple SDK packages. */ +@Generated +public enum ProvisioningPhase { + PROVISIONING_PHASE_INDEX_SCAN, + PROVISIONING_PHASE_INDEX_SORT, + PROVISIONING_PHASE_MAIN, +} diff --git a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/postgres/SyncedTable.java b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/postgres/SyncedTable.java new file mode 100755 index 000000000..cde9daee3 --- /dev/null +++ b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/postgres/SyncedTable.java @@ -0,0 +1,117 @@ +// Code generated from OpenAPI specs by Databricks SDK Generator. DO NOT EDIT. + +package com.databricks.sdk.service.postgres; + +import com.databricks.sdk.support.Generated; +import com.databricks.sdk.support.ToStringer; +import com.fasterxml.jackson.annotation.JsonProperty; +import com.google.protobuf.Timestamp; +import java.util.Objects; + +@Generated +public class SyncedTable { + /** */ + @JsonProperty("create_time") + private Timestamp createTime; + + /** + * Output only. The Full resource name of the synced table in Postgres where (catalog, schema, + * table) are the UC entity names. 
/** A Synced Table: a Postgres table kept in sync with a source table in the Unity Catalog. */
@Generated
public class SyncedTable {
  /** Creation timestamp of the synced table. */
  // NOTE(review): com.google.protobuf.Timestamp used as a Jackson-mapped field — presumably the
  // SDK's object mapper registers a protobuf serializer for it; confirm.
  @JsonProperty("create_time")
  private Timestamp createTime;

  /**
   * Output only. The full resource name of the synced table in Postgres where (catalog, schema,
   * table) are the UC entity names.
   *
   * <p>Format "synced_tables/{catalog}.{schema}.{table}"
   *
   * <p>For the corresponding source table in the Unity catalog look for the
   * "source_table_full_name" attribute.
   */
  @JsonProperty("name")
  private String name;

  /**
   * Configuration details of the synced table, such as the source table, scheduling policy, etc.
   * This attribute is specified at creation time and most fields are returned as is on subsequent
   * queries.
   */
  @JsonProperty("spec")
  private SyncedTableSyncedTableSpec spec;

  /** Synced Table data synchronization status. */
  @JsonProperty("status")
  private SyncedTableSyncedTableStatus status;

  /** Unique identifier of the synced table. */
  @JsonProperty("uid")
  private String uid;

  public SyncedTable setCreateTime(Timestamp createTime) {
    this.createTime = createTime;
    return this;
  }

  public Timestamp getCreateTime() {
    return createTime;
  }

  public SyncedTable setName(String name) {
    this.name = name;
    return this;
  }

  public String getName() {
    return name;
  }

  public SyncedTable setSpec(SyncedTableSyncedTableSpec spec) {
    this.spec = spec;
    return this;
  }

  public SyncedTableSyncedTableSpec getSpec() {
    return spec;
  }

  public SyncedTable setStatus(SyncedTableSyncedTableStatus status) {
    this.status = status;
    return this;
  }

  public SyncedTableSyncedTableStatus getStatus() {
    return status;
  }

  public SyncedTable setUid(String uid) {
    this.uid = uid;
    return this;
  }

  public String getUid() {
    return uid;
  }

  @Override
  public boolean equals(Object o) {
    if (this == o) return true;
    if (o == null || getClass() != o.getClass()) return false;
    SyncedTable that = (SyncedTable) o;
    return Objects.equals(createTime, that.createTime)
        && Objects.equals(name, that.name)
        && Objects.equals(spec, that.spec)
        && Objects.equals(status, that.status)
        && Objects.equals(uid, that.uid);
  }

  @Override
  public int hashCode() {
    return Objects.hash(createTime, name, spec, status, uid);
  }

  @Override
  public String toString() {
    return new ToStringer(SyncedTable.class)
        .add("createTime", createTime)
        .add("name", name)
        .add("spec", spec)
        .add("status", status)
        .add("uid", uid)
        .toString();
  }
}
+ +package com.databricks.sdk.service.postgres; + +import com.databricks.sdk.support.Generated; +import com.databricks.sdk.support.ToStringer; +import com.fasterxml.jackson.annotation.JsonProperty; +import java.util.Objects; + +/** Progress information of the Synced Table data synchronization pipeline. */ +@Generated +public class SyncedTablePipelineProgress { + /** The estimated time remaining to complete this update in seconds. */ + @JsonProperty("estimated_completion_time_seconds") + private Double estimatedCompletionTimeSeconds; + + /** + * The source table Delta version that was last processed by the pipeline. The pipeline may not + * have completely processed this version yet. + */ + @JsonProperty("latest_version_currently_processing") + private Long latestVersionCurrentlyProcessing; + + /** The completion ratio of this update. This is a number between 0 and 1. */ + @JsonProperty("sync_progress_completion") + private Double syncProgressCompletion; + + /** The number of rows that have been synced in this update. */ + @JsonProperty("synced_row_count") + private Long syncedRowCount; + + /** + * The total number of rows that need to be synced in this update. This number may be an estimate. 
+ */ + @JsonProperty("total_row_count") + private Long totalRowCount; + + public SyncedTablePipelineProgress setEstimatedCompletionTimeSeconds( + Double estimatedCompletionTimeSeconds) { + this.estimatedCompletionTimeSeconds = estimatedCompletionTimeSeconds; + return this; + } + + public Double getEstimatedCompletionTimeSeconds() { + return estimatedCompletionTimeSeconds; + } + + public SyncedTablePipelineProgress setLatestVersionCurrentlyProcessing( + Long latestVersionCurrentlyProcessing) { + this.latestVersionCurrentlyProcessing = latestVersionCurrentlyProcessing; + return this; + } + + public Long getLatestVersionCurrentlyProcessing() { + return latestVersionCurrentlyProcessing; + } + + public SyncedTablePipelineProgress setSyncProgressCompletion(Double syncProgressCompletion) { + this.syncProgressCompletion = syncProgressCompletion; + return this; + } + + public Double getSyncProgressCompletion() { + return syncProgressCompletion; + } + + public SyncedTablePipelineProgress setSyncedRowCount(Long syncedRowCount) { + this.syncedRowCount = syncedRowCount; + return this; + } + + public Long getSyncedRowCount() { + return syncedRowCount; + } + + public SyncedTablePipelineProgress setTotalRowCount(Long totalRowCount) { + this.totalRowCount = totalRowCount; + return this; + } + + public Long getTotalRowCount() { + return totalRowCount; + } + + @Override + public boolean equals(Object o) { + if (this == o) return true; + if (o == null || getClass() != o.getClass()) return false; + SyncedTablePipelineProgress that = (SyncedTablePipelineProgress) o; + return Objects.equals(estimatedCompletionTimeSeconds, that.estimatedCompletionTimeSeconds) + && Objects.equals(latestVersionCurrentlyProcessing, that.latestVersionCurrentlyProcessing) + && Objects.equals(syncProgressCompletion, that.syncProgressCompletion) + && Objects.equals(syncedRowCount, that.syncedRowCount) + && Objects.equals(totalRowCount, that.totalRowCount); + } + + @Override + public int hashCode() { + return 
Objects.hash( + estimatedCompletionTimeSeconds, + latestVersionCurrentlyProcessing, + syncProgressCompletion, + syncedRowCount, + totalRowCount); + } + + @Override + public String toString() { + return new ToStringer(SyncedTablePipelineProgress.class) + .add("estimatedCompletionTimeSeconds", estimatedCompletionTimeSeconds) + .add("latestVersionCurrentlyProcessing", latestVersionCurrentlyProcessing) + .add("syncProgressCompletion", syncProgressCompletion) + .add("syncedRowCount", syncedRowCount) + .add("totalRowCount", totalRowCount) + .toString(); + } +} diff --git a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/postgres/SyncedTablePosition.java b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/postgres/SyncedTablePosition.java new file mode 100755 index 000000000..f9d6620c5 --- /dev/null +++ b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/postgres/SyncedTablePosition.java @@ -0,0 +1,82 @@ +// Code generated from OpenAPI specs by Databricks SDK Generator. DO NOT EDIT. + +package com.databricks.sdk.service.postgres; + +import com.databricks.sdk.support.Generated; +import com.databricks.sdk.support.ToStringer; +import com.fasterxml.jackson.annotation.JsonProperty; +import com.google.protobuf.Timestamp; +import java.util.Objects; + +@Generated +public class SyncedTablePosition { + /** */ + @JsonProperty("delta_table_sync_info") + private DeltaTableSyncInfo deltaTableSyncInfo; + + /** + * The end timestamp of the most recent successful synchronization. This is the time when the data + * is available in the synced table. + */ + @JsonProperty("sync_end_time") + private Timestamp syncEndTime; + + /** + * The starting timestamp of the most recent successful synchronization from the source table to + * the destination (synced) table. Note this is the starting timestamp of the sync operation, not + * the end time. E.g., for a batch, this is the time when the sync operation started. 
+ */ + @JsonProperty("sync_start_time") + private Timestamp syncStartTime; + + public SyncedTablePosition setDeltaTableSyncInfo(DeltaTableSyncInfo deltaTableSyncInfo) { + this.deltaTableSyncInfo = deltaTableSyncInfo; + return this; + } + + public DeltaTableSyncInfo getDeltaTableSyncInfo() { + return deltaTableSyncInfo; + } + + public SyncedTablePosition setSyncEndTime(Timestamp syncEndTime) { + this.syncEndTime = syncEndTime; + return this; + } + + public Timestamp getSyncEndTime() { + return syncEndTime; + } + + public SyncedTablePosition setSyncStartTime(Timestamp syncStartTime) { + this.syncStartTime = syncStartTime; + return this; + } + + public Timestamp getSyncStartTime() { + return syncStartTime; + } + + @Override + public boolean equals(Object o) { + if (this == o) return true; + if (o == null || getClass() != o.getClass()) return false; + SyncedTablePosition that = (SyncedTablePosition) o; + return Objects.equals(deltaTableSyncInfo, that.deltaTableSyncInfo) + && Objects.equals(syncEndTime, that.syncEndTime) + && Objects.equals(syncStartTime, that.syncStartTime); + } + + @Override + public int hashCode() { + return Objects.hash(deltaTableSyncInfo, syncEndTime, syncStartTime); + } + + @Override + public String toString() { + return new ToStringer(SyncedTablePosition.class) + .add("deltaTableSyncInfo", deltaTableSyncInfo) + .add("syncEndTime", syncEndTime) + .add("syncStartTime", syncStartTime) + .toString(); + } +} diff --git a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/postgres/SyncedTableState.java b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/postgres/SyncedTableState.java new file mode 100755 index 000000000..d90b09b4d --- /dev/null +++ b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/postgres/SyncedTableState.java @@ -0,0 +1,23 @@ +// Code generated from OpenAPI specs by Databricks SDK Generator. DO NOT EDIT. 
/**
 * The state of a synced table. Copied from database_table_statuses.proto to decouple SDK packages.
 *
 * <p>NOTE(review): constant semantics below are inferred from the names only — confirm against the
 * originating proto before relying on them.
 */
@Generated
public enum SyncedTableState {
  // Offline states.
  SYNCED_TABLE_OFFLINE,
  SYNCED_TABLE_OFFLINE_FAILED,
  // Online states, including ongoing/failed pipeline updates.
  SYNCED_TABLE_ONLINE,
  SYNCED_TABLE_ONLINE_CONTINUOUS_UPDATE,
  SYNCED_TABLE_ONLINE_NO_PENDING_UPDATE,
  SYNCED_TABLE_ONLINE_PIPELINE_FAILED,
  SYNCED_TABLE_ONLINE_TRIGGERED_UPDATE,
  SYNCED_TABLE_ONLINE_UPDATING_PIPELINE_RESOURCES,
  // Provisioning states (initial setup before the table is online).
  SYNCED_TABLE_PROVISIONING,
  SYNCED_TABLE_PROVISIONING_INITIAL_SNAPSHOT,
  SYNCED_TABLE_PROVISIONING_PIPELINE_RESOURCES,
}

Format: "projects/{project_id}/branches/{branch_id}". + */ + @JsonProperty("branch") + private String branch; + + /** + * If true, the synced table's logical database and schema resources in PG will be created if they + * do not already exist. The request will fail if this is false and the database/schema do not + * exist. + * + *

Defaults to true if omitted. + */ + @JsonProperty("create_database_objects_if_missing") + private Boolean createDatabaseObjectsIfMissing; + + /** + * ID of an existing pipeline to bin-pack this synced table into. At most one of + * existing_pipeline_id and new_pipeline_spec should be defined. + * + *

The pipeline used for the synced table is returned via the top level pipeline_id attribute. + */ + @JsonProperty("existing_pipeline_id") + private String existingPipelineId; + + /** + * Specification for creating a new pipeline. At most one of existing_pipeline_id and + * new_pipeline_spec should be defined. + * + *

The pipeline used for the synced table is returned via the top level pipeline_id attribute. + */ + @JsonProperty("new_pipeline_spec") + private NewPipelineSpec newPipelineSpec; + + /** + * The Postgres database name where the synced table will be created in. + * + *

If this synced table is created inside a Lakebase Catalog, this attribute can be omitted on + * creation and is inferred from the postgres_database associated with the Lakebase Catalog. If + * specified when inside a Lakebase Catalog, the value must match. + * + *

A value must be specified when creating a synced table inside a Standard Catalog. + */ + @JsonProperty("postgres_database") + private String postgresDatabase; + + /** Primary Key columns to be used for data insert/update in the destination. */ + @JsonProperty("primary_key_columns") + private Collection primaryKeyColumns; + + /** + * The full resource name of the project associated with the table. + * + *

Format: "projects/{project_id}". + */ + @JsonProperty("project") + private String project; + + /** Scheduling policy of the underlying pipeline. */ + @JsonProperty("scheduling_policy") + private SyncedTableSyncedTableSpecSyncedTableSchedulingPolicy schedulingPolicy; + + /** + * Three-part (catalog, schema, table) name of the source Delta table. + * + *

For the corresponding destination table, use any of the two: + * + *

* synced_table_id used at the creation of the SyncedTable * "name" consisting of + * "synced_tables/" prefix and the full name of the destination table. + */ + @JsonProperty("source_table_full_name") + private String sourceTableFullName; + + /** Time series key to deduplicate (tie-break) rows with the same primary key. */ + @JsonProperty("timeseries_key") + private String timeseriesKey; + + public SyncedTableSyncedTableSpec setBranch(String branch) { + this.branch = branch; + return this; + } + + public String getBranch() { + return branch; + } + + public SyncedTableSyncedTableSpec setCreateDatabaseObjectsIfMissing( + Boolean createDatabaseObjectsIfMissing) { + this.createDatabaseObjectsIfMissing = createDatabaseObjectsIfMissing; + return this; + } + + public Boolean getCreateDatabaseObjectsIfMissing() { + return createDatabaseObjectsIfMissing; + } + + public SyncedTableSyncedTableSpec setExistingPipelineId(String existingPipelineId) { + this.existingPipelineId = existingPipelineId; + return this; + } + + public String getExistingPipelineId() { + return existingPipelineId; + } + + public SyncedTableSyncedTableSpec setNewPipelineSpec(NewPipelineSpec newPipelineSpec) { + this.newPipelineSpec = newPipelineSpec; + return this; + } + + public NewPipelineSpec getNewPipelineSpec() { + return newPipelineSpec; + } + + public SyncedTableSyncedTableSpec setPostgresDatabase(String postgresDatabase) { + this.postgresDatabase = postgresDatabase; + return this; + } + + public String getPostgresDatabase() { + return postgresDatabase; + } + + public SyncedTableSyncedTableSpec setPrimaryKeyColumns(Collection primaryKeyColumns) { + this.primaryKeyColumns = primaryKeyColumns; + return this; + } + + public Collection getPrimaryKeyColumns() { + return primaryKeyColumns; + } + + public SyncedTableSyncedTableSpec setProject(String project) { + this.project = project; + return this; + } + + public String getProject() { + return project; + } + + public SyncedTableSyncedTableSpec 
setSchedulingPolicy( + SyncedTableSyncedTableSpecSyncedTableSchedulingPolicy schedulingPolicy) { + this.schedulingPolicy = schedulingPolicy; + return this; + } + + public SyncedTableSyncedTableSpecSyncedTableSchedulingPolicy getSchedulingPolicy() { + return schedulingPolicy; + } + + public SyncedTableSyncedTableSpec setSourceTableFullName(String sourceTableFullName) { + this.sourceTableFullName = sourceTableFullName; + return this; + } + + public String getSourceTableFullName() { + return sourceTableFullName; + } + + public SyncedTableSyncedTableSpec setTimeseriesKey(String timeseriesKey) { + this.timeseriesKey = timeseriesKey; + return this; + } + + public String getTimeseriesKey() { + return timeseriesKey; + } + + @Override + public boolean equals(Object o) { + if (this == o) return true; + if (o == null || getClass() != o.getClass()) return false; + SyncedTableSyncedTableSpec that = (SyncedTableSyncedTableSpec) o; + return Objects.equals(branch, that.branch) + && Objects.equals(createDatabaseObjectsIfMissing, that.createDatabaseObjectsIfMissing) + && Objects.equals(existingPipelineId, that.existingPipelineId) + && Objects.equals(newPipelineSpec, that.newPipelineSpec) + && Objects.equals(postgresDatabase, that.postgresDatabase) + && Objects.equals(primaryKeyColumns, that.primaryKeyColumns) + && Objects.equals(project, that.project) + && Objects.equals(schedulingPolicy, that.schedulingPolicy) + && Objects.equals(sourceTableFullName, that.sourceTableFullName) + && Objects.equals(timeseriesKey, that.timeseriesKey); + } + + @Override + public int hashCode() { + return Objects.hash( + branch, + createDatabaseObjectsIfMissing, + existingPipelineId, + newPipelineSpec, + postgresDatabase, + primaryKeyColumns, + project, + schedulingPolicy, + sourceTableFullName, + timeseriesKey); + } + + @Override + public String toString() { + return new ToStringer(SyncedTableSyncedTableSpec.class) + .add("branch", branch) + .add("createDatabaseObjectsIfMissing", 
/** Scheduling policy of the synced table's underlying pipeline. */
@Generated
public enum SyncedTableSyncedTableSpecSyncedTableSchedulingPolicy {
  // NOTE(review): semantics inferred from names — confirm against the pipeline docs.
  CONTINUOUS, // keep syncing continuously
  SNAPSHOT, // one-time full snapshot
  TRIGGERED, // sync on explicit trigger
}
+ +package com.databricks.sdk.service.postgres; + +import com.databricks.sdk.support.Generated; +import com.databricks.sdk.support.ToStringer; +import com.fasterxml.jackson.annotation.JsonProperty; +import com.google.protobuf.Timestamp; +import java.util.Objects; + +@Generated +public class SyncedTableSyncedTableStatus { + /** The state of the synced table. */ + @JsonProperty("detailed_state") + private SyncedTableState detailedState; + + /** The last source table Delta version that was successfully synced to the synced table. */ + @JsonProperty("last_processed_commit_version") + private Long lastProcessedCommitVersion; + + /** Summary of the last successful synchronization from source to destination. */ + @JsonProperty("last_sync") + private SyncedTablePosition lastSync; + + /** + * The end timestamp of the last time any data was synchronized from the source table to the + * synced table. This is when the data is available in the synced table. + */ + @JsonProperty("last_sync_time") + private Timestamp lastSyncTime; + + /** A text description of the current state of the synced table. */ + @JsonProperty("message") + private String message; + + /** */ + @JsonProperty("ongoing_sync_progress") + private SyncedTablePipelineProgress ongoingSyncProgress; + + /** ID of the associated pipeline. */ + @JsonProperty("pipeline_id") + private String pipelineId; + + /** The current phase of the data synchronization pipeline. */ + @JsonProperty("provisioning_phase") + private ProvisioningPhase provisioningPhase; + + /** The provisioning state of the synced table entity in Unity Catalog. 
*/ + @JsonProperty("unity_catalog_provisioning_state") + private ProvisioningInfoState unityCatalogProvisioningState; + + public SyncedTableSyncedTableStatus setDetailedState(SyncedTableState detailedState) { + this.detailedState = detailedState; + return this; + } + + public SyncedTableState getDetailedState() { + return detailedState; + } + + public SyncedTableSyncedTableStatus setLastProcessedCommitVersion( + Long lastProcessedCommitVersion) { + this.lastProcessedCommitVersion = lastProcessedCommitVersion; + return this; + } + + public Long getLastProcessedCommitVersion() { + return lastProcessedCommitVersion; + } + + public SyncedTableSyncedTableStatus setLastSync(SyncedTablePosition lastSync) { + this.lastSync = lastSync; + return this; + } + + public SyncedTablePosition getLastSync() { + return lastSync; + } + + public SyncedTableSyncedTableStatus setLastSyncTime(Timestamp lastSyncTime) { + this.lastSyncTime = lastSyncTime; + return this; + } + + public Timestamp getLastSyncTime() { + return lastSyncTime; + } + + public SyncedTableSyncedTableStatus setMessage(String message) { + this.message = message; + return this; + } + + public String getMessage() { + return message; + } + + public SyncedTableSyncedTableStatus setOngoingSyncProgress( + SyncedTablePipelineProgress ongoingSyncProgress) { + this.ongoingSyncProgress = ongoingSyncProgress; + return this; + } + + public SyncedTablePipelineProgress getOngoingSyncProgress() { + return ongoingSyncProgress; + } + + public SyncedTableSyncedTableStatus setPipelineId(String pipelineId) { + this.pipelineId = pipelineId; + return this; + } + + public String getPipelineId() { + return pipelineId; + } + + public SyncedTableSyncedTableStatus setProvisioningPhase(ProvisioningPhase provisioningPhase) { + this.provisioningPhase = provisioningPhase; + return this; + } + + public ProvisioningPhase getProvisioningPhase() { + return provisioningPhase; + } + + public SyncedTableSyncedTableStatus setUnityCatalogProvisioningState( + 
ProvisioningInfoState unityCatalogProvisioningState) { + this.unityCatalogProvisioningState = unityCatalogProvisioningState; + return this; + } + + public ProvisioningInfoState getUnityCatalogProvisioningState() { + return unityCatalogProvisioningState; + } + + @Override + public boolean equals(Object o) { + if (this == o) return true; + if (o == null || getClass() != o.getClass()) return false; + SyncedTableSyncedTableStatus that = (SyncedTableSyncedTableStatus) o; + return Objects.equals(detailedState, that.detailedState) + && Objects.equals(lastProcessedCommitVersion, that.lastProcessedCommitVersion) + && Objects.equals(lastSync, that.lastSync) + && Objects.equals(lastSyncTime, that.lastSyncTime) + && Objects.equals(message, that.message) + && Objects.equals(ongoingSyncProgress, that.ongoingSyncProgress) + && Objects.equals(pipelineId, that.pipelineId) + && Objects.equals(provisioningPhase, that.provisioningPhase) + && Objects.equals(unityCatalogProvisioningState, that.unityCatalogProvisioningState); + } + + @Override + public int hashCode() { + return Objects.hash( + detailedState, + lastProcessedCommitVersion, + lastSync, + lastSyncTime, + message, + ongoingSyncProgress, + pipelineId, + provisioningPhase, + unityCatalogProvisioningState); + } + + @Override + public String toString() { + return new ToStringer(SyncedTableSyncedTableStatus.class) + .add("detailedState", detailedState) + .add("lastProcessedCommitVersion", lastProcessedCommitVersion) + .add("lastSync", lastSync) + .add("lastSyncTime", lastSyncTime) + .add("message", message) + .add("ongoingSyncProgress", ongoingSyncProgress) + .add("pipelineId", pipelineId) + .add("provisioningPhase", provisioningPhase) + .add("unityCatalogProvisioningState", unityCatalogProvisioningState) + .toString(); + } +}