databrickslabs / lsql

Lightweight SQL execution wrapper only on top of Databricks SDK
https://pypi.org/project/databricks-labs-lsql/

Release v0.7.0 #223

Closed · nfx closed this 1 month ago

nfx commented 1 month ago
github-actions[bot] commented 1 month ago

❌ 31/35 passed, 4 failed, 2 skipped, 11m24s total
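
All four failures below share the same signature: the parametrized snippet never ran, because the ephemeral execution context created on the test cluster stayed Pending and the status endpoint eventually answered 500 with "ContextNotFound", which the SDK surfaces as `InternalError`. The logs trace the `/api/1.2` command execution lifecycle; as a minimal sketch (not lsql code), assuming a configured `WorkspaceClient` and a placeholder cluster id, that lifecycle looks roughly like this with the Databricks SDK for Python:

```python
from databricks.sdk import WorkspaceClient
from databricks.sdk.service.compute import Language

CLUSTER_ID = "DATABRICKS_CLUSTER_ID"  # placeholder, matching the redacted id in the logs

w = WorkspaceClient()  # picks up metadata-service auth in CI, a profile or PAT locally

# POST /api/1.2/contexts/create, then poll /api/1.2/contexts/status until Running.
# In the failing runs this poll is where Pending turns into a 500 "ContextNotFound",
# surfacing as databricks.sdk.errors.platform.InternalError.
ctx = w.command_execution.create(cluster_id=CLUSTER_ID, language=Language.PYTHON).result()

# Only with a Running context can a snippet be executed on the driver.
cmd = w.command_execution.execute(
    cluster_id=CLUSTER_ID,
    context_id=ctx.id,
    language=Language.PYTHON,
    command='print("PASSED")',
).result()
print(cmd.status, cmd.results.data if cmd.results else None)

w.command_execution.destroy(cluster_id=CLUSTER_ID, context_id=ctx.id)
```

In the failing runs it is the `.result()` wait inside `create()` that raises, i.e. the error happens before the test payload ever reaches the cluster.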

❌ test_runtime_backend_errors_handled (expects NotFound from `execute("USE __NON_EXISTENT__")`): databricks.sdk.errors.platform.InternalError: ContextNotFound (12.296s)

Parametrized snippet:

```python
from databricks.labs.lsql.backends import RuntimeBackend
from databricks.sdk.errors import NotFound
backend = RuntimeBackend()
try:
    backend.execute("USE __NON_EXISTENT__")
    return "FAILED"
except NotFound as e:
    return "PASSED"
```

Log highlights (gw0, linux, Python 3.10.14): metadata-service authentication was configured and `GET /api/2.0/preview/scim/v2/Me` identified the labs-runtime-identity principal; the wheel `databricks_labs_lsql-0.6.1+320240715184219` was built and uploaded to `/Users/4106dc97-a963-48f0-a079-a578238959a6/.DGdk/wheels` (the first import returned 404 RESOURCE_DOES_NOT_EXIST, after which the folder was created and the wheel and `version.json` uploaded); `GET /api/2.0/clusters/get` reported the single-node 15.3.x cluster DATABRICKS_CLUSTER_ID as RUNNING; `POST /api/1.2/contexts/create` returned context 675259720166632092; two status polls returned Pending, and the third returned 500 Internal Server Error with "ContextNotFound: ", which surfaced as the InternalError above.
❌ test_runtime_backend_errors_handled (expects Unknown from `fetch("SHOW GRANTS ON METASTORE")`): databricks.sdk.errors.platform.InternalError: ContextNotFound (1m24.997s)

Parametrized snippet:

```python
from databricks.labs.lsql.backends import RuntimeBackend
from databricks.sdk.errors import Unknown
backend = RuntimeBackend()
try:
    grants = backend.fetch("SHOW GRANTS ON METASTORE")
    return "FAILED"
except Unknown:
    return "PASSED"
```

Log highlights (gw5, linux, Python 3.10.14): same setup sequence as above, with the wheel `databricks_labs_lsql-0.6.1+320240715184216` uploaded under `/Users/4106dc97-a963-48f0-a079-a578238959a6/.I0GU/wheels` and the cluster reported RUNNING; `POST /api/1.2/contexts/create` returned context 8202255721984383245; several status polls returned Pending, then the status endpoint started answering 500 "ClusterNotReadyException: Cluster DATABRICKS_CLUSTER_ID not currently ready for driver client", which the SDK retried with backoff from ~1s to ~10s before a final 500 "ContextNotFound: " ended the run.
❌ test_runtime_backend_errors_handled (expects BadRequest from `execute("SHWO DTABASES")`, an intentionally malformed statement): databricks.sdk.errors.platform.InternalError: ContextNotFound (1m28.331s)

Parametrized snippet:

```python
from databricks.labs.lsql.backends import RuntimeBackend
from databricks.sdk.errors import BadRequest
backend = RuntimeBackend()
try:
    backend.execute("SHWO DTABASES")
    return "FAILED"
except BadRequest:
    return "PASSED"
```

Log highlights (gw1, linux, Python 3.10.14): same setup sequence, with the wheel `databricks_labs_lsql-0.6.1+320240715184213` uploaded under `/Users/4106dc97-a963-48f0-a079-a578238959a6/.vKwd/wheels` and the cluster reported RUNNING; context 317641431459748351 was created, stayed Pending through several polls, then hit the same ClusterNotReadyException retry loop (~1s to ~10s backoff) and a final 500 "ContextNotFound: ".
❌ test_runtime_backend_errors_handled[\nfrom databricks.labs.lsql.backends import RuntimeBackend\nfrom databricks.sdk.errors import NotFound\nbackend = RuntimeBackend()\ntry:\n query_response = backend.fetch("DESCRIBE __RANDOM__")\n return "FAILED"\nexcept NotFound as e:\n return "PASSED"\n]: databricks.sdk.errors.platform.InternalError: ContextNotFound: (1m25.225s) ``` databricks.sdk.errors.platform.InternalError: ContextNotFound: 18:42 DEBUG [databricks.sdk] Loaded from environment 18:42 DEBUG [databricks.sdk] Ignoring pat auth, because metadata-service is preferred 18:42 DEBUG [databricks.sdk] Ignoring basic auth, because metadata-service is preferred 18:42 DEBUG [databricks.sdk] Attempting to configure auth: metadata-service 18:42 INFO [databricks.sdk] Using Databricks Metadata Service authentication [gw2] linux -- Python 3.10.14 /home/runner/work/lsql/lsql/.venv/bin/python 18:42 DEBUG [databricks.sdk] Loaded from environment 18:42 DEBUG [databricks.sdk] Ignoring pat auth, because metadata-service is preferred 18:42 DEBUG [databricks.sdk] Ignoring basic auth, because metadata-service is preferred 18:42 DEBUG [databricks.sdk] Attempting to configure auth: metadata-service 18:42 INFO [databricks.sdk] Using Databricks Metadata Service authentication 18:42 DEBUG [databricks.sdk] GET /api/2.0/preview/scim/v2/Me < 200 OK < { < "active": true, < "displayName": "labs-runtime-identity", < "emails": [ < { < "primary": true, < "type": "work", < "value": "**REDACTED**" < } < ], < "externalId": "d0f9bd2c-5651-45fd-b648-12a3fc6375c4", < "groups": [ < { < "$ref": "Groups/300667344111082", < "display": "labs.scope.runtime", < "type": "direct", < "value": "**REDACTED**" < } < ], < "id": "4643477475987733", < "name": { < "givenName": "labs-runtime-identity" < }, < "schemas": [ < "urn:ietf:params:scim:schemas:core:2.0:User", < "... (1 additional elements)" < ], < "userName": "4106dc97-a963-48f0-a079-a578238959a6" < } 18:42 DEBUG [databricks.labs.blueprint.wheels] Building wheel for /tmp/tmp6zngvvi2/working-copy in /tmp/tmp6zngvvi2 18:42 DEBUG [databricks.labs.blueprint.installation] Uploading: /Users/4106dc97-a963-48f0-a079-a578238959a6/.HIa4/wheels/databricks_labs_lsql-0.6.1+320240715184217-py3-none-any.whl 18:42 DEBUG [databricks.sdk] POST /api/2.0/workspace/import > [raw stream] < 404 Not Found < { < "error_code": "RESOURCE_DOES_NOT_EXIST", < "message": "The parent folder (/Users/4106dc97-a963-48f0-a079-a578238959a6/.HIa4/wheels) does not exist." 
< } 18:42 DEBUG [databricks.labs.blueprint.installation] Creating missing folders: /Users/4106dc97-a963-48f0-a079-a578238959a6/.HIa4/wheels 18:42 DEBUG [databricks.sdk] POST /api/2.0/workspace/mkdirs > { > "path": "/Users/4106dc97-a963-48f0-a079-a578238959a6/.HIa4/wheels" > } < 200 OK < {} 18:42 DEBUG [databricks.sdk] POST /api/2.0/workspace/import > [raw stream] < 200 OK < { < "object_id": 2329992558020842 < } 18:42 DEBUG [databricks.labs.blueprint.installation] Converting Version into JSON format 18:42 DEBUG [databricks.labs.blueprint.installation] Uploading: /Users/4106dc97-a963-48f0-a079-a578238959a6/.HIa4/version.json 18:42 DEBUG [databricks.sdk] POST /api/2.0/workspace/import > [raw stream] < 200 OK < { < "object_id": 2329992558020843 < } 18:42 DEBUG [databricks.sdk] GET /api/2.0/clusters/get?cluster_id=DATABRICKS_CLUSTER_ID < 200 OK < { < "autotermination_minutes": 60, < "CLOUD_ENV_attributes": { < "availability": "SPOT_WITH_FALLBACK_AZURE", < "first_on_demand": 2147483647, < "spot_bid_max_price": -1.0 < }, < "cluster_cores": 4.0, < "cluster_id": "DATABRICKS_CLUSTER_ID", < "cluster_memory_mb": 16384, < "cluster_name": "Scoped MSI Cluster: runtime (Single Node, Single User)", < "cluster_source": "UI", < "creator_user_name": "serge.smertin@databricks.com", < "custom_tags": { < "ResourceClass": "SingleNode" < }, < "data_security_mode": "SINGLE_USER", < "TEST_SCHEMA_tags": { < "Budget": "opex.sales.labs", < "ClusterId": "DATABRICKS_CLUSTER_ID", < "ClusterName": "Scoped MSI Cluster: runtime (Single Node, Single User)", < "Creator": "serge.smertin@databricks.com", < "DatabricksInstanceGroupId": "-6761583442533957193", < "DatabricksInstancePoolCreatorId": "4183391249163402", < "DatabricksInstancePoolId": "TEST_INSTANCE_POOL_ID", < "Owner": "labs-oss@databricks.com", < "Vendor": "Databricks" < }, < "disk_spec": {}, < "driver": { < "host_private_ip": "10.179.0.21", < "instance_id": "1cfbff98af7b4e89aeaf078c314e1c62", < "node_attributes": { < "is_spot": false < }, < "node_id": "93dcc92002f143cf924bdd6cc13e2de8", < "private_ip": "10.179.2.20", < "start_timestamp": 1721060624496 < }, < "driver_healthy": true, < "driver_instance_pool_id": "TEST_INSTANCE_POOL_ID", < "driver_instance_source": { < "instance_pool_id": "TEST_INSTANCE_POOL_ID" < }, < "driver_node_type_id": "Standard_D4s_v3", < "effective_spark_version": "15.3.x-scala2.12", < "enable_elastic_disk": true, < "enable_local_disk_encryption": false, < "init_scripts_safe_mode": false, < "instance_pool_id": "TEST_INSTANCE_POOL_ID", < "instance_source": { < "instance_pool_id": "TEST_INSTANCE_POOL_ID" < }, < "jdbc_port": 10000, < "last_activity_time": 1721060793084, < "last_restarted_time": 1721060742113, < "last_state_loss_time": 1721060742044, < "node_type_id": "Standard_D4s_v3", < "num_workers": 0, < "pinned_by_user_name": "4183391249163402", < "single_user_name": "4106dc97-a963-48f0-a079-a578238959a6", < "spark_conf": { < "spark.databricks.cluster.profile": "singleNode", < "spark.master": "local[*]" < }, < "spark_context_id": 2482239588115337042, < "spark_version": "15.3.x-scala2.12", < "spec": { < "autotermination_minutes": 60, < "cluster_name": "Scoped MSI Cluster: runtime (Single Node, Single User)", < "custom_tags": { < "ResourceClass": "SingleNode" < }, < "data_security_mode": "SINGLE_USER", < "instance_pool_id": "TEST_INSTANCE_POOL_ID", < "num_workers": 0, < "single_user_name": "4106dc97-a963-48f0-a079-a578238959a6", < "spark_conf": { < "spark.databricks.cluster.profile": "singleNode", < "spark.master": "local[*]" < }, < 
"spark_version": "15.3.x-scala2.12" < }, < "start_time": 1720469141075, < "state": "RUNNING", < "state_message": "" < } 18:42 DEBUG [databricks.sdk] POST /api/1.2/contexts/create > { > "clusterId": "DATABRICKS_CLUSTER_ID", > "language": "python" > } < 200 OK < { < "id": "3231857345767419895" < } 18:42 DEBUG [databricks.sdk] GET /api/1.2/contexts/status?clusterId=DATABRICKS_CLUSTER_ID&contextId=3231857345767419895 < 200 OK < { < "id": "3231857345767419895", < "status": "Pending" < } 18:42 DEBUG [databricks.sdk] cluster_id=DATABRICKS_CLUSTER_ID, context_id=3231857345767419895: (ContextStatus.PENDING) current status: ContextStatus.PENDING (sleeping ~1s) 18:42 DEBUG [databricks.sdk] GET /api/1.2/contexts/status?clusterId=DATABRICKS_CLUSTER_ID&contextId=3231857345767419895 < 200 OK < { < "id": "3231857345767419895", < "status": "Pending" < } 18:42 DEBUG [databricks.sdk] cluster_id=DATABRICKS_CLUSTER_ID, context_id=3231857345767419895: (ContextStatus.PENDING) current status: ContextStatus.PENDING (sleeping ~2s) 18:42 DEBUG [databricks.sdk] GET /api/1.2/contexts/status?clusterId=DATABRICKS_CLUSTER_ID&contextId=3231857345767419895 < 200 OK < { < "id": "3231857345767419895", < "status": "Pending" < } 18:42 DEBUG [databricks.sdk] cluster_id=DATABRICKS_CLUSTER_ID, context_id=3231857345767419895: (ContextStatus.PENDING) current status: ContextStatus.PENDING (sleeping ~3s) 18:42 DEBUG [databricks.sdk] GET /api/1.2/contexts/status?clusterId=DATABRICKS_CLUSTER_ID&contextId=3231857345767419895 < 200 OK < { < "id": "3231857345767419895", < "status": "Pending" < } 18:42 DEBUG [databricks.sdk] cluster_id=DATABRICKS_CLUSTER_ID, context_id=3231857345767419895: (ContextStatus.PENDING) current status: ContextStatus.PENDING (sleeping ~4s) 18:42 DEBUG [databricks.sdk] GET /api/1.2/contexts/status?clusterId=DATABRICKS_CLUSTER_ID&contextId=3231857345767419895 < 500 Internal Server Error < { < "error": "ClusterNotReadyException: Cluster DATABRICKS_CLUSTER_ID not currently ready for driver client (cu... (16 more bytes)" < } 18:42 DEBUG [databricks.sdk.retries] Retrying: matched ClusterNotReadyException (sleeping ~1s) 18:42 DEBUG [databricks.sdk] GET /api/1.2/contexts/status?clusterId=DATABRICKS_CLUSTER_ID&contextId=3231857345767419895 < 500 Internal Server Error < { < "error": "ClusterNotReadyException: Cluster DATABRICKS_CLUSTER_ID not currently ready for driver client (cu... (16 more bytes)" < } 18:42 DEBUG [databricks.sdk.retries] Retrying: matched ClusterNotReadyException (sleeping ~2s) 18:42 DEBUG [databricks.sdk] GET /api/1.2/contexts/status?clusterId=DATABRICKS_CLUSTER_ID&contextId=3231857345767419895 < 500 Internal Server Error < { < "error": "ClusterNotReadyException: Cluster DATABRICKS_CLUSTER_ID not currently ready for driver client (cu... (16 more bytes)" < } 18:42 DEBUG [databricks.sdk.retries] Retrying: matched ClusterNotReadyException (sleeping ~3s) 18:42 DEBUG [databricks.sdk] GET /api/1.2/contexts/status?clusterId=DATABRICKS_CLUSTER_ID&contextId=3231857345767419895 < 500 Internal Server Error < { < "error": "ClusterNotReadyException: Cluster DATABRICKS_CLUSTER_ID not currently ready for driver client (cu... (16 more bytes)" < } 18:42 DEBUG [databricks.sdk.retries] Retrying: matched ClusterNotReadyException (sleeping ~4s) 18:42 DEBUG [databricks.sdk] GET /api/1.2/contexts/status?clusterId=DATABRICKS_CLUSTER_ID&contextId=3231857345767419895 < 500 Internal Server Error < { < "error": "ClusterNotReadyException: Cluster DATABRICKS_CLUSTER_ID not currently ready for driver client (cu... 
(16 more bytes)" < } 18:42 DEBUG [databricks.sdk.retries] Retrying: matched ClusterNotReadyException (sleeping ~5s) 18:42 DEBUG [databricks.sdk] GET /api/1.2/contexts/status?clusterId=DATABRICKS_CLUSTER_ID&contextId=3231857345767419895 < 500 Internal Server Error < { < "error": "ClusterNotReadyException: Cluster DATABRICKS_CLUSTER_ID not currently ready for driver client (cu... (16 more bytes)" < } 18:42 DEBUG [databricks.sdk.retries] Retrying: matched ClusterNotReadyException (sleeping ~6s) 18:43 DEBUG [databricks.sdk] GET /api/1.2/contexts/status?clusterId=DATABRICKS_CLUSTER_ID&contextId=3231857345767419895 < 500 Internal Server Error < { < "error": "ClusterNotReadyException: Cluster DATABRICKS_CLUSTER_ID not currently ready for driver client (cu... (16 more bytes)" < } 18:43 DEBUG [databricks.sdk.retries] Retrying: matched ClusterNotReadyException (sleeping ~7s) 18:43 DEBUG [databricks.sdk] GET /api/1.2/contexts/status?clusterId=DATABRICKS_CLUSTER_ID&contextId=3231857345767419895 < 500 Internal Server Error < { < "error": "ClusterNotReadyException: Cluster DATABRICKS_CLUSTER_ID not currently ready for driver client (cu... (16 more bytes)" < } 18:43 DEBUG [databricks.sdk.retries] Retrying: matched ClusterNotReadyException (sleeping ~8s) 18:43 DEBUG [databricks.sdk] GET /api/1.2/contexts/status?clusterId=DATABRICKS_CLUSTER_ID&contextId=3231857345767419895 < 500 Internal Server Error < { < "error": "ClusterNotReadyException: Cluster DATABRICKS_CLUSTER_ID not currently ready for driver client (cu... (16 more bytes)" < } 18:43 DEBUG [databricks.sdk.retries] Retrying: matched ClusterNotReadyException (sleeping ~9s) 18:43 DEBUG [databricks.sdk] GET /api/1.2/contexts/status?clusterId=DATABRICKS_CLUSTER_ID&contextId=3231857345767419895 < 500 Internal Server Error < { < "error": "ClusterNotReadyException: Cluster DATABRICKS_CLUSTER_ID not currently ready for driver client (cu... (16 more bytes)" < } 18:43 DEBUG [databricks.sdk.retries] Retrying: matched ClusterNotReadyException (sleeping ~10s) 18:43 DEBUG [databricks.sdk] GET /api/1.2/contexts/status?clusterId=DATABRICKS_CLUSTER_ID&contextId=3231857345767419895 < 500 Internal Server Error < { < "error": "ContextNotFound: " < } [gw2] linux -- Python 3.10.14 /home/runner/work/lsql/lsql/.venv/bin/python ```
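The second case fails the same way, again before its snippet ever runs. For readability, the snippet embedded in the parametrized test id above is reformatted below; the enclosing function is added only so the bare `return` statements parse outside the remote command harness, and `RuntimeBackend` itself only works inside a Databricks runtime with an active Spark session.

```python
# Reformatted from the test id above; only the wrapping function is new.
from databricks.labs.lsql.backends import RuntimeBackend
from databricks.sdk.errors import NotFound


def check_describe_random() -> str:
    backend = RuntimeBackend()
    try:
        # __RANDOM__ does not exist, so fetch() is expected to raise NotFound
        query_response = backend.fetch("DESCRIBE __RANDOM__")
        return "FAILED"
    except NotFound:
        return "PASSED"
```

Had the execution context come up, this is the path that should have mapped the Spark-side missing-relation error to `NotFound` and returned "PASSED".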

Running from acceptance #314