diff --git a/.github/workflows/release-lakebase.yml b/.github/workflows/release-lakebase.yml new file mode 100644 index 00000000..9bd910f0 --- /dev/null +++ b/.github/workflows/release-lakebase.yml @@ -0,0 +1,61 @@ +name: Release @databricks/lakebase + +on: + workflow_dispatch: + inputs: + dry-run: + description: "Dry run (no actual release)" + required: false + type: boolean + default: false + +jobs: + release: + runs-on: + group: databricks-protected-runner-group + labels: linux-ubuntu-latest + + environment: release + + permissions: + contents: write + id-token: write + + steps: + - name: Checkout + uses: actions/checkout@v4 + with: + fetch-depth: 0 + token: ${{ secrets.GITHUB_TOKEN }} + + - name: Setup Git + run: | + git config user.name "github-actions[bot]" + git config user.email "github-actions[bot]@users.noreply.github.com" + + - name: Setup pnpm + uses: pnpm/action-setup@v4 + + - name: Setup Node.js + uses: actions/setup-node@v4 + with: + node-version: 24 + registry-url: "https://registry.npmjs.org" + cache: "pnpm" + + - name: Update npm + run: npm install -g npm@latest + + - name: Install dependencies + run: pnpm install --frozen-lockfile + + - name: Release + working-directory: packages/lakebase + run: | + if [ "${{ inputs.dry-run }}" == "true" ]; then + pnpm release:dry + else + pnpm release:ci + fi + env: + GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} diff --git a/CLAUDE.md b/CLAUDE.md index 1f380700..9a17f870 100644 --- a/CLAUDE.md +++ b/CLAUDE.md @@ -233,6 +233,39 @@ The AnalyticsPlugin provides SQL query execution: - Built-in caching with configurable TTL - Databricks SQL Warehouse connector for execution +### Lakebase Autoscaling Connector + +**Location:** `packages/appkit/src/connectors/lakebase/` + +AppKit provides `createLakebasePool()` - a factory function that returns a standard `pg.Pool` configured with automatic OAuth token refresh for Databricks Lakebase (OLTP) databases. + +**Key Features:** +- Returns standard `pg.Pool` (compatible with all ORMs) +- Automatic OAuth token refresh (1-hour tokens, 2-minute buffer) +- Token caching to minimize API calls +- Battle-tested pattern (same as AWS RDS IAM authentication) + +**Quick Example:** +```typescript +import { createLakebasePool } from '@databricks/appkit'; + +// Reads from PGHOST, PGDATABASE, LAKEBASE_ENDPOINT env vars +const pool = createLakebasePool(); + +// Standard pg.Pool API +const result = await pool.query('SELECT * FROM users'); +``` + +**ORM Integration:** +Works with Drizzle, Prisma, TypeORM - see [Lakebase Integration Docs](docs/docs/integrations/lakebase.md) for examples. 
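+
+A minimal ORM sketch (Drizzle shown; illustrative, and assumes a `usersTable` schema is already defined elsewhere):
+
+```typescript
+import { drizzle } from 'drizzle-orm/node-postgres';
+import { createLakebasePool } from '@databricks/appkit';
+
+// The factory returns a plain pg.Pool, so it can be handed to Drizzle directly
+const pool = createLakebasePool();
+const db = drizzle(pool);
+
+const users = await db.select().from(usersTable);
+```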
+ +**Architecture:** +- Connector files: `packages/appkit/src/connectors/lakebase/` + - `pool.ts` - Pool factory with OAuth token refresh + - `types.ts` - TypeScript interfaces (`LakebasePoolConfig`) + - `utils.ts` - Helper functions (`generateDatabaseCredential`) + - `auth-types.ts` - Lakebase v2 API types + ### Frontend-Backend Interaction ``` diff --git a/docs/docs/api/appkit/Enumeration.RequestedClaimsPermissionSet.md b/docs/docs/api/appkit/Enumeration.RequestedClaimsPermissionSet.md new file mode 100644 index 00000000..7f9431f8 --- /dev/null +++ b/docs/docs/api/appkit/Enumeration.RequestedClaimsPermissionSet.md @@ -0,0 +1,13 @@ +# Enumeration: RequestedClaimsPermissionSet + +Permission set for Unity Catalog table access + +## Enumeration Members + +### READ\_ONLY + +```ts +READ_ONLY: "READ_ONLY"; +``` + +Read-only access to specified UC tables diff --git a/docs/docs/api/appkit/Function.createLakebasePool.md b/docs/docs/api/appkit/Function.createLakebasePool.md new file mode 100644 index 00000000..c3b5b010 --- /dev/null +++ b/docs/docs/api/appkit/Function.createLakebasePool.md @@ -0,0 +1,20 @@ +# Function: createLakebasePool() + +```ts +function createLakebasePool(config?: Partial): Pool; +``` + +Create a Lakebase pool with appkit's logger integration. +Telemetry automatically uses appkit's OpenTelemetry configuration via global registry. + +## Parameters + +| Parameter | Type | Description | +| ------ | ------ | ------ | +| `config?` | `Partial`\<[`LakebasePoolConfig`](Interface.LakebasePoolConfig.md)\> | Lakebase pool configuration | + +## Returns + +`Pool` + +PostgreSQL pool with appkit integration diff --git a/docs/docs/api/appkit/Function.generateDatabaseCredential.md b/docs/docs/api/appkit/Function.generateDatabaseCredential.md new file mode 100644 index 00000000..01c7755d --- /dev/null +++ b/docs/docs/api/appkit/Function.generateDatabaseCredential.md @@ -0,0 +1,55 @@ +# Function: generateDatabaseCredential() + +```ts +function generateDatabaseCredential(workspaceClient: WorkspaceClient, request: GenerateDatabaseCredentialRequest): Promise; +``` + +Generate OAuth credentials for Postgres database connection using the proper Postgres API. + +This generates a time-limited OAuth token (expires after 1 hour) that can be used +as a password when connecting to Lakebase Postgres databases. 
+
+## Parameters
+
+| Parameter | Type | Description |
+| ------ | ------ | ------ |
+| `workspaceClient` | `WorkspaceClient` | Databricks workspace client for authentication |
+| `request` | [`GenerateDatabaseCredentialRequest`](Interface.GenerateDatabaseCredentialRequest.md) | Request parameters including endpoint path and optional UC claims |
+
+## Returns
+
+`Promise`\<[`DatabaseCredential`](Interface.DatabaseCredential.md)\>
+
+Database credentials with OAuth token and expiration time
+
+## See
+
+https://docs.databricks.com/aws/en/oltp/projects/authentication
+
+## Examples
+
+```typescript
+// Format: projects/{project-id}/branches/{branch-id}/endpoints/{endpoint-id}
+// Note: Use actual IDs from Databricks (project-id is a UUID)
+const credential = await generateDatabaseCredential(workspaceClient, {
+  endpoint: "projects/6bef4151-4b5d-4147-b4d0-c2f4fd5b40db/branches/br-sparkling-tree-y17uj7fn/endpoints/ep-restless-pine-y1ldaht0"
+});
+
+// Use credential.token as the password
+const client = new pg.Client({
+  host: "ep-abc123.database.us-east-1.databricks.com",
+  user: "user@example.com",
+  password: credential.token
+});
+await client.connect();
+```
+
+```typescript
+// Format: projects/{project-id}/branches/{branch-id}/endpoints/{endpoint-id}
+const credential = await generateDatabaseCredential(workspaceClient, {
+  endpoint: "projects/6bef4151-4b5d-4147-b4d0-c2f4fd5b40db/branches/br-sparkling-tree-y17uj7fn/endpoints/ep-restless-pine-y1ldaht0",
+  claims: [{
+    permission_set: RequestedClaimsPermissionSet.READ_ONLY,
+    resources: [{ table_name: "catalog.schema.users" }]
+  }]
+});
+```
diff --git a/docs/docs/api/appkit/Function.getLakebaseOrmConfig.md b/docs/docs/api/appkit/Function.getLakebaseOrmConfig.md
new file mode 100644
index 00000000..4e98e74c
--- /dev/null
+++ b/docs/docs/api/appkit/Function.getLakebaseOrmConfig.md
@@ -0,0 +1,84 @@
+# Function: getLakebaseOrmConfig()
+
+```ts
+function getLakebaseOrmConfig(config?: Partial<LakebasePoolConfig>): {
+  password: string | (() => string) | (() => Promise<string>) | undefined;
+  ssl:
+    | boolean
+    | {
+        rejectUnauthorized: boolean | undefined;
+      };
+  username: string | undefined;
+};
+```
+
+Get Lakebase connection configuration for ORMs that don't accept pg.Pool directly.
+
+Designed for ORMs like TypeORM and Sequelize that need connection parameters
+rather than a pre-configured pool instance.
+
+Returns connection config with field names compatible with common ORMs:
+- `username` instead of `user`
+- Simplified SSL config
+- Password callback support for OAuth token refresh
+
+## Parameters
+
+| Parameter | Type | Description |
+| ------ | ------ | ------ |
+| `config?` | `Partial`\<[`LakebasePoolConfig`](Interface.LakebasePoolConfig.md)\> | Optional configuration (reads from environment if not provided) |
+
+## Returns
+
+```ts
+{
+  password: string | (() => string) | (() => Promise<string>) | undefined;
+  ssl:
+    | boolean
+    | {
+        rejectUnauthorized: boolean | undefined;
+      };
+  username: string | undefined;
+}
+```
+
+ORM-compatible connection configuration
+
+### password
+
+```ts
+password: string | (() => string) | (() => Promise<string>) | undefined;
+```
+
+### ssl
+
+```ts
+ssl:
+  | boolean
+  | {
+      rejectUnauthorized: boolean | undefined;
+    };
+```
+
+### username
+
+```ts
+username: string | undefined = user;
+```
+
+## Example
+
+```typescript
+// TypeORM
+const dataSource = new DataSource({
+  type: 'postgres',
+  ...getLakebaseOrmConfig(),
+  entities: [User],
+  synchronize: true,
+});
+
+// Sequelize
+const sequelize = new Sequelize({
+  dialect: 'postgres',
+  ...getLakebaseOrmConfig(),
+  logging: false,
+});
+```
diff --git a/docs/docs/api/appkit/Function.getLakebasePgConfig.md b/docs/docs/api/appkit/Function.getLakebasePgConfig.md
new file mode 100644
index 00000000..e604f43b
--- /dev/null
+++ b/docs/docs/api/appkit/Function.getLakebasePgConfig.md
@@ -0,0 +1,31 @@
+# Function: getLakebasePgConfig()
+
+```ts
+function getLakebasePgConfig(
+  config?: Partial<LakebasePoolConfig>,
+  telemetry?: DriverTelemetry,
+  logger?: Logger): PoolConfig;
+```
+
+Get Lakebase connection configuration for PostgreSQL clients.
+
+Returns pg.PoolConfig with OAuth token authentication configured.
+Best used with pg.Pool directly or ORMs that accept pg.Pool instances (like Drizzle).
+
+For ORMs that need connection parameters (TypeORM, Sequelize), use getLakebaseOrmConfig() instead.
+
+Used internally by createLakebasePool().
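+
+A minimal usage sketch (illustrative; assumes `PGHOST`, `PGDATABASE`, and `LAKEBASE_ENDPOINT` are set in the environment):
+
+```typescript
+import { Pool } from "pg";
+import { getLakebasePgConfig } from "@databricks/appkit";
+
+// Build a pool directly from the returned PoolConfig
+const pool = new Pool(getLakebasePgConfig());
+const { rows } = await pool.query("SELECT 1");
+```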
+ +## Parameters + +| Parameter | Type | Description | +| ------ | ------ | ------ | +| `config?` | `Partial`\<[`LakebasePoolConfig`](Interface.LakebasePoolConfig.md)\> | Optional configuration (reads from environment if not provided) | +| `telemetry?` | `DriverTelemetry` | Optional pre-initialized telemetry (created internally if not provided) | +| `logger?` | `Logger` | Optional logger (silent if not provided) | + +## Returns + +`PoolConfig` + +PostgreSQL pool configuration with OAuth token refresh diff --git a/docs/docs/api/appkit/Function.getWorkspaceClient.md b/docs/docs/api/appkit/Function.getWorkspaceClient.md new file mode 100644 index 00000000..f8b856b2 --- /dev/null +++ b/docs/docs/api/appkit/Function.getWorkspaceClient.md @@ -0,0 +1,17 @@ +# Function: getWorkspaceClient() + +```ts +function getWorkspaceClient(config: Partial): Promise; +``` + +Get workspace client from config or SDK default auth chain + +## Parameters + +| Parameter | Type | +| ------ | ------ | +| `config` | `Partial`\<[`LakebasePoolConfig`](Interface.LakebasePoolConfig.md)\> | + +## Returns + +`Promise`\<`WorkspaceClient`\> diff --git a/docs/docs/api/appkit/Interface.DatabaseCredential.md b/docs/docs/api/appkit/Interface.DatabaseCredential.md new file mode 100644 index 00000000..b2a0b255 --- /dev/null +++ b/docs/docs/api/appkit/Interface.DatabaseCredential.md @@ -0,0 +1,30 @@ +# Interface: DatabaseCredential + +Database credentials with OAuth token for Postgres connection + +## Properties + +### expire\_time + +```ts +expire_time: string; +``` + +Token expiration time in UTC (ISO 8601 format) +Tokens expire after 1 hour from generation + +#### Example + +```ts +"2026-02-06T17:07:00Z" +``` + +*** + +### token + +```ts +token: string; +``` + +OAuth token to use as the password when connecting to Postgres diff --git a/docs/docs/api/appkit/Interface.GenerateDatabaseCredentialRequest.md b/docs/docs/api/appkit/Interface.GenerateDatabaseCredentialRequest.md new file mode 100644 index 00000000..766a82cc --- /dev/null +++ b/docs/docs/api/appkit/Interface.GenerateDatabaseCredentialRequest.md @@ -0,0 +1,50 @@ +# Interface: GenerateDatabaseCredentialRequest + +Request parameters for generating database OAuth credentials + +## Properties + +### claims? + +```ts +optional claims: RequestedClaims[]; +``` + +Optional claims for fine-grained UC table permissions. +When specified, the token will only grant access to the specified tables. + +#### Example + +```typescript +{ + claims: [{ + permission_set: RequestedClaimsPermissionSet.READ_ONLY, + resources: [{ table_name: "catalog.schema.users" }] + }] +} +``` + +*** + +### endpoint + +```ts +endpoint: string; +``` + +Endpoint resource path with IDs assigned by Databricks. + +All segments are IDs from Databricks (not names you create): +- project-id: UUID format (e.g., `a1b2c3d4-e5f6-4789-a012-b3c4d5e6f789`) +- branch-id: Identifier from Databricks (e.g., `main`, `dev`) +- endpoint-id: Identifier from Databricks (e.g., `primary`, `analytics`) + +Format: `projects/{project-id}/branches/{branch-id}/endpoints/{endpoint-id}` + +**Important:** Copy from Databricks Lakebase UI - do not construct manually. 
+ +#### Example + +```ts +"projects/6bef4151-4b5d-4147-b4d0-c2f4fd5b40db/branches/br-sparkling-tree-y17uj7fn/endpoints/ep-restless-pine-y1ldaht0" +``` diff --git a/docs/docs/api/appkit/Interface.LakebasePoolConfig.md b/docs/docs/api/appkit/Interface.LakebasePoolConfig.md new file mode 100644 index 00000000..19610a9a --- /dev/null +++ b/docs/docs/api/appkit/Interface.LakebasePoolConfig.md @@ -0,0 +1,117 @@ +# Interface: LakebasePoolConfig + +Configuration for creating a Lakebase connection pool + +Supports two authentication methods: +1. OAuth token authentication - Provide workspaceClient + endpoint (automatic token rotation) +2. Native Postgres password authentication - Provide password string or function + +Extends pg.PoolConfig to support all standard PostgreSQL pool options. + +## See + +https://docs.databricks.com/aws/en/oltp/projects/authentication + +## Extends + +- `PoolConfig` + +## Properties + +### endpoint? + +```ts +optional endpoint: string; +``` + +Endpoint resource path for OAuth token generation. + +All segments are IDs assigned by Databricks (not names you create): +- project-id: UUID format (e.g., `a1b2c3d4-e5f6-4789-a012-b3c4d5e6f789`) +- branch-id: Identifier from Databricks (e.g., `main`, `dev`) +- endpoint-id: Identifier from Databricks (e.g., `primary`, `analytics`) + +Format: `projects/{project-id}/branches/{branch-id}/endpoints/{endpoint-id}` + +Required for OAuth authentication (unless password is provided) +Can also be set via LAKEBASE_ENDPOINT environment variable + +#### Example + +```ts +"projects/6bef4151-4b5d-4147-b4d0-c2f4fd5b40db/branches/br-sparkling-tree-y17uj7fn/endpoints/ep-restless-pine-y1ldaht0" +``` + +*** + +### logger? + +```ts +optional logger: Logger | LoggerConfig; +``` + +Optional logger configuration. + +Supports three modes: +1. Logger instance - Use your own logger implementation +2. LoggerConfig - Enable/disable specific log levels (uses console) +3. Undefined - Defaults to error logging only + +#### Examples + +```typescript +import { createLogger } from '@databricks/appkit'; +const pool = createLakebasePool({ + logger: createLogger('connectors:lakebase') +}); +``` + +```typescript +const pool = createLakebasePool({ + logger: { debug: true, info: true, error: true } +}); +``` + +*** + +### sslMode? + +```ts +optional sslMode: "require" | "disable" | "prefer"; +``` + +SSL mode for the connection (convenience helper) +Can also be set via PGSSLMODE environment variable + +#### Default + +```ts +"require" +``` + +*** + +### telemetry? + +```ts +optional telemetry: TelemetryOptions; +``` + +Telemetry configuration + +- `true` or omitted: enable all telemetry (traces, metrics) -- no-op when OTEL is not configured +- `false`: disable all telemetry +- `{ traces?, metrics? }`: fine-grained control + +*** + +### workspaceClient? + +```ts +optional workspaceClient: WorkspaceClient; +``` + +Databricks workspace client for OAuth authentication +If not provided along with endpoint, will attempt to use ServiceContext + +Note: If password is provided, OAuth auth is not used diff --git a/docs/docs/api/appkit/Interface.RequestedClaims.md b/docs/docs/api/appkit/Interface.RequestedClaims.md new file mode 100644 index 00000000..38095e7e --- /dev/null +++ b/docs/docs/api/appkit/Interface.RequestedClaims.md @@ -0,0 +1,24 @@ +# Interface: RequestedClaims + +Optional claims for fine-grained Unity Catalog table permissions +When specified, the returned token will be scoped to only the requested tables + +## Properties + +### permission\_set? 
+ +```ts +optional permission_set: READ_ONLY; +``` + +Permission level to request + +*** + +### resources? + +```ts +optional resources: RequestedResource[]; +``` + +List of UC resources to request access to diff --git a/docs/docs/api/appkit/Interface.RequestedResource.md b/docs/docs/api/appkit/Interface.RequestedResource.md new file mode 100644 index 00000000..c91637bd --- /dev/null +++ b/docs/docs/api/appkit/Interface.RequestedResource.md @@ -0,0 +1,29 @@ +# Interface: RequestedResource + +Resource to request permissions for in Unity Catalog + +## Properties + +### table\_name? + +```ts +optional table_name: string; +``` + +Unity Catalog table name to request access to + +#### Example + +```ts +"catalog.schema.table" +``` + +*** + +### unspecified\_resource\_name? + +```ts +optional unspecified_resource_name: string; +``` + +Generic resource name for non-table resources diff --git a/docs/docs/api/appkit/index.md b/docs/docs/api/appkit/index.md index f1a0e5f8..91f9a66e 100644 --- a/docs/docs/api/appkit/index.md +++ b/docs/docs/api/appkit/index.md @@ -7,6 +7,7 @@ plugin architecture, and React integration. | Enumeration | Description | | ------ | ------ | +| [RequestedClaimsPermissionSet](Enumeration.RequestedClaimsPermissionSet.md) | Permission set for Unity Catalog table access | | [ResourceType](Enumeration.ResourceType.md) | Supported resource types that plugins can depend on. Each type has its own set of valid permissions. | ## Classes @@ -31,8 +32,13 @@ plugin architecture, and React integration. | ------ | ------ | | [BasePluginConfig](Interface.BasePluginConfig.md) | Base configuration interface for AppKit plugins | | [CacheConfig](Interface.CacheConfig.md) | Configuration for caching | +| [DatabaseCredential](Interface.DatabaseCredential.md) | Database credentials with OAuth token for Postgres connection | +| [GenerateDatabaseCredentialRequest](Interface.GenerateDatabaseCredentialRequest.md) | Request parameters for generating database OAuth credentials | | [ITelemetry](Interface.ITelemetry.md) | Plugin-facing interface for OpenTelemetry instrumentation. Provides a thin abstraction over OpenTelemetry APIs for plugins. | +| [LakebasePoolConfig](Interface.LakebasePoolConfig.md) | Configuration for creating a Lakebase connection pool | | [PluginManifest](Interface.PluginManifest.md) | Plugin manifest that declares metadata and resource requirements. Attached to plugin classes as a static property. | +| [RequestedClaims](Interface.RequestedClaims.md) | Optional claims for fine-grained Unity Catalog table permissions When specified, the returned token will be scoped to only the requested tables | +| [RequestedResource](Interface.RequestedResource.md) | Resource to request permissions for in Unity Catalog | | [ResourceEntry](Interface.ResourceEntry.md) | Internal representation of a resource in the registry. Extends ResourceRequirement with resolution state and plugin ownership. | | [ResourceFieldEntry](Interface.ResourceFieldEntry.md) | Defines a single field for a resource. Each field has its own environment variable and optional description. Single-value types use one key (e.g. id); multi-value types (database, secret) use multiple (e.g. instance_name, database_name or scope, key). | | [ResourceRequirement](Interface.ResourceRequirement.md) | Declares a resource requirement for a plugin. Can be defined statically in a manifest or dynamically via getResourceRequirements(). | @@ -60,7 +66,12 @@ plugin architecture, and React integration. 
| ------ | ------ | | [appKitTypesPlugin](Function.appKitTypesPlugin.md) | Vite plugin to generate types for AppKit queries. Calls generateFromEntryPoint under the hood. | | [createApp](Function.createApp.md) | Bootstraps AppKit with the provided configuration. | +| [createLakebasePool](Function.createLakebasePool.md) | Create a Lakebase pool with appkit's logger integration. Telemetry automatically uses appkit's OpenTelemetry configuration via global registry. | +| [generateDatabaseCredential](Function.generateDatabaseCredential.md) | Generate OAuth credentials for Postgres database connection using the proper Postgres API. | | [getExecutionContext](Function.getExecutionContext.md) | Get the current execution context. | +| [getLakebaseOrmConfig](Function.getLakebaseOrmConfig.md) | Get Lakebase connection configuration for ORMs that don't accept pg.Pool directly. | +| [getLakebasePgConfig](Function.getLakebasePgConfig.md) | Get Lakebase connection configuration for PostgreSQL clients. | | [getPluginManifest](Function.getPluginManifest.md) | Loads and validates the manifest from a plugin constructor. Normalizes string type/permission to strict ResourceType/ResourcePermission. | | [getResourceRequirements](Function.getResourceRequirements.md) | Gets the resource requirements from a plugin's manifest. | +| [getWorkspaceClient](Function.getWorkspaceClient.md) | Get workspace client from config or SDK default auth chain | | [isSQLTypeMarker](Function.isSQLTypeMarker.md) | Type guard to check if a value is a SQL type marker | diff --git a/docs/docs/api/appkit/typedoc-sidebar.ts b/docs/docs/api/appkit/typedoc-sidebar.ts index aa114b63..3421d7ee 100644 --- a/docs/docs/api/appkit/typedoc-sidebar.ts +++ b/docs/docs/api/appkit/typedoc-sidebar.ts @@ -5,6 +5,11 @@ const typedocSidebar: SidebarsConfig = { type: "category", label: "Enumerations", items: [ + { + type: "doc", + id: "api/appkit/Enumeration.RequestedClaimsPermissionSet", + label: "RequestedClaimsPermissionSet" + }, { type: "doc", id: "api/appkit/Enumeration.ResourceType", @@ -87,16 +92,41 @@ const typedocSidebar: SidebarsConfig = { id: "api/appkit/Interface.CacheConfig", label: "CacheConfig" }, + { + type: "doc", + id: "api/appkit/Interface.DatabaseCredential", + label: "DatabaseCredential" + }, + { + type: "doc", + id: "api/appkit/Interface.GenerateDatabaseCredentialRequest", + label: "GenerateDatabaseCredentialRequest" + }, { type: "doc", id: "api/appkit/Interface.ITelemetry", label: "ITelemetry" }, + { + type: "doc", + id: "api/appkit/Interface.LakebasePoolConfig", + label: "LakebasePoolConfig" + }, { type: "doc", id: "api/appkit/Interface.PluginManifest", label: "PluginManifest" }, + { + type: "doc", + id: "api/appkit/Interface.RequestedClaims", + label: "RequestedClaims" + }, + { + type: "doc", + id: "api/appkit/Interface.RequestedResource", + label: "RequestedResource" + }, { type: "doc", id: "api/appkit/Interface.ResourceEntry", @@ -175,11 +205,31 @@ const typedocSidebar: SidebarsConfig = { id: "api/appkit/Function.createApp", label: "createApp" }, + { + type: "doc", + id: "api/appkit/Function.createLakebasePool", + label: "createLakebasePool" + }, + { + type: "doc", + id: "api/appkit/Function.generateDatabaseCredential", + label: "generateDatabaseCredential" + }, { type: "doc", id: "api/appkit/Function.getExecutionContext", label: "getExecutionContext" }, + { + type: "doc", + id: "api/appkit/Function.getLakebaseOrmConfig", + label: "getLakebaseOrmConfig" + }, + { + type: "doc", + id: "api/appkit/Function.getLakebasePgConfig", + 
label: "getLakebasePgConfig" + }, { type: "doc", id: "api/appkit/Function.getPluginManifest", @@ -190,6 +240,11 @@ const typedocSidebar: SidebarsConfig = { id: "api/appkit/Function.getResourceRequirements", label: "getResourceRequirements" }, + { + type: "doc", + id: "api/appkit/Function.getWorkspaceClient", + label: "getWorkspaceClient" + }, { type: "doc", id: "api/appkit/Function.isSQLTypeMarker", diff --git a/package.json b/package.json index a08b06eb..a88ea962 100644 --- a/package.json +++ b/package.json @@ -57,7 +57,7 @@ "husky": "^9.1.7", "jsdom": "^27.0.0", "lint-staged": "^15.5.1", - "pg": "^8.16.3", + "pg": "^8.18.0", "plop": "^4.0.4", "publint": "^0.3.15", "release-it": "^19.1.0", diff --git a/packages/appkit/package.json b/packages/appkit/package.json index 3a0a622f..7b6373d4 100644 --- a/packages/appkit/package.json +++ b/packages/appkit/package.json @@ -42,7 +42,8 @@ "typecheck": "tsc --noEmit" }, "dependencies": { - "@databricks/sdk-experimental": "^0.15.0", + "@databricks/lakebase": "workspace:*", + "@databricks/sdk-experimental": "^0.16.0", "@opentelemetry/api": "^1.9.0", "@opentelemetry/api-logs": "^0.208.0", "@opentelemetry/auto-instrumentations-node": "^0.67.0", @@ -61,7 +62,7 @@ "dotenv": "^16.6.1", "express": "^4.22.0", "obug": "^2.1.1", - "pg": "^8.16.3", + "pg": "^8.18.0", "semver": "^7.7.3", "shared": "workspace:*", "vite": "npm:rolldown-vite@7.1.14", @@ -71,7 +72,7 @@ "devDependencies": { "@types/express": "^4.17.25", "@types/json-schema": "^7.0.15", - "@types/pg": "^8.15.6", + "@types/pg": "^8.16.0", "@types/ws": "^8.18.1", "@vitejs/plugin-react": "^5.1.1" }, diff --git a/packages/appkit/src/cache/index.ts b/packages/appkit/src/cache/index.ts index 952a7987..bdc6c10d 100644 --- a/packages/appkit/src/cache/index.ts +++ b/packages/appkit/src/cache/index.ts @@ -1,7 +1,7 @@ import { createHash } from "node:crypto"; import { WorkspaceClient } from "@databricks/sdk-experimental"; import type { CacheConfig, CacheStorage } from "shared"; -import { LakebaseV1Connector } from "@/connectors"; +import { createLakebasePool } from "@/connectors/lakebase"; import { AppKitError, ExecutionError, InitializationError } from "../errors"; import { createLogger } from "../logging/logger"; import type { Counter, TelemetryProvider } from "../telemetry"; @@ -147,14 +147,17 @@ export class CacheManager { // try to use lakebase storage try { const workspaceClient = new WorkspaceClient({}); - const connector = new LakebaseV1Connector({ workspaceClient }); - const isHealthy = await connector.healthCheck(); + const pool = createLakebasePool({ workspaceClient }); + const persistentStorage = new PersistentStorage(config, pool); + const isHealthy = await persistentStorage.healthCheck(); if (isHealthy) { - const persistentStorage = new PersistentStorage(config, connector); await persistentStorage.initialize(); return new CacheManager(persistentStorage, config); } + + // Health check failed, close the pool and fallback + await pool.end(); } catch { // lakebase unavailable, continue with in-memory storage } diff --git a/packages/appkit/src/cache/storage/persistent.ts b/packages/appkit/src/cache/storage/persistent.ts index 393385cd..d9affd73 100644 --- a/packages/appkit/src/cache/storage/persistent.ts +++ b/packages/appkit/src/cache/storage/persistent.ts @@ -1,6 +1,6 @@ import { createHash } from "node:crypto"; +import type pg from "pg"; import type { CacheConfig, CacheEntry, CacheStorage } from "shared"; -import type { LakebaseV1Connector } from "../../connectors"; import { InitializationError, 
ValidationError } from "../../errors"; import { createLogger } from "../../logging/logger"; import { lakebaseStorageDefaults } from "./defaults"; @@ -12,7 +12,8 @@ const logger = createLogger("cache:persistent"); * to manage memory usage and ensure efficient cache operations. * * @example - * const persistentStorage = new PersistentStorage(config, connector); + * const pool = createLakebasePool({ workspaceClient }); + * const persistentStorage = new PersistentStorage(config, pool); * await persistentStorage.initialize(); * await persistentStorage.get("my-key"); * await persistentStorage.set("my-key", "my-value"); @@ -22,7 +23,7 @@ const logger = createLogger("cache:persistent"); * */ export class PersistentStorage implements CacheStorage { - private readonly connector: LakebaseV1Connector; + private readonly pool: pg.Pool; private readonly tableName: string; private readonly maxBytes: number; private readonly maxEntryBytes: number; @@ -30,8 +31,8 @@ export class PersistentStorage implements CacheStorage { private readonly evictionCheckProbability: number; private initialized: boolean; - constructor(config: CacheConfig, connector: LakebaseV1Connector) { - this.connector = connector; + constructor(config: CacheConfig, pool: pg.Pool) { + this.pool = pool; this.maxBytes = config.maxBytes ?? lakebaseStorageDefaults.maxBytes; this.maxEntryBytes = config.maxEntryBytes ?? lakebaseStorageDefaults.maxEntryBytes; @@ -66,7 +67,7 @@ export class PersistentStorage implements CacheStorage { const keyHash = this.hashKey(key); - const result = await this.connector.query<{ + const result = await this.pool.query<{ value: Buffer; expiry: string; }>(`SELECT value, expiry FROM ${this.tableName} WHERE key_hash = $1`, [ @@ -78,7 +79,7 @@ export class PersistentStorage implements CacheStorage { const entry = result.rows[0]; // fire-and-forget update - this.connector + this.pool .query( `UPDATE ${this.tableName} SET last_accessed = NOW() WHERE key_hash = $1`, [keyHash], @@ -123,7 +124,7 @@ export class PersistentStorage implements CacheStorage { } } - await this.connector.query( + await this.pool.query( `INSERT INTO ${this.tableName} (key_hash, key, value, byte_size, expiry, created_at, last_accessed) VALUES ($1, $2, $3, $4, $5, NOW(), NOW()) ON CONFLICT (key_hash) @@ -141,16 +142,15 @@ export class PersistentStorage implements CacheStorage { async delete(key: string): Promise { await this.ensureInitialized(); const keyHash = this.hashKey(key); - await this.connector.query( - `DELETE FROM ${this.tableName} WHERE key_hash = $1`, - [keyHash], - ); + await this.pool.query(`DELETE FROM ${this.tableName} WHERE key_hash = $1`, [ + keyHash, + ]); } /** Clear the persistent storage */ async clear(): Promise { await this.ensureInitialized(); - await this.connector.query(`TRUNCATE TABLE ${this.tableName}`); + await this.pool.query(`TRUNCATE TABLE ${this.tableName}`); } /** @@ -162,7 +162,7 @@ export class PersistentStorage implements CacheStorage { await this.ensureInitialized(); const keyHash = this.hashKey(key); - const result = await this.connector.query<{ exists: boolean }>( + const result = await this.pool.query<{ exists: boolean }>( `SELECT EXISTS(SELECT 1 FROM ${this.tableName} WHERE key_hash = $1) as exists`, [keyHash], ); @@ -177,7 +177,7 @@ export class PersistentStorage implements CacheStorage { async size(): Promise { await this.ensureInitialized(); - const result = await this.connector.query<{ count: string }>( + const result = await this.pool.query<{ count: string }>( `SELECT COUNT(*) as count FROM 
${this.tableName}`, ); return parseInt(result.rows[0]?.count ?? "0", 10); @@ -187,7 +187,7 @@ export class PersistentStorage implements CacheStorage { async totalBytes(): Promise { await this.ensureInitialized(); - const result = await this.connector.query<{ total: string }>( + const result = await this.pool.query<{ total: string }>( `SELECT COALESCE(SUM(byte_size), 0) as total FROM ${this.tableName}`, ); return parseInt(result.rows[0]?.total ?? "0", 10); @@ -207,7 +207,8 @@ export class PersistentStorage implements CacheStorage { */ async healthCheck(): Promise { try { - return await this.connector.healthCheck(); + await this.pool.query("SELECT 1"); + return true; } catch { return false; } @@ -215,7 +216,7 @@ export class PersistentStorage implements CacheStorage { /** Close the persistent storage */ async close(): Promise { - await this.connector.close(); + await this.pool.end(); } /** @@ -224,7 +225,7 @@ export class PersistentStorage implements CacheStorage { */ async cleanupExpired(): Promise { await this.ensureInitialized(); - const result = await this.connector.query<{ count: string }>( + const result = await this.pool.query<{ count: string }>( `WITH deleted as (DELETE FROM ${this.tableName} WHERE expiry < $1 RETURNING *) SELECT COUNT(*) as count FROM deleted`, [Date.now()], ); @@ -241,7 +242,7 @@ export class PersistentStorage implements CacheStorage { } } - await this.connector.query( + await this.pool.query( `DELETE FROM ${this.tableName} WHERE key_hash IN (SELECT key_hash FROM ${this.tableName} ORDER BY last_accessed ASC LIMIT $1)`, [this.evictionBatchSize], @@ -275,7 +276,7 @@ export class PersistentStorage implements CacheStorage { /** Run migrations for the persistent storage */ private async runMigrations(): Promise { try { - await this.connector.query(` + await this.pool.query(` CREATE TABLE IF NOT EXISTS ${this.tableName} ( id BIGSERIAL PRIMARY KEY, key_hash BIGINT NOT NULL, @@ -289,22 +290,22 @@ export class PersistentStorage implements CacheStorage { `); // unique index on key_hash for fast lookups - await this.connector.query( + await this.pool.query( `CREATE UNIQUE INDEX IF NOT EXISTS idx_${this.tableName}_key_hash ON ${this.tableName} (key_hash);`, ); // index on expiry for cleanup queries - await this.connector.query( + await this.pool.query( `CREATE INDEX IF NOT EXISTS idx_${this.tableName}_expiry ON ${this.tableName} (expiry); `, ); // index on last_accessed for LRU eviction - await this.connector.query( + await this.pool.query( `CREATE INDEX IF NOT EXISTS idx_${this.tableName}_last_accessed ON ${this.tableName} (last_accessed); `, ); // index on byte_size for monitoring - await this.connector.query( + await this.pool.query( `CREATE INDEX IF NOT EXISTS idx_${this.tableName}_byte_size ON ${this.tableName} (byte_size); `, ); } catch (error) { diff --git a/packages/appkit/src/cache/tests/cache-manager.test.ts b/packages/appkit/src/cache/tests/cache-manager.test.ts index a4e54438..8e45d679 100644 --- a/packages/appkit/src/cache/tests/cache-manager.test.ts +++ b/packages/appkit/src/cache/tests/cache-manager.test.ts @@ -2,18 +2,19 @@ import type { CacheStorage } from "shared"; import { afterEach, beforeEach, describe, expect, test, vi } from "vitest"; import { CacheManager } from "../../index"; -// Mock LakebaseV1Connector -const mockLakebaseHealthCheck = vi.fn(); -vi.mock("@/connectors", () => ({ - LakebaseV1Connector: vi.fn().mockImplementation(() => ({ - healthCheck: mockLakebaseHealthCheck, - close: vi.fn().mockResolvedValue(undefined), +// Mock createLakebasePool 
+const mockPoolQuery = vi.fn(); +const mockPoolEnd = vi.fn(); +vi.mock("@/connectors/lakebase", () => ({ + createLakebasePool: vi.fn().mockImplementation(() => ({ + query: mockPoolQuery, + end: mockPoolEnd.mockResolvedValue(undefined), })), })); // Mock PersistentStorage vi.mock("../storage/persistent", () => ({ - PersistentStorage: vi.fn().mockImplementation(() => { + PersistentStorage: vi.fn().mockImplementation((_config: any, pool: any) => { const cache = new Map(); return { initialize: vi.fn().mockResolvedValue(undefined), @@ -32,8 +33,16 @@ vi.mock("../storage/persistent", () => ({ has: vi.fn().mockImplementation(async (key: string) => cache.has(key)), size: vi.fn().mockImplementation(async () => cache.size), isPersistent: vi.fn().mockReturnValue(true), - healthCheck: vi.fn().mockResolvedValue(true), - close: vi.fn().mockResolvedValue(undefined), + healthCheck: vi.fn().mockImplementation(async () => { + // Simulate real healthCheck: calls pool.query('SELECT 1') + try { + await pool.query("SELECT 1"); + return true; + } catch { + return false; + } + }), + close: vi.fn().mockImplementation(async () => pool.end()), cleanupExpired: vi.fn().mockResolvedValue(0), }; }), @@ -87,7 +96,7 @@ describe("CacheManager", () => { (CacheManager as any).instance = null; (CacheManager as any).initPromise = null; // Default: Lakebase unavailable (most tests pass explicit storage) - mockLakebaseHealthCheck.mockResolvedValue(false); + mockPoolQuery.mockRejectedValue(new Error("Connection failed")); }); afterEach(() => { @@ -621,8 +630,11 @@ describe("CacheManager", () => { (CacheManager as any).instance = null; (CacheManager as any).initPromise = null; - // Make Lakebase healthy - mockLakebaseHealthCheck.mockResolvedValue(true); + // Make pool.query succeed for healthCheck ('SELECT 1') + mockPoolQuery.mockResolvedValue({ + rows: [{ "?column?": 1 }], + rowCount: 1, + }); const cache = await CacheManager.getInstance({}); @@ -636,8 +648,8 @@ describe("CacheManager", () => { (CacheManager as any).instance = null; (CacheManager as any).initPromise = null; - // Lakebase unhealthy (default in beforeEach) - mockLakebaseHealthCheck.mockResolvedValue(false); + // Lakebase unhealthy (pool.query fails, default in beforeEach) + mockPoolQuery.mockRejectedValue(new Error("Connection failed")); const cache = await CacheManager.getInstance({}); @@ -656,8 +668,8 @@ describe("CacheManager", () => { (CacheManager as any).instance = null; (CacheManager as any).initPromise = null; - // Lakebase unhealthy - mockLakebaseHealthCheck.mockResolvedValue(false); + // Lakebase unhealthy (pool.query fails) + mockPoolQuery.mockRejectedValue(new Error("Connection failed")); const cache = await CacheManager.getInstance({ strictPersistence: true, @@ -677,8 +689,8 @@ describe("CacheManager", () => { (CacheManager as any).instance = null; (CacheManager as any).initPromise = null; - // Lakebase unhealthy - mockLakebaseHealthCheck.mockResolvedValue(false); + // Lakebase unhealthy - pool.query('SELECT 1') fails + mockPoolQuery.mockRejectedValue(new Error("Health check failed")); const cache = await CacheManager.getInstance({}); @@ -693,9 +705,7 @@ describe("CacheManager", () => { (CacheManager as any).initPromise = null; // Lakebase throws - mockLakebaseHealthCheck.mockRejectedValue( - new Error("Connection refused"), - ); + mockPoolQuery.mockRejectedValue(new Error("Connection refused")); const cache = await CacheManager.getInstance({}); diff --git a/packages/appkit/src/cache/tests/persistent.test.ts 
b/packages/appkit/src/cache/tests/persistent.test.ts index fc3ead27..82706b0d 100644 --- a/packages/appkit/src/cache/tests/persistent.test.ts +++ b/packages/appkit/src/cache/tests/persistent.test.ts @@ -1,26 +1,25 @@ import { beforeEach, describe, expect, test, vi } from "vitest"; import { PersistentStorage } from "../storage"; -/** Mock LakebaseV1Connector for testing */ -const createMockConnector = () => ({ +/** Mock pg.Pool for testing */ +const createMockPool = () => ({ query: vi.fn(), - healthCheck: vi.fn().mockResolvedValue(true), - close: vi.fn().mockResolvedValue(undefined), + end: vi.fn().mockResolvedValue(undefined), }); describe("PersistentStorage", () => { let storage: PersistentStorage; - let mockConnector: ReturnType; + let mockPool: ReturnType; beforeEach(() => { - mockConnector = createMockConnector(); + mockPool = createMockPool(); // Default: migrations succeed - mockConnector.query.mockResolvedValue({ rows: [] }); + mockPool.query.mockResolvedValue({ rows: [] }); storage = new PersistentStorage( { maxBytes: 1024 * 1024 }, // 1MB - mockConnector as any, + mockPool as any, ); }); @@ -29,12 +28,12 @@ describe("PersistentStorage", () => { await storage.initialize(); // Should create table - expect(mockConnector.query).toHaveBeenCalledWith( + expect(mockPool.query).toHaveBeenCalledWith( expect.stringContaining("CREATE TABLE IF NOT EXISTS"), ); // Should create unique index on key_hash - expect(mockConnector.query).toHaveBeenCalledWith( + expect(mockPool.query).toHaveBeenCalledWith( expect.stringContaining("CREATE UNIQUE INDEX IF NOT EXISTS"), ); }); @@ -44,7 +43,7 @@ describe("PersistentStorage", () => { await storage.initialize(); // CREATE TABLE should only be called once (first initialization) - const createTableCalls = mockConnector.query.mock.calls.filter((call) => + const createTableCalls = mockPool.query.mock.calls.filter((call) => call[0].includes("CREATE TABLE"), ); expect(createTableCalls.length).toBe(1); @@ -55,7 +54,7 @@ describe("PersistentStorage", () => { .spyOn(console, "error") .mockImplementation(() => {}); - mockConnector.query.mockRejectedValue(new Error("migration failed")); + mockPool.query.mockRejectedValue(new Error("migration failed")); await expect(storage.initialize()).rejects.toThrow( "Error in running migrations for persistent storage", @@ -68,7 +67,7 @@ describe("PersistentStorage", () => { describe("get", () => { beforeEach(async () => { await storage.initialize(); - mockConnector.query.mockClear(); + mockPool.query.mockClear(); }); test("should return cached entry", async () => { @@ -78,7 +77,7 @@ describe("PersistentStorage", () => { "utf-8", ); - mockConnector.query.mockResolvedValueOnce({ + mockPool.query.mockResolvedValueOnce({ rows: [{ value: valueBuffer, expiry: String(expiry) }], }); @@ -88,14 +87,14 @@ describe("PersistentStorage", () => { value: { data: "test" }, expiry, }); - expect(mockConnector.query).toHaveBeenCalledWith( + expect(mockPool.query).toHaveBeenCalledWith( expect.stringContaining("SELECT value, expiry"), [expect.any(BigInt)], // key_hash is bigint ); }); test("should return null for non-existent key", async () => { - mockConnector.query.mockResolvedValueOnce({ rows: [] }); + mockPool.query.mockResolvedValueOnce({ rows: [] }); const result = await storage.get("non-existent"); @@ -109,7 +108,7 @@ describe("PersistentStorage", () => { "utf-8", ); - mockConnector.query + mockPool.query .mockResolvedValueOnce({ rows: [{ value: valueBuffer, expiry: String(expiry) }], }) @@ -120,7 +119,7 @@ describe("PersistentStorage", () => 
{ // Wait for fire-and-forget update await new Promise((resolve) => setTimeout(resolve, 10)); - expect(mockConnector.query).toHaveBeenCalledWith( + expect(mockPool.query).toHaveBeenCalledWith( expect.stringContaining("UPDATE"), [expect.any(BigInt)], // key_hash ); @@ -130,7 +129,7 @@ describe("PersistentStorage", () => { describe("set", () => { beforeEach(async () => { await storage.initialize(); - mockConnector.query.mockClear(); + mockPool.query.mockClear(); }); test("should insert new entry", async () => { @@ -138,14 +137,14 @@ describe("PersistentStorage", () => { const randomSpy = vi.spyOn(Math, "random").mockReturnValue(0.5); // INSERT succeeds - mockConnector.query.mockResolvedValueOnce({ rows: [] }); + mockPool.query.mockResolvedValueOnce({ rows: [] }); await storage.set("test-key", { value: { data: "test" }, expiry: Date.now() + 10000, }); - expect(mockConnector.query).toHaveBeenCalledWith( + expect(mockPool.query).toHaveBeenCalledWith( expect.stringContaining("INSERT INTO"), expect.arrayContaining([ expect.any(BigInt), // key_hash @@ -164,17 +163,17 @@ describe("PersistentStorage", () => { const randomSpy = vi.spyOn(Math, "random").mockReturnValue(0.05); // totalBytes() returns maxBytes (triggers eviction) - mockConnector.query.mockResolvedValueOnce({ + mockPool.query.mockResolvedValueOnce({ rows: [{ total: String(1024 * 1024) }], // 1MB (at limit) }); // cleanupExpired returns 0 - mockConnector.query.mockResolvedValueOnce({ + mockPool.query.mockResolvedValueOnce({ rows: [{ count: "0" }], }); // eviction DELETE succeeds - mockConnector.query.mockResolvedValueOnce({ rows: [] }); + mockPool.query.mockResolvedValueOnce({ rows: [] }); // INSERT succeeds - mockConnector.query.mockResolvedValueOnce({ rows: [] }); + mockPool.query.mockResolvedValueOnce({ rows: [] }); await storage.set("new-key", { value: { data: "new" }, @@ -182,7 +181,7 @@ describe("PersistentStorage", () => { }); // Should have called DELETE for LRU eviction - expect(mockConnector.query).toHaveBeenCalledWith( + expect(mockPool.query).toHaveBeenCalledWith( expect.stringContaining("DELETE FROM"), expect.any(Array), ); @@ -195,7 +194,7 @@ describe("PersistentStorage", () => { const randomSpy = vi.spyOn(Math, "random").mockReturnValue(0.5); // INSERT succeeds - mockConnector.query.mockResolvedValueOnce({ rows: [] }); + mockPool.query.mockResolvedValueOnce({ rows: [] }); const value = { nested: { array: [1, 2, 3] } }; await storage.set("test-key", { @@ -203,7 +202,7 @@ describe("PersistentStorage", () => { expiry: Date.now() + 10000, }); - const insertCall = mockConnector.query.mock.calls.find((call) => + const insertCall = mockPool.query.mock.calls.find((call) => call[0].includes("INSERT"), ); @@ -219,15 +218,15 @@ describe("PersistentStorage", () => { describe("delete", () => { beforeEach(async () => { await storage.initialize(); - mockConnector.query.mockClear(); + mockPool.query.mockClear(); }); test("should delete entry by key_hash", async () => { - mockConnector.query.mockResolvedValueOnce({ rows: [] }); + mockPool.query.mockResolvedValueOnce({ rows: [] }); await storage.delete("test-key"); - expect(mockConnector.query).toHaveBeenCalledWith( + expect(mockPool.query).toHaveBeenCalledWith( expect.stringContaining("DELETE FROM"), [expect.any(BigInt)], // key_hash ); @@ -237,15 +236,15 @@ describe("PersistentStorage", () => { describe("clear", () => { beforeEach(async () => { await storage.initialize(); - mockConnector.query.mockClear(); + mockPool.query.mockClear(); }); test("should truncate table", async () => { - 
mockConnector.query.mockResolvedValueOnce({ rows: [] }); + mockPool.query.mockResolvedValueOnce({ rows: [] }); await storage.clear(); - expect(mockConnector.query).toHaveBeenCalledWith( + expect(mockPool.query).toHaveBeenCalledWith( expect.stringContaining("TRUNCATE TABLE"), ); }); @@ -254,25 +253,25 @@ describe("PersistentStorage", () => { describe("has", () => { beforeEach(async () => { await storage.initialize(); - mockConnector.query.mockClear(); + mockPool.query.mockClear(); }); test("should return true when key exists", async () => { - mockConnector.query.mockResolvedValueOnce({ + mockPool.query.mockResolvedValueOnce({ rows: [{ exists: true }], }); const result = await storage.has("test-key"); expect(result).toBe(true); - expect(mockConnector.query).toHaveBeenCalledWith( + expect(mockPool.query).toHaveBeenCalledWith( expect.stringContaining("SELECT EXISTS"), [expect.any(BigInt)], // key_hash ); }); test("should return false when key does not exist", async () => { - mockConnector.query.mockResolvedValueOnce({ + mockPool.query.mockResolvedValueOnce({ rows: [{ exists: false }], }); @@ -282,7 +281,7 @@ describe("PersistentStorage", () => { }); test("should return false when query returns no rows", async () => { - mockConnector.query.mockResolvedValueOnce({ rows: [] }); + mockPool.query.mockResolvedValueOnce({ rows: [] }); const result = await storage.has("test-key"); @@ -293,11 +292,11 @@ describe("PersistentStorage", () => { describe("size", () => { beforeEach(async () => { await storage.initialize(); - mockConnector.query.mockClear(); + mockPool.query.mockClear(); }); test("should return count of entries", async () => { - mockConnector.query.mockResolvedValueOnce({ + mockPool.query.mockResolvedValueOnce({ rows: [{ count: "42" }], }); @@ -307,7 +306,7 @@ describe("PersistentStorage", () => { }); test("should return 0 when empty", async () => { - mockConnector.query.mockResolvedValueOnce({ + mockPool.query.mockResolvedValueOnce({ rows: [{ count: "0" }], }); @@ -317,7 +316,7 @@ describe("PersistentStorage", () => { }); test("should return 0 when no rows", async () => { - mockConnector.query.mockResolvedValueOnce({ rows: [] }); + mockPool.query.mockResolvedValueOnce({ rows: [] }); const result = await storage.size(); @@ -328,11 +327,11 @@ describe("PersistentStorage", () => { describe("totalBytes", () => { beforeEach(async () => { await storage.initialize(); - mockConnector.query.mockClear(); + mockPool.query.mockClear(); }); test("should return sum of byte_size", async () => { - mockConnector.query.mockResolvedValueOnce({ + mockPool.query.mockResolvedValueOnce({ rows: [{ total: "1048576" }], // 1MB }); @@ -342,7 +341,7 @@ describe("PersistentStorage", () => { }); test("should return 0 when empty", async () => { - mockConnector.query.mockResolvedValueOnce({ + mockPool.query.mockResolvedValueOnce({ rows: [{ total: "0" }], }); @@ -355,25 +354,25 @@ describe("PersistentStorage", () => { describe("cleanupExpired", () => { beforeEach(async () => { await storage.initialize(); - mockConnector.query.mockClear(); + mockPool.query.mockClear(); }); test("should delete expired entries", async () => { - mockConnector.query.mockResolvedValueOnce({ + mockPool.query.mockResolvedValueOnce({ rows: [{ count: "5" }], }); const deleted = await storage.cleanupExpired(); expect(deleted).toBe(5); - expect(mockConnector.query).toHaveBeenCalledWith( + expect(mockPool.query).toHaveBeenCalledWith( expect.stringContaining("DELETE FROM"), expect.arrayContaining([expect.any(Number)]), ); }); test("should return 0 when 
no expired entries", async () => { - mockConnector.query.mockResolvedValueOnce({ + mockPool.query.mockResolvedValueOnce({ rows: [{ count: "0" }], }); @@ -388,27 +387,27 @@ describe("PersistentStorage", () => { expect(storage.isPersistent()).toBe(true); }); - test("should delegate healthCheck to connector", async () => { - mockConnector.healthCheck.mockResolvedValueOnce(true); + test("should implement healthCheck using pool query", async () => { + mockPool.query.mockResolvedValueOnce({ rows: [{ "?column?": 1 }] }); const result = await storage.healthCheck(); expect(result).toBe(true); - expect(mockConnector.healthCheck).toHaveBeenCalled(); + expect(mockPool.query).toHaveBeenCalledWith("SELECT 1"); }); test("should return false on healthCheck error", async () => { - mockConnector.healthCheck.mockRejectedValueOnce(new Error("failed")); + mockPool.query.mockRejectedValueOnce(new Error("Connection failed")); const result = await storage.healthCheck(); expect(result).toBe(false); }); - test("should close connector on close", async () => { + test("should close pool on close", async () => { await storage.close(); - expect(mockConnector.close).toHaveBeenCalled(); + expect(mockPool.end).toHaveBeenCalled(); }); }); @@ -416,15 +415,15 @@ describe("PersistentStorage", () => { test("should auto-initialize on get if not initialized", async () => { const uninitializedStorage = new PersistentStorage( { maxBytes: 1024 * 1024 }, - mockConnector as any, + mockPool as any, ); - mockConnector.query.mockResolvedValue({ rows: [] }); + mockPool.query.mockResolvedValue({ rows: [] }); await uninitializedStorage.get("test-key"); // Should have run migrations - expect(mockConnector.query).toHaveBeenCalledWith( + expect(mockPool.query).toHaveBeenCalledWith( expect.stringContaining("CREATE TABLE"), ); }); @@ -432,10 +431,10 @@ describe("PersistentStorage", () => { test("should auto-initialize on set if not initialized", async () => { const uninitializedStorage = new PersistentStorage( { maxBytes: 1024 * 1024 }, - mockConnector as any, + mockPool as any, ); - mockConnector.query.mockResolvedValue({ rows: [] }); + mockPool.query.mockResolvedValue({ rows: [] }); await uninitializedStorage.set("test-key", { value: "test", @@ -443,7 +442,7 @@ describe("PersistentStorage", () => { }); // Should have run migrations - expect(mockConnector.query).toHaveBeenCalledWith( + expect(mockPool.query).toHaveBeenCalledWith( expect.stringContaining("CREATE TABLE"), ); }); diff --git a/packages/appkit/src/connectors/index.ts b/packages/appkit/src/connectors/index.ts index 01d23cf2..fdb1cc69 100644 --- a/packages/appkit/src/connectors/index.ts +++ b/packages/appkit/src/connectors/index.ts @@ -1,2 +1,3 @@ +export * from "./lakebase"; export * from "./lakebase-v1"; export * from "./sql-warehouse"; diff --git a/packages/appkit/src/connectors/lakebase/index.ts b/packages/appkit/src/connectors/lakebase/index.ts new file mode 100644 index 00000000..e33ac077 --- /dev/null +++ b/packages/appkit/src/connectors/lakebase/index.ts @@ -0,0 +1,42 @@ +import { + createLakebasePool as createLakebasePoolBase, + type LakebasePoolConfig, +} from "@databricks/lakebase"; +import type pg from "pg"; +import { createLogger } from "@/logging/logger"; + +/** + * Create a Lakebase pool with appkit's logger integration. + * Telemetry automatically uses appkit's OpenTelemetry configuration via global registry. 
+ * + * @param config - Lakebase pool configuration + * @returns PostgreSQL pool with appkit integration + */ +export function createLakebasePool( + config?: Partial, +): pg.Pool { + const logger = createLogger("connectors:lakebase"); + + return createLakebasePoolBase({ + ...config, + logger, + }); +} + +// Re-export everything else from lakebase +export { + createTokenRefreshCallback, + type DatabaseCredential, + type DriverTelemetry, + type GenerateDatabaseCredentialRequest, + generateDatabaseCredential, + getLakebaseOrmConfig, + getLakebasePgConfig, + getWorkspaceClient, + type LakebasePoolConfig, + type Logger, + type RequestedClaims, + RequestedClaimsPermissionSet, + type RequestedResource, + type TokenRefreshDeps, +} from "@databricks/lakebase"; diff --git a/packages/appkit/src/index.ts b/packages/appkit/src/index.ts index b0745592..8ba528ef 100644 --- a/packages/appkit/src/index.ts +++ b/packages/appkit/src/index.ts @@ -14,6 +14,22 @@ export type { } from "shared"; export { isSQLTypeMarker, sql } from "shared"; export { CacheManager } from "./cache"; +export type { + DatabaseCredential, + GenerateDatabaseCredentialRequest, + LakebasePoolConfig, + RequestedClaims, + RequestedResource, +} from "./connectors/lakebase"; +// Lakebase Autoscaling connector +export { + createLakebasePool, + generateDatabaseCredential, + getLakebaseOrmConfig, + getLakebasePgConfig, + getWorkspaceClient, + RequestedClaimsPermissionSet, +} from "./connectors/lakebase"; export { getExecutionContext } from "./context"; export { createApp } from "./core"; // Errors diff --git a/packages/appkit/tsconfig.json b/packages/appkit/tsconfig.json index 8d1cbf59..5265a688 100644 --- a/packages/appkit/tsconfig.json +++ b/packages/appkit/tsconfig.json @@ -6,7 +6,8 @@ "paths": { "@/*": ["src/*"], "@tools/*": ["../../tools/*"], - "shared": ["../../packages/shared/src"] + "shared": ["../../packages/shared/src"], + "@databricks/lakebase": ["../../packages/lakebase/src"] } }, "include": ["src/**/*"], diff --git a/packages/lakebase/.release-it.json b/packages/lakebase/.release-it.json new file mode 100644 index 00000000..dc8e0b2c --- /dev/null +++ b/packages/lakebase/.release-it.json @@ -0,0 +1,35 @@ +{ + "$schema": "https://unpkg.com/release-it@19/schema/release-it.json", + "git": { + "commitMessage": "chore(lakebase): release v${version} [skip ci]", + "tagName": "lakebase-v${version}", + "tagAnnotation": "Release @databricks/lakebase v${version}", + "requireBranch": "main", + "requireCleanWorkingDir": true, + "push": true, + "pushArgs": ["--follow-tags"] + }, + "github": { + "release": true, + "releaseName": "@databricks/lakebase v${version}", + "autoGenerate": false, + "draft": false, + "preRelease": false, + "tokenRef": "GITHUB_TOKEN" + }, + "npm": false, + "hooks": { + "before:release": "pnpm build && pnpm --filter=@databricks/lakebase dist", + "after:release": "npm publish packages/lakebase/tmp --access public --provenance" + }, + "plugins": { + "@release-it/conventional-changelog": { + "preset": { + "name": "conventionalcommits", + "bumpStrict": true + }, + "infile": "CHANGELOG.md", + "header": "# Changelog\n\nAll notable changes to @databricks/lakebase will be documented in this file." + } + } +} diff --git a/packages/lakebase/README.md b/packages/lakebase/README.md new file mode 100644 index 00000000..4db37421 --- /dev/null +++ b/packages/lakebase/README.md @@ -0,0 +1,264 @@ +# @databricks/lakebase + +PostgreSQL driver for Databricks Lakebase Autoscaling with automatic OAuth token refresh. 
+ +## Overview + +`@databricks/lakebase` provides a drop-in replacement for the standard `pg` connection pool that automatically handles OAuth authentication for Databricks Lakebase Autoscaling (OLTP) databases. + +It: + +- Returns a standard `pg.Pool` - works with any PostgreSQL library or ORM +- Automatically refreshes OAuth tokens (1-hour lifetime, with 2-minute buffer) +- Caches tokens to minimize API calls +- Zero configuration with environment variables +- Optional OpenTelemetry instrumentation + +**NOTE:** This package is NOT compatible with the Databricks Lakebase Provisioned. + +## Installation + +```bash +npm install @databricks/lakebase +``` + +## Quick Start + +### Using Environment Variables + +Set the following environment variables: + +```bash +export PGHOST=your-lakebase-host.databricks.com +export PGDATABASE=your_database_name +export LAKEBASE_ENDPOINT=projects/{project-id}/branches/{branch-id}/endpoints/{endpoint-id} +export PGUSER=your-service-principal-id +export PGSSLMODE=require +``` + +Then use the driver: + +```typescript +import { createLakebasePool } from "@databricks/lakebase"; + +const pool = createLakebasePool(); +const result = await pool.query("SELECT * FROM users"); +console.log(result.rows); +``` + +### With Explicit Configuration + +```typescript +import { createLakebasePool } from "@databricks/lakebase"; + +const pool = createLakebasePool({ + host: "your-lakebase-host.databricks.com", + database: "your_database_name", + endpoint: + "projects/{project-id}/branches/{branch-id}/endpoints/{endpoint-id}", + user: "service-principal-id", // Optional, defaults to DATABRICKS_CLIENT_ID + max: 10, // Connection pool size +}); +``` + +## Authentication + +The driver supports Databricks authentication via: + +1. **Default auth chain** (`.databrickscfg`, environment variables) +2. **Service principal** (`DATABRICKS_CLIENT_ID` + `DATABRICKS_CLIENT_SECRET`) +3. **OAuth tokens** (via Databricks SDK) + +See [Databricks authentication docs](https://docs.databricks.com/en/dev-tools/auth/index.html) for configuration. + +## Configuration + +| Option | Environment Variable | Description | Default | +| ------------------------- | ---------------------------------- | --------------------------------------- | ----------------------- | +| `host` | `PGHOST` | Lakebase host | _Required_ | +| `database` | `PGDATABASE` | Database name | _Required_ | +| `endpoint` | `LAKEBASE_ENDPOINT` | Endpoint resource path | _Required_ | +| `user` | `PGUSER` or `DATABRICKS_CLIENT_ID` | Username or service principal ID | Auto-detected | +| `port` | `PGPORT` | Port number | `5432` | +| `sslMode` | `PGSSLMODE` | SSL mode | `require` | +| `max` | - | Max pool connections | `10` | +| `idleTimeoutMillis` | - | Idle connection timeout | `30000` | +| `connectionTimeoutMillis` | - | Connection timeout | `10000` | +| `logger` | - | Logger instance or config | `{ error: true }` | + +## Logging + +By default, the driver logs errors only. You can configure logging in three ways: + +### 1. 
Config-Based Logger (Simple)
+
+Enable/disable specific log levels using boolean flags:
+
+```typescript
+import { createLakebasePool } from "@databricks/lakebase";
+
+// Development mode: enable debug and error logs
+const pool = createLakebasePool({
+  logger: { debug: true, error: true },
+});
+
+// Production mode: errors only (same as default)
+const pool = createLakebasePool({
+  logger: { error: true },
+});
+
+// Verbose mode: all logs enabled
+const pool = createLakebasePool({
+  logger: { debug: true, info: true, warn: true, error: true },
+});
+
+// Silent mode: all logs disabled
+const pool = createLakebasePool({
+  logger: { debug: false, info: false, warn: false, error: false },
+});
+```
+
+### 2. Custom Logger (Advanced)
+
+Inject your own logger implementation for custom formatting or integrations:
+
+```typescript
+const logger = {
+  debug: (msg: string, ...args: unknown[]) => console.debug(msg, ...args),
+  info: (msg: string, ...args: unknown[]) => console.log(msg, ...args),
+  warn: (msg: string, ...args: unknown[]) => console.warn(msg, ...args),
+  error: (msg: string, ...args: unknown[]) => console.error(msg, ...args),
+};
+
+const pool = createLakebasePool({ logger });
+```
+
+### 3. Default Behavior
+
+If no logger is provided, the driver defaults to error-only logging:
+
+```typescript
+// These are equivalent:
+const pool1 = createLakebasePool();
+const pool2 = createLakebasePool({ logger: { error: true } });
+```
+
+When used with AppKit, logging is automatically configured - see the [AppKit Integration](#appkit-integration) section.
+
+## ORM Examples
+
+### Drizzle ORM
+
+```typescript
+import { drizzle } from "drizzle-orm/node-postgres";
+import { createLakebasePool } from "@databricks/lakebase";
+
+const pool = createLakebasePool();
+const db = drizzle(pool);
+
+const users = await db.select().from(usersTable);
+```
+
+### Prisma
+
+```typescript
+import { PrismaPg } from "@prisma/adapter-pg";
+import { PrismaClient } from "@prisma/client";
+import { createLakebasePool } from "@databricks/lakebase";
+
+const pool = createLakebasePool();
+const adapter = new PrismaPg(pool);
+const prisma = new PrismaClient({ adapter });
+
+const users = await prisma.user.findMany();
+```
+
+### TypeORM
+
+```typescript
+import { DataSource } from "typeorm";
+import { getLakebaseOrmConfig } from "@databricks/lakebase";
+
+const dataSource = new DataSource({
+  type: "postgres",
+  synchronize: true,
+  ...getLakebaseOrmConfig(),
+  entities: [
+    // Your entity classes
+  ],
+});
+
+await dataSource.initialize();
+```
+
+### Sequelize
+
+```typescript
+import { Sequelize } from "sequelize";
+import { getLakebaseOrmConfig } from "@databricks/lakebase";
+
+const sequelize = new Sequelize({
+  dialect: "postgres",
+  ...getLakebaseOrmConfig(),
+});
+```
+
+## OpenTelemetry Integration
+
+The driver automatically uses OpenTelemetry's global registry when available. If your application initializes OpenTelemetry providers, the driver instruments queries and metrics with no additional configuration needed.
+
+### Setup
+
+Install OpenTelemetry in your application:
+
+```bash
+npm install @opentelemetry/api @opentelemetry/sdk-node
+```
+
+Initialize OpenTelemetry in your application:
+
+```typescript
+import { NodeSDK } from "@opentelemetry/sdk-node";
+
+const sdk = new NodeSDK({
+  // Your OTEL configuration
+});
+
+sdk.start(); // Registers global providers
+
+// Now create your pool - it automatically uses the global providers
+import { createLakebasePool } from "@databricks/lakebase";
+const pool = createLakebasePool();
+```
+
+The driver calls `trace.getTracer('@databricks/lakebase')` and `metrics.getMeter('@databricks/lakebase')` internally. If no global providers are registered, all telemetry operations are no-ops.
+
+### Metrics Exported
+
+- `lakebase.token.refresh.duration` - OAuth token refresh duration (histogram, ms)
+- `lakebase.query.duration` - Query execution duration (histogram, ms)
+- `lakebase.pool.connections.total` - Total connections in pool (gauge)
+- `lakebase.pool.connections.idle` - Idle connections (gauge)
+- `lakebase.pool.connections.waiting` - Clients waiting for connection (gauge)
+- `lakebase.pool.errors` - Pool errors by error code (counter)
+
+## AppKit Integration
+
+This driver is also available as part of [@databricks/appkit](https://www.npmjs.com/package/@databricks/appkit):
+
+```typescript
+import { createLakebasePool } from "@databricks/appkit";
+
+const pool = createLakebasePool();
+```
+
+**Differences between standalone and AppKit:**
+
+- **Standalone** (`@databricks/lakebase`): Logs errors to the console by default (see [Logging](#logging)).
+- **AppKit** (`@databricks/appkit`): Automatically injects AppKit's logger with scope `appkit:connectors:lakebase`.
+
+## Learn more about Lakebase Autoscaling
+
+For Lakebase Autoscaling documentation, see [docs.databricks.com/aws/en/oltp/projects](https://docs.databricks.com/aws/en/oltp/projects/).
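+
+## Advanced: Scoped Database Credentials
+
+For lower-level control you can call the exported `generateDatabaseCredential` helper directly - for example, to mint a token limited to read-only access on specific Unity Catalog tables. A minimal sketch (the endpoint path is a placeholder; substitute your actual project, branch, and endpoint IDs):
+
+```typescript
+import {
+  generateDatabaseCredential,
+  getWorkspaceClient,
+  RequestedClaimsPermissionSet,
+} from "@databricks/lakebase";
+
+// Resolve a WorkspaceClient via the SDK default auth chain
+const workspaceClient = await getWorkspaceClient({});
+
+// Request a 1-hour OAuth token scoped to read-only access on one UC table
+const credential = await generateDatabaseCredential(workspaceClient, {
+  endpoint: "projects/{project-id}/branches/{branch-id}/endpoints/{endpoint-id}",
+  claims: [
+    {
+      permission_set: RequestedClaimsPermissionSet.READ_ONLY,
+      resources: [{ table_name: "catalog.schema.users" }],
+    },
+  ],
+});
+
+// credential.token can be used as a Postgres password until credential.expire_time
+```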
diff --git a/packages/lakebase/package.json b/packages/lakebase/package.json new file mode 100644 index 00000000..09c186c2 --- /dev/null +++ b/packages/lakebase/package.json @@ -0,0 +1,72 @@ +{ + "name": "@databricks/lakebase", + "type": "module", + "version": "0.1.0", + "description": "PostgreSQL driver for Databricks Lakebase with automatic OAuth token refresh", + "main": "./dist/index.js", + "types": "./dist/index.d.ts", + "packageManager": "pnpm@10.21.0", + "repository": { + "type": "git", + "url": "git+https://github.com/databricks/appkit.git", + "directory": "packages/lakebase" + }, + "keywords": [ + "databricks", + "lakebase", + "postgres", + "postgresql", + "driver", + "oauth", + "oltp" + ], + "license": "Apache-2.0", + "files": [ + "dist", + "README.md", + "LICENSE" + ], + "exports": { + ".": { + "types": "./dist/index.d.ts", + "development": "./src/index.ts", + "default": "./dist/index.js" + }, + "./package.json": "./package.json" + }, + "scripts": { + "build:package": "tsdown --config tsdown.config.ts", + "build:watch": "tsdown --config tsdown.config.ts --watch", + "clean:full": "rm -rf dist node_modules tmp", + "clean": "rm -rf dist tmp", + "dist": "tsx ../../tools/dist.ts", + "tarball": "rm -rf tmp && pnpm dist && npm pack ./tmp --pack-destination ./tmp", + "typecheck": "tsc --noEmit", + "release": "release-it", + "release:dry": "release-it --dry-run", + "release:ci": "release-it --ci" + }, + "dependencies": { + "@databricks/sdk-experimental": "^0.16.0", + "pg": "^8.18.0" + }, + "peerDependencies": { + "@opentelemetry/api": "^1.9.0" + }, + "peerDependenciesMeta": { + "@opentelemetry/api": { + "optional": true + } + }, + "devDependencies": { + "@types/pg": "^8.16.0", + "@opentelemetry/api": "^1.9.0" + }, + "module": "./dist/index.js", + "publishConfig": { + "exports": { + ".": "./dist/index.js", + "./package.json": "./package.json" + } + } +} diff --git a/packages/lakebase/src/__tests__/credentials.test.ts b/packages/lakebase/src/__tests__/credentials.test.ts new file mode 100644 index 00000000..837163b1 --- /dev/null +++ b/packages/lakebase/src/__tests__/credentials.test.ts @@ -0,0 +1,187 @@ +import type { WorkspaceClient } from "@databricks/sdk-experimental"; +import { ApiClient, Config } from "@databricks/sdk-experimental"; +import { beforeEach, describe, expect, it, vi } from "vitest"; +import { generateDatabaseCredential } from "../credentials"; +import { + type DatabaseCredential, + RequestedClaimsPermissionSet, +} from "../types"; + +// Mock the @databricks/sdk-experimental module +vi.mock("@databricks/sdk-experimental", () => { + const mockRequest = vi.fn(); + + return { + Config: vi.fn(), + ApiClient: vi.fn().mockImplementation(() => ({ + request: mockRequest, + })), + }; +}); + +describe("Lakebase Authentication", () => { + let mockWorkspaceClient: WorkspaceClient; + let mockApiClient: ApiClient; + + beforeEach(() => { + vi.clearAllMocks(); + + // Get the mocked ApiClient constructor + const ApiClientConstructor = ApiClient as unknown as ReturnType< + typeof vi.fn + >; + mockApiClient = new ApiClientConstructor( + new Config({ host: "https://test.databricks.com" }), + ); + + // Setup mock workspace client with apiClient + mockWorkspaceClient = { + config: { + host: "https://test.databricks.com", + }, + apiClient: mockApiClient, + } as WorkspaceClient; + }); + + describe("generateDatabaseCredential", () => { + it("should generate database credentials with proper endpoint format", async () => { + const mockCredential: DatabaseCredential = { + token: 
"mock-oauth-token-abc123", + expire_time: "2026-02-06T18:00:00Z", + }; + + // Setup mock response + vi.mocked(mockApiClient.request).mockResolvedValue(mockCredential); + + const credential = await generateDatabaseCredential(mockWorkspaceClient, { + endpoint: "projects/test-project/branches/main/endpoints/primary", + }); + + // Verify API call + expect(mockApiClient.request).toHaveBeenCalledWith({ + path: "/api/2.0/postgres/credentials", + method: "POST", + headers: expect.any(Headers), + raw: false, + payload: { + endpoint: "projects/test-project/branches/main/endpoints/primary", + }, + }); + + // Verify response + expect(credential).toEqual(mockCredential); + expect(credential.token).toBe("mock-oauth-token-abc123"); + expect(credential.expire_time).toBe("2026-02-06T18:00:00Z"); + }); + + it("should include claims when provided", async () => { + const mockCredential: DatabaseCredential = { + token: "mock-oauth-token-with-claims", + expire_time: "2026-02-06T18:00:00Z", + }; + + vi.mocked(mockApiClient.request).mockResolvedValue(mockCredential); + + await generateDatabaseCredential(mockWorkspaceClient, { + endpoint: "projects/test-project/branches/main/endpoints/primary", + claims: [ + { + permission_set: RequestedClaimsPermissionSet.READ_ONLY, + resources: [ + { table_name: "catalog.schema.users" }, + { table_name: "catalog.schema.orders" }, + ], + }, + ], + }); + + // Verify claims are included in payload + expect(mockApiClient.request).toHaveBeenCalledWith({ + path: "/api/2.0/postgres/credentials", + method: "POST", + headers: expect.any(Headers), + raw: false, + payload: { + endpoint: "projects/test-project/branches/main/endpoints/primary", + claims: [ + { + permission_set: RequestedClaimsPermissionSet.READ_ONLY, + resources: [ + { table_name: "catalog.schema.users" }, + { table_name: "catalog.schema.orders" }, + ], + }, + ], + }, + }); + }); + + it("should handle token expiration time parsing", async () => { + const futureTime = new Date(Date.now() + 60 * 60 * 1000).toISOString(); // 1 hour from now + const mockCredential: DatabaseCredential = { + token: "mock-token", + expire_time: futureTime, + }; + + vi.mocked(mockApiClient.request).mockResolvedValue(mockCredential); + + const credential = await generateDatabaseCredential(mockWorkspaceClient, { + endpoint: "projects/test-project/branches/main/endpoints/primary", + }); + + // Verify expiration time is in the future + const expiresAt = new Date(credential.expire_time).getTime(); + expect(expiresAt).toBeGreaterThan(Date.now()); + }); + + it("should handle API errors gracefully", async () => { + const mockError = new Error("API request failed"); + vi.mocked(mockApiClient.request).mockRejectedValue(mockError); + + await expect( + generateDatabaseCredential(mockWorkspaceClient, { + endpoint: "projects/invalid/branches/main/endpoints/primary", + }), + ).rejects.toThrow("API request failed"); + }); + + it("should use correct workspace host for API calls", async () => { + const customHost = "https://custom-workspace.databricks.com"; + + // Create a new mock API client for the custom workspace + const ApiClientConstructor = ApiClient as unknown as ReturnType< + typeof vi.fn + >; + const customApiClient = new ApiClientConstructor( + new Config({ host: customHost }), + ); + + const customWorkspaceClient = { + config: { host: customHost }, + apiClient: customApiClient, + } as WorkspaceClient; + + const mockCredential: DatabaseCredential = { + token: "mock-token", + expire_time: "2026-02-06T18:00:00Z", + }; + + 
vi.mocked(customApiClient.request).mockResolvedValue(mockCredential); + + await generateDatabaseCredential(customWorkspaceClient, { + endpoint: "projects/test/branches/main/endpoints/primary", + }); + + // Verify the request was made with the correct workspace client + expect(customApiClient.request).toHaveBeenCalledWith({ + path: "/api/2.0/postgres/credentials", + method: "POST", + headers: expect.any(Headers), + raw: false, + payload: { + endpoint: "projects/test/branches/main/endpoints/primary", + }, + }); + }); + }); +}); diff --git a/packages/lakebase/src/__tests__/logger.test.ts b/packages/lakebase/src/__tests__/logger.test.ts new file mode 100644 index 00000000..0de3a208 --- /dev/null +++ b/packages/lakebase/src/__tests__/logger.test.ts @@ -0,0 +1,184 @@ +import { describe, expect, test, vi } from "vitest"; +import { resolveLogger } from "../logger"; +import type { Logger, LoggerConfig } from "../types"; + +describe("resolveLogger", () => { + describe("Logger instance passthrough", () => { + test("should return Logger instance as-is", () => { + const mockLogger: Logger = { + debug: vi.fn(), + info: vi.fn(), + warn: vi.fn(), + error: vi.fn(), + }; + + const result = resolveLogger(mockLogger); + + expect(result).toBe(mockLogger); + }); + }); + + describe("LoggerConfig resolution", () => { + test("should create console logger with all levels enabled", () => { + const config: LoggerConfig = { + debug: true, + info: true, + warn: true, + error: true, + }; + + const logger = resolveLogger(config); + + expect(typeof logger.debug).toBe("function"); + expect(typeof logger.info).toBe("function"); + expect(typeof logger.warn).toBe("function"); + expect(typeof logger.error).toBe("function"); + }); + + test("should create console logger with selective levels", () => { + const consoleDebugSpy = vi.spyOn(console, "debug"); + const consoleInfoSpy = vi.spyOn(console, "info"); + const consoleWarnSpy = vi.spyOn(console, "warn"); + const consoleErrorSpy = vi.spyOn(console, "error"); + + const config: LoggerConfig = { + debug: true, + info: false, + warn: true, + error: false, + }; + + const logger = resolveLogger(config); + + // Test enabled levels + logger.debug("test debug"); + expect(consoleDebugSpy).toHaveBeenCalledWith("test debug"); + + logger.warn("test warn"); + expect(consoleWarnSpy).toHaveBeenCalledWith("test warn"); + + // Test disabled levels (should be noop) + logger.info("test info"); + expect(consoleInfoSpy).not.toHaveBeenCalled(); + + logger.error("test error"); + expect(consoleErrorSpy).not.toHaveBeenCalled(); + + consoleDebugSpy.mockRestore(); + consoleInfoSpy.mockRestore(); + consoleWarnSpy.mockRestore(); + consoleErrorSpy.mockRestore(); + }); + + test("should create noop logger when all levels disabled", () => { + const consoleDebugSpy = vi.spyOn(console, "debug"); + const consoleInfoSpy = vi.spyOn(console, "info"); + const consoleWarnSpy = vi.spyOn(console, "warn"); + const consoleErrorSpy = vi.spyOn(console, "error"); + + const config: LoggerConfig = { + debug: false, + info: false, + warn: false, + error: false, + }; + + const logger = resolveLogger(config); + + logger.debug("test"); + logger.info("test"); + logger.warn("test"); + logger.error("test"); + + expect(consoleDebugSpy).not.toHaveBeenCalled(); + expect(consoleInfoSpy).not.toHaveBeenCalled(); + expect(consoleWarnSpy).not.toHaveBeenCalled(); + expect(consoleErrorSpy).not.toHaveBeenCalled(); + + consoleDebugSpy.mockRestore(); + consoleInfoSpy.mockRestore(); + consoleWarnSpy.mockRestore(); + consoleErrorSpy.mockRestore(); + 
}); + + test("should handle empty LoggerConfig", () => { + const consoleDebugSpy = vi.spyOn(console, "debug"); + const consoleInfoSpy = vi.spyOn(console, "info"); + const consoleWarnSpy = vi.spyOn(console, "warn"); + const consoleErrorSpy = vi.spyOn(console, "error"); + + const config: LoggerConfig = {}; + + const logger = resolveLogger(config); + + logger.debug("test"); + logger.info("test"); + logger.warn("test"); + logger.error("test"); + + expect(consoleDebugSpy).not.toHaveBeenCalled(); + expect(consoleInfoSpy).not.toHaveBeenCalled(); + expect(consoleWarnSpy).not.toHaveBeenCalled(); + expect(consoleErrorSpy).not.toHaveBeenCalled(); + + consoleDebugSpy.mockRestore(); + consoleInfoSpy.mockRestore(); + consoleWarnSpy.mockRestore(); + consoleErrorSpy.mockRestore(); + }); + + test("should support format strings and args", () => { + const consoleErrorSpy = vi.spyOn(console, "error"); + + const config: LoggerConfig = { + error: true, + }; + + const logger = resolveLogger(config); + + logger.error("Error: %s %d", "test", 123); + + expect(consoleErrorSpy).toHaveBeenCalledWith("Error: %s %d", "test", 123); + + consoleErrorSpy.mockRestore(); + }); + }); + + describe("Default behavior", () => { + test("should create error-only logger when undefined", () => { + const consoleDebugSpy = vi.spyOn(console, "debug"); + const consoleInfoSpy = vi.spyOn(console, "info"); + const consoleWarnSpy = vi.spyOn(console, "warn"); + const consoleErrorSpy = vi.spyOn(console, "error"); + + const logger = resolveLogger(undefined); + + logger.debug("test"); + logger.info("test"); + logger.warn("test"); + logger.error("test error"); + + expect(consoleDebugSpy).not.toHaveBeenCalled(); + expect(consoleInfoSpy).not.toHaveBeenCalled(); + expect(consoleWarnSpy).not.toHaveBeenCalled(); + expect(consoleErrorSpy).toHaveBeenCalledWith("test error"); + + consoleDebugSpy.mockRestore(); + consoleInfoSpy.mockRestore(); + consoleWarnSpy.mockRestore(); + consoleErrorSpy.mockRestore(); + }); + + test("should create error-only logger when no argument provided", () => { + const consoleErrorSpy = vi.spyOn(console, "error"); + + const logger = resolveLogger(); + + logger.error("test error"); + + expect(consoleErrorSpy).toHaveBeenCalledWith("test error"); + + consoleErrorSpy.mockRestore(); + }); + }); +}); diff --git a/packages/lakebase/src/__tests__/pool.test.ts b/packages/lakebase/src/__tests__/pool.test.ts new file mode 100644 index 00000000..879080e5 --- /dev/null +++ b/packages/lakebase/src/__tests__/pool.test.ts @@ -0,0 +1,811 @@ +import { afterEach, beforeEach, describe, expect, test, vi } from "vitest"; +import { createLakebasePool } from "../pool"; + +// ── Mocks ──────────────────────────────────────────────────────────── + +// Mock pg module +vi.mock("pg", () => { + const mockQuery = vi.fn(); + const mockConnect = vi.fn(); + const mockEnd = vi.fn().mockResolvedValue(undefined); + const mockOn = vi.fn(); + + const MockPool = vi.fn((config) => { + const listeners: Map void>> = new Map(); + return { + query: mockQuery, + connect: mockConnect, + end: mockEnd, + on: vi.fn((event: string, handler: (...args: any[]) => void) => { + if (!listeners.has(event)) { + listeners.set(event, []); + } + listeners.get(event)?.push(handler); + }), + emit: vi.fn((event: string, ...args: any[]) => { + listeners.get(event)?.forEach((handler) => { + handler(...args); + }); + }), + options: config, // Store config for inspection + totalCount: 3, + idleCount: 1, + waitingCount: 0, + }; + }); + + return { + default: { Pool: MockPool }, + Pool: MockPool, + 
__mockQuery: mockQuery, + __mockConnect: mockConnect, + __mockEnd: mockEnd, + __mockOn: mockOn, + __MockPool: MockPool, + }; +}); + +// Mock generateDatabaseCredential +vi.mock("../credentials", async (importOriginal) => { + const actual = await importOriginal(); + return { + ...actual, + generateDatabaseCredential: vi.fn(), + }; +}); + +// Mock telemetry - create spies for all metric instruments +const mockSpanEnd = vi.fn(); +const mockSpanSetAttribute = vi.fn(); +const mockSpanSetStatus = vi.fn(); +const mockCounterAdd = vi.fn(); +const mockHistogramRecord = vi.fn(); +const mockAddCallback = vi.fn(); + +const mockTracer = { + startActiveSpan: vi.fn( + (_name: string, _opts: unknown, fn: (span: unknown) => T): T => { + const span = { + setAttribute: mockSpanSetAttribute, + setStatus: mockSpanSetStatus, + end: mockSpanEnd, + recordException: vi.fn(), + }; + return fn(span); + }, + ), +}; + +const mockMeter = { + createCounter: vi.fn(() => ({ add: mockCounterAdd })), + createHistogram: vi.fn(() => ({ record: mockHistogramRecord })), + createObservableGauge: vi.fn(() => ({ + addCallback: mockAddCallback, + })), +}; + +vi.mock("../telemetry", () => ({ + SpanStatusCode: { OK: 1, ERROR: 2 }, + SpanKind: { CLIENT: 3 }, + initTelemetry: vi.fn(() => + Promise.resolve({ + tracer: mockTracer, + meter: mockMeter, + tokenRefreshDuration: { record: mockHistogramRecord }, + queryDuration: { record: mockHistogramRecord }, + poolErrors: { add: mockCounterAdd }, + }), + ), + attachPoolMetrics: vi.fn(), + wrapPoolQuery: vi.fn((pool, telemetry) => { + // Simulate wrapping pool.query with telemetry + const origQuery = pool.query; + pool.query = function queryWithTelemetry(...args: unknown[]) { + return telemetry.tracer.startActiveSpan( + "lakebase.query", + { kind: 2, attributes: {} }, + (span: any) => { + const result = origQuery.apply(pool, args); + if (result && typeof result.then === "function") { + return result.finally(() => span.end()); + } + span.end(); + return result; + }, + ); + } as any; + }), +})); + +vi.mock("../logger", async (importOriginal) => { + const actual = await importOriginal(); + return { + ...actual, + // Keep the real resolveLogger implementation + }; +}); + +// ── Test suite ─────────────────────────────────────────────────────── + +describe("createLakebasePool", () => { + let mockGenerateCredential: ReturnType; + + // Save original env vars to restore after each test + const originalEnv: Record = {}; + const envKeysUsed = [ + "PGHOST", + "PGDATABASE", + "LAKEBASE_ENDPOINT", + "PGUSER", + "PGPORT", + "PGSSLMODE", + "DATABRICKS_CLIENT_ID", + ]; + + beforeEach(async () => { + vi.clearAllMocks(); + + // Save original env vars + for (const key of envKeysUsed) { + originalEnv[key] = process.env[key]; + } + + // Setup environment variables + process.env.PGHOST = "ep-test.database.us-east-1.databricks.com"; + process.env.PGDATABASE = "databricks_postgres"; + process.env.LAKEBASE_ENDPOINT = + "projects/test-project/branches/main/endpoints/primary"; + process.env.PGUSER = "test-user@example.com"; + + // Setup mock for generateDatabaseCredential + const utils = await import("../credentials"); + mockGenerateCredential = utils.generateDatabaseCredential as any; + mockGenerateCredential.mockResolvedValue({ + token: "test-oauth-token-12345", + expire_time: new Date(Date.now() + 3600000).toISOString(), // 1 hour from now + }); + }); + + afterEach(() => { + // Restore original env vars + for (const key of envKeysUsed) { + if (originalEnv[key] === undefined) { + delete process.env[key]; + } else { 
+ process.env[key] = originalEnv[key]; + } + } + }); + + describe("configuration", () => { + test("should create pool with environment variables", () => { + const pool = createLakebasePool({ + workspaceClient: {} as any, + }); + + expect(pool).toBeDefined(); + expect(pool.options.host).toBe( + "ep-test.database.us-east-1.databricks.com", + ); + expect(pool.options.database).toBe("databricks_postgres"); + expect(pool.options.user).toBe("test-user@example.com"); + expect(pool.options.port).toBe(5432); + }); + + test("should create pool with explicit configuration", () => { + const pool = createLakebasePool({ + workspaceClient: {} as any, + endpoint: "projects/my-project/branches/dev/endpoints/test", + host: "ep-custom.database.us-west-2.databricks.com", + database: "custom_db", + user: "custom-user@example.com", // Explicit user overrides env + port: 5433, + max: 20, + }); + + expect(pool.options.host).toBe( + "ep-custom.database.us-west-2.databricks.com", + ); + expect(pool.options.database).toBe("custom_db"); + expect(pool.options.user).toBe("custom-user@example.com"); + expect(pool.options.port).toBe(5433); + expect(pool.options.max).toBe(20); + }); + + test("should throw error when endpoint is missing", () => { + delete process.env.LAKEBASE_ENDPOINT; + + expect(() => + createLakebasePool({ + workspaceClient: {} as any, + }), + ).toThrow("LAKEBASE_ENDPOINT or config.endpoint"); + }); + + test("should throw error when host is missing", () => { + delete process.env.PGHOST; + + expect(() => + createLakebasePool({ + workspaceClient: {} as any, + }), + ).toThrow("PGHOST or config.host"); + }); + + test("should throw error when database is missing", () => { + delete process.env.PGDATABASE; + + expect(() => + createLakebasePool({ + workspaceClient: {} as any, + }), + ).toThrow("PGDATABASE or config.database"); + }); + + test("should throw error when user is missing", () => { + delete process.env.PGUSER; + delete process.env.DATABRICKS_CLIENT_ID; + + expect(() => + createLakebasePool({ + workspaceClient: {} as any, + }), + ).toThrow("PGUSER, DATABRICKS_CLIENT_ID, or config.user"); + }); + + test("should use DATABRICKS_CLIENT_ID as fallback for user", () => { + delete process.env.PGUSER; + process.env.DATABRICKS_CLIENT_ID = "service-principal-123"; + + const pool = createLakebasePool({ + workspaceClient: {} as any, + }); + + expect(pool.options.user).toBe("service-principal-123"); + }); + + test("should use default values for optional config", () => { + const pool = createLakebasePool({ + workspaceClient: {} as any, + }); + + expect(pool.options.port).toBe(5432); + expect(pool.options.max).toBe(10); + expect(pool.options.idleTimeoutMillis).toBe(30000); + expect(pool.options.connectionTimeoutMillis).toBe(10000); + }); + + test("should configure SSL based on sslMode", () => { + const pool = createLakebasePool({ + workspaceClient: {} as any, + sslMode: "require", + }); + + expect(pool.options.ssl).toEqual({ rejectUnauthorized: true }); + }); + + test("should allow custom SSL configuration", () => { + const customSSL = { rejectUnauthorized: false, ca: "custom-ca" }; + const pool = createLakebasePool({ + workspaceClient: {} as any, + ssl: customSSL, + }); + + expect(pool.options.ssl).toEqual(customSSL); + }); + + test("should throw on invalid PGSSLMODE", () => { + process.env.PGSSLMODE = "verify-full"; + + expect(() => + createLakebasePool({ + workspaceClient: {} as any, + }), + ).toThrow("one of: require, disable, prefer"); + }); + + test("should accept valid PGSSLMODE values", () => { + for (const 
mode of ["require", "disable", "prefer"]) { + process.env.PGSSLMODE = mode; + + expect(() => + createLakebasePool({ + workspaceClient: {} as any, + }), + ).not.toThrow(); + } + }); + }); + + describe("password callback", () => { + test("should configure password as async function", () => { + const pool = createLakebasePool({ + workspaceClient: {} as any, + }); + + expect(typeof pool.options.password).toBe("function"); + }); + + test("should fetch OAuth token when password callback is invoked", async () => { + const workspaceClient = { + test: "client", + config: { host: "test" }, + } as any; + const pool = createLakebasePool({ + workspaceClient, + endpoint: "projects/test/branches/main/endpoints/primary", + }); + + // Invoke the password callback + const passwordFn = pool.options.password as () => Promise; + const password = await passwordFn(); + + expect(mockGenerateCredential).toHaveBeenCalledWith(workspaceClient, { + endpoint: "projects/test/branches/main/endpoints/primary", + }); + expect(password).toBe("test-oauth-token-12345"); + }); + + test("should cache OAuth token for subsequent calls", async () => { + const workspaceClient = { config: { host: "test" } } as any; + const pool = createLakebasePool({ + workspaceClient, + }); + + const passwordFn = pool.options.password as () => Promise; + + // First call - should fetch token + const password1 = await passwordFn(); + expect(mockGenerateCredential).toHaveBeenCalledTimes(1); + + // Second call - should use cached token + const password2 = await passwordFn(); + expect(mockGenerateCredential).toHaveBeenCalledTimes(1); // Still 1 + expect(password2).toBe(password1); + }); + + test("should refresh token when it expires", async () => { + const workspaceClient = { config: { host: "test" } } as any; + + // First token expires in 1 minute (within buffer) + mockGenerateCredential.mockResolvedValueOnce({ + token: "expiring-token", + expire_time: new Date(Date.now() + 60000).toISOString(), + }); + + // Second token expires in 1 hour + mockGenerateCredential.mockResolvedValueOnce({ + token: "new-token", + expire_time: new Date(Date.now() + 3600000).toISOString(), + }); + + const pool = createLakebasePool({ + workspaceClient, + }); + + const passwordFn = pool.options.password as () => Promise; + + // First call - get expiring token + const password1 = await passwordFn(); + expect(password1).toBe("expiring-token"); + expect(mockGenerateCredential).toHaveBeenCalledTimes(1); + + // Second call - token is expiring, should refresh + const password2 = await passwordFn(); + expect(password2).toBe("new-token"); + expect(mockGenerateCredential).toHaveBeenCalledTimes(2); + }); + + test("should handle token fetch errors", async () => { + const workspaceClient = { config: { host: "test" } } as any; + + mockGenerateCredential.mockRejectedValue(new Error("Token fetch failed")); + + const pool = createLakebasePool({ + workspaceClient, + }); + + const passwordFn = pool.options.password as () => Promise; + await expect(passwordFn()).rejects.toThrow("Token fetch failed"); + }); + + test("should deduplicate concurrent token refresh requests", async () => { + const workspaceClient = { config: { host: "test" } } as any; + + // Make the credential generation slow + mockGenerateCredential.mockImplementation( + () => + new Promise((resolve) => + setTimeout( + () => + resolve({ + token: "deduped-token", + expire_time: new Date(Date.now() + 3600000).toISOString(), + }), + 50, + ), + ), + ); + + const pool = createLakebasePool({ + workspaceClient, + }); + + const passwordFn 
= pool.options.password as () => Promise; + + // Fire multiple concurrent calls + const [p1, p2, p3] = await Promise.all([ + passwordFn(), + passwordFn(), + passwordFn(), + ]); + + // Only one API call should have been made + expect(mockGenerateCredential).toHaveBeenCalledTimes(1); + expect(p1).toBe("deduped-token"); + expect(p2).toBe("deduped-token"); + expect(p3).toBe("deduped-token"); + }); + }); + + describe("workspace client", () => { + test("should use provided workspace client", () => { + const workspaceClient = { config: { host: "test" } } as any; + const pool = createLakebasePool({ + workspaceClient, + }); + + expect(pool).toBeDefined(); + }); + + test("should fallback to SDK default auth when workspace client not provided", async () => { + const pool = createLakebasePool({ + // No workspace client provided - should use SDK default auth chain + }); + + // Pool should be created successfully + expect(pool).toBeDefined(); + expect(pool.options.password).toBeDefined(); + expect(typeof pool.options.password).toBe("function"); + }); + }); + + describe("pool behavior", () => { + test("should register error handler", () => { + const pool = createLakebasePool({ + workspaceClient: {} as any, + }); + + // Pool should have on method for error handling + expect(pool.on).toBeDefined(); + expect(typeof pool.on).toBe("function"); + }); + + test("should return pg.Pool instance", () => { + const pool = createLakebasePool({ + workspaceClient: {} as any, + }); + + // Standard pg.Pool methods should be available + expect(pool.query).toBeDefined(); + expect(pool.connect).toBeDefined(); + expect(pool.end).toBeDefined(); + expect(typeof pool.query).toBe("function"); + expect(typeof pool.connect).toBe("function"); + expect(typeof pool.end).toBe("function"); + }); + }); + + describe("ORM compatibility patterns", () => { + test("should work with Drizzle pattern", () => { + const pool = createLakebasePool({ + workspaceClient: {} as any, + }); + + // Drizzle expects { client: pool } + const drizzleConfig = { client: pool }; + expect(drizzleConfig.client).toBe(pool); + expect(typeof drizzleConfig.client.query).toBe("function"); + }); + + test("should work with Prisma adapter pattern", () => { + const pool = createLakebasePool({ + workspaceClient: {} as any, + }); + + // Prisma expects PrismaPg(pool) + // Mock PrismaPg adapter + const mockPrismaPg = (pgPool: any) => ({ pool: pgPool }); + const adapter = mockPrismaPg(pool); + + expect(adapter.pool).toBe(pool); + }); + + test("should expose standard pg.Pool interface", () => { + const pool = createLakebasePool({ + workspaceClient: {} as any, + }); + + // Standard pg.Pool methods + expect(pool.query).toBeDefined(); + expect(pool.connect).toBeDefined(); + expect(pool.end).toBeDefined(); + expect(pool.on).toBeDefined(); + + // Options should be accessible + expect(pool.options).toBeDefined(); + expect(pool.options.host).toBeDefined(); + expect(pool.options.database).toBeDefined(); + }); + }); + + describe("native password authentication", () => { + test("should use static password when provided", () => { + const pool = createLakebasePool({ + password: "my-static-password", + host: "ep-test.database.us-east-1.databricks.com", + database: "databricks_postgres", + }); + + expect(pool.options.password).toBe("my-static-password"); + }); + + test("should prioritize password over OAuth when both provided", () => { + const pool = createLakebasePool({ + password: "my-password", + workspaceClient: {} as any, + endpoint: "projects/test/branches/main/endpoints/primary", + }); 
+ + expect(pool.options.password).toBe("my-password"); + }); + + test("should support custom password callback function", async () => { + const customCallback = vi.fn(async () => "custom-token"); + + const pool = createLakebasePool({ + password: customCallback, + host: "ep-test.database.us-east-1.databricks.com", + database: "databricks_postgres", + }); + + expect(typeof pool.options.password).toBe("function"); + const passwordFn = pool.options.password as () => Promise; + const result = await passwordFn(); + + expect(result).toBe("custom-token"); + expect(customCallback).toHaveBeenCalled(); + }); + + test("should not require endpoint when password is provided", () => { + delete process.env.LAKEBASE_ENDPOINT; + + expect(() => + createLakebasePool({ + password: "my-password", + host: "ep-test.database.us-east-1.databricks.com", + database: "databricks_postgres", + }), + ).not.toThrow(); + }); + + test("should not call OAuth token generation when password is provided", async () => { + const pool = createLakebasePool({ + password: "static-password", + host: "ep-test.database.us-east-1.databricks.com", + database: "databricks_postgres", + }); + + // Simulate pg calling the password - should return the string directly + expect(pool.options.password).toBe("static-password"); + + // OAuth credential generation should not have been called + expect(mockGenerateCredential).not.toHaveBeenCalled(); + }); + }); + + describe("telemetry", () => { + test("should initialize telemetry", async () => { + const { initTelemetry } = await import("../telemetry"); + + createLakebasePool({ + workspaceClient: {} as any, + }); + + expect(initTelemetry).toHaveBeenCalled(); + }); + + test("should record token refresh duration on successful fetch", async () => { + const workspaceClient = { config: { host: "test" } } as any; + const pool = createLakebasePool({ + workspaceClient, + }); + + // Wait for telemetry initialization + await new Promise((resolve) => setImmediate(resolve)); + + const passwordFn = pool.options.password as () => Promise; + await passwordFn(); + + // Token refresh duration should be recorded (histogram captures count implicitly) + expect(mockHistogramRecord).toHaveBeenCalledWith(expect.any(Number)); + }); + + test("should set span attributes on token refresh", async () => { + const workspaceClient = { config: { host: "test" } } as any; + const pool = createLakebasePool({ + workspaceClient, + }); + + // Wait for telemetry initialization + await new Promise((resolve) => setImmediate(resolve)); + + const passwordFn = pool.options.password as () => Promise; + await passwordFn(); + + // Span should have token expiration attribute + expect(mockSpanSetAttribute).toHaveBeenCalledWith( + "lakebase.token.expires_at", + expect.any(String), + ); + // Note: We don't explicitly set span status to OK for successful operations + // (default UNSET status is interpreted as success) + expect(mockSpanEnd).toHaveBeenCalled(); + }); + + test("should wrap pool.query to add telemetry tracking", async () => { + const pool = createLakebasePool({ + workspaceClient: {} as any, + }); + + // Wait for telemetry initialization + await new Promise((resolve) => setImmediate(resolve)); + + // pool.query should be our wrapped function + expect(typeof pool.query).toBe("function"); + expect(pool.query.name).toBe("queryWithTelemetry"); + }); + }); + + describe("logger injection", () => { + test("should default to error-only logging when no logger provided", () => { + const consoleDebugSpy = vi.spyOn(console, "debug"); + const 
consoleInfoSpy = vi.spyOn(console, "info"); + const consoleWarnSpy = vi.spyOn(console, "warn"); + const consoleErrorSpy = vi.spyOn(console, "error"); + + const pool = createLakebasePool({ + workspaceClient: {} as any, + }); + + expect(pool).toBeDefined(); + // Default behavior: only errors are logged + expect(consoleDebugSpy).not.toHaveBeenCalled(); + expect(consoleInfoSpy).not.toHaveBeenCalled(); + expect(consoleWarnSpy).not.toHaveBeenCalled(); + // Error logging would happen on actual errors + + consoleDebugSpy.mockRestore(); + consoleInfoSpy.mockRestore(); + consoleWarnSpy.mockRestore(); + consoleErrorSpy.mockRestore(); + }); + + test("should use injected Logger instance", () => { + const mockLogger = { + debug: vi.fn(), + info: vi.fn(), + warn: vi.fn(), + error: vi.fn(), + }; + + const pool = createLakebasePool({ + workspaceClient: {} as any, + logger: mockLogger, + }); + + expect(pool).toBeDefined(); + expect(mockLogger.debug).toHaveBeenCalledWith( + expect.stringContaining("Created Lakebase connection pool"), + expect.any(String), + expect.any(String), + expect.any(String), + ); + }); + + test("should use LoggerConfig with selective levels", () => { + const consoleDebugSpy = vi.spyOn(console, "debug"); + const consoleInfoSpy = vi.spyOn(console, "info"); + const consoleWarnSpy = vi.spyOn(console, "warn"); + const consoleErrorSpy = vi.spyOn(console, "error"); + + const pool = createLakebasePool({ + workspaceClient: {} as any, + logger: { debug: true, info: false, warn: false, error: true }, + }); + + expect(pool).toBeDefined(); + // Debug should be logged + expect(consoleDebugSpy).toHaveBeenCalledWith( + expect.stringContaining("Created Lakebase connection pool"), + expect.any(String), + expect.any(String), + expect.any(String), + ); + // Info and warn should not be called + expect(consoleInfoSpy).not.toHaveBeenCalled(); + expect(consoleWarnSpy).not.toHaveBeenCalled(); + + consoleDebugSpy.mockRestore(); + consoleInfoSpy.mockRestore(); + consoleWarnSpy.mockRestore(); + consoleErrorSpy.mockRestore(); + }); + + test("should use LoggerConfig with all levels enabled", () => { + const consoleDebugSpy = vi.spyOn(console, "debug"); + const consoleInfoSpy = vi.spyOn(console, "info"); + const consoleWarnSpy = vi.spyOn(console, "warn"); + const consoleErrorSpy = vi.spyOn(console, "error"); + + const pool = createLakebasePool({ + workspaceClient: {} as any, + logger: { debug: true, info: true, warn: true, error: true }, + }); + + expect(pool).toBeDefined(); + expect(consoleDebugSpy).toHaveBeenCalled(); + + consoleDebugSpy.mockRestore(); + consoleInfoSpy.mockRestore(); + consoleWarnSpy.mockRestore(); + consoleErrorSpy.mockRestore(); + }); + + test("should use LoggerConfig with all levels disabled", () => { + const consoleDebugSpy = vi.spyOn(console, "debug"); + const consoleInfoSpy = vi.spyOn(console, "info"); + const consoleWarnSpy = vi.spyOn(console, "warn"); + const consoleErrorSpy = vi.spyOn(console, "error"); + + const pool = createLakebasePool({ + workspaceClient: {} as any, + logger: { debug: false, info: false, warn: false, error: false }, + }); + + expect(pool).toBeDefined(); + expect(consoleDebugSpy).not.toHaveBeenCalled(); + expect(consoleInfoSpy).not.toHaveBeenCalled(); + expect(consoleWarnSpy).not.toHaveBeenCalled(); + expect(consoleErrorSpy).not.toHaveBeenCalled(); + + consoleDebugSpy.mockRestore(); + consoleInfoSpy.mockRestore(); + consoleWarnSpy.mockRestore(); + consoleErrorSpy.mockRestore(); + }); + + test("should pass resolved logger to error handlers", () => { + const 
consoleErrorSpy = vi.spyOn(console, "error"); + + const pool = createLakebasePool({ + workspaceClient: {} as any, + logger: { debug: true, error: true }, + }); + + // Trigger a pool error to verify logger is attached + const testError = new Error("Test error") as Error & { code?: string }; + testError.code = "TEST_CODE"; + pool.emit("error", testError); + + // Verify error was logged with correct format + expect(consoleErrorSpy).toHaveBeenCalledWith( + expect.stringContaining("Connection pool error"), + expect.stringContaining("Test error"), + expect.stringContaining("TEST_CODE"), + ); + + consoleErrorSpy.mockRestore(); + }); + }); +}); diff --git a/packages/lakebase/src/config.ts b/packages/lakebase/src/config.ts new file mode 100644 index 00000000..43a44051 --- /dev/null +++ b/packages/lakebase/src/config.ts @@ -0,0 +1,145 @@ +import { WorkspaceClient } from "@databricks/sdk-experimental"; +import type pg from "pg"; +import { ConfigurationError, ValidationError } from "./errors"; +import type { LakebasePoolConfig } from "./types"; + +/** Default configuration values for the Lakebase connector */ +const defaults = { + port: 5432, + sslMode: "require" as const, + max: 10, + idleTimeoutMillis: 30_000, + connectionTimeoutMillis: 10_000, +}; + +const VALID_SSL_MODES = ["require", "disable", "prefer"] as const; +type SslMode = (typeof VALID_SSL_MODES)[number]; + +export interface ParsedPoolConfig { + endpoint?: string; + host: string; + database: string; + port: number; + sslMode: SslMode; + ssl?: pg.PoolConfig["ssl"]; + max: number; + idleTimeoutMillis: number; + connectionTimeoutMillis: number; +} + +/** Parse pool configuration from provided config and environment variables */ +export function parsePoolConfig( + userConfig?: Partial, +): ParsedPoolConfig { + // Get endpoint (required only for OAuth auth) + const endpoint = userConfig?.endpoint ?? process.env.LAKEBASE_ENDPOINT; + + // Only require endpoint if no password provided + if (!endpoint && !userConfig?.password) { + throw ConfigurationError.missingEnvVar( + "LAKEBASE_ENDPOINT or config.endpoint (or provide config.password for native auth)", + ); + } + + // Get host (required) + const host = userConfig?.host ?? process.env.PGHOST; + if (!host) { + throw ConfigurationError.missingEnvVar("PGHOST or config.host"); + } + + // Get database (required) + const database = userConfig?.database ?? process.env.PGDATABASE; + if (!database) { + throw ConfigurationError.missingEnvVar("PGDATABASE or config.database"); + } + + // Get port (optional, default from defaults) + const portStr = process.env.PGPORT; + const port = + userConfig?.port ?? + (portStr ? Number.parseInt(portStr, 10) : defaults.port); + + if (Number.isNaN(port)) { + throw ValidationError.invalidValue("port", portStr, "a number"); + } + + // Get SSL mode (optional, default from defaults) + const rawSslMode = userConfig?.sslMode ?? process.env.PGSSLMODE ?? undefined; + + const sslMode = validateSslMode(rawSslMode) ?? defaults.sslMode; + + // Pool options (with defaults) + const max = userConfig?.max ?? defaults.max; + const idleTimeoutMillis = + userConfig?.idleTimeoutMillis ?? defaults.idleTimeoutMillis; + const connectionTimeoutMillis = + userConfig?.connectionTimeoutMillis ?? 
defaults.connectionTimeoutMillis; + + return { + endpoint, + host, + database, + port, + sslMode, + ssl: userConfig?.ssl, + max, + idleTimeoutMillis, + connectionTimeoutMillis, + }; +} + +/** Validate and return the SSL mode, or undefined when not set */ +function validateSslMode(value: string | undefined): SslMode | undefined { + if (value === undefined) { + return undefined; + } + + if (!(VALID_SSL_MODES as readonly string[]).includes(value)) { + throw ValidationError.invalidValue( + "sslMode (PGSSLMODE)", + value, + `one of: ${VALID_SSL_MODES.join(", ")}`, + ); + } + + return value as SslMode; +} + +/** Get workspace client from config or SDK default auth chain */ +export async function getWorkspaceClient( + config: Partial, +): Promise { + // Priority 1: Explicit workspaceClient in config + if (config.workspaceClient) { + return config.workspaceClient; + } + + // Priority 2: Create with SDK default auth chain + // Use empty config to let SDK use .databrickscfg, DATABRICKS_HOST, DATABRICKS_TOKEN, etc. + // NOTE: config.host is the PostgreSQL host (PGHOST), not the Databricks workspace host + return new WorkspaceClient({}); +} + +/** Get username synchronously from config or environment */ +export function getUsernameSync(config: Partial): string { + // Priority 1: Explicit user in config + if (config.user) { + return config.user; + } + + // Priority 2: PGUSER environment variable + const pgUser = process.env.PGUSER; + if (pgUser) { + return pgUser; + } + + // Priority 3: DATABRICKS_CLIENT_ID (service principal ID) + const clientId = process.env.DATABRICKS_CLIENT_ID; + if (clientId) { + return clientId; + } + + throw ConfigurationError.missingEnvVar( + "PGUSER, DATABRICKS_CLIENT_ID, or config.user", + ); +} diff --git a/packages/lakebase/src/credentials.ts b/packages/lakebase/src/credentials.ts new file mode 100644 index 00000000..2e3c84bd --- /dev/null +++ b/packages/lakebase/src/credentials.ts @@ -0,0 +1,94 @@ +import type { WorkspaceClient } from "@databricks/sdk-experimental"; +import { ValidationError } from "./errors"; +import type { + DatabaseCredential, + GenerateDatabaseCredentialRequest, +} from "./types"; + +/** + * Generate OAuth credentials for Postgres database connection using the proper Postgres API. + * + * This generates a time-limited OAuth token (expires after 1 hour) that can be used + * as a password when connecting to Lakebase Postgres databases. 
+ * + * @param workspaceClient - Databricks workspace client for authentication + * @param request - Request parameters including endpoint path and optional UC claims + * @returns Database credentials with OAuth token and expiration time + * + * @see https://docs.databricks.com/aws/en/oltp/projects/authentication + * + * @example + * ```typescript + * // Format: projects/{project-id}/branches/{branch-id}/endpoints/{endpoint-id} + * // Note: Use actual IDs from Databricks (project-id is a UUID) + * const credential = await generateDatabaseCredential(workspaceClient, { + * endpoint: "projects/6bef4151-4b5d-4147-b4d0-c2f4fd5b40db/branches/br-sparkling-tree-y17uj7fn/endpoints/ep-restless-pine-y1ldaht0" + * }); + * + * // Use credential.token as password + * const conn = await pg.connect({ + * host: "ep-abc123.database.us-east-1.databricks.com", + * user: "user@example.com", + * password: credential.token + * }); + * ``` + * + * @example With UC table permissions + * ```typescript + * // Format: projects/{project-id}/branches/{branch-id}/endpoints/{endpoint-id} + * const credential = await generateDatabaseCredential(workspaceClient, { + * endpoint: "projects/6bef4151-4b5d-4147-b4d0-c2f4fd5b40db/branches/br-sparkling-tree-y17uj7fn/endpoints/ep-restless-pine-y1ldaht0", + * claims: [{ + * permission_set: RequestedClaimsPermissionSet.READ_ONLY, + * resources: [{ table_name: "catalog.schema.users" }] + * }] + * }); + * ``` + */ +export async function generateDatabaseCredential( + workspaceClient: WorkspaceClient, + request: GenerateDatabaseCredentialRequest, +): Promise { + const apiPath = "/api/2.0/postgres/credentials"; + + const response = await workspaceClient.apiClient.request({ + path: apiPath, + method: "POST", + headers: new Headers({ + Accept: "application/json", + "Content-Type": "application/json", + }), + raw: false, + payload: request, + }); + + return validateCredentialResponse(response); +} + +/** Validate the API response has the expected shape */ +function validateCredentialResponse(response: unknown): DatabaseCredential { + if ( + typeof response !== "object" || + response === null || + !("token" in response) || + !("expire_time" in response) + ) { + throw ValidationError.invalidValue( + "credential response", + response, + "an object with { token, expire_time }", + ); + } + + const { token, expire_time } = response as Record; + + if (typeof token !== "string" || typeof expire_time !== "string") { + throw ValidationError.invalidValue( + "credential response fields", + { tokenType: typeof token, expireTimeType: typeof expire_time }, + "token and expire_time to be strings", + ); + } + + return { token, expire_time }; +} diff --git a/packages/lakebase/src/errors.ts b/packages/lakebase/src/errors.ts new file mode 100644 index 00000000..86c3ecc1 --- /dev/null +++ b/packages/lakebase/src/errors.ts @@ -0,0 +1,66 @@ +/** + * Base error class for Lakebase driver errors. + */ +export abstract class LakebaseError extends Error { + abstract readonly code: string; + readonly cause?: Error; + readonly context?: Record; + + constructor( + message: string, + options?: { cause?: Error; context?: Record }, + ) { + super(message); + this.name = this.constructor.name; + this.cause = options?.cause; + this.context = options?.context; + + if (Error.captureStackTrace) { + Error.captureStackTrace(this, this.constructor); + } + } +} + +/** + * Error thrown when configuration is missing or invalid. 
+ */ +export class ConfigurationError extends LakebaseError { + readonly code = "CONFIGURATION_ERROR"; + + /** + * Create a configuration error for missing environment variable + */ + static missingEnvVar(varName: string): ConfigurationError { + return new ConfigurationError( + `${varName} environment variable is required`, + { context: { envVar: varName } }, + ); + } +} + +/** + * Error thrown when input validation fails. + */ +export class ValidationError extends LakebaseError { + readonly code = "VALIDATION_ERROR"; + + /** + * Create a validation error for an invalid field value + */ + static invalidValue( + fieldName: string, + value: unknown, + expected?: string, + ): ValidationError { + const msg = expected + ? `Invalid value for ${fieldName}: expected ${expected}` + : `Invalid value for ${fieldName}`; + return new ValidationError(msg, { + context: { + field: fieldName, + valueType: value === null ? "null" : typeof value, + expected, + }, + }); + } +} diff --git a/packages/lakebase/src/index.ts b/packages/lakebase/src/index.ts new file mode 100644 index 00000000..b3c5a288 --- /dev/null +++ b/packages/lakebase/src/index.ts @@ -0,0 +1,20 @@ +export { getWorkspaceClient } from "./config"; +export { generateDatabaseCredential } from "./credentials"; +export { createLakebasePool } from "./pool"; +export { + getLakebaseOrmConfig, + getLakebasePgConfig, +} from "./pool-config"; +export type { DriverTelemetry } from "./telemetry"; +export type { TokenRefreshDeps } from "./token-refresh"; +export { createTokenRefreshCallback } from "./token-refresh"; +export type { + DatabaseCredential, + GenerateDatabaseCredentialRequest, + LakebasePoolConfig, + Logger, + LoggerConfig, + RequestedClaims, + RequestedResource, +} from "./types"; +export { RequestedClaimsPermissionSet } from "./types"; diff --git a/packages/lakebase/src/logger.ts b/packages/lakebase/src/logger.ts new file mode 100644 index 00000000..b6d80ce5 --- /dev/null +++ b/packages/lakebase/src/logger.ts @@ -0,0 +1,72 @@ +import type pg from "pg"; +import type { Logger, LoggerConfig } from "./types"; + +const LOGGER_METHODS = ["debug", "info", "warn", "error"] as const; + +/** + * Check if the provided value is a Logger instance + */ +function isLogger(value: unknown): value is Logger { + if (typeof value !== "object" || value === null) { + return false; + } + + return LOGGER_METHODS.every( + (method) => + method in value && + typeof (value as Record)[method] === "function", + ); +} + +/** + * Create a console-based logger from configuration + */ +function createConsoleLogger(config: LoggerConfig): Logger { + const noop = () => {}; + + return { + debug: config.debug ? console.debug.bind(console) : noop, + info: config.info ? console.info.bind(console) : noop, + warn: config.warn ? console.warn.bind(console) : noop, + error: config.error ? 
console.error.bind(console) : noop, + }; +} + +/** + * Resolve logger configuration to a Logger instance + * + * - If Logger instance provided, return as-is + * - If LoggerConfig provided, create console-based logger + * - If undefined, create error-only logger (default) + */ +export function resolveLogger(loggerConfig?: Logger | LoggerConfig): Logger { + // Already a Logger instance - use as-is + if (isLogger(loggerConfig)) { + return loggerConfig; + } + + // LoggerConfig provided - create console logger + if (loggerConfig && typeof loggerConfig === "object") { + return createConsoleLogger(loggerConfig); + } + + // Default: error-only logging + return createConsoleLogger({ error: true }); +} + +/** + * Attach error logging to the pool. + * This has no OpenTelemetry dependency and should always be called. + * + * @param pool - PostgreSQL connection pool + * @param logger - Logger for error logging + */ +export function attachPoolErrorLogging(pool: pg.Pool, logger: Logger): void { + pool.on("error", (error: Error & { code?: string }) => { + logger.error( + "Connection pool error: %s (code: %s)", + error.message, + error.code, + ); + }); +} diff --git a/packages/lakebase/src/pool-config.ts b/packages/lakebase/src/pool-config.ts new file mode 100644 index 00000000..7ff2ce72 --- /dev/null +++ b/packages/lakebase/src/pool-config.ts @@ -0,0 +1,127 @@ +import type pg from "pg"; +import { getUsernameSync, parsePoolConfig } from "./config"; +import type { DriverTelemetry } from "./telemetry"; +import { createNoopTelemetry } from "./telemetry-noop"; +import { createTokenRefreshCallback } from "./token-refresh"; +import type { LakebasePoolConfig, Logger } from "./types"; + +/** + * Map an SSL mode string to the corresponding `pg` SSL configuration. + * + * - `"require"` -- SSL enabled with certificate verification + * - `"prefer"` -- SSL enabled without certificate verification (try SSL, accept any cert) + * - `"disable"` -- SSL disabled + */ +function mapSslConfig( + sslMode: "require" | "prefer" | "disable", +): pg.PoolConfig["ssl"] { + switch (sslMode) { + case "require": + return { rejectUnauthorized: true }; + case "prefer": + return { rejectUnauthorized: false }; + case "disable": + return false; + } +} + +/** + * Get Lakebase connection configuration for PostgreSQL clients. + * + * Returns pg.PoolConfig with OAuth token authentication configured. + * Best used with pg.Pool directly or ORMs that accept pg.Pool instances (like Drizzle). + * + * For ORMs that need connection parameters (TypeORM, Sequelize), use getLakebaseOrmConfig() instead. + * + * Used internally by createLakebasePool(). + * + * @param config - Optional configuration (reads from environment if not provided) + * @param telemetry - Optional pre-initialized telemetry (created internally if not provided) + * @param logger - Optional logger (silent if not provided) + * @returns PostgreSQL pool configuration with OAuth token refresh + */ +export function getLakebasePgConfig( + config?: Partial, + telemetry?: DriverTelemetry, + logger?: Logger, +): pg.PoolConfig { + const userConfig = config ?? {}; + const poolConfig = parsePoolConfig(userConfig); + const username = getUsernameSync(userConfig); + + let passwordConfig: string | (() => string | Promise) | undefined; + + if (userConfig.password !== undefined) { + passwordConfig = userConfig.password; + } else if (poolConfig.endpoint) { + passwordConfig = createTokenRefreshCallback({ + userConfig, + endpoint: poolConfig.endpoint, + telemetry: telemetry ?? 
createNoopTelemetry(), + logger, + }); + } + + return { + host: poolConfig.host, + port: poolConfig.port, + user: username, + database: poolConfig.database, + password: passwordConfig, + ssl: poolConfig.ssl ?? mapSslConfig(poolConfig.sslMode), + max: poolConfig.max, + idleTimeoutMillis: poolConfig.idleTimeoutMillis, + connectionTimeoutMillis: poolConfig.connectionTimeoutMillis, + }; +} + +/** + * Get Lakebase connection configuration for ORMs that don't accept pg.Pool directly. + * + * Designed for ORMs like TypeORM and Sequelize that need connection parameters + * rather than a pre-configured pool instance. + * + * Returns connection config with field names compatible with common ORMs: + * - `username` instead of `user` + * - Simplified SSL config + * - Password callback support for OAuth token refresh + * + * @param config - Optional configuration (reads from environment if not provided) + * @returns ORM-compatible connection configuration + * + * @example + * ```typescript + * // TypeORM + * const dataSource = new DataSource({ + * type: 'postgres', + * ...getLakebaseOrmConfig(), + * entities: [User], + * synchronize: true, + * }); + * + * // Sequelize + * const sequelize = new Sequelize({ + * dialect: 'postgres', + * ...getLakebaseOrmConfig(), + * logging: false, + * }); + * ``` + */ +export function getLakebaseOrmConfig(config?: Partial) { + const { user, password, ssl, ...pgConfig } = getLakebasePgConfig(config); + + return { + ...pgConfig, + username: user, + password: password as + | string + | (() => string) + | (() => Promise) + | undefined, + ssl: ssl + ? typeof ssl === "boolean" + ? ssl + : { rejectUnauthorized: ssl.rejectUnauthorized } + : false, + }; +} diff --git a/packages/lakebase/src/pool.ts b/packages/lakebase/src/pool.ts new file mode 100644 index 00000000..085719f8 --- /dev/null +++ b/packages/lakebase/src/pool.ts @@ -0,0 +1,87 @@ +import pg from "pg"; +import { attachPoolErrorLogging, resolveLogger } from "./logger"; +import { getLakebasePgConfig } from "./pool-config"; +import { attachPoolMetrics, initTelemetry, wrapPoolQuery } from "./telemetry"; +import { createNoopTelemetry } from "./telemetry-noop"; +import type { LakebasePoolConfig } from "./types"; + +/** + * Create a PostgreSQL connection pool with automatic OAuth token refresh for Lakebase. + * + * This function returns a standard `pg.Pool` instance configured with a password callback + * that automatically fetches and caches OAuth tokens from Databricks. The returned pool + * works with any ORM or library that accepts a `pg.Pool` (Drizzle, Prisma, TypeORM, etc.). 
+ *
+ * @param config - Configuration options (optional, reads from environment if not provided)
+ * @returns Standard pg.Pool instance with OAuth token refresh
+ *
+ * @see https://docs.databricks.com/aws/en/oltp/projects/authentication
+ *
+ * @example Using environment variables
+ * ```typescript
+ * // Set: PGHOST, PGDATABASE, LAKEBASE_ENDPOINT
+ * const pool = createLakebasePool();
+ * const result = await pool.query('SELECT * FROM users');
+ * ```
+ *
+ * @example With explicit configuration
+ * ```typescript
+ * // Format: projects/{project-id}/branches/{branch-id}/endpoints/{endpoint-id}
+ * // Note: Use actual IDs from Databricks (project-id is a UUID)
+ * const pool = createLakebasePool({
+ *   endpoint: 'projects/6bef4151-4b5d-4147-b4d0-c2f4fd5b40db/branches/br-sparkling-tree-y17uj7fn/endpoints/ep-restless-pine-y1ldaht0',
+ *   host: 'ep-abc.databricks.com',
+ *   database: 'databricks_postgres',
+ *   user: 'service-principal-id'
+ * });
+ * ```
+ *
+ * @example With Drizzle ORM
+ * ```typescript
+ * import { drizzle } from 'drizzle-orm/node-postgres';
+ * const pool = createLakebasePool();
+ * const db = drizzle({ client: pool });
+ * ```
+ *
+ * @example With Prisma
+ * ```typescript
+ * import { PrismaPg } from '@prisma/adapter-pg';
+ * import { PrismaClient } from '@prisma/client';
+ * const pool = createLakebasePool();
+ * const adapter = new PrismaPg(pool);
+ * const prisma = new PrismaClient({ adapter });
+ * ```
+ */
+export function createLakebasePool(
+  config?: Partial<LakebasePoolConfig>,
+): pg.Pool {
+  const userConfig = config ?? {};
+  const logger = resolveLogger(userConfig.logger);
+
+  // Start with noop telemetry for token refresh
+  const telemetry = createNoopTelemetry();
+
+  const poolConfig = getLakebasePgConfig(userConfig, telemetry, logger);
+  const pool = new pg.Pool(poolConfig);
+
+  initTelemetry()
+    .then((t) => {
+      // Mutate telemetry object in place (token refresh callback has this reference)
+      Object.assign(telemetry, t);
+      attachPoolMetrics(pool, telemetry);
+      wrapPoolQuery(pool, telemetry);
+    })
+    .catch((err) => {
+      logger.error("Failed to initialize telemetry:", err);
+    });
+
+  attachPoolErrorLogging(pool, logger);
+
+  logger.debug(
+    "Created Lakebase connection pool for %s@%s/%s",
+    poolConfig.user,
+    poolConfig.host,
+    poolConfig.database,
+  );
+
+  return pool;
+}
diff --git a/packages/lakebase/src/telemetry-noop.ts b/packages/lakebase/src/telemetry-noop.ts
new file mode 100644
index 00000000..6d5fc26c
--- /dev/null
+++ b/packages/lakebase/src/telemetry-noop.ts
@@ -0,0 +1,101 @@
+import type {
+  Counter,
+  Histogram,
+  Meter,
+  ObservableResult,
+  Span,
+  Tracer,
+} from "@opentelemetry/api";
+import type { DriverTelemetry } from "./telemetry";
+
+// Re-export types for convenience
+export type { Counter, Histogram, Meter, ObservableResult, Span, Tracer };
+
+/**
+ * Invalid span context used for noop spans.
+ * Matches OpenTelemetry's INVALID_SPAN_CONTEXT format.
+ */
+export const INVALID_SPAN_CONTEXT = {
+  traceId: "00000000000000000000000000000000",
+  spanId: "0000000000000000",
+  traceFlags: 0,
+  isRemote: false,
+};
+
+/**
+ * Creates a universal noop proxy that handles all telemetry operations.
+ *
+ * This single proxy function handles spans, tracers, meters, histograms,
+ * counters, and observable gauges with intelligent behavior for special cases:
+ *
+ * Special behaviors:
+ * - `isRecording()` → returns `false` (not proxy)
+ * - `spanContext()` → returns `INVALID_SPAN_CONTEXT` object
+ * - `startActiveSpan()` → executes callback with noop span and returns result (critical for control flow)
+ * - Everything else → returns proxy for chaining
+ *
+ * @returns A self-referential proxy that handles all telemetry operations as no-ops
+ *
+ * @example
+ * ```typescript
+ * const noop = createNoopProxy();
+ * const span = noop.startSpan("operation"); // Returns noop
+ * span.setAttribute("key", "value"); // Returns noop (chainable)
+ * span.isRecording(); // Returns false
+ * span.end(); // Returns noop
+ * ```
+ */
+export function createNoopProxy(): any {
+  let proxy: any;
+
+  const noop = () => proxy;
+
+  proxy = new Proxy(noop, {
+    get(_target, prop) {
+      // Span
+      if (prop === "isRecording") return () => false;
+      if (prop === "spanContext") return () => INVALID_SPAN_CONTEXT;
+
+      // Tracer
+      if (prop === "startActiveSpan") {
+        // Critical: Execute callback with noop span and return its result
+        // This ensures code like `tracer.startActiveSpan('name', span => doWork())` works correctly
+        return (_name: string, ...args: any[]) => {
+          const fn = args[args.length - 1];
+          return typeof fn === "function" ? fn(proxy) : undefined;
+        };
+      }
+
+      // Everything else returns the proxy for chaining
+      return proxy;
+    },
+    apply() {
+      return proxy;
+    },
+    construct() {
+      return proxy;
+    },
+  });
+
+  return proxy;
+}
+
+/**
+ * Creates a noop telemetry object for use before OpenTelemetry is initialized.
+ *
+ * Returns a DriverTelemetry object with all properties set to noop proxies.
+ * This allows telemetry-dependent code (like token refresh) to work immediately
+ * while telemetry initialization happens in the background.
+ *
+ * @returns A DriverTelemetry object with noop implementations
+ */
+export function createNoopTelemetry(): DriverTelemetry {
+  const noop = createNoopProxy();
+  return {
+    tracer: noop,
+    meter: noop,
+    tokenRefreshDuration: noop,
+    queryDuration: noop,
+    poolErrors: noop,
+  } as unknown as DriverTelemetry;
+}
diff --git a/packages/lakebase/src/telemetry.ts b/packages/lakebase/src/telemetry.ts
new file mode 100644
index 00000000..425406f0
--- /dev/null
+++ b/packages/lakebase/src/telemetry.ts
@@ -0,0 +1,211 @@
+import type pg from "pg";
+import {
+  type Counter,
+  createNoopTelemetry,
+  type Histogram,
+  type Meter,
+  type ObservableResult,
+  type Span,
+  type Tracer,
+} from "./telemetry-noop";
+
+export type { Span, Tracer };
+
+// Three states: undefined = not loaded, null = load failed, object = loaded
+let otelApi: typeof import("@opentelemetry/api") | null | undefined;
+let loadPromise: Promise<void> | null = null;
+
+/**
+ * Ensures OpenTelemetry API is loaded (or load attempt has been made).
+ * Safe to call multiple times - will only attempt load once.
+ */
+async function ensureOtelLoaded(): Promise<void> {
+  if (otelApi !== undefined) return; // Already loaded or failed
+
+  if (!loadPromise) {
+    loadPromise = (async () => {
+      try {
+        otelApi = await import("@opentelemetry/api");
+      } catch {
+        otelApi = null; // Mark as failed
+      }
+    })();
+  }
+
+  await loadPromise;
+}
+
+/**
+ * Check if OpenTelemetry API is available.
+ * Returns false if not yet loaded or if load failed.
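+ *
+ * Note: `initTelemetry()` already falls back to `createNoopTelemetry()` when the
+ * API cannot be loaded, so most callers never need this check directly; it is
+ * mainly useful for logging or skipping optional instrumentation, e.g.:
+ *
+ * ```typescript
+ * if (!isOtelAvailable()) {
+ *   console.debug("OpenTelemetry not installed - telemetry is a no-op");
+ * }
+ * ```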
+ */
+export function isOtelAvailable(): boolean {
+  return otelApi !== null && otelApi !== undefined;
+}
+
+/** Telemetry instruments for the driver */
+export interface DriverTelemetry {
+  tracer: Tracer;
+  meter: Meter;
+  tokenRefreshDuration: Histogram;
+  queryDuration: Histogram;
+  poolErrors: Counter;
+}
+
+/**
+ * Initialize telemetry using OpenTelemetry's global registry.
+ * If OpenTelemetry is not installed, returns noop implementations.
+ */
+export async function initTelemetry(): Promise<DriverTelemetry> {
+  await ensureOtelLoaded();
+
+  if (!otelApi) {
+    return createNoopTelemetry();
+  }
+
+  const tracer = otelApi.trace.getTracer("@databricks/lakebase");
+  const meter = otelApi.metrics.getMeter("@databricks/lakebase");
+
+  return {
+    tracer,
+    meter,
+    tokenRefreshDuration: meter.createHistogram(
+      "lakebase.token.refresh.duration",
+      {
+        description: "Duration of OAuth token refresh operations",
+        unit: "ms",
+      },
+    ),
+    queryDuration: meter.createHistogram("lakebase.query.duration", {
+      description: "Duration of queries executed via pool.query",
+      unit: "ms",
+    }),
+    poolErrors: meter.createCounter("lakebase.pool.errors", {
+      description: "Connection pool errors by error code",
+      unit: "1",
+    }),
+  };
+}
+
+/**
+ * Wraps pool.query with telemetry tracing if OpenTelemetry is available.
+ * If OpenTelemetry is not available, does nothing (no overhead).
+ *
+ * @param pool - PostgreSQL connection pool
+ * @param telemetry - Telemetry instruments
+ */
+export function wrapPoolQuery(pool: pg.Pool, telemetry: DriverTelemetry): void {
+  if (!otelApi) {
+    return;
+  }
+
+  const { SpanKind, SpanStatusCode } = otelApi;
+  const origQuery = pool.query.bind(pool);
+  const tracer = telemetry.tracer;
+
+  pool.query = function queryWithTelemetry(
+    ...args: unknown[]
+  ): ReturnType<typeof pool.query> {
+    const firstArg = args[0];
+    const sql =
+      typeof firstArg === "string"
+        ? firstArg
+        : (firstArg as { text?: string } | undefined)?.text;
+    const metricAttrs = {
+      "db.statement": sql ? sql.substring(0, 100) : "unknown",
+    };
+
+    return tracer.startActiveSpan(
+      "lakebase.query",
+      {
+        kind: SpanKind.CLIENT,
+        attributes: {
+          "db.system": "lakebase",
+          "db.statement": sql ? sql.substring(0, 500) : "unknown",
+        },
+      },
+      (span: Span) => {
+        const start = Date.now();
+
+        const result = (
+          origQuery as (...a: unknown[]) => Promise<unknown> | undefined
+        )(...args);
+
+        // Promise-based query: record duration and end span on completion
+        if (result && typeof result.then === "function") {
+          return (result as Promise<{ rowCount?: number | null }>)
+            .then(
+              (res) => {
+                span.setAttribute("db.rows_affected", res?.rowCount ?? 0);
+                span.setStatus({ code: SpanStatusCode.OK });
+                return res;
+              },
+              (err: Error) => {
+                span.recordException(err);
+                span.setStatus({ code: SpanStatusCode.ERROR });
+                throw err;
+              },
+            )
+            .finally(() => {
+              telemetry.queryDuration.record(Date.now() - start, metricAttrs);
+              span.end();
+            }) as unknown as ReturnType<typeof pool.query>;
+        }
+
+        // Callback-based query (void return): duration is approximate
+        telemetry.queryDuration.record(Date.now() - start, metricAttrs);
+        span.end();
+        return result as ReturnType<typeof pool.query>;
+      },
+    ) as ReturnType<typeof pool.query>;
+  } as typeof pool.query;
+}
+
+/**
+ * Attach pool-level metrics collection using OpenTelemetry.
+ * Returns early if OpenTelemetry is not available (zero overhead).
+ *
+ * Uses observable gauges (pull model) for pool connection stats.
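+ *
+ * The registered gauge callbacks are invoked by the metrics SDK at each
+ * collection interval (pull model) and read `pool.totalCount`, `pool.idleCount`,
+ * and `pool.waitingCount` at that moment; nothing is recorded per query. The
+ * emitted instruments are `lakebase.pool.connections.total`, `.idle`, and
+ * `.waiting`, plus a `lakebase.pool.errors` counter labelled by `error.code`.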
+ *
+ * @param pool - PostgreSQL connection pool
+ * @param telemetry - Telemetry instruments
+ */
+export function attachPoolMetrics(
+  pool: pg.Pool,
+  telemetry: DriverTelemetry,
+): void {
+  if (!otelApi) {
+    return;
+  }
+
+  const meter = telemetry.meter;
+
+  const poolTotal = meter.createObservableGauge(
+    "lakebase.pool.connections.total",
+    { description: "Total connections in the pool" },
+  );
+  const poolIdle = meter.createObservableGauge(
+    "lakebase.pool.connections.idle",
+    { description: "Idle connections in the pool" },
+  );
+  const poolWaiting = meter.createObservableGauge(
+    "lakebase.pool.connections.waiting",
+    { description: "Clients waiting for a connection" },
+  );
+
+  poolTotal.addCallback((result: ObservableResult) =>
+    result.observe(pool.totalCount),
+  );
+  poolIdle.addCallback((result: ObservableResult) =>
+    result.observe(pool.idleCount),
+  );
+  poolWaiting.addCallback((result: ObservableResult) =>
+    result.observe(pool.waitingCount),
+  );
+
+  pool.on("error", (error: Error & { code?: string }) => {
+    telemetry.poolErrors.add(1, {
+      "error.code": error.code ?? "unknown",
+    });
+  });
+}
diff --git a/packages/lakebase/src/token-refresh.ts b/packages/lakebase/src/token-refresh.ts
new file mode 100644
index 00000000..4e0cd292
--- /dev/null
+++ b/packages/lakebase/src/token-refresh.ts
@@ -0,0 +1,114 @@
+import type { WorkspaceClient } from "@databricks/sdk-experimental";
+import { getWorkspaceClient } from "./config";
+import { generateDatabaseCredential } from "./credentials";
+import type { DriverTelemetry, Span } from "./telemetry";
+import type { LakebasePoolConfig, Logger } from "./types";
+
+// 2-minute buffer before token expiration to prevent race conditions.
+// Lakebase tokens expire after 1 hour, so we refresh once ~58 minutes have
+// elapsed (when ~2 minutes remain).
+const CACHE_BUFFER_MS = 2 * 60 * 1000;
+
+export interface TokenRefreshDeps {
+  userConfig: Partial<LakebasePoolConfig>;
+  endpoint: string;
+  telemetry: DriverTelemetry;
+  logger?: Logger;
+}
+
+/** Fetch a fresh OAuth token from Databricks */
+async function refreshToken(
+  workspaceClient: WorkspaceClient,
+  endpoint: string,
+): Promise<{ token: string; expiresAt: number }> {
+  const credential = await generateDatabaseCredential(workspaceClient, {
+    endpoint,
+  });
+
+  return {
+    token: credential.token,
+    expiresAt: new Date(credential.expire_time).getTime(),
+  };
+}
+
+/**
+ * Build the password callback with token caching, deduplication, and telemetry.
+ *
+ * The returned async function is called by `pg.Pool` each time a new connection
+ * is established. It caches OAuth tokens and deduplicates concurrent refresh
+ * requests so only one API call is made even under parallel connection creation.
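+ *
+ * @example
+ * ```typescript
+ * // Sketch of how getLakebasePgConfig() wires the callback in as a pg password
+ * // function; the endpoint below is a placeholder, not a real resource path.
+ * import pg from "pg";
+ *
+ * const password = createTokenRefreshCallback({
+ *   userConfig: {},
+ *   endpoint: "projects/<project-id>/branches/<branch-id>/endpoints/<endpoint-id>",
+ *   telemetry: createNoopTelemetry(),
+ * });
+ * const pool = new pg.Pool({
+ *   host: process.env.PGHOST,
+ *   database: process.env.PGDATABASE,
+ *   user: process.env.PGUSER,
+ *   password,
+ * });
+ * ```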
+ */
+export function createTokenRefreshCallback(
+  deps: TokenRefreshDeps,
+): () => Promise<string> {
+  let cachedToken: string | undefined;
+  let tokenExpiresAt = 0;
+  let workspaceClient: WorkspaceClient | null = null;
+  let refreshPromise: Promise<string> | null = null;
+
+  return async (): Promise<string> => {
+    // Lazily initialize workspace client on first password fetch
+    if (!workspaceClient) {
+      try {
+        workspaceClient = await getWorkspaceClient(deps.userConfig);
+      } catch (error) {
+        deps.logger?.error("Failed to initialize workspace client: %O", error);
+        throw error;
+      }
+    }
+
+    const now = Date.now();
+    const hasValidToken = cachedToken && now < tokenExpiresAt - CACHE_BUFFER_MS;
+    if (hasValidToken) {
+      // Return cached token if still valid (with buffer)
+      const expiresIn = Math.round((tokenExpiresAt - now) / 1000 / 60);
+      deps.logger?.debug(
+        "Using cached OAuth token (expires in %d minutes at %s)",
+        expiresIn,
+        new Date(tokenExpiresAt).toISOString(),
+      );
+      return cachedToken as string;
+    }
+
+    const client = workspaceClient;
+
+    // Deduplicate concurrent refresh requests
+    if (!refreshPromise) {
+      refreshPromise = (async () => {
+        const startTime = Date.now();
+        try {
+          const result = await deps.telemetry.tracer.startActiveSpan(
+            "lakebase.token.refresh",
+            {
+              attributes: { "lakebase.endpoint": deps.endpoint },
+            },
+            async (span: Span) => {
+              const tokenResult = await refreshToken(client, deps.endpoint);
+              span.setAttribute(
+                "lakebase.token.expires_at",
+                new Date(tokenResult.expiresAt).toISOString(),
+              );
+              span.end();
+              return tokenResult;
+            },
+          );
+
+          cachedToken = result.token;
+          tokenExpiresAt = result.expiresAt;
+          return result.token;
+        } catch (error) {
+          deps.logger?.error("Failed to fetch OAuth token: %O", {
+            error,
+            message: error instanceof Error ? error.message : String(error),
+            endpoint: deps.endpoint,
+          });
+          throw error;
+        } finally {
+          deps.telemetry.tokenRefreshDuration.record(Date.now() - startTime);
+          refreshPromise = null;
+        }
+      })();
+    }
+
+    return refreshPromise as Promise<string>;
+  };
+}
diff --git a/packages/lakebase/src/types.ts b/packages/lakebase/src/types.ts
new file mode 100644
index 00000000..0e930397
--- /dev/null
+++ b/packages/lakebase/src/types.ts
@@ -0,0 +1,217 @@
+import type { WorkspaceClient } from "@databricks/sdk-experimental";
+import type { PoolConfig } from "pg";
+
+/**
+ * Optional logger interface for the Lakebase driver.
+ * When not provided, the driver operates silently (no logging).
+ */
+export interface Logger {
+  debug(message: string, ...args: unknown[]): void;
+  info(message: string, ...args: unknown[]): void;
+  warn(message: string, ...args: unknown[]): void;
+  error(message: string, ...args: unknown[]): void;
+}
+
+/**
+ * Configuration for console-based logger.
+ * Specify which log levels should be enabled.
+ */
+export interface LoggerConfig {
+  /** Enable debug level logging */
+  debug?: boolean;
+  /** Enable info level logging */
+  info?: boolean;
+  /** Enable warning level logging */
+  warn?: boolean;
+  /** Enable error level logging */
+  error?: boolean;
+}
+
+/**
+ * Telemetry configuration options
+ */
+export type TelemetryOptions =
+  | boolean
+  | {
+      traces?: boolean;
+      metrics?: boolean;
+    };
+
+/**
+ * Configuration for creating a Lakebase connection pool
+ *
+ * Supports two authentication methods:
+ * 1. OAuth token authentication - Provide workspaceClient + endpoint (automatic token rotation)
+ * 2.
Native Postgres password authentication - Provide password string or function + * + * Extends pg.PoolConfig to support all standard PostgreSQL pool options. + * + * @see https://docs.databricks.com/aws/en/oltp/projects/authentication + */ +export interface LakebasePoolConfig extends PoolConfig { + /** + * Databricks workspace client for OAuth authentication + * If not provided along with endpoint, will attempt to use ServiceContext + * + * Note: If password is provided, OAuth auth is not used + */ + workspaceClient?: WorkspaceClient; + + /** + * Endpoint resource path for OAuth token generation. + * + * All segments are IDs assigned by Databricks (not names you create): + * - project-id: UUID format (e.g., `a1b2c3d4-e5f6-4789-a012-b3c4d5e6f789`) + * - branch-id: Identifier from Databricks (e.g., `main`, `dev`) + * - endpoint-id: Identifier from Databricks (e.g., `primary`, `analytics`) + * + * Format: `projects/{project-id}/branches/{branch-id}/endpoints/{endpoint-id}` + * + * Required for OAuth authentication (unless password is provided) + * Can also be set via LAKEBASE_ENDPOINT environment variable + * + * @example "projects/6bef4151-4b5d-4147-b4d0-c2f4fd5b40db/branches/br-sparkling-tree-y17uj7fn/endpoints/ep-restless-pine-y1ldaht0" + */ + endpoint?: string; + + /** + * SSL mode for the connection (convenience helper) + * Can also be set via PGSSLMODE environment variable + * + * @default "require" + */ + sslMode?: "require" | "disable" | "prefer"; + + /** + * Telemetry configuration + * + * - `true` or omitted: enable all telemetry (traces, metrics) -- no-op when OTEL is not configured + * - `false`: disable all telemetry + * - `{ traces?, metrics? }`: fine-grained control + */ + telemetry?: TelemetryOptions; + + /** + * Optional logger configuration. + * + * Supports three modes: + * 1. Logger instance - Use your own logger implementation + * 2. LoggerConfig - Enable/disable specific log levels (uses console) + * 3. 
Undefined - Defaults to error logging only + * + * @example Using custom logger + * ```typescript + * import { createLogger } from '@databricks/appkit'; + * const pool = createLakebasePool({ + * logger: createLogger('connectors:lakebase') + * }); + * ``` + * + * @example Using config-based logger + * ```typescript + * const pool = createLakebasePool({ + * logger: { debug: true, info: true, error: true } + * }); + * ``` + */ + logger?: Logger | LoggerConfig; +} + +// --------------------------------------------------------------------------- +// Authentication types for Lakebase Postgres OAuth token generation +// @see https://docs.databricks.com/aws/en/oltp/projects/authentication +// --------------------------------------------------------------------------- + +/** + * Database credentials with OAuth token for Postgres connection + */ +export interface DatabaseCredential { + /** OAuth token to use as the password when connecting to Postgres */ + token: string; + + /** + * Token expiration time in UTC (ISO 8601 format) + * Tokens expire after 1 hour from generation + * @example "2026-02-06T17:07:00Z" + */ + expire_time: string; +} + +/** + * Permission set for Unity Catalog table access + */ +export enum RequestedClaimsPermissionSet { + /** + * Read-only access to specified UC tables + */ + READ_ONLY = "READ_ONLY", +} + +/** + * Resource to request permissions for in Unity Catalog + */ +export interface RequestedResource { + /** + * Unity Catalog table name to request access to + * @example "catalog.schema.table" + */ + table_name?: string; + + /** + * Generic resource name for non-table resources + */ + unspecified_resource_name?: string; +} + +/** + * Optional claims for fine-grained Unity Catalog table permissions + * When specified, the returned token will be scoped to only the requested tables + */ +export interface RequestedClaims { + /** + * Permission level to request + */ + permission_set?: RequestedClaimsPermissionSet; + + /** + * List of UC resources to request access to + */ + resources?: RequestedResource[]; +} + +/** + * Request parameters for generating database OAuth credentials + */ +export interface GenerateDatabaseCredentialRequest { + /** + * Endpoint resource path with IDs assigned by Databricks. + * + * All segments are IDs from Databricks (not names you create): + * - project-id: UUID format (e.g., `a1b2c3d4-e5f6-4789-a012-b3c4d5e6f789`) + * - branch-id: Identifier from Databricks (e.g., `main`, `dev`) + * - endpoint-id: Identifier from Databricks (e.g., `primary`, `analytics`) + * + * Format: `projects/{project-id}/branches/{branch-id}/endpoints/{endpoint-id}` + * + * **Important:** Copy from Databricks Lakebase UI - do not construct manually. + * + * @example "projects/6bef4151-4b5d-4147-b4d0-c2f4fd5b40db/branches/br-sparkling-tree-y17uj7fn/endpoints/ep-restless-pine-y1ldaht0" + */ + endpoint: string; + + /** + * Optional claims for fine-grained UC table permissions. + * When specified, the token will only grant access to the specified tables. 
+ * + * @example + * ```typescript + * { + * claims: [{ + * permission_set: RequestedClaimsPermissionSet.READ_ONLY, + * resources: [{ table_name: "catalog.schema.users" }] + * }] + * } + * ``` + */ + claims?: RequestedClaims[]; +} diff --git a/packages/lakebase/tsconfig.json b/packages/lakebase/tsconfig.json new file mode 100644 index 00000000..4a6e68b3 --- /dev/null +++ b/packages/lakebase/tsconfig.json @@ -0,0 +1,12 @@ +{ + "extends": "../../tsconfig.json", + "compilerOptions": { + "outDir": "dist", + "baseUrl": ".", + "paths": { + "@/*": ["src/*"] + } + }, + "include": ["src/**/*"], + "exclude": ["node_modules", "dist"] +} diff --git a/packages/lakebase/tsdown.config.ts b/packages/lakebase/tsdown.config.ts new file mode 100644 index 00000000..293e2fc3 --- /dev/null +++ b/packages/lakebase/tsdown.config.ts @@ -0,0 +1,28 @@ +import { defineConfig } from "tsdown"; + +export default defineConfig([ + { + publint: true, + name: "@databricks/lakebase", + entry: "src/index.ts", + outDir: "dist", + hash: false, + format: "esm", + platform: "node", + minify: false, + dts: { + resolve: true, + }, + sourcemap: false, + clean: false, + unbundle: true, + noExternal: [], + external: (id) => { + // Bundle all internal modules + if (id.startsWith("@/")) return false; + // Externalize all npm packages + return /^[^./]/.test(id) || id.includes("/node_modules/"); + }, + tsconfig: "./tsconfig.json", + }, +]); diff --git a/pnpm-lock.yaml b/pnpm-lock.yaml index 4b598b89..f1c9010b 100644 --- a/pnpm-lock.yaml +++ b/pnpm-lock.yaml @@ -48,8 +48,8 @@ importers: specifier: ^15.5.1 version: 15.5.2 pg: - specifier: ^8.16.3 - version: 8.16.3 + specifier: ^8.18.0 + version: 8.18.0 plop: specifier: ^4.0.4 version: 4.0.4(@types/node@24.7.2) @@ -236,9 +236,12 @@ importers: packages/appkit: dependencies: + '@databricks/lakebase': + specifier: workspace:* + version: link:../lakebase '@databricks/sdk-experimental': - specifier: ^0.15.0 - version: 0.15.0 + specifier: ^0.16.0 + version: 0.16.0 '@opentelemetry/api': specifier: ^1.9.0 version: 1.9.0 @@ -294,8 +297,8 @@ importers: specifier: ^2.1.1 version: 2.1.1 pg: - specifier: ^8.16.3 - version: 8.16.3 + specifier: ^8.18.0 + version: 8.18.0 semver: specifier: ^7.7.3 version: 7.7.3 @@ -319,8 +322,8 @@ importers: specifier: ^7.0.15 version: 7.0.15 '@types/pg': - specifier: ^8.15.6 - version: 8.15.6 + specifier: ^8.16.0 + version: 8.16.0 '@types/ws': specifier: ^8.18.1 version: 8.18.1 @@ -494,6 +497,22 @@ importers: specifier: ^1.4.0 version: 1.4.0 + packages/lakebase: + dependencies: + '@databricks/sdk-experimental': + specifier: ^0.16.0 + version: 0.16.0 + pg: + specifier: ^8.18.0 + version: 8.18.0 + devDependencies: + '@opentelemetry/api': + specifier: ^1.9.0 + version: 1.9.0 + '@types/pg': + specifier: ^8.16.0 + version: 8.16.0 + packages/shared: dependencies: '@ast-grep/napi': @@ -1761,8 +1780,8 @@ packages: peerDependencies: postcss: ^8.4 - '@databricks/sdk-experimental@0.15.0': - resolution: {integrity: sha512-HkoMiF7dNDt6WRW0xhi7oPlBJQfxJ9suJhEZRFt08VwLMaWcw2PiF8monfHlkD4lkufEYV6CTxi5njQkciqiHA==} + '@databricks/sdk-experimental@0.16.0': + resolution: {integrity: sha512-9c2RxWYoRDFupdt4ZnBc1IPE1XaXgN+/wyV4DVcEqOnIa31ep51OnwAD/3014BImfKdyXg32nmgrB9dwvB6+lg==} engines: {node: '>=22.0', npm: '>=10.0.0'} '@date-fns/tz@1.4.1': @@ -4588,6 +4607,9 @@ packages: '@types/pg@8.15.6': resolution: {integrity: sha512-NoaMtzhxOrubeL/7UZuNTrejB4MPAJ0RpxZqXQf2qXuVlTPuG6Y8p4u9dKRaue4yjmC7ZhzVO2/Yyyn25znrPQ==} + '@types/pg@8.16.0': + resolution: {integrity: 
sha512-RmhMd/wD+CF8Dfo+cVIy3RR5cl8CyfXQ0tGgW6XBL8L4LM/UTEbNXYRbLwU6w+CgrKBNbrQWt4FUtTfaU5jSYQ==} + '@types/picomatch@4.0.2': resolution: {integrity: sha512-qHHxQ+P9PysNEGbALT8f8YOSHW0KJu6l2xU8DYY0fu/EmGxXdVnuTLvFUvBgPJMSqXq29SYHveejeAha+4AYgA==} @@ -8739,30 +8761,33 @@ packages: perfect-debounce@2.0.0: resolution: {integrity: sha512-fkEH/OBiKrqqI/yIgjR92lMfs2K8105zt/VT6+7eTjNwisrsh47CeIED9z58zI7DfKdH3uHAn25ziRZn3kgAow==} - pg-cloudflare@1.2.7: - resolution: {integrity: sha512-YgCtzMH0ptvZJslLM1ffsY4EuGaU0cx4XSdXLRFae8bPP4dS5xL1tNB3k2o/N64cHJpwU7dxKli/nZ2lUa5fLg==} + pg-cloudflare@1.3.0: + resolution: {integrity: sha512-6lswVVSztmHiRtD6I8hw4qP/nDm1EJbKMRhf3HCYaqud7frGysPv7FYJ5noZQdhQtN2xJnimfMtvQq21pdbzyQ==} - pg-connection-string@2.9.1: - resolution: {integrity: sha512-nkc6NpDcvPVpZXxrreI/FOtX3XemeLl8E0qFr6F2Lrm/I8WOnaWNhIPK2Z7OHpw7gh5XJThi6j6ppgNoaT1w4w==} + pg-connection-string@2.11.0: + resolution: {integrity: sha512-kecgoJwhOpxYU21rZjULrmrBJ698U2RxXofKVzOn5UDj61BPj/qMb7diYUR1nLScCDbrztQFl1TaQZT0t1EtzQ==} pg-int8@1.0.1: resolution: {integrity: sha512-WCtabS6t3c8SkpDBUlb1kjOs7l66xsGdKpIPZsg4wR+B3+u9UAum2odSsF9tnvxg80h4ZxLWMy4pRjOsFIqQpw==} engines: {node: '>=4.0.0'} - pg-pool@3.10.1: - resolution: {integrity: sha512-Tu8jMlcX+9d8+QVzKIvM/uJtp07PKr82IUOYEphaWcoBhIYkoHpLXN3qO59nAI11ripznDsEzEv8nUxBVWajGg==} + pg-pool@3.11.0: + resolution: {integrity: sha512-MJYfvHwtGp870aeusDh+hg9apvOe2zmpZJpyt+BMtzUWlVqbhFmMK6bOBXLBUPd7iRtIF9fZplDc7KrPN3PN7w==} peerDependencies: pg: '>=8.0' pg-protocol@1.10.3: resolution: {integrity: sha512-6DIBgBQaTKDJyxnXaLiLR8wBpQQcGWuAESkRBX/t6OwA8YsqP+iVSiond2EDy6Y/dsGk8rh/jtax3js5NeV7JQ==} + pg-protocol@1.11.0: + resolution: {integrity: sha512-pfsxk2M9M3BuGgDOfuy37VNRRX3jmKgMjcvAcWqNDpZSf4cUmv8HSOl5ViRQFsfARFn0KuUQTgLxVMbNq5NW3g==} + pg-types@2.2.0: resolution: {integrity: sha512-qTAAlrEsl8s4OiEQY69wDvcMIdQN6wdz5ojQiOy6YRMuynxenON0O5oCpJI6lshc6scgAY8qvJ2On/p+CXY0GA==} engines: {node: '>=4'} - pg@8.16.3: - resolution: {integrity: sha512-enxc1h0jA/aq5oSDMvqyW3q89ra6XIIDZgCX9vkMrnz5DFTw/Ny3Li2lFQ+pt3L6MCgm/5o2o8HW9hiJji+xvw==} + pg@8.18.0: + resolution: {integrity: sha512-xqrUDL1b9MbkydY/s+VZ6v+xiMUmOUk7SS9d/1kpyQxoJ6U9AO1oIJyUWVZojbfe5Cc/oluutcgFG4L9RDP1iQ==} engines: {node: '>= 16.0.0'} peerDependencies: pg-native: '>=3.0.1' @@ -12654,7 +12679,7 @@ snapshots: dependencies: postcss: 8.5.6 - '@databricks/sdk-experimental@0.15.0': + '@databricks/sdk-experimental@0.16.0': dependencies: google-auth-library: 10.5.0 ini: 6.0.0 @@ -16249,7 +16274,7 @@ snapshots: '@types/pg-pool@2.0.6': dependencies: - '@types/pg': 8.15.6 + '@types/pg': 8.16.0 '@types/pg@8.15.6': dependencies: @@ -16257,6 +16282,12 @@ snapshots: pg-protocol: 1.10.3 pg-types: 2.2.0 + '@types/pg@8.16.0': + dependencies: + '@types/node': 24.10.1 + pg-protocol: 1.10.3 + pg-types: 2.2.0 + '@types/picomatch@4.0.2': {} '@types/prismjs@1.26.5': {} @@ -21198,19 +21229,21 @@ snapshots: perfect-debounce@2.0.0: {} - pg-cloudflare@1.2.7: + pg-cloudflare@1.3.0: optional: true - pg-connection-string@2.9.1: {} + pg-connection-string@2.11.0: {} pg-int8@1.0.1: {} - pg-pool@3.10.1(pg@8.16.3): + pg-pool@3.11.0(pg@8.18.0): dependencies: - pg: 8.16.3 + pg: 8.18.0 pg-protocol@1.10.3: {} + pg-protocol@1.11.0: {} + pg-types@2.2.0: dependencies: pg-int8: 1.0.1 @@ -21219,15 +21252,15 @@ snapshots: postgres-date: 1.0.7 postgres-interval: 1.2.0 - pg@8.16.3: + pg@8.18.0: dependencies: - pg-connection-string: 2.9.1 - pg-pool: 3.10.1(pg@8.16.3) - pg-protocol: 1.10.3 + pg-connection-string: 2.11.0 + pg-pool: 3.11.0(pg@8.18.0) + pg-protocol: 
1.11.0 pg-types: 2.2.0 pgpass: 1.0.5 optionalDependencies: - pg-cloudflare: 1.2.7 + pg-cloudflare: 1.3.0 pgpass@1.0.5: dependencies: diff --git a/tools/license-utils.ts b/tools/license-utils.ts index f8a7e479..43247f05 100644 --- a/tools/license-utils.ts +++ b/tools/license-utils.ts @@ -18,6 +18,7 @@ type PackageJson = { export const PUBLISHED_PACKAGES = [ "packages/appkit", "packages/appkit-ui", + "packages/lakebase", "packages/shared", ]; diff --git a/vitest.config.ts b/vitest.config.ts index da6ca040..8c2893b0 100644 --- a/vitest.config.ts +++ b/vitest.config.ts @@ -42,6 +42,14 @@ export default defineConfig({ environment: "node", }, }, + { + plugins: [tsconfigPaths()], + test: { + name: "lakebase", + root: "./packages/lakebase", + environment: "node", + }, + }, { plugins: [tsconfigPaths()], test: {