From 093037bf5596269aa64bcbe35526e031630b21da Mon Sep 17 00:00:00 2001 From: Pawel Kosiec Date: Tue, 10 Feb 2026 19:29:01 +0100 Subject: [PATCH 01/12] feat(appkit): introduce Lakebase Autoscaling driver # Conflicts: # docs/docs/api/appkit/index.md # docs/docs/api/appkit/typedoc-sidebar.ts # packages/appkit/package.json --- CLAUDE.md | 33 + ...numeration.RequestedClaimsPermissionSet.md | 13 + .../api/appkit/Function.createLakebasePool.md | 59 ++ .../Function.generateDatabaseCredential.md | 55 ++ .../appkit/Function.getLakebaseOrmConfig.md | 84 +++ .../appkit/Function.getLakebasePgConfig.md | 27 + .../appkit/Function.getLakebasePoolConfig.md | 19 + .../api/appkit/Function.getWorkspaceClient.md | 17 + .../appkit/Interface.DatabaseCredential.md | 30 + ...rface.GenerateDatabaseCredentialRequest.md | 50 ++ .../appkit/Interface.LakebasePoolConfig.md | 87 +++ .../api/appkit/Interface.RequestedClaims.md | 24 + .../api/appkit/Interface.RequestedResource.md | 29 + docs/docs/api/appkit/index.md | 25 +- docs/docs/api/appkit/typedoc-sidebar.ts | 78 +- package.json | 2 +- packages/appkit/package.json | 6 +- packages/appkit/src/cache/index.ts | 11 +- .../appkit/src/cache/storage/persistent.ts | 51 +- .../src/cache/tests/cache-manager.test.ts | 52 +- .../appkit/src/cache/tests/persistent.test.ts | 123 ++- packages/appkit/src/connectors/index.ts | 1 + .../src/connectors/lakebase/auth-types.ts | 98 +++ .../appkit/src/connectors/lakebase/config.ts | 146 ++++ .../src/connectors/lakebase/defaults.ts | 8 + .../appkit/src/connectors/lakebase/index.ts | 16 + .../src/connectors/lakebase/pool-config.ts | 115 +++ .../appkit/src/connectors/lakebase/pool.ts | 113 +++ .../src/connectors/lakebase/telemetry.ts | 91 +++ .../src/connectors/lakebase/token-refresh.ts | 127 ++++ .../appkit/src/connectors/lakebase/types.ts | 58 ++ .../appkit/src/connectors/lakebase/utils.ts | 147 ++++ .../connectors/tests/lakebase-auth.test.ts | 187 +++++ .../connectors/tests/lakebase-pool.test.ts | 712 ++++++++++++++++++ packages/appkit/src/index.ts | 17 + pnpm-lock.yaml | 72 +- 36 files changed, 2590 insertions(+), 193 deletions(-) create mode 100644 docs/docs/api/appkit/Enumeration.RequestedClaimsPermissionSet.md create mode 100644 docs/docs/api/appkit/Function.createLakebasePool.md create mode 100644 docs/docs/api/appkit/Function.generateDatabaseCredential.md create mode 100644 docs/docs/api/appkit/Function.getLakebaseOrmConfig.md create mode 100644 docs/docs/api/appkit/Function.getLakebasePgConfig.md create mode 100644 docs/docs/api/appkit/Function.getLakebasePoolConfig.md create mode 100644 docs/docs/api/appkit/Function.getWorkspaceClient.md create mode 100644 docs/docs/api/appkit/Interface.DatabaseCredential.md create mode 100644 docs/docs/api/appkit/Interface.GenerateDatabaseCredentialRequest.md create mode 100644 docs/docs/api/appkit/Interface.LakebasePoolConfig.md create mode 100644 docs/docs/api/appkit/Interface.RequestedClaims.md create mode 100644 docs/docs/api/appkit/Interface.RequestedResource.md create mode 100644 packages/appkit/src/connectors/lakebase/auth-types.ts create mode 100644 packages/appkit/src/connectors/lakebase/config.ts create mode 100644 packages/appkit/src/connectors/lakebase/defaults.ts create mode 100644 packages/appkit/src/connectors/lakebase/index.ts create mode 100644 packages/appkit/src/connectors/lakebase/pool-config.ts create mode 100644 packages/appkit/src/connectors/lakebase/pool.ts create mode 100644 packages/appkit/src/connectors/lakebase/telemetry.ts create mode 100644 
packages/appkit/src/connectors/lakebase/token-refresh.ts create mode 100644 packages/appkit/src/connectors/lakebase/types.ts create mode 100644 packages/appkit/src/connectors/lakebase/utils.ts create mode 100644 packages/appkit/src/connectors/tests/lakebase-auth.test.ts create mode 100644 packages/appkit/src/connectors/tests/lakebase-pool.test.ts

diff --git a/CLAUDE.md b/CLAUDE.md
index 1f380700..9a17f870 100644
--- a/CLAUDE.md
+++ b/CLAUDE.md
@@ -233,6 +233,39 @@ The AnalyticsPlugin provides SQL query execution:
 - Built-in caching with configurable TTL
 - Databricks SQL Warehouse connector for execution
 
+### Lakebase Autoscaling Connector
+
+**Location:** `packages/appkit/src/connectors/lakebase/`
+
+AppKit provides `createLakebasePool()` - a factory function that returns a standard `pg.Pool` configured with automatic OAuth token refresh for Databricks Lakebase (OLTP) databases.
+
+**Key Features:**
+- Returns standard `pg.Pool` (compatible with all ORMs)
+- Automatic OAuth token refresh (1-hour tokens, 2-minute buffer)
+- Token caching to minimize API calls
+- Battle-tested pattern (same as AWS RDS IAM authentication)
+
+**Quick Example:**
+```typescript
+import { createLakebasePool } from '@databricks/appkit';
+
+// Reads from PGHOST, PGDATABASE, LAKEBASE_ENDPOINT env vars
+const pool = createLakebasePool();
+
+// Standard pg.Pool API
+const result = await pool.query('SELECT * FROM users');
+```
+
+**ORM Integration:**
+Works with Drizzle, Prisma, TypeORM - see [Lakebase Integration Docs](docs/docs/integrations/lakebase.md) for examples.
+
+**Architecture:**
+- Connector files: `packages/appkit/src/connectors/lakebase/`
+  - `pool.ts` - Pool factory with OAuth token refresh
+  - `types.ts` - TypeScript interfaces (`LakebasePoolConfig`)
+  - `utils.ts` - Helper functions (`generateDatabaseCredential`)
+  - `auth-types.ts` - Lakebase v2 API types
+
 ### Frontend-Backend Interaction
 
 ```
diff --git a/docs/docs/api/appkit/Enumeration.RequestedClaimsPermissionSet.md b/docs/docs/api/appkit/Enumeration.RequestedClaimsPermissionSet.md
new file mode 100644
index 00000000..7f9431f8
--- /dev/null
+++ b/docs/docs/api/appkit/Enumeration.RequestedClaimsPermissionSet.md
@@ -0,0 +1,13 @@
+# Enumeration: RequestedClaimsPermissionSet
+
+Permission set for Unity Catalog table access
+
+## Enumeration Members
+
+### READ\_ONLY
+
+```ts
+READ_ONLY: "READ_ONLY";
+```
+
+Read-only access to specified UC tables
diff --git a/docs/docs/api/appkit/Function.createLakebasePool.md b/docs/docs/api/appkit/Function.createLakebasePool.md
new file mode 100644
index 00000000..e73edbc4
--- /dev/null
+++ b/docs/docs/api/appkit/Function.createLakebasePool.md
@@ -0,0 +1,59 @@
+# Function: createLakebasePool()
+
+```ts
+function createLakebasePool(config?: Partial<LakebasePoolConfig>): Pool;
+```
+
+Create a PostgreSQL connection pool with automatic OAuth token refresh for Lakebase.
+
+This function returns a standard `pg.Pool` instance configured with a password callback
+that automatically fetches and caches OAuth tokens from Databricks. The returned pool
+works with any ORM or library that accepts a `pg.Pool` (Drizzle, Prisma, TypeORM, etc.).
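+
+Under the hood this relies on `pg`'s support for a dynamic password: `password` may be a
+function (sync or async) that is invoked for each new connection. A minimal sketch of that
+pattern with plain `pg` (the token fetcher below is an illustrative placeholder, not an
+AppKit API):
+
+```typescript
+import { Pool } from 'pg';
+
+// Placeholder for a real credential lookup such as generateDatabaseCredential()
+async function fetchOAuthToken(): Promise<string> {
+  return 'oauth-token-from-databricks';
+}
+
+const pool = new Pool({
+  host: 'ep-abc.databricks.com',
+  database: 'databricks_postgres',
+  // pg invokes this again for each new client, so expired tokens are never reused
+  password: () => fetchOAuthToken(),
+  ssl: true,
+});
+```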
+
+## Parameters
+
+| Parameter | Type | Description |
+| ------ | ------ | ------ |
+| `config?` | `Partial`\<[`LakebasePoolConfig`](Interface.LakebasePoolConfig.md)\> | Configuration options (optional, reads from environment if not provided) |
+
+## Returns
+
+`Pool`
+
+Standard pg.Pool instance with OAuth token refresh
+
+## See
+
+https://docs.databricks.com/aws/en/oltp/projects/authentication
+
+## Examples
+
+```typescript
+// Set: PGHOST, PGDATABASE, LAKEBASE_ENDPOINT
+const pool = createLakebasePool();
+const result = await pool.query('SELECT * FROM users');
+```
+
+```typescript
+// Format: projects/{project-id}/branches/{branch-id}/endpoints/{endpoint-id}
+// Note: Use actual IDs from Databricks (project-id is a UUID)
+const pool = createLakebasePool({
+  endpoint: 'projects/6bef4151-4b5d-4147-b4d0-c2f4fd5b40db/branches/br-sparkling-tree-y17uj7fn/endpoints/ep-restless-pine-y1ldaht0',
+  host: 'ep-abc.databricks.com',
+  database: 'databricks_postgres',
+  user: 'service-principal-id'
+});
+```
+
+```typescript
+import { drizzle } from 'drizzle-orm/node-postgres';
+const pool = createLakebasePool();
+const db = drizzle({ client: pool });
+```
+
+```typescript
+import { PrismaClient } from '@prisma/client';
+import { PrismaPg } from '@prisma/adapter-pg';
+const pool = createLakebasePool();
+const adapter = new PrismaPg(pool);
+const prisma = new PrismaClient({ adapter });
+```
diff --git a/docs/docs/api/appkit/Function.generateDatabaseCredential.md b/docs/docs/api/appkit/Function.generateDatabaseCredential.md
new file mode 100644
index 00000000..01c7755d
--- /dev/null
+++ b/docs/docs/api/appkit/Function.generateDatabaseCredential.md
@@ -0,0 +1,55 @@
+# Function: generateDatabaseCredential()
+
+```ts
+function generateDatabaseCredential(workspaceClient: WorkspaceClient, request: GenerateDatabaseCredentialRequest): Promise<DatabaseCredential>;
+```
+
+Generate OAuth credentials for Postgres database connection using the proper Postgres API.
+
+This generates a time-limited OAuth token (expires after 1 hour) that can be used
+as a password when connecting to Lakebase Postgres databases.
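+
+Because tokens expire after an hour, callers typically cache the credential and refresh it
+shortly before `expire_time`. A sketch of that pattern (the buffer constant, cache variable,
+and in-scope `workspaceClient` are illustrative assumptions, not part of this API):
+
+```typescript
+const REFRESH_BUFFER_MS = 2 * 60 * 1000; // refresh 2 minutes before expiry
+
+let cached: DatabaseCredential | undefined;
+
+async function getFreshToken(): Promise<string> {
+  if (
+    !cached ||
+    new Date(cached.expire_time).getTime() - Date.now() < REFRESH_BUFFER_MS
+  ) {
+    cached = await generateDatabaseCredential(workspaceClient, {
+      endpoint: process.env.LAKEBASE_ENDPOINT!,
+    });
+  }
+  return cached.token;
+}
+```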
+
+## Parameters
+
+| Parameter | Type | Description |
+| ------ | ------ | ------ |
+| `workspaceClient` | `WorkspaceClient` | Databricks workspace client for authentication |
+| `request` | [`GenerateDatabaseCredentialRequest`](Interface.GenerateDatabaseCredentialRequest.md) | Request parameters including endpoint path and optional UC claims |
+
+## Returns
+
+`Promise`\<[`DatabaseCredential`](Interface.DatabaseCredential.md)\>
+
+Database credentials with OAuth token and expiration time
+
+## See
+
+https://docs.databricks.com/aws/en/oltp/projects/authentication
+
+## Examples
+
+```typescript
+// Format: projects/{project-id}/branches/{branch-id}/endpoints/{endpoint-id}
+// Note: Use actual IDs from Databricks (project-id is a UUID)
+const credential = await generateDatabaseCredential(workspaceClient, {
+  endpoint: "projects/6bef4151-4b5d-4147-b4d0-c2f4fd5b40db/branches/br-sparkling-tree-y17uj7fn/endpoints/ep-restless-pine-y1ldaht0"
+});
+
+// Use credential.token as the password
+const client = new pg.Client({
+  host: "ep-abc123.database.us-east-1.databricks.com",
+  user: "user@example.com",
+  password: credential.token
+});
+await client.connect();
+```
+
+```typescript
+// Format: projects/{project-id}/branches/{branch-id}/endpoints/{endpoint-id}
+const credential = await generateDatabaseCredential(workspaceClient, {
+  endpoint: "projects/6bef4151-4b5d-4147-b4d0-c2f4fd5b40db/branches/br-sparkling-tree-y17uj7fn/endpoints/ep-restless-pine-y1ldaht0",
+  claims: [{
+    permission_set: RequestedClaimsPermissionSet.READ_ONLY,
+    resources: [{ table_name: "catalog.schema.users" }]
+  }]
+});
+```
diff --git a/docs/docs/api/appkit/Function.getLakebaseOrmConfig.md b/docs/docs/api/appkit/Function.getLakebaseOrmConfig.md
new file mode 100644
index 00000000..4e98e74c
--- /dev/null
+++ b/docs/docs/api/appkit/Function.getLakebaseOrmConfig.md
@@ -0,0 +1,84 @@
+# Function: getLakebaseOrmConfig()
+
+```ts
+function getLakebaseOrmConfig(config?: Partial<LakebasePoolConfig>): {
+  password: string | (() => string) | (() => Promise<string>) | undefined;
+  ssl:
+    | boolean
+    | {
+      rejectUnauthorized: boolean | undefined;
+    };
+  username: string | undefined;
+};
+```
+
+Get Lakebase connection configuration for ORMs that don't accept pg.Pool directly.
+
+Designed for ORMs like TypeORM and Sequelize that need connection parameters
+rather than a pre-configured pool instance.
+
+Returns connection config with field names compatible with common ORMs:
+- `username` instead of `user`
+- Simplified SSL config
+- Password callback support for OAuth token refresh
+
+## Parameters
+
+| Parameter | Type | Description |
+| ------ | ------ | ------ |
+| `config?` | `Partial`\<[`LakebasePoolConfig`](Interface.LakebasePoolConfig.md)\> | Optional configuration (reads from environment if not provided) |
+
+## Returns
+
+```ts
+{
+  password: string | (() => string) | (() => Promise<string>) | undefined;
+  ssl:
+    | boolean
+    | {
+      rejectUnauthorized: boolean | undefined;
+    };
+  username: string | undefined;
+}
+```
+
+ORM-compatible connection configuration
+
+### password
+
+```ts
+password: string | (() => string) | (() => Promise<string>) | undefined;
+```
+
+### ssl
+
+```ts
+ssl:
+  | boolean
+  | {
+  rejectUnauthorized: boolean | undefined;
+};
+```
+
+### username
+
+```ts
+username: string | undefined = user;
+```
+
+## Example
+
+```typescript
+// TypeORM
+const dataSource = new DataSource({
+  type: 'postgres',
+  ...getLakebaseOrmConfig(),
+  entities: [User],
+  synchronize: true,
+});
+
+// Sequelize
+const sequelize = new Sequelize({
+  dialect: 'postgres',
+  ...getLakebaseOrmConfig(),
+  logging: false,
+});
+```
diff --git a/docs/docs/api/appkit/Function.getLakebasePgConfig.md b/docs/docs/api/appkit/Function.getLakebasePgConfig.md
new file mode 100644
index 00000000..a3c2f7e7
--- /dev/null
+++ b/docs/docs/api/appkit/Function.getLakebasePgConfig.md
@@ -0,0 +1,27 @@
+# Function: getLakebasePgConfig()
+
+```ts
+function getLakebasePgConfig(config?: Partial<LakebasePoolConfig>, telemetry?: DriverTelemetry): PoolConfig;
+```
+
+Get Lakebase connection configuration for PostgreSQL clients.
+
+Returns pg.PoolConfig with OAuth token authentication configured.
+Best used with pg.Pool directly or ORMs that accept pg.Pool instances (like Drizzle).
+
+For ORMs that need connection parameters (TypeORM, Sequelize), use getLakebaseOrmConfig() instead.
+
+Used internally by createLakebasePool().
+
+## Parameters
+
+| Parameter | Type | Description |
+| ------ | ------ | ------ |
+| `config?` | `Partial`\<[`LakebasePoolConfig`](Interface.LakebasePoolConfig.md)\> | Optional configuration (reads from environment if not provided) |
+| `telemetry?` | `DriverTelemetry` | Optional pre-initialized telemetry (created internally if not provided) |
+
+## Returns
+
+`PoolConfig`
+
+PostgreSQL pool configuration with OAuth token refresh
diff --git a/docs/docs/api/appkit/Function.getLakebasePoolConfig.md b/docs/docs/api/appkit/Function.getLakebasePoolConfig.md
new file mode 100644
index 00000000..c8ee5b7e
--- /dev/null
+++ b/docs/docs/api/appkit/Function.getLakebasePoolConfig.md
@@ -0,0 +1,19 @@
+# ~~Function: getLakebasePoolConfig()~~
+
+```ts
+function getLakebasePoolConfig(config?: Partial<LakebasePoolConfig>): PoolConfig;
+```
+
+## Parameters
+
+| Parameter | Type |
+| ------ | ------ |
+| `config?` | `Partial`\<[`LakebasePoolConfig`](Interface.LakebasePoolConfig.md)\> |
+
+## Returns
+
+`PoolConfig`
+
+## Deprecated
+
+Use getLakebasePgConfig() instead. This function will be removed in a future version.
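+
+Since both functions accept the same `Partial<LakebasePoolConfig>` and return a `pg.PoolConfig`,
+migrating is a one-line rename (sketch):
+
+```typescript
+import { Pool } from 'pg';
+import { getLakebasePgConfig } from '@databricks/appkit';
+
+// Before: const pool = new Pool(getLakebasePoolConfig());
+const pool = new Pool(getLakebasePgConfig());
+```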
diff --git a/docs/docs/api/appkit/Function.getWorkspaceClient.md b/docs/docs/api/appkit/Function.getWorkspaceClient.md
new file mode 100644
index 00000000..5ff9d152
--- /dev/null
+++ b/docs/docs/api/appkit/Function.getWorkspaceClient.md
@@ -0,0 +1,17 @@
+# Function: getWorkspaceClient()
+
+```ts
+function getWorkspaceClient(config: Partial<LakebasePoolConfig>): Promise<WorkspaceClient>;
+```
+
+Get workspace client from config or execution context
+
+## Parameters
+
+| Parameter | Type |
+| ------ | ------ |
+| `config` | `Partial`\<[`LakebasePoolConfig`](Interface.LakebasePoolConfig.md)\> |
+
+## Returns
+
+`Promise`\<`WorkspaceClient`\>
diff --git a/docs/docs/api/appkit/Interface.DatabaseCredential.md b/docs/docs/api/appkit/Interface.DatabaseCredential.md
new file mode 100644
index 00000000..b2a0b255
--- /dev/null
+++ b/docs/docs/api/appkit/Interface.DatabaseCredential.md
@@ -0,0 +1,30 @@
+# Interface: DatabaseCredential
+
+Database credentials with OAuth token for Postgres connection
+
+## Properties
+
+### expire\_time
+
+```ts
+expire_time: string;
+```
+
+Token expiration time in UTC (ISO 8601 format)
+Tokens expire after 1 hour from generation
+
+#### Example
+
+```ts
+"2026-02-06T17:07:00Z"
+```
+
+***
+
+### token
+
+```ts
+token: string;
+```
+
+OAuth token to use as the password when connecting to Postgres
diff --git a/docs/docs/api/appkit/Interface.GenerateDatabaseCredentialRequest.md b/docs/docs/api/appkit/Interface.GenerateDatabaseCredentialRequest.md
new file mode 100644
index 00000000..766a82cc
--- /dev/null
+++ b/docs/docs/api/appkit/Interface.GenerateDatabaseCredentialRequest.md
@@ -0,0 +1,50 @@
+# Interface: GenerateDatabaseCredentialRequest
+
+Request parameters for generating database OAuth credentials
+
+## Properties
+
+### claims?
+
+```ts
+optional claims: RequestedClaims[];
+```
+
+Optional claims for fine-grained UC table permissions.
+When specified, the token will only grant access to the specified tables.
+
+#### Example
+
+```typescript
+{
+  claims: [{
+    permission_set: RequestedClaimsPermissionSet.READ_ONLY,
+    resources: [{ table_name: "catalog.schema.users" }]
+  }]
+}
+```
+
+***
+
+### endpoint
+
+```ts
+endpoint: string;
+```
+
+Endpoint resource path with IDs assigned by Databricks.
+
+All segments are IDs from Databricks (not names you create):
+- project-id: UUID format (e.g., `a1b2c3d4-e5f6-4789-a012-b3c4d5e6f789`)
+- branch-id: Identifier from Databricks (e.g., `main`, `dev`)
+- endpoint-id: Identifier from Databricks (e.g., `primary`, `analytics`)
+
+Format: `projects/{project-id}/branches/{branch-id}/endpoints/{endpoint-id}`
+
+**Important:** Copy from Databricks Lakebase UI - do not construct manually.
+
+#### Example
+
+```ts
+"projects/6bef4151-4b5d-4147-b4d0-c2f4fd5b40db/branches/br-sparkling-tree-y17uj7fn/endpoints/ep-restless-pine-y1ldaht0"
+```
diff --git a/docs/docs/api/appkit/Interface.LakebasePoolConfig.md b/docs/docs/api/appkit/Interface.LakebasePoolConfig.md
new file mode 100644
index 00000000..3d19e38a
--- /dev/null
+++ b/docs/docs/api/appkit/Interface.LakebasePoolConfig.md
@@ -0,0 +1,87 @@
+# Interface: LakebasePoolConfig
+
+Configuration for creating a Lakebase connection pool
+
+Supports two authentication methods:
+1. OAuth token authentication - Provide workspaceClient + endpoint (automatic token rotation)
+2. Native Postgres password authentication - Provide password string or function
+
+Extends pg.PoolConfig to support all standard PostgreSQL pool options.
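+
+A sketch of the two methods side by side (host, database, and user values are placeholders;
+`workspaceClient` is assumed to be in scope):
+
+```typescript
+// 1. OAuth token authentication - tokens are fetched and rotated automatically
+const oauthPool = createLakebasePool({
+  workspaceClient,
+  endpoint: process.env.LAKEBASE_ENDPOINT,
+  host: 'ep-abc.databricks.com',
+  database: 'databricks_postgres',
+});
+
+// 2. Native Postgres password authentication - OAuth is bypassed entirely
+const nativePool = createLakebasePool({
+  host: 'ep-abc.databricks.com',
+  database: 'databricks_postgres',
+  user: 'app_user',
+  password: process.env.PGPASSWORD,
+});
+```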
+ +## See + +https://docs.databricks.com/aws/en/oltp/projects/authentication + +## Extends + +- `PoolConfig` + +## Properties + +### endpoint? + +```ts +optional endpoint: string; +``` + +Endpoint resource path for OAuth token generation. + +All segments are IDs assigned by Databricks (not names you create): +- project-id: UUID format (e.g., `a1b2c3d4-e5f6-4789-a012-b3c4d5e6f789`) +- branch-id: Identifier from Databricks (e.g., `main`, `dev`) +- endpoint-id: Identifier from Databricks (e.g., `primary`, `analytics`) + +Format: `projects/{project-id}/branches/{branch-id}/endpoints/{endpoint-id}` + +Required for OAuth authentication (unless password is provided) +Can also be set via LAKEBASE_ENDPOINT environment variable + +#### Example + +```ts +"projects/6bef4151-4b5d-4147-b4d0-c2f4fd5b40db/branches/br-sparkling-tree-y17uj7fn/endpoints/ep-restless-pine-y1ldaht0" +``` + +*** + +### sslMode? + +```ts +optional sslMode: "require" | "disable" | "prefer"; +``` + +SSL mode for the connection (convenience helper) +Can also be set via PGSSLMODE environment variable + +#### Default + +```ts +"require" +``` + +*** + +### telemetry? + +```ts +optional telemetry: TelemetryOptions; +``` + +Telemetry configuration + +- `true` or omitted: enable all telemetry (traces, metrics, logs) -- no-op when OTEL is not configured +- `false`: disable all telemetry +- `{ traces?, metrics?, logs? }`: fine-grained control + +*** + +### workspaceClient? + +```ts +optional workspaceClient: WorkspaceClient; +``` + +Databricks workspace client for OAuth authentication +If not provided along with endpoint, will attempt to use ServiceContext + +Note: If password is provided, OAuth auth is not used diff --git a/docs/docs/api/appkit/Interface.RequestedClaims.md b/docs/docs/api/appkit/Interface.RequestedClaims.md new file mode 100644 index 00000000..38095e7e --- /dev/null +++ b/docs/docs/api/appkit/Interface.RequestedClaims.md @@ -0,0 +1,24 @@ +# Interface: RequestedClaims + +Optional claims for fine-grained Unity Catalog table permissions +When specified, the returned token will be scoped to only the requested tables + +## Properties + +### permission\_set? + +```ts +optional permission_set: READ_ONLY; +``` + +Permission level to request + +*** + +### resources? + +```ts +optional resources: RequestedResource[]; +``` + +List of UC resources to request access to diff --git a/docs/docs/api/appkit/Interface.RequestedResource.md b/docs/docs/api/appkit/Interface.RequestedResource.md new file mode 100644 index 00000000..c91637bd --- /dev/null +++ b/docs/docs/api/appkit/Interface.RequestedResource.md @@ -0,0 +1,29 @@ +# Interface: RequestedResource + +Resource to request permissions for in Unity Catalog + +## Properties + +### table\_name? + +```ts +optional table_name: string; +``` + +Unity Catalog table name to request access to + +#### Example + +```ts +"catalog.schema.table" +``` + +*** + +### unspecified\_resource\_name? + +```ts +optional unspecified_resource_name: string; +``` + +Generic resource name for non-table resources diff --git a/docs/docs/api/appkit/index.md b/docs/docs/api/appkit/index.md index f1a0e5f8..00f90768 100644 --- a/docs/docs/api/appkit/index.md +++ b/docs/docs/api/appkit/index.md @@ -7,7 +7,7 @@ plugin architecture, and React integration. | Enumeration | Description | | ------ | ------ | -| [ResourceType](Enumeration.ResourceType.md) | Supported resource types that plugins can depend on. Each type has its own set of valid permissions. 
| +| [RequestedClaimsPermissionSet](Enumeration.RequestedClaimsPermissionSet.md) | Permission set for Unity Catalog table access | ## Classes @@ -19,8 +19,7 @@ plugin architecture, and React integration. | [ConnectionError](Class.ConnectionError.md) | Error thrown when a connection or network operation fails. Use for database pool errors, API failures, timeouts, etc. | | [ExecutionError](Class.ExecutionError.md) | Error thrown when an operation execution fails. Use for statement failures, canceled operations, or unexpected states. | | [InitializationError](Class.InitializationError.md) | Error thrown when a service or component is not properly initialized. Use when accessing services before they are ready. | -| [Plugin](Class.Plugin.md) | Base abstract class for creating AppKit plugins. | -| [ResourceRegistry](Class.ResourceRegistry.md) | Central registry for tracking plugin resource requirements. Deduplication uses type + resourceKey (machine-stable); alias is for display only. | +| [Plugin](Class.Plugin.md) | Base abstract class for creating AppKit plugins | | [ServerError](Class.ServerError.md) | Error thrown when server lifecycle operations fail. Use for server start/stop issues, configuration conflicts, etc. | | [TunnelError](Class.TunnelError.md) | Error thrown when remote tunnel operations fail. Use for tunnel connection issues, message parsing failures, etc. | | [ValidationError](Class.ValidationError.md) | Error thrown when input validation fails. Use for invalid parameters, missing required fields, or type mismatches. | @@ -31,22 +30,20 @@ plugin architecture, and React integration. | ------ | ------ | | [BasePluginConfig](Interface.BasePluginConfig.md) | Base configuration interface for AppKit plugins | | [CacheConfig](Interface.CacheConfig.md) | Configuration for caching | +| [DatabaseCredential](Interface.DatabaseCredential.md) | Database credentials with OAuth token for Postgres connection | +| [GenerateDatabaseCredentialRequest](Interface.GenerateDatabaseCredentialRequest.md) | Request parameters for generating database OAuth credentials | | [ITelemetry](Interface.ITelemetry.md) | Plugin-facing interface for OpenTelemetry instrumentation. Provides a thin abstraction over OpenTelemetry APIs for plugins. | -| [PluginManifest](Interface.PluginManifest.md) | Plugin manifest that declares metadata and resource requirements. Attached to plugin classes as a static property. | -| [ResourceEntry](Interface.ResourceEntry.md) | Internal representation of a resource in the registry. Extends ResourceRequirement with resolution state and plugin ownership. | -| [ResourceFieldEntry](Interface.ResourceFieldEntry.md) | Defines a single field for a resource. Each field has its own environment variable and optional description. Single-value types use one key (e.g. id); multi-value types (database, secret) use multiple (e.g. instance_name, database_name or scope, key). | -| [ResourceRequirement](Interface.ResourceRequirement.md) | Declares a resource requirement for a plugin. Can be defined statically in a manifest or dynamically via getResourceRequirements(). 
| +| [LakebasePoolConfig](Interface.LakebasePoolConfig.md) | Configuration for creating a Lakebase connection pool | +| [RequestedClaims](Interface.RequestedClaims.md) | Optional claims for fine-grained Unity Catalog table permissions When specified, the returned token will be scoped to only the requested tables | +| [RequestedResource](Interface.RequestedResource.md) | Resource to request permissions for in Unity Catalog | | [StreamExecutionSettings](Interface.StreamExecutionSettings.md) | Configuration for streaming execution with default and user-scoped settings | | [TelemetryConfig](Interface.TelemetryConfig.md) | OpenTelemetry configuration for AppKit applications | -| [ValidationResult](Interface.ValidationResult.md) | Result of validating all registered resources against the environment. | ## Type Aliases | Type Alias | Description | | ------ | ------ | -| [ConfigSchema](TypeAlias.ConfigSchema.md) | Configuration schema definition for plugin config. Re-exported from the standard JSON Schema Draft 7 types. | | [IAppRouter](TypeAlias.IAppRouter.md) | Express router type for plugin route registration | -| [ResourcePermission](TypeAlias.ResourcePermission.md) | Union of all possible permission levels across all resource types. | ## Variables @@ -60,7 +57,11 @@ plugin architecture, and React integration. | ------ | ------ | | [appKitTypesPlugin](Function.appKitTypesPlugin.md) | Vite plugin to generate types for AppKit queries. Calls generateFromEntryPoint under the hood. | | [createApp](Function.createApp.md) | Bootstraps AppKit with the provided configuration. | +| [createLakebasePool](Function.createLakebasePool.md) | Create a PostgreSQL connection pool with automatic OAuth token refresh for Lakebase. | +| [generateDatabaseCredential](Function.generateDatabaseCredential.md) | Generate OAuth credentials for Postgres database connection using the proper Postgres API. | | [getExecutionContext](Function.getExecutionContext.md) | Get the current execution context. | -| [getPluginManifest](Function.getPluginManifest.md) | Loads and validates the manifest from a plugin constructor. Normalizes string type/permission to strict ResourceType/ResourcePermission. | -| [getResourceRequirements](Function.getResourceRequirements.md) | Gets the resource requirements from a plugin's manifest. | +| [getLakebaseOrmConfig](Function.getLakebaseOrmConfig.md) | Get Lakebase connection configuration for ORMs that don't accept pg.Pool directly. | +| [getLakebasePgConfig](Function.getLakebasePgConfig.md) | Get Lakebase connection configuration for PostgreSQL clients. 
| +| [~~getLakebasePoolConfig~~](Function.getLakebasePoolConfig.md) | - | +| [getWorkspaceClient](Function.getWorkspaceClient.md) | Get workspace client from config or execution context | | [isSQLTypeMarker](Function.isSQLTypeMarker.md) | Type guard to check if a value is a SQL type marker | diff --git a/docs/docs/api/appkit/typedoc-sidebar.ts b/docs/docs/api/appkit/typedoc-sidebar.ts index aa114b63..27367112 100644 --- a/docs/docs/api/appkit/typedoc-sidebar.ts +++ b/docs/docs/api/appkit/typedoc-sidebar.ts @@ -7,8 +7,8 @@ const typedocSidebar: SidebarsConfig = { items: [ { type: "doc", - id: "api/appkit/Enumeration.ResourceType", - label: "ResourceType" + id: "api/appkit/Enumeration.RequestedClaimsPermissionSet", + label: "RequestedClaimsPermissionSet" } ] }, @@ -51,11 +51,6 @@ const typedocSidebar: SidebarsConfig = { id: "api/appkit/Class.Plugin", label: "Plugin" }, - { - type: "doc", - id: "api/appkit/Class.ResourceRegistry", - label: "ResourceRegistry" - }, { type: "doc", id: "api/appkit/Class.ServerError", @@ -89,28 +84,33 @@ const typedocSidebar: SidebarsConfig = { }, { type: "doc", - id: "api/appkit/Interface.ITelemetry", - label: "ITelemetry" + id: "api/appkit/Interface.DatabaseCredential", + label: "DatabaseCredential" + }, + { + type: "doc", + id: "api/appkit/Interface.GenerateDatabaseCredentialRequest", + label: "GenerateDatabaseCredentialRequest" }, { type: "doc", - id: "api/appkit/Interface.PluginManifest", - label: "PluginManifest" + id: "api/appkit/Interface.ITelemetry", + label: "ITelemetry" }, { type: "doc", - id: "api/appkit/Interface.ResourceEntry", - label: "ResourceEntry" + id: "api/appkit/Interface.LakebasePoolConfig", + label: "LakebasePoolConfig" }, { type: "doc", - id: "api/appkit/Interface.ResourceFieldEntry", - label: "ResourceFieldEntry" + id: "api/appkit/Interface.RequestedClaims", + label: "RequestedClaims" }, { type: "doc", - id: "api/appkit/Interface.ResourceRequirement", - label: "ResourceRequirement" + id: "api/appkit/Interface.RequestedResource", + label: "RequestedResource" }, { type: "doc", @@ -121,11 +121,6 @@ const typedocSidebar: SidebarsConfig = { type: "doc", id: "api/appkit/Interface.TelemetryConfig", label: "TelemetryConfig" - }, - { - type: "doc", - id: "api/appkit/Interface.ValidationResult", - label: "ValidationResult" } ] }, @@ -133,20 +128,10 @@ const typedocSidebar: SidebarsConfig = { type: "category", label: "Type Aliases", items: [ - { - type: "doc", - id: "api/appkit/TypeAlias.ConfigSchema", - label: "ConfigSchema" - }, { type: "doc", id: "api/appkit/TypeAlias.IAppRouter", label: "IAppRouter" - }, - { - type: "doc", - id: "api/appkit/TypeAlias.ResourcePermission", - label: "ResourcePermission" } ] }, @@ -175,6 +160,16 @@ const typedocSidebar: SidebarsConfig = { id: "api/appkit/Function.createApp", label: "createApp" }, + { + type: "doc", + id: "api/appkit/Function.createLakebasePool", + label: "createLakebasePool" + }, + { + type: "doc", + id: "api/appkit/Function.generateDatabaseCredential", + label: "generateDatabaseCredential" + }, { type: "doc", id: "api/appkit/Function.getExecutionContext", @@ -182,13 +177,24 @@ const typedocSidebar: SidebarsConfig = { }, { type: "doc", - id: "api/appkit/Function.getPluginManifest", - label: "getPluginManifest" + id: "api/appkit/Function.getLakebaseOrmConfig", + label: "getLakebaseOrmConfig" + }, + { + type: "doc", + id: "api/appkit/Function.getLakebasePgConfig", + label: "getLakebasePgConfig" + }, + { + type: "doc", + id: "api/appkit/Function.getLakebasePoolConfig", + label: "getLakebasePoolConfig", 
+ className: "typedoc-sidebar-item-deprecated" }, { type: "doc", - id: "api/appkit/Function.getResourceRequirements", - label: "getResourceRequirements" + id: "api/appkit/Function.getWorkspaceClient", + label: "getWorkspaceClient" }, { type: "doc", diff --git a/package.json b/package.json index a08b06eb..a88ea962 100644 --- a/package.json +++ b/package.json @@ -57,7 +57,7 @@ "husky": "^9.1.7", "jsdom": "^27.0.0", "lint-staged": "^15.5.1", - "pg": "^8.16.3", + "pg": "^8.18.0", "plop": "^4.0.4", "publint": "^0.3.15", "release-it": "^19.1.0", diff --git a/packages/appkit/package.json b/packages/appkit/package.json index 3a0a622f..6880425c 100644 --- a/packages/appkit/package.json +++ b/packages/appkit/package.json @@ -42,7 +42,7 @@ "typecheck": "tsc --noEmit" }, "dependencies": { - "@databricks/sdk-experimental": "^0.15.0", + "@databricks/sdk-experimental": "^0.16.0", "@opentelemetry/api": "^1.9.0", "@opentelemetry/api-logs": "^0.208.0", "@opentelemetry/auto-instrumentations-node": "^0.67.0", @@ -61,7 +61,7 @@ "dotenv": "^16.6.1", "express": "^4.22.0", "obug": "^2.1.1", - "pg": "^8.16.3", + "pg": "^8.18.0", "semver": "^7.7.3", "shared": "workspace:*", "vite": "npm:rolldown-vite@7.1.14", @@ -71,7 +71,7 @@ "devDependencies": { "@types/express": "^4.17.25", "@types/json-schema": "^7.0.15", - "@types/pg": "^8.15.6", + "@types/pg": "^8.16.0", "@types/ws": "^8.18.1", "@vitejs/plugin-react": "^5.1.1" }, diff --git a/packages/appkit/src/cache/index.ts b/packages/appkit/src/cache/index.ts index 952a7987..bdc6c10d 100644 --- a/packages/appkit/src/cache/index.ts +++ b/packages/appkit/src/cache/index.ts @@ -1,7 +1,7 @@ import { createHash } from "node:crypto"; import { WorkspaceClient } from "@databricks/sdk-experimental"; import type { CacheConfig, CacheStorage } from "shared"; -import { LakebaseV1Connector } from "@/connectors"; +import { createLakebasePool } from "@/connectors/lakebase"; import { AppKitError, ExecutionError, InitializationError } from "../errors"; import { createLogger } from "../logging/logger"; import type { Counter, TelemetryProvider } from "../telemetry"; @@ -147,14 +147,17 @@ export class CacheManager { // try to use lakebase storage try { const workspaceClient = new WorkspaceClient({}); - const connector = new LakebaseV1Connector({ workspaceClient }); - const isHealthy = await connector.healthCheck(); + const pool = createLakebasePool({ workspaceClient }); + const persistentStorage = new PersistentStorage(config, pool); + const isHealthy = await persistentStorage.healthCheck(); if (isHealthy) { - const persistentStorage = new PersistentStorage(config, connector); await persistentStorage.initialize(); return new CacheManager(persistentStorage, config); } + + // Health check failed, close the pool and fallback + await pool.end(); } catch { // lakebase unavailable, continue with in-memory storage } diff --git a/packages/appkit/src/cache/storage/persistent.ts b/packages/appkit/src/cache/storage/persistent.ts index 393385cd..d9affd73 100644 --- a/packages/appkit/src/cache/storage/persistent.ts +++ b/packages/appkit/src/cache/storage/persistent.ts @@ -1,6 +1,6 @@ import { createHash } from "node:crypto"; +import type pg from "pg"; import type { CacheConfig, CacheEntry, CacheStorage } from "shared"; -import type { LakebaseV1Connector } from "../../connectors"; import { InitializationError, ValidationError } from "../../errors"; import { createLogger } from "../../logging/logger"; import { lakebaseStorageDefaults } from "./defaults"; @@ -12,7 +12,8 @@ const logger = 
createLogger("cache:persistent"); * to manage memory usage and ensure efficient cache operations. * * @example - * const persistentStorage = new PersistentStorage(config, connector); + * const pool = createLakebasePool({ workspaceClient }); + * const persistentStorage = new PersistentStorage(config, pool); * await persistentStorage.initialize(); * await persistentStorage.get("my-key"); * await persistentStorage.set("my-key", "my-value"); @@ -22,7 +23,7 @@ const logger = createLogger("cache:persistent"); * */ export class PersistentStorage implements CacheStorage { - private readonly connector: LakebaseV1Connector; + private readonly pool: pg.Pool; private readonly tableName: string; private readonly maxBytes: number; private readonly maxEntryBytes: number; @@ -30,8 +31,8 @@ export class PersistentStorage implements CacheStorage { private readonly evictionCheckProbability: number; private initialized: boolean; - constructor(config: CacheConfig, connector: LakebaseV1Connector) { - this.connector = connector; + constructor(config: CacheConfig, pool: pg.Pool) { + this.pool = pool; this.maxBytes = config.maxBytes ?? lakebaseStorageDefaults.maxBytes; this.maxEntryBytes = config.maxEntryBytes ?? lakebaseStorageDefaults.maxEntryBytes; @@ -66,7 +67,7 @@ export class PersistentStorage implements CacheStorage { const keyHash = this.hashKey(key); - const result = await this.connector.query<{ + const result = await this.pool.query<{ value: Buffer; expiry: string; }>(`SELECT value, expiry FROM ${this.tableName} WHERE key_hash = $1`, [ @@ -78,7 +79,7 @@ export class PersistentStorage implements CacheStorage { const entry = result.rows[0]; // fire-and-forget update - this.connector + this.pool .query( `UPDATE ${this.tableName} SET last_accessed = NOW() WHERE key_hash = $1`, [keyHash], @@ -123,7 +124,7 @@ export class PersistentStorage implements CacheStorage { } } - await this.connector.query( + await this.pool.query( `INSERT INTO ${this.tableName} (key_hash, key, value, byte_size, expiry, created_at, last_accessed) VALUES ($1, $2, $3, $4, $5, NOW(), NOW()) ON CONFLICT (key_hash) @@ -141,16 +142,15 @@ export class PersistentStorage implements CacheStorage { async delete(key: string): Promise { await this.ensureInitialized(); const keyHash = this.hashKey(key); - await this.connector.query( - `DELETE FROM ${this.tableName} WHERE key_hash = $1`, - [keyHash], - ); + await this.pool.query(`DELETE FROM ${this.tableName} WHERE key_hash = $1`, [ + keyHash, + ]); } /** Clear the persistent storage */ async clear(): Promise { await this.ensureInitialized(); - await this.connector.query(`TRUNCATE TABLE ${this.tableName}`); + await this.pool.query(`TRUNCATE TABLE ${this.tableName}`); } /** @@ -162,7 +162,7 @@ export class PersistentStorage implements CacheStorage { await this.ensureInitialized(); const keyHash = this.hashKey(key); - const result = await this.connector.query<{ exists: boolean }>( + const result = await this.pool.query<{ exists: boolean }>( `SELECT EXISTS(SELECT 1 FROM ${this.tableName} WHERE key_hash = $1) as exists`, [keyHash], ); @@ -177,7 +177,7 @@ export class PersistentStorage implements CacheStorage { async size(): Promise { await this.ensureInitialized(); - const result = await this.connector.query<{ count: string }>( + const result = await this.pool.query<{ count: string }>( `SELECT COUNT(*) as count FROM ${this.tableName}`, ); return parseInt(result.rows[0]?.count ?? 
"0", 10); @@ -187,7 +187,7 @@ export class PersistentStorage implements CacheStorage { async totalBytes(): Promise { await this.ensureInitialized(); - const result = await this.connector.query<{ total: string }>( + const result = await this.pool.query<{ total: string }>( `SELECT COALESCE(SUM(byte_size), 0) as total FROM ${this.tableName}`, ); return parseInt(result.rows[0]?.total ?? "0", 10); @@ -207,7 +207,8 @@ export class PersistentStorage implements CacheStorage { */ async healthCheck(): Promise { try { - return await this.connector.healthCheck(); + await this.pool.query("SELECT 1"); + return true; } catch { return false; } @@ -215,7 +216,7 @@ export class PersistentStorage implements CacheStorage { /** Close the persistent storage */ async close(): Promise { - await this.connector.close(); + await this.pool.end(); } /** @@ -224,7 +225,7 @@ export class PersistentStorage implements CacheStorage { */ async cleanupExpired(): Promise { await this.ensureInitialized(); - const result = await this.connector.query<{ count: string }>( + const result = await this.pool.query<{ count: string }>( `WITH deleted as (DELETE FROM ${this.tableName} WHERE expiry < $1 RETURNING *) SELECT COUNT(*) as count FROM deleted`, [Date.now()], ); @@ -241,7 +242,7 @@ export class PersistentStorage implements CacheStorage { } } - await this.connector.query( + await this.pool.query( `DELETE FROM ${this.tableName} WHERE key_hash IN (SELECT key_hash FROM ${this.tableName} ORDER BY last_accessed ASC LIMIT $1)`, [this.evictionBatchSize], @@ -275,7 +276,7 @@ export class PersistentStorage implements CacheStorage { /** Run migrations for the persistent storage */ private async runMigrations(): Promise { try { - await this.connector.query(` + await this.pool.query(` CREATE TABLE IF NOT EXISTS ${this.tableName} ( id BIGSERIAL PRIMARY KEY, key_hash BIGINT NOT NULL, @@ -289,22 +290,22 @@ export class PersistentStorage implements CacheStorage { `); // unique index on key_hash for fast lookups - await this.connector.query( + await this.pool.query( `CREATE UNIQUE INDEX IF NOT EXISTS idx_${this.tableName}_key_hash ON ${this.tableName} (key_hash);`, ); // index on expiry for cleanup queries - await this.connector.query( + await this.pool.query( `CREATE INDEX IF NOT EXISTS idx_${this.tableName}_expiry ON ${this.tableName} (expiry); `, ); // index on last_accessed for LRU eviction - await this.connector.query( + await this.pool.query( `CREATE INDEX IF NOT EXISTS idx_${this.tableName}_last_accessed ON ${this.tableName} (last_accessed); `, ); // index on byte_size for monitoring - await this.connector.query( + await this.pool.query( `CREATE INDEX IF NOT EXISTS idx_${this.tableName}_byte_size ON ${this.tableName} (byte_size); `, ); } catch (error) { diff --git a/packages/appkit/src/cache/tests/cache-manager.test.ts b/packages/appkit/src/cache/tests/cache-manager.test.ts index a4e54438..8e45d679 100644 --- a/packages/appkit/src/cache/tests/cache-manager.test.ts +++ b/packages/appkit/src/cache/tests/cache-manager.test.ts @@ -2,18 +2,19 @@ import type { CacheStorage } from "shared"; import { afterEach, beforeEach, describe, expect, test, vi } from "vitest"; import { CacheManager } from "../../index"; -// Mock LakebaseV1Connector -const mockLakebaseHealthCheck = vi.fn(); -vi.mock("@/connectors", () => ({ - LakebaseV1Connector: vi.fn().mockImplementation(() => ({ - healthCheck: mockLakebaseHealthCheck, - close: vi.fn().mockResolvedValue(undefined), +// Mock createLakebasePool +const mockPoolQuery = vi.fn(); +const mockPoolEnd = vi.fn(); 
+vi.mock("@/connectors/lakebase", () => ({ + createLakebasePool: vi.fn().mockImplementation(() => ({ + query: mockPoolQuery, + end: mockPoolEnd.mockResolvedValue(undefined), })), })); // Mock PersistentStorage vi.mock("../storage/persistent", () => ({ - PersistentStorage: vi.fn().mockImplementation(() => { + PersistentStorage: vi.fn().mockImplementation((_config: any, pool: any) => { const cache = new Map(); return { initialize: vi.fn().mockResolvedValue(undefined), @@ -32,8 +33,16 @@ vi.mock("../storage/persistent", () => ({ has: vi.fn().mockImplementation(async (key: string) => cache.has(key)), size: vi.fn().mockImplementation(async () => cache.size), isPersistent: vi.fn().mockReturnValue(true), - healthCheck: vi.fn().mockResolvedValue(true), - close: vi.fn().mockResolvedValue(undefined), + healthCheck: vi.fn().mockImplementation(async () => { + // Simulate real healthCheck: calls pool.query('SELECT 1') + try { + await pool.query("SELECT 1"); + return true; + } catch { + return false; + } + }), + close: vi.fn().mockImplementation(async () => pool.end()), cleanupExpired: vi.fn().mockResolvedValue(0), }; }), @@ -87,7 +96,7 @@ describe("CacheManager", () => { (CacheManager as any).instance = null; (CacheManager as any).initPromise = null; // Default: Lakebase unavailable (most tests pass explicit storage) - mockLakebaseHealthCheck.mockResolvedValue(false); + mockPoolQuery.mockRejectedValue(new Error("Connection failed")); }); afterEach(() => { @@ -621,8 +630,11 @@ describe("CacheManager", () => { (CacheManager as any).instance = null; (CacheManager as any).initPromise = null; - // Make Lakebase healthy - mockLakebaseHealthCheck.mockResolvedValue(true); + // Make pool.query succeed for healthCheck ('SELECT 1') + mockPoolQuery.mockResolvedValue({ + rows: [{ "?column?": 1 }], + rowCount: 1, + }); const cache = await CacheManager.getInstance({}); @@ -636,8 +648,8 @@ describe("CacheManager", () => { (CacheManager as any).instance = null; (CacheManager as any).initPromise = null; - // Lakebase unhealthy (default in beforeEach) - mockLakebaseHealthCheck.mockResolvedValue(false); + // Lakebase unhealthy (pool.query fails, default in beforeEach) + mockPoolQuery.mockRejectedValue(new Error("Connection failed")); const cache = await CacheManager.getInstance({}); @@ -656,8 +668,8 @@ describe("CacheManager", () => { (CacheManager as any).instance = null; (CacheManager as any).initPromise = null; - // Lakebase unhealthy - mockLakebaseHealthCheck.mockResolvedValue(false); + // Lakebase unhealthy (pool.query fails) + mockPoolQuery.mockRejectedValue(new Error("Connection failed")); const cache = await CacheManager.getInstance({ strictPersistence: true, @@ -677,8 +689,8 @@ describe("CacheManager", () => { (CacheManager as any).instance = null; (CacheManager as any).initPromise = null; - // Lakebase unhealthy - mockLakebaseHealthCheck.mockResolvedValue(false); + // Lakebase unhealthy - pool.query('SELECT 1') fails + mockPoolQuery.mockRejectedValue(new Error("Health check failed")); const cache = await CacheManager.getInstance({}); @@ -693,9 +705,7 @@ describe("CacheManager", () => { (CacheManager as any).initPromise = null; // Lakebase throws - mockLakebaseHealthCheck.mockRejectedValue( - new Error("Connection refused"), - ); + mockPoolQuery.mockRejectedValue(new Error("Connection refused")); const cache = await CacheManager.getInstance({}); diff --git a/packages/appkit/src/cache/tests/persistent.test.ts b/packages/appkit/src/cache/tests/persistent.test.ts index fc3ead27..82706b0d 100644 --- 
a/packages/appkit/src/cache/tests/persistent.test.ts +++ b/packages/appkit/src/cache/tests/persistent.test.ts @@ -1,26 +1,25 @@ import { beforeEach, describe, expect, test, vi } from "vitest"; import { PersistentStorage } from "../storage"; -/** Mock LakebaseV1Connector for testing */ -const createMockConnector = () => ({ +/** Mock pg.Pool for testing */ +const createMockPool = () => ({ query: vi.fn(), - healthCheck: vi.fn().mockResolvedValue(true), - close: vi.fn().mockResolvedValue(undefined), + end: vi.fn().mockResolvedValue(undefined), }); describe("PersistentStorage", () => { let storage: PersistentStorage; - let mockConnector: ReturnType; + let mockPool: ReturnType; beforeEach(() => { - mockConnector = createMockConnector(); + mockPool = createMockPool(); // Default: migrations succeed - mockConnector.query.mockResolvedValue({ rows: [] }); + mockPool.query.mockResolvedValue({ rows: [] }); storage = new PersistentStorage( { maxBytes: 1024 * 1024 }, // 1MB - mockConnector as any, + mockPool as any, ); }); @@ -29,12 +28,12 @@ describe("PersistentStorage", () => { await storage.initialize(); // Should create table - expect(mockConnector.query).toHaveBeenCalledWith( + expect(mockPool.query).toHaveBeenCalledWith( expect.stringContaining("CREATE TABLE IF NOT EXISTS"), ); // Should create unique index on key_hash - expect(mockConnector.query).toHaveBeenCalledWith( + expect(mockPool.query).toHaveBeenCalledWith( expect.stringContaining("CREATE UNIQUE INDEX IF NOT EXISTS"), ); }); @@ -44,7 +43,7 @@ describe("PersistentStorage", () => { await storage.initialize(); // CREATE TABLE should only be called once (first initialization) - const createTableCalls = mockConnector.query.mock.calls.filter((call) => + const createTableCalls = mockPool.query.mock.calls.filter((call) => call[0].includes("CREATE TABLE"), ); expect(createTableCalls.length).toBe(1); @@ -55,7 +54,7 @@ describe("PersistentStorage", () => { .spyOn(console, "error") .mockImplementation(() => {}); - mockConnector.query.mockRejectedValue(new Error("migration failed")); + mockPool.query.mockRejectedValue(new Error("migration failed")); await expect(storage.initialize()).rejects.toThrow( "Error in running migrations for persistent storage", @@ -68,7 +67,7 @@ describe("PersistentStorage", () => { describe("get", () => { beforeEach(async () => { await storage.initialize(); - mockConnector.query.mockClear(); + mockPool.query.mockClear(); }); test("should return cached entry", async () => { @@ -78,7 +77,7 @@ describe("PersistentStorage", () => { "utf-8", ); - mockConnector.query.mockResolvedValueOnce({ + mockPool.query.mockResolvedValueOnce({ rows: [{ value: valueBuffer, expiry: String(expiry) }], }); @@ -88,14 +87,14 @@ describe("PersistentStorage", () => { value: { data: "test" }, expiry, }); - expect(mockConnector.query).toHaveBeenCalledWith( + expect(mockPool.query).toHaveBeenCalledWith( expect.stringContaining("SELECT value, expiry"), [expect.any(BigInt)], // key_hash is bigint ); }); test("should return null for non-existent key", async () => { - mockConnector.query.mockResolvedValueOnce({ rows: [] }); + mockPool.query.mockResolvedValueOnce({ rows: [] }); const result = await storage.get("non-existent"); @@ -109,7 +108,7 @@ describe("PersistentStorage", () => { "utf-8", ); - mockConnector.query + mockPool.query .mockResolvedValueOnce({ rows: [{ value: valueBuffer, expiry: String(expiry) }], }) @@ -120,7 +119,7 @@ describe("PersistentStorage", () => { // Wait for fire-and-forget update await new Promise((resolve) => setTimeout(resolve, 
10)); - expect(mockConnector.query).toHaveBeenCalledWith( + expect(mockPool.query).toHaveBeenCalledWith( expect.stringContaining("UPDATE"), [expect.any(BigInt)], // key_hash ); @@ -130,7 +129,7 @@ describe("PersistentStorage", () => { describe("set", () => { beforeEach(async () => { await storage.initialize(); - mockConnector.query.mockClear(); + mockPool.query.mockClear(); }); test("should insert new entry", async () => { @@ -138,14 +137,14 @@ describe("PersistentStorage", () => { const randomSpy = vi.spyOn(Math, "random").mockReturnValue(0.5); // INSERT succeeds - mockConnector.query.mockResolvedValueOnce({ rows: [] }); + mockPool.query.mockResolvedValueOnce({ rows: [] }); await storage.set("test-key", { value: { data: "test" }, expiry: Date.now() + 10000, }); - expect(mockConnector.query).toHaveBeenCalledWith( + expect(mockPool.query).toHaveBeenCalledWith( expect.stringContaining("INSERT INTO"), expect.arrayContaining([ expect.any(BigInt), // key_hash @@ -164,17 +163,17 @@ describe("PersistentStorage", () => { const randomSpy = vi.spyOn(Math, "random").mockReturnValue(0.05); // totalBytes() returns maxBytes (triggers eviction) - mockConnector.query.mockResolvedValueOnce({ + mockPool.query.mockResolvedValueOnce({ rows: [{ total: String(1024 * 1024) }], // 1MB (at limit) }); // cleanupExpired returns 0 - mockConnector.query.mockResolvedValueOnce({ + mockPool.query.mockResolvedValueOnce({ rows: [{ count: "0" }], }); // eviction DELETE succeeds - mockConnector.query.mockResolvedValueOnce({ rows: [] }); + mockPool.query.mockResolvedValueOnce({ rows: [] }); // INSERT succeeds - mockConnector.query.mockResolvedValueOnce({ rows: [] }); + mockPool.query.mockResolvedValueOnce({ rows: [] }); await storage.set("new-key", { value: { data: "new" }, @@ -182,7 +181,7 @@ describe("PersistentStorage", () => { }); // Should have called DELETE for LRU eviction - expect(mockConnector.query).toHaveBeenCalledWith( + expect(mockPool.query).toHaveBeenCalledWith( expect.stringContaining("DELETE FROM"), expect.any(Array), ); @@ -195,7 +194,7 @@ describe("PersistentStorage", () => { const randomSpy = vi.spyOn(Math, "random").mockReturnValue(0.5); // INSERT succeeds - mockConnector.query.mockResolvedValueOnce({ rows: [] }); + mockPool.query.mockResolvedValueOnce({ rows: [] }); const value = { nested: { array: [1, 2, 3] } }; await storage.set("test-key", { @@ -203,7 +202,7 @@ describe("PersistentStorage", () => { expiry: Date.now() + 10000, }); - const insertCall = mockConnector.query.mock.calls.find((call) => + const insertCall = mockPool.query.mock.calls.find((call) => call[0].includes("INSERT"), ); @@ -219,15 +218,15 @@ describe("PersistentStorage", () => { describe("delete", () => { beforeEach(async () => { await storage.initialize(); - mockConnector.query.mockClear(); + mockPool.query.mockClear(); }); test("should delete entry by key_hash", async () => { - mockConnector.query.mockResolvedValueOnce({ rows: [] }); + mockPool.query.mockResolvedValueOnce({ rows: [] }); await storage.delete("test-key"); - expect(mockConnector.query).toHaveBeenCalledWith( + expect(mockPool.query).toHaveBeenCalledWith( expect.stringContaining("DELETE FROM"), [expect.any(BigInt)], // key_hash ); @@ -237,15 +236,15 @@ describe("PersistentStorage", () => { describe("clear", () => { beforeEach(async () => { await storage.initialize(); - mockConnector.query.mockClear(); + mockPool.query.mockClear(); }); test("should truncate table", async () => { - mockConnector.query.mockResolvedValueOnce({ rows: [] }); + 
mockPool.query.mockResolvedValueOnce({ rows: [] }); await storage.clear(); - expect(mockConnector.query).toHaveBeenCalledWith( + expect(mockPool.query).toHaveBeenCalledWith( expect.stringContaining("TRUNCATE TABLE"), ); }); @@ -254,25 +253,25 @@ describe("PersistentStorage", () => { describe("has", () => { beforeEach(async () => { await storage.initialize(); - mockConnector.query.mockClear(); + mockPool.query.mockClear(); }); test("should return true when key exists", async () => { - mockConnector.query.mockResolvedValueOnce({ + mockPool.query.mockResolvedValueOnce({ rows: [{ exists: true }], }); const result = await storage.has("test-key"); expect(result).toBe(true); - expect(mockConnector.query).toHaveBeenCalledWith( + expect(mockPool.query).toHaveBeenCalledWith( expect.stringContaining("SELECT EXISTS"), [expect.any(BigInt)], // key_hash ); }); test("should return false when key does not exist", async () => { - mockConnector.query.mockResolvedValueOnce({ + mockPool.query.mockResolvedValueOnce({ rows: [{ exists: false }], }); @@ -282,7 +281,7 @@ describe("PersistentStorage", () => { }); test("should return false when query returns no rows", async () => { - mockConnector.query.mockResolvedValueOnce({ rows: [] }); + mockPool.query.mockResolvedValueOnce({ rows: [] }); const result = await storage.has("test-key"); @@ -293,11 +292,11 @@ describe("PersistentStorage", () => { describe("size", () => { beforeEach(async () => { await storage.initialize(); - mockConnector.query.mockClear(); + mockPool.query.mockClear(); }); test("should return count of entries", async () => { - mockConnector.query.mockResolvedValueOnce({ + mockPool.query.mockResolvedValueOnce({ rows: [{ count: "42" }], }); @@ -307,7 +306,7 @@ describe("PersistentStorage", () => { }); test("should return 0 when empty", async () => { - mockConnector.query.mockResolvedValueOnce({ + mockPool.query.mockResolvedValueOnce({ rows: [{ count: "0" }], }); @@ -317,7 +316,7 @@ describe("PersistentStorage", () => { }); test("should return 0 when no rows", async () => { - mockConnector.query.mockResolvedValueOnce({ rows: [] }); + mockPool.query.mockResolvedValueOnce({ rows: [] }); const result = await storage.size(); @@ -328,11 +327,11 @@ describe("PersistentStorage", () => { describe("totalBytes", () => { beforeEach(async () => { await storage.initialize(); - mockConnector.query.mockClear(); + mockPool.query.mockClear(); }); test("should return sum of byte_size", async () => { - mockConnector.query.mockResolvedValueOnce({ + mockPool.query.mockResolvedValueOnce({ rows: [{ total: "1048576" }], // 1MB }); @@ -342,7 +341,7 @@ describe("PersistentStorage", () => { }); test("should return 0 when empty", async () => { - mockConnector.query.mockResolvedValueOnce({ + mockPool.query.mockResolvedValueOnce({ rows: [{ total: "0" }], }); @@ -355,25 +354,25 @@ describe("PersistentStorage", () => { describe("cleanupExpired", () => { beforeEach(async () => { await storage.initialize(); - mockConnector.query.mockClear(); + mockPool.query.mockClear(); }); test("should delete expired entries", async () => { - mockConnector.query.mockResolvedValueOnce({ + mockPool.query.mockResolvedValueOnce({ rows: [{ count: "5" }], }); const deleted = await storage.cleanupExpired(); expect(deleted).toBe(5); - expect(mockConnector.query).toHaveBeenCalledWith( + expect(mockPool.query).toHaveBeenCalledWith( expect.stringContaining("DELETE FROM"), expect.arrayContaining([expect.any(Number)]), ); }); test("should return 0 when no expired entries", async () => { - 
mockConnector.query.mockResolvedValueOnce({ + mockPool.query.mockResolvedValueOnce({ rows: [{ count: "0" }], }); @@ -388,27 +387,27 @@ describe("PersistentStorage", () => { expect(storage.isPersistent()).toBe(true); }); - test("should delegate healthCheck to connector", async () => { - mockConnector.healthCheck.mockResolvedValueOnce(true); + test("should implement healthCheck using pool query", async () => { + mockPool.query.mockResolvedValueOnce({ rows: [{ "?column?": 1 }] }); const result = await storage.healthCheck(); expect(result).toBe(true); - expect(mockConnector.healthCheck).toHaveBeenCalled(); + expect(mockPool.query).toHaveBeenCalledWith("SELECT 1"); }); test("should return false on healthCheck error", async () => { - mockConnector.healthCheck.mockRejectedValueOnce(new Error("failed")); + mockPool.query.mockRejectedValueOnce(new Error("Connection failed")); const result = await storage.healthCheck(); expect(result).toBe(false); }); - test("should close connector on close", async () => { + test("should close pool on close", async () => { await storage.close(); - expect(mockConnector.close).toHaveBeenCalled(); + expect(mockPool.end).toHaveBeenCalled(); }); }); @@ -416,15 +415,15 @@ describe("PersistentStorage", () => { test("should auto-initialize on get if not initialized", async () => { const uninitializedStorage = new PersistentStorage( { maxBytes: 1024 * 1024 }, - mockConnector as any, + mockPool as any, ); - mockConnector.query.mockResolvedValue({ rows: [] }); + mockPool.query.mockResolvedValue({ rows: [] }); await uninitializedStorage.get("test-key"); // Should have run migrations - expect(mockConnector.query).toHaveBeenCalledWith( + expect(mockPool.query).toHaveBeenCalledWith( expect.stringContaining("CREATE TABLE"), ); }); @@ -432,10 +431,10 @@ describe("PersistentStorage", () => { test("should auto-initialize on set if not initialized", async () => { const uninitializedStorage = new PersistentStorage( { maxBytes: 1024 * 1024 }, - mockConnector as any, + mockPool as any, ); - mockConnector.query.mockResolvedValue({ rows: [] }); + mockPool.query.mockResolvedValue({ rows: [] }); await uninitializedStorage.set("test-key", { value: "test", @@ -443,7 +442,7 @@ describe("PersistentStorage", () => { }); // Should have run migrations - expect(mockConnector.query).toHaveBeenCalledWith( + expect(mockPool.query).toHaveBeenCalledWith( expect.stringContaining("CREATE TABLE"), ); }); diff --git a/packages/appkit/src/connectors/index.ts b/packages/appkit/src/connectors/index.ts index 01d23cf2..fdb1cc69 100644 --- a/packages/appkit/src/connectors/index.ts +++ b/packages/appkit/src/connectors/index.ts @@ -1,2 +1,3 @@ +export * from "./lakebase"; export * from "./lakebase-v1"; export * from "./sql-warehouse"; diff --git a/packages/appkit/src/connectors/lakebase/auth-types.ts b/packages/appkit/src/connectors/lakebase/auth-types.ts new file mode 100644 index 00000000..9d9ba2d3 --- /dev/null +++ b/packages/appkit/src/connectors/lakebase/auth-types.ts @@ -0,0 +1,98 @@ +/** + * Authentication types for Lakebase Postgres OAuth token generation + * @see https://docs.databricks.com/aws/en/oltp/projects/authentication + */ + +/** + * Database credentials with OAuth token for Postgres connection + */ +export interface DatabaseCredential { + /** OAuth token to use as the password when connecting to Postgres */ + token: string; + + /** + * Token expiration time in UTC (ISO 8601 format) + * Tokens expire after 1 hour from generation + * @example "2026-02-06T17:07:00Z" + */ + expire_time: string; +} + 
+/**
+ * Permission set for Unity Catalog table access
+ */
+export enum RequestedClaimsPermissionSet {
+  /**
+   * Read-only access to specified UC tables
+   */
+  READ_ONLY = "READ_ONLY",
+}
+
+/**
+ * Resource to request permissions for in Unity Catalog
+ */
+export interface RequestedResource {
+  /**
+   * Unity Catalog table name to request access to
+   * @example "catalog.schema.table"
+   */
+  table_name?: string;
+
+  /**
+   * Generic resource name for non-table resources
+   */
+  unspecified_resource_name?: string;
+}
+
+/**
+ * Optional claims for fine-grained Unity Catalog table permissions
+ * When specified, the returned token will be scoped to only the requested tables
+ */
+export interface RequestedClaims {
+  /**
+   * Permission level to request
+   */
+  permission_set?: RequestedClaimsPermissionSet;
+
+  /**
+   * List of UC resources to request access to
+   */
+  resources?: RequestedResource[];
+}
+
+/**
+ * Request parameters for generating database OAuth credentials
+ */
+export interface GenerateDatabaseCredentialRequest {
+  /**
+   * Endpoint resource path with IDs assigned by Databricks.
+   *
+   * All segments are IDs from Databricks (not names you create):
+   * - project-id: UUID format (e.g., `a1b2c3d4-e5f6-4789-a012-b3c4d5e6f789`)
+   * - branch-id: Identifier from Databricks (e.g., `main`, `dev`)
+   * - endpoint-id: Identifier from Databricks (e.g., `primary`, `analytics`)
+   *
+   * Format: `projects/{project-id}/branches/{branch-id}/endpoints/{endpoint-id}`
+   *
+   * **Important:** Copy from Databricks Lakebase UI - do not construct manually.
+   *
+   * @example "projects/6bef4151-4b5d-4147-b4d0-c2f4fd5b40db/branches/br-sparkling-tree-y17uj7fn/endpoints/ep-restless-pine-y1ldaht0"
+   */
+  endpoint: string;
+
+  /**
+   * Optional claims for fine-grained UC table permissions.
+   * When specified, the token will only grant access to the specified tables.
+   *
+   * @example
+   * ```typescript
+   * {
+   *   claims: [{
+   *     permission_set: RequestedClaimsPermissionSet.READ_ONLY,
+   *     resources: [{ table_name: "catalog.schema.users" }]
+   *   }]
+   * }
+   * ```
+   */
+  claims?: RequestedClaims[];
+}
diff --git a/packages/appkit/src/connectors/lakebase/config.ts b/packages/appkit/src/connectors/lakebase/config.ts
new file mode 100644
index 00000000..3731b4c1
--- /dev/null
+++ b/packages/appkit/src/connectors/lakebase/config.ts
@@ -0,0 +1,146 @@
+import { WorkspaceClient } from "@databricks/sdk-experimental";
+import type pg from "pg";
+import { ConfigurationError, ValidationError } from "../../errors";
+import { lakebaseDefaults } from "./defaults";
+import type { LakebasePoolConfig } from "./types";
+
+const VALID_SSL_MODES = ["require", "disable", "prefer"] as const;
+type SslMode = (typeof VALID_SSL_MODES)[number];
+
+export interface ParsedPoolConfig {
+  endpoint?: string;
+  host: string;
+  database: string;
+  port: number;
+  sslMode: SslMode;
+  ssl?: pg.PoolConfig["ssl"];
+  max: number;
+  idleTimeoutMillis: number;
+  connectionTimeoutMillis: number;
+}
+
+/** Parse pool configuration from provided config and environment variables */
+export function parsePoolConfig(
+  userConfig?: Partial<LakebasePoolConfig>,
+): ParsedPoolConfig {
+  // Get endpoint (required only for OAuth auth)
+  const endpoint = userConfig?.endpoint ??
process.env.LAKEBASE_ENDPOINT;
+
+  // Only require endpoint if no password provided
+  if (!endpoint && !userConfig?.password) {
+    throw ConfigurationError.missingEnvVar(
+      "LAKEBASE_ENDPOINT or config.endpoint (or provide config.password for native auth)",
+    );
+  }
+
+  // Get host (required)
+  const host = userConfig?.host ?? process.env.PGHOST;
+  if (!host) {
+    throw ConfigurationError.missingEnvVar("PGHOST or config.host");
+  }
+
+  // Get database (required)
+  const database = userConfig?.database ?? process.env.PGDATABASE;
+  if (!database) {
+    throw ConfigurationError.missingEnvVar("PGDATABASE or config.database");
+  }
+
+  // Get port (optional, default from defaults)
+  const portStr = process.env.PGPORT;
+  const port =
+    userConfig?.port ??
+    (portStr ? Number.parseInt(portStr, 10) : lakebaseDefaults.port);
+
+  if (Number.isNaN(port)) {
+    throw ValidationError.invalidValue("port", portStr, "a number");
+  }
+
+  // Get SSL mode (optional, default from defaults)
+  const rawSslMode = userConfig?.sslMode ?? process.env.PGSSLMODE ?? undefined;
+
+  const sslMode = validateSslMode(rawSslMode) ?? lakebaseDefaults.sslMode;
+
+  // Pool options (with defaults)
+  const max = userConfig?.max ?? lakebaseDefaults.max;
+  const idleTimeoutMillis =
+    userConfig?.idleTimeoutMillis ?? lakebaseDefaults.idleTimeoutMillis;
+  const connectionTimeoutMillis =
+    userConfig?.connectionTimeoutMillis ??
+    lakebaseDefaults.connectionTimeoutMillis;
+
+  return {
+    endpoint,
+    host,
+    database,
+    port,
+    sslMode,
+    ssl: userConfig?.ssl,
+    max,
+    idleTimeoutMillis,
+    connectionTimeoutMillis,
+  };
+}
+
+/** Validate and return the SSL mode, or undefined when not set */
+function validateSslMode(value: string | undefined): SslMode | undefined {
+  if (value === undefined) {
+    return undefined;
+  }
+
+  if (!(VALID_SSL_MODES as readonly string[]).includes(value)) {
+    throw ValidationError.invalidValue(
+      "sslMode (PGSSLMODE)",
+      value,
+      `one of: ${VALID_SSL_MODES.join(", ")}`,
+    );
+  }
+
+  return value as SslMode;
+}
+
+/** Get workspace client from config or execution context */
+export async function getWorkspaceClient(
+  config: Partial<LakebasePoolConfig>,
+): Promise<WorkspaceClient> {
+  // Priority 1: Explicit workspaceClient in config
+  if (config.workspaceClient) {
+    return config.workspaceClient;
+  }
+
+  // Priority 2: ServiceContext (when running in AppKit plugin)
+  try {
+    const { getWorkspaceClient: getClient } = await import("../../context");
+    return getClient();
+  } catch (_error) {
+    // ServiceContext not available - fall through to environment variables
+  }
+
+  // Priority 3: Create with SDK default auth chain
+  // Use empty config to let SDK use .databrickscfg, DATABRICKS_HOST, DATABRICKS_TOKEN, etc.
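+  // Example (assumption): with DATABRICKS_HOST and DATABRICKS_TOKEN exported in
+  // the environment, the empty-config client below authenticates against that
+  // workspace with no further setup here.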
+  // NOTE: config.host is the PostgreSQL host (PGHOST), not the Databricks workspace host
+  return new WorkspaceClient({});
+}
+
+/** Get username synchronously from config or environment */
+export function getUsernameSync(config: Partial<LakebasePoolConfig>): string {
+  // Priority 1: Explicit user in config
+  if (config.user) {
+    return config.user;
+  }
+
+  // Priority 2: PGUSER environment variable
+  const pgUser = process.env.PGUSER;
+  if (pgUser) {
+    return pgUser;
+  }
+
+  // Priority 3: DATABRICKS_CLIENT_ID (service principal)
+  const clientId = process.env.DATABRICKS_CLIENT_ID;
+  if (clientId) {
+    return clientId;
+  }
+
+  throw ConfigurationError.missingEnvVar(
+    "PGUSER, DATABRICKS_CLIENT_ID, or config.user",
+  );
+}
diff --git a/packages/appkit/src/connectors/lakebase/defaults.ts b/packages/appkit/src/connectors/lakebase/defaults.ts
new file mode 100644
index 00000000..3153ce9d
--- /dev/null
+++ b/packages/appkit/src/connectors/lakebase/defaults.ts
@@ -0,0 +1,8 @@
+/** Default configuration values for the Lakebase connector */
+export const lakebaseDefaults = {
+  port: 5432,
+  sslMode: "require" as const,
+  max: 10,
+  idleTimeoutMillis: 30_000,
+  connectionTimeoutMillis: 10_000,
+};
diff --git a/packages/appkit/src/connectors/lakebase/index.ts b/packages/appkit/src/connectors/lakebase/index.ts
new file mode 100644
index 00000000..f0f060db
--- /dev/null
+++ b/packages/appkit/src/connectors/lakebase/index.ts
@@ -0,0 +1,16 @@
+export type {
+  DatabaseCredential,
+  GenerateDatabaseCredentialRequest,
+  RequestedClaims,
+  RequestedClaimsPermissionSet,
+  RequestedResource,
+} from "./auth-types";
+export { getWorkspaceClient } from "./config";
+export { createLakebasePool } from "./pool";
+export {
+  getLakebaseOrmConfig,
+  getLakebasePgConfig,
+  getLakebasePoolConfig,
+} from "./pool-config";
+export type { LakebasePoolConfig } from "./types";
+export { generateDatabaseCredential } from "./utils";
diff --git a/packages/appkit/src/connectors/lakebase/pool-config.ts b/packages/appkit/src/connectors/lakebase/pool-config.ts
new file mode 100644
index 00000000..1b26ffb0
--- /dev/null
+++ b/packages/appkit/src/connectors/lakebase/pool-config.ts
@@ -0,0 +1,115 @@
+import type pg from "pg";
+import { getUsernameSync, parsePoolConfig } from "./config";
+import { type DriverTelemetry, initTelemetry } from "./telemetry";
+import { createTokenRefreshCallback } from "./token-refresh";
+import type { LakebasePoolConfig } from "./types";
+import { mapSslConfig } from "./utils";
+
+/**
+ * Get Lakebase connection configuration for PostgreSQL clients.
+ *
+ * Returns pg.PoolConfig with OAuth token authentication configured.
+ * Best used with pg.Pool directly or ORMs that accept pg.Pool instances (like Drizzle).
+ *
+ * For ORMs that need connection parameters (TypeORM, Sequelize), use getLakebaseOrmConfig() instead.
+ *
+ * Used internally by createLakebasePool().
+ *
+ * @param config - Optional configuration (reads from environment if not provided)
+ * @param telemetry - Optional pre-initialized telemetry (created internally if not provided)
+ * @returns PostgreSQL pool configuration with OAuth token refresh
+ */
+export function getLakebasePgConfig(
+  config?: Partial<LakebasePoolConfig>,
+  telemetry?: DriverTelemetry,
+): pg.PoolConfig {
+  const userConfig = config ??
{};
+  const poolConfig = parsePoolConfig(userConfig);
+  const username = getUsernameSync(userConfig);
+
+  let passwordConfig: string | (() => string | Promise<string>) | undefined;
+
+  if (userConfig.password !== undefined) {
+    passwordConfig = userConfig.password;
+  } else if (poolConfig.endpoint) {
+    // endpoint is guaranteed here -- parsePoolConfig() throws if
+    // neither endpoint nor password is provided
+    passwordConfig = createTokenRefreshCallback({
+      userConfig,
+      endpoint: poolConfig.endpoint,
+      telemetry: telemetry ?? initTelemetry(userConfig),
+    });
+  }
+
+  return {
+    host: poolConfig.host,
+    port: poolConfig.port,
+    user: username,
+    database: poolConfig.database,
+    password: passwordConfig,
+    ssl: poolConfig.ssl ?? mapSslConfig(poolConfig.sslMode),
+    max: poolConfig.max,
+    idleTimeoutMillis: poolConfig.idleTimeoutMillis,
+    connectionTimeoutMillis: poolConfig.connectionTimeoutMillis,
+  };
+}
+
+/**
+ * Get Lakebase connection configuration for ORMs that don't accept pg.Pool directly.
+ *
+ * Designed for ORMs like TypeORM and Sequelize that need connection parameters
+ * rather than a pre-configured pool instance.
+ *
+ * Returns connection config with field names compatible with common ORMs:
+ * - `username` instead of `user`
+ * - Simplified SSL config
+ * - Password callback support for OAuth token refresh
+ *
+ * @param config - Optional configuration (reads from environment if not provided)
+ * @returns ORM-compatible connection configuration
+ *
+ * @example
+ * ```typescript
+ * // TypeORM
+ * const dataSource = new DataSource({
+ *   type: 'postgres',
+ *   ...getLakebaseOrmConfig(),
+ *   entities: [User],
+ *   synchronize: true,
+ * });
+ *
+ * // Sequelize
+ * const sequelize = new Sequelize({
+ *   dialect: 'postgres',
+ *   ...getLakebaseOrmConfig(),
+ *   logging: false,
+ * });
+ * ```
+ */
+export function getLakebaseOrmConfig(config?: Partial<LakebasePoolConfig>) {
+  const { user, password, ssl, ...pgConfig } = getLakebasePgConfig(config);
+
+  return {
+    ...pgConfig,
+    username: user,
+    password: password as
+      | string
+      | (() => string)
+      | (() => Promise<string>)
+      | undefined,
+    ssl: ssl
+      ? typeof ssl === "boolean"
+        ? ssl
+        : { rejectUnauthorized: ssl.rejectUnauthorized }
+      : false,
+  };
+}
+
+/**
+ * @deprecated Use getLakebasePgConfig() instead. This function will be removed in a future version.
+ */
+export function getLakebasePoolConfig(
+  config?: Partial<LakebasePoolConfig>,
+): pg.PoolConfig {
+  return getLakebasePgConfig(config);
+}
diff --git a/packages/appkit/src/connectors/lakebase/pool.ts b/packages/appkit/src/connectors/lakebase/pool.ts
new file mode 100644
index 00000000..811ed443
--- /dev/null
+++ b/packages/appkit/src/connectors/lakebase/pool.ts
@@ -0,0 +1,113 @@
+import pg from "pg";
+import { createLogger } from "../../logging/logger";
+import { getLakebasePgConfig } from "./pool-config";
+import { attachPoolMetrics, initTelemetry } from "./telemetry";
+import type { LakebasePoolConfig } from "./types";
+
+const logger = createLogger("connectors:lakebase:pool");
+
+/**
+ * Create a PostgreSQL connection pool with automatic OAuth token refresh for Lakebase.
+ *
+ * This function returns a standard `pg.Pool` instance configured with a password callback
+ * that automatically fetches and caches OAuth tokens from Databricks. The returned pool
+ * works with any ORM or library that accepts a `pg.Pool` (Drizzle, Prisma, TypeORM, etc.).
+ *
+ * @param config - Configuration options (optional, reads from environment if not provided)
+ * @returns Standard pg.Pool instance with OAuth token refresh
+ *
+ * @see https://docs.databricks.com/aws/en/oltp/projects/authentication
+ *
+ * @example Using environment variables
+ * ```typescript
+ * // Set: PGHOST, PGDATABASE, LAKEBASE_ENDPOINT
+ * const pool = createLakebasePool();
+ * const result = await pool.query('SELECT * FROM users');
+ * ```
+ *
+ * @example With explicit configuration
+ * ```typescript
+ * // Format: projects/{project-id}/branches/{branch-id}/endpoints/{endpoint-id}
+ * // Note: Use actual IDs from Databricks (project-id is a UUID)
+ * const pool = createLakebasePool({
+ *   endpoint: 'projects/6bef4151-4b5d-4147-b4d0-c2f4fd5b40db/branches/br-sparkling-tree-y17uj7fn/endpoints/ep-restless-pine-y1ldaht0',
+ *   host: 'ep-abc.databricks.com',
+ *   database: 'databricks_postgres',
+ *   user: 'service-principal-id'
+ * });
+ * ```
+ *
+ * @example With Drizzle ORM
+ * ```typescript
+ * import { drizzle } from 'drizzle-orm/node-postgres';
+ * const pool = createLakebasePool();
+ * const db = drizzle({ client: pool });
+ * ```
+ *
+ * @example With Prisma
+ * ```typescript
+ * import { PrismaPg } from '@prisma/adapter-pg';
+ * const pool = createLakebasePool();
+ * const adapter = new PrismaPg(pool);
+ * const prisma = new PrismaClient({ adapter });
+ * ```
+ */
+export function createLakebasePool(
+  config?: Partial<LakebasePoolConfig>,
+): pg.Pool {
+  const userConfig = config ?? {};
+
+  // Initialize telemetry once and thread it through to avoid duplicate instruments
+  const telemetry = initTelemetry(userConfig);
+
+  // Get complete pool config (connection + pool settings)
+  const poolConfig = getLakebasePgConfig(userConfig, telemetry);
+
+  // Create standard pg.Pool with the config
+  const pool = new pg.Pool(poolConfig);
+
+  // Attach pool-level telemetry metrics (gauges, error counter, and error logging)
+  attachPoolMetrics(pool, telemetry);
+
+  // Wrap pool.query to track query duration.
+  // pg.Pool.query has 15+ overloads that are difficult to type-preserve,
+  // so we use a loosely-typed wrapper and cast back.
+  const origQuery = pool.query.bind(pool);
+  pool.query = function queryWithMetrics(
+    ...args: unknown[]
+  ): ReturnType<typeof pool.query> {
+    const start = Date.now();
+    const firstArg = args[0];
+    const sql =
+      typeof firstArg === "string"
+        ? firstArg
+        : (firstArg as { text?: string } | undefined)?.text;
+    const attrs = {
+      "db.statement": sql ?
sql.substring(0, 100) : "unknown",
+    };
+
+    const result = (
+      origQuery as (...a: unknown[]) => Promise<unknown> | undefined
+    )(...args);
+
+    // Promise-based query: record duration on completion
+    if (result && typeof result.finally === "function") {
+      return result.finally(() => {
+        telemetry.queryDuration.record(Date.now() - start, attrs);
+      }) as unknown as ReturnType<typeof pool.query>;
+    }
+
+    // Callback-based query (void return): duration is approximate
+    telemetry.queryDuration.record(Date.now() - start, attrs);
+    return result as ReturnType<typeof pool.query>;
+  } as typeof pool.query;
+
+  logger.info(
+    "Created Lakebase connection pool for %s@%s/%s",
+    poolConfig.user,
+    poolConfig.host,
+    poolConfig.database,
+  );
+
+  return pool;
+}
diff --git a/packages/appkit/src/connectors/lakebase/telemetry.ts b/packages/appkit/src/connectors/lakebase/telemetry.ts
new file mode 100644
index 00000000..c6c685a9
--- /dev/null
+++ b/packages/appkit/src/connectors/lakebase/telemetry.ts
@@ -0,0 +1,91 @@
+import type pg from "pg";
+import {
+  type Counter,
+  type Histogram,
+  TelemetryManager,
+  type TelemetryProvider,
+} from "@/telemetry";
+import { createLogger } from "../../logging/logger";
+import type { LakebasePoolConfig } from "./types";
+
+const logger = createLogger("connectors:lakebase:pool");
+
+/** Telemetry instruments shared across the driver */
+export interface DriverTelemetry {
+  provider: TelemetryProvider;
+  tokenRefreshDuration: Histogram;
+  queryDuration: Histogram;
+  poolErrors: Counter;
+}
+
+/** Create telemetry provider and metric instruments */
+export function initTelemetry(
+  config: Partial<LakebasePoolConfig>,
+): DriverTelemetry {
+  const provider = TelemetryManager.getProvider(
+    "connectors:lakebase",
+    config.telemetry,
+  );
+  const meter = provider.getMeter();
+
+  return {
+    provider,
+    tokenRefreshDuration: meter.createHistogram(
+      "lakebase.token.refresh.duration",
+      {
+        description: "Duration of OAuth token refresh operations",
+        unit: "ms",
+      },
+    ),
+    queryDuration: meter.createHistogram("lakebase.query.duration", {
+      description: "Duration of queries executed via pool.query",
+      unit: "ms",
+    }),
+    poolErrors: meter.createCounter("lakebase.pool.errors", {
+      description: "Connection pool errors by error code",
+      unit: "1",
+    }),
+  };
+}
+
+/**
+ * Attach pool-level metrics collection, error counting, and error logging.
+ *
+ * Uses observable gauges (pull model) for pool connection stats -- the OTEL SDK
+ * reads pool counts at collection time, requiring no timers or cleanup.
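+ *
+ * @example What each gauge observes (sketch; `pool` is any pg.Pool)
+ * ```typescript
+ * // The three gauge callbacks read exactly these pg.Pool counters
+ * // at every OTEL collection cycle:
+ * pool.totalCount;   // -> lakebase.pool.connections.total
+ * pool.idleCount;    // -> lakebase.pool.connections.idle
+ * pool.waitingCount; // -> lakebase.pool.connections.waiting
+ * ```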
+ */
+export function attachPoolMetrics(
+  pool: pg.Pool,
+  telemetry: DriverTelemetry,
+): void {
+  const meter = telemetry.provider.getMeter();
+
+  const poolTotal = meter.createObservableGauge(
+    "lakebase.pool.connections.total",
+    { description: "Total connections in the pool" },
+  );
+  const poolIdle = meter.createObservableGauge(
+    "lakebase.pool.connections.idle",
+    { description: "Idle connections in the pool" },
+  );
+  const poolWaiting = meter.createObservableGauge(
+    "lakebase.pool.connections.waiting",
+    { description: "Clients waiting for a connection" },
+  );
+
+  poolTotal.addCallback((result) => result.observe(pool.totalCount));
+  poolIdle.addCallback((result) => result.observe(pool.idleCount));
+  poolWaiting.addCallback((result) => result.observe(pool.waitingCount));
+
+  // Single error handler for both logging and metrics
+  pool.on("error", (error: Error & { code?: string }) => {
+    logger.error(
+      "Connection pool error: %s (code: %s)",
+      error.message,
+      error.code,
+    );
+    telemetry.poolErrors.add(1, {
+      "error.code": error.code ?? "unknown",
+    });
+  });
+}
diff --git a/packages/appkit/src/connectors/lakebase/token-refresh.ts b/packages/appkit/src/connectors/lakebase/token-refresh.ts
new file mode 100644
index 00000000..ff4e6d5a
--- /dev/null
+++ b/packages/appkit/src/connectors/lakebase/token-refresh.ts
@@ -0,0 +1,127 @@
+import type { WorkspaceClient } from "@databricks/sdk-experimental";
+import { SpanStatusCode } from "@/telemetry";
+import { createLogger } from "../../logging/logger";
+import { getWorkspaceClient } from "./config";
+import type { DriverTelemetry } from "./telemetry";
+import type { LakebasePoolConfig } from "./types";
+import { generateDatabaseCredential } from "./utils";
+
+const logger = createLogger("connectors:lakebase:token");
+
+// 2-minute buffer before token expiration to prevent race conditions
+// Lakebase tokens expire after 1 hour, so we refresh when ~58 minutes remain
+const CACHE_BUFFER_MS = 2 * 60 * 1000;
+
+export interface TokenRefreshDeps {
+  userConfig: Partial<LakebasePoolConfig>;
+  endpoint: string;
+  telemetry: DriverTelemetry;
+}
+
+/** Fetch a fresh OAuth token from Databricks */
+async function refreshToken(
+  workspaceClient: WorkspaceClient,
+  endpoint: string,
+): Promise<{ token: string; expiresAt: number }> {
+  const credential = await generateDatabaseCredential(workspaceClient, {
+    endpoint,
+  });
+
+  return {
+    token: credential.token,
+    expiresAt: new Date(credential.expire_time).getTime(),
+  };
+}
+
+/**
+ * Build the password callback with token caching, deduplication, and telemetry.
+ *
+ * The returned async function is called by `pg.Pool` each time a new connection
+ * is established. It caches OAuth tokens and deduplicates concurrent refresh
+ * requests so only one API call is made even under parallel connection creation.
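+ *
+ * @example Manual wiring (sketch -- createLakebasePool() does this internally;
+ * `endpoint` and `telemetry` are assumed to be in scope)
+ * ```typescript
+ * const password = createTokenRefreshCallback({ userConfig: {}, endpoint, telemetry });
+ * const pool = new pg.Pool({ host, port, user, database, password });
+ * ```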
+ */
+export function createTokenRefreshCallback(
+  deps: TokenRefreshDeps,
+): () => Promise<string> {
+  let cachedToken: string | undefined;
+  let tokenExpiresAt = 0;
+  let workspaceClient: WorkspaceClient | null = null;
+  let refreshPromise: Promise<string> | null = null;
+
+  return async (): Promise<string> => {
+    // Lazily initialize workspace client on first password fetch
+    if (!workspaceClient) {
+      try {
+        workspaceClient = await getWorkspaceClient(deps.userConfig);
+      } catch (error) {
+        logger.error("Failed to initialize workspace client: %O", error);
+        throw error;
+      }
+    }
+
+    const now = Date.now();
+    const hasValidToken = cachedToken && now < tokenExpiresAt - CACHE_BUFFER_MS;
+    if (hasValidToken) {
+      // Return cached token if still valid (with buffer)
+      const expiresIn = Math.round((tokenExpiresAt - now) / 1000 / 60);
+      logger.debug(
+        "Using cached OAuth token (expires in %d minutes at %s)",
+        expiresIn,
+        new Date(tokenExpiresAt).toISOString(),
+      );
+      return cachedToken as string;
+    }
+
+    const client = workspaceClient;
+
+    // Deduplicate concurrent refresh requests
+    if (!refreshPromise) {
+      refreshPromise = (async () => {
+        const startTime = Date.now();
+        try {
+          const result = await deps.telemetry.provider.startActiveSpan(
+            "lakebase.token.refresh",
+            {
+              attributes: { "lakebase.endpoint": deps.endpoint },
+            },
+            async (span) => {
+              const tokenResult = await refreshToken(client, deps.endpoint);
+              span.setAttribute(
+                "lakebase.token.expires_at",
+                new Date(tokenResult.expiresAt).toISOString(),
+              );
+              span.setStatus({ code: SpanStatusCode.OK });
+              span.end();
+              return tokenResult;
+            },
+          );
+
+          cachedToken = result.token;
+          tokenExpiresAt = result.expiresAt;
+
+          const duration = Date.now() - startTime;
+          const expiresAt = new Date(result.expiresAt).toISOString();
+          logger.info(
+            "OAuth token refreshed successfully in %dms (expires at %s)",
+            duration,
+            expiresAt,
+          );
+
+          return cachedToken;
+        } catch (error) {
+          logger.error("Failed to fetch OAuth token: %O", {
+            error,
+            message: error instanceof Error ? error.message : String(error),
+            endpoint: deps.endpoint,
+          });
+          throw error;
+        } finally {
+          deps.telemetry.tokenRefreshDuration.record(Date.now() - startTime);
+          refreshPromise = null;
+        }
+      })();
+    }
+
+    return refreshPromise;
+  };
+}
diff --git a/packages/appkit/src/connectors/lakebase/types.ts b/packages/appkit/src/connectors/lakebase/types.ts
new file mode 100644
index 00000000..1c9875d6
--- /dev/null
+++ b/packages/appkit/src/connectors/lakebase/types.ts
@@ -0,0 +1,58 @@
+import type { WorkspaceClient } from "@databricks/sdk-experimental";
+import type { PoolConfig } from "pg";
+import type { TelemetryOptions } from "shared";
+
+/**
+ * Configuration for creating a Lakebase connection pool
+ *
+ * Supports two authentication methods:
+ * 1. OAuth token authentication - Provide workspaceClient + endpoint (automatic token rotation)
+ * 2. Native Postgres password authentication - Provide password string or function
+ *
+ * Extends pg.PoolConfig to support all standard PostgreSQL pool options.
+ *
+ * @see https://docs.databricks.com/aws/en/oltp/projects/authentication
+ */
+export interface LakebasePoolConfig extends PoolConfig {
+  /**
+   * Databricks workspace client for OAuth authentication
+   * If not provided along with endpoint, will attempt to use ServiceContext
+   *
+   * Note: If password is provided, OAuth auth is not used
+   */
+  workspaceClient?: WorkspaceClient;
+
+  /**
+   * Endpoint resource path for OAuth token generation.
+ * + * All segments are IDs assigned by Databricks (not names you create): + * - project-id: UUID format (e.g., `a1b2c3d4-e5f6-4789-a012-b3c4d5e6f789`) + * - branch-id: Identifier from Databricks (e.g., `main`, `dev`) + * - endpoint-id: Identifier from Databricks (e.g., `primary`, `analytics`) + * + * Format: `projects/{project-id}/branches/{branch-id}/endpoints/{endpoint-id}` + * + * Required for OAuth authentication (unless password is provided) + * Can also be set via LAKEBASE_ENDPOINT environment variable + * + * @example "projects/6bef4151-4b5d-4147-b4d0-c2f4fd5b40db/branches/br-sparkling-tree-y17uj7fn/endpoints/ep-restless-pine-y1ldaht0" + */ + endpoint?: string; + + /** + * SSL mode for the connection (convenience helper) + * Can also be set via PGSSLMODE environment variable + * + * @default "require" + */ + sslMode?: "require" | "disable" | "prefer"; + + /** + * Telemetry configuration + * + * - `true` or omitted: enable all telemetry (traces, metrics, logs) -- no-op when OTEL is not configured + * - `false`: disable all telemetry + * - `{ traces?, metrics?, logs? }`: fine-grained control + */ + telemetry?: TelemetryOptions; +} diff --git a/packages/appkit/src/connectors/lakebase/utils.ts b/packages/appkit/src/connectors/lakebase/utils.ts new file mode 100644 index 00000000..4de4a34a --- /dev/null +++ b/packages/appkit/src/connectors/lakebase/utils.ts @@ -0,0 +1,147 @@ +import type { WorkspaceClient } from "@databricks/sdk-experimental"; +import type pg from "pg"; +import { ValidationError } from "../../errors"; +import { createLogger } from "../../logging/logger"; +import type { + DatabaseCredential, + GenerateDatabaseCredentialRequest, +} from "./auth-types"; + +const logger = createLogger("connectors:lakebase:utils"); + +/** + * Map an SSL mode string to the corresponding `pg` SSL configuration. + * + * - `"require"` -- SSL enabled with certificate verification + * - `"prefer"` -- SSL enabled without certificate verification (try SSL, accept any cert) + * - `"disable"` -- SSL disabled + * + * @param sslMode - The SSL mode to map + * @returns pg-compatible SSL config value + */ +export function mapSslConfig( + sslMode: "require" | "prefer" | "disable", +): pg.PoolConfig["ssl"] { + switch (sslMode) { + case "require": + return { rejectUnauthorized: true }; + case "prefer": + return { rejectUnauthorized: false }; + case "disable": + return false; + } +} + +/** + * Generate OAuth credentials for Postgres database connection using the proper Postgres API. + * + * This generates a time-limited OAuth token (expires after 1 hour) that can be used + * as a password when connecting to Lakebase Postgres databases. 
+ *
+ * @param workspaceClient - Databricks workspace client for authentication
+ * @param request - Request parameters including endpoint path and optional UC claims
+ * @returns Database credentials with OAuth token and expiration time
+ *
+ * @see https://docs.databricks.com/aws/en/oltp/projects/authentication
+ *
+ * @example
+ * ```typescript
+ * // Format: projects/{project-id}/branches/{branch-id}/endpoints/{endpoint-id}
+ * // Note: Use actual IDs from Databricks (project-id is a UUID)
+ * const credential = await generateDatabaseCredential(workspaceClient, {
+ *   endpoint: "projects/6bef4151-4b5d-4147-b4d0-c2f4fd5b40db/branches/br-sparkling-tree-y17uj7fn/endpoints/ep-restless-pine-y1ldaht0"
+ * });
+ *
+ * // Use credential.token as password
+ * const client = new pg.Client({
+ *   host: "ep-abc123.database.us-east-1.databricks.com",
+ *   user: "user@example.com",
+ *   password: credential.token
+ * });
+ * await client.connect();
+ * ```
+ *
+ * @example With UC table permissions
+ * ```typescript
+ * // Format: projects/{project-id}/branches/{branch-id}/endpoints/{endpoint-id}
+ * const credential = await generateDatabaseCredential(workspaceClient, {
+ *   endpoint: "projects/6bef4151-4b5d-4147-b4d0-c2f4fd5b40db/branches/br-sparkling-tree-y17uj7fn/endpoints/ep-restless-pine-y1ldaht0",
+ *   claims: [{
+ *     permission_set: RequestedClaimsPermissionSet.READ_ONLY,
+ *     resources: [{ table_name: "catalog.schema.users" }]
+ *   }]
+ * });
+ * ```
+ */
+export async function generateDatabaseCredential(
+  workspaceClient: WorkspaceClient,
+  request: GenerateDatabaseCredentialRequest,
+): Promise<DatabaseCredential> {
+  const apiPath = "/api/2.0/postgres/credentials";
+
+  // Get workspace ID from execution context or environment
+  let workspaceId: string | undefined;
+  try {
+    const { getWorkspaceId } = await import("../../context");
+    workspaceId = await getWorkspaceId();
+  } catch {
+    workspaceId = process.env.DATABRICKS_WORKSPACE_ID;
+  }
+
+  try {
+    const headers = new Headers({
+      Accept: "application/json",
+      "Content-Type": "application/json",
+    });
+
+    // Manually add X-Databricks-Org-Id header if workspace ID is available
+    // The SDK's automatic header addition doesn't work because config.workspaceId isn't set
+    if (workspaceId) {
+      headers.set("X-Databricks-Org-Id", workspaceId);
+    }
+
+    const response = await workspaceClient.apiClient.request({
+      path: apiPath,
+      method: "POST",
+      headers,
+      raw: false,
+      payload: request,
+    });
+
+    return validateCredentialResponse(response);
+  } catch (error) {
+    logger.error("Failed to generate database credential: %O", {
+      error,
+      message: error instanceof Error ?
error.message : String(error),
+      endpoint: request.endpoint,
+    });
+    throw error;
+  }
+}
+
+/** Validate the API response has the expected shape */
+function validateCredentialResponse(response: unknown): DatabaseCredential {
+  if (
+    typeof response !== "object" ||
+    response === null ||
+    !("token" in response) ||
+    !("expire_time" in response)
+  ) {
+    throw ValidationError.invalidValue(
+      "credential response",
+      response,
+      "an object with { token, expire_time }",
+    );
+  }
+
+  const { token, expire_time } = response as Record<string, unknown>;
+
+  if (typeof token !== "string" || typeof expire_time !== "string") {
+    throw ValidationError.invalidValue(
+      "credential response fields",
+      { tokenType: typeof token, expireTimeType: typeof expire_time },
+      "token and expire_time to be strings",
+    );
+  }
+
+  return { token, expire_time };
+}
diff --git a/packages/appkit/src/connectors/tests/lakebase-auth.test.ts b/packages/appkit/src/connectors/tests/lakebase-auth.test.ts
new file mode 100644
index 00000000..edc3f766
--- /dev/null
+++ b/packages/appkit/src/connectors/tests/lakebase-auth.test.ts
@@ -0,0 +1,187 @@
+import type { WorkspaceClient } from "@databricks/sdk-experimental";
+import { ApiClient, Config } from "@databricks/sdk-experimental";
+import { beforeEach, describe, expect, it, vi } from "vitest";
+import {
+  type DatabaseCredential,
+  RequestedClaimsPermissionSet,
+} from "../lakebase/auth-types";
+import { generateDatabaseCredential } from "../lakebase/utils";
+
+// Mock the @databricks/sdk-experimental module
+vi.mock("@databricks/sdk-experimental", () => {
+  const mockRequest = vi.fn();
+
+  return {
+    Config: vi.fn(),
+    ApiClient: vi.fn().mockImplementation(() => ({
+      request: mockRequest,
+    })),
+  };
+});
+
+describe("Lakebase Authentication", () => {
+  let mockWorkspaceClient: WorkspaceClient;
+  let mockApiClient: ApiClient;
+
+  beforeEach(() => {
+    vi.clearAllMocks();
+
+    // Get the mocked ApiClient constructor
+    const ApiClientConstructor = ApiClient as unknown as ReturnType<
+      typeof vi.fn
+    >;
+    mockApiClient = new ApiClientConstructor(
+      new Config({ host: "https://test.databricks.com" }),
+    );
+
+    // Setup mock workspace client with apiClient
+    mockWorkspaceClient = {
+      config: {
+        host: "https://test.databricks.com",
+      },
+      apiClient: mockApiClient,
+    } as WorkspaceClient;
+  });
+
+  describe("generateDatabaseCredential", () => {
+    it("should generate database credentials with proper endpoint format", async () => {
+      const mockCredential: DatabaseCredential = {
+        token: "mock-oauth-token-abc123",
+        expire_time: "2026-02-06T18:00:00Z",
+      };
+
+      // Setup mock response
+      vi.mocked(mockApiClient.request).mockResolvedValue(mockCredential);
+
+      const credential = await generateDatabaseCredential(mockWorkspaceClient, {
+        endpoint: "projects/test-project/branches/main/endpoints/primary",
+      });
+
+      // Verify API call
+      expect(mockApiClient.request).toHaveBeenCalledWith({
+        path: "/api/2.0/postgres/credentials",
+        method: "POST",
+        headers: expect.any(Headers),
+        raw: false,
+        payload: {
+          endpoint: "projects/test-project/branches/main/endpoints/primary",
+        },
+      });
+
+      // Verify response
+      expect(credential).toEqual(mockCredential);
+      expect(credential.token).toBe("mock-oauth-token-abc123");
+      expect(credential.expire_time).toBe("2026-02-06T18:00:00Z");
+    });
+
+    it("should include claims when provided", async () => {
+      const mockCredential: DatabaseCredential = {
+        token: "mock-oauth-token-with-claims",
+        expire_time: "2026-02-06T18:00:00Z",
+      };
+
vi.mocked(mockApiClient.request).mockResolvedValue(mockCredential); + + await generateDatabaseCredential(mockWorkspaceClient, { + endpoint: "projects/test-project/branches/main/endpoints/primary", + claims: [ + { + permission_set: RequestedClaimsPermissionSet.READ_ONLY, + resources: [ + { table_name: "catalog.schema.users" }, + { table_name: "catalog.schema.orders" }, + ], + }, + ], + }); + + // Verify claims are included in payload + expect(mockApiClient.request).toHaveBeenCalledWith({ + path: "/api/2.0/postgres/credentials", + method: "POST", + headers: expect.any(Headers), + raw: false, + payload: { + endpoint: "projects/test-project/branches/main/endpoints/primary", + claims: [ + { + permission_set: RequestedClaimsPermissionSet.READ_ONLY, + resources: [ + { table_name: "catalog.schema.users" }, + { table_name: "catalog.schema.orders" }, + ], + }, + ], + }, + }); + }); + + it("should handle token expiration time parsing", async () => { + const futureTime = new Date(Date.now() + 60 * 60 * 1000).toISOString(); // 1 hour from now + const mockCredential: DatabaseCredential = { + token: "mock-token", + expire_time: futureTime, + }; + + vi.mocked(mockApiClient.request).mockResolvedValue(mockCredential); + + const credential = await generateDatabaseCredential(mockWorkspaceClient, { + endpoint: "projects/test-project/branches/main/endpoints/primary", + }); + + // Verify expiration time is in the future + const expiresAt = new Date(credential.expire_time).getTime(); + expect(expiresAt).toBeGreaterThan(Date.now()); + }); + + it("should handle API errors gracefully", async () => { + const mockError = new Error("API request failed"); + vi.mocked(mockApiClient.request).mockRejectedValue(mockError); + + await expect( + generateDatabaseCredential(mockWorkspaceClient, { + endpoint: "projects/invalid/branches/main/endpoints/primary", + }), + ).rejects.toThrow("API request failed"); + }); + + it("should use correct workspace host for API calls", async () => { + const customHost = "https://custom-workspace.databricks.com"; + + // Create a new mock API client for the custom workspace + const ApiClientConstructor = ApiClient as unknown as ReturnType< + typeof vi.fn + >; + const customApiClient = new ApiClientConstructor( + new Config({ host: customHost }), + ); + + const customWorkspaceClient = { + config: { host: customHost }, + apiClient: customApiClient, + } as WorkspaceClient; + + const mockCredential: DatabaseCredential = { + token: "mock-token", + expire_time: "2026-02-06T18:00:00Z", + }; + + vi.mocked(customApiClient.request).mockResolvedValue(mockCredential); + + await generateDatabaseCredential(customWorkspaceClient, { + endpoint: "projects/test/branches/main/endpoints/primary", + }); + + // Verify the request was made with the correct workspace client + expect(customApiClient.request).toHaveBeenCalledWith({ + path: "/api/2.0/postgres/credentials", + method: "POST", + headers: expect.any(Headers), + raw: false, + payload: { + endpoint: "projects/test/branches/main/endpoints/primary", + }, + }); + }); + }); +}); diff --git a/packages/appkit/src/connectors/tests/lakebase-pool.test.ts b/packages/appkit/src/connectors/tests/lakebase-pool.test.ts new file mode 100644 index 00000000..66c92111 --- /dev/null +++ b/packages/appkit/src/connectors/tests/lakebase-pool.test.ts @@ -0,0 +1,712 @@ +import { afterEach, beforeEach, describe, expect, test, vi } from "vitest"; +import { createLakebasePool } from "../lakebase"; + +// ── Mocks ──────────────────────────────────────────────────────────── + +// Mock pg 
module
+vi.mock("pg", () => {
+  const mockQuery = vi.fn();
+  const mockConnect = vi.fn();
+  const mockEnd = vi.fn().mockResolvedValue(undefined);
+  const mockOn = vi.fn();
+
+  const MockPool = vi.fn((config) => ({
+    query: mockQuery,
+    connect: mockConnect,
+    end: mockEnd,
+    on: mockOn,
+    options: config, // Store config for inspection
+    totalCount: 3,
+    idleCount: 1,
+    waitingCount: 0,
+  }));
+
+  return {
+    default: { Pool: MockPool },
+    Pool: MockPool,
+    __mockQuery: mockQuery,
+    __mockConnect: mockConnect,
+    __mockEnd: mockEnd,
+    __mockOn: mockOn,
+    __MockPool: MockPool,
+  };
+});
+
+// Mock generateDatabaseCredential
+vi.mock("../lakebase/utils", async (importOriginal) => {
+  const actual = await importOriginal();
+  return {
+    ...actual,
+    generateDatabaseCredential: vi.fn(),
+  };
+});
+
+// Mock telemetry - create spies for all metric instruments
+const mockSpanEnd = vi.fn();
+const mockSpanSetAttribute = vi.fn();
+const mockSpanSetStatus = vi.fn();
+const mockCounterAdd = vi.fn();
+const mockHistogramRecord = vi.fn();
+const mockAddCallback = vi.fn();
+
+vi.mock("@/telemetry", () => ({
+  SpanStatusCode: { OK: 1, ERROR: 2 },
+  TelemetryManager: {
+    getProvider: vi.fn(() => ({
+      getMeter: vi.fn(() => ({
+        createCounter: vi.fn(() => ({ add: mockCounterAdd })),
+        createHistogram: vi.fn(() => ({ record: mockHistogramRecord })),
+        createObservableGauge: vi.fn(() => ({
+          addCallback: mockAddCallback,
+        })),
+      })),
+      startActiveSpan: vi.fn(
+        async (
+          _name: string,
+          _opts: unknown,
+          fn: (span: unknown) => Promise<unknown>,
+        ) => {
+          const span = {
+            setAttribute: mockSpanSetAttribute,
+            setStatus: mockSpanSetStatus,
+            end: mockSpanEnd,
+            recordException: vi.fn(),
+          };
+          return fn(span);
+        },
+      ),
+    })),
+  },
+}));
+
+// ── Test suite ───────────────────────────────────────────────────────
+
+describe("createLakebasePool", () => {
+  let mockGenerateCredential: ReturnType<typeof vi.fn>;
+
+  // Save original env vars to restore after each test
+  const originalEnv: Record<string, string | undefined> = {};
+  const envKeysUsed = [
+    "PGHOST",
+    "PGDATABASE",
+    "LAKEBASE_ENDPOINT",
+    "PGUSER",
+    "PGPORT",
+    "PGSSLMODE",
+    "DATABRICKS_CLIENT_ID",
+  ];
+
+  beforeEach(async () => {
+    vi.clearAllMocks();
+
+    // Save original env vars
+    for (const key of envKeysUsed) {
+      originalEnv[key] = process.env[key];
+    }
+
+    // Setup environment variables
+    process.env.PGHOST = "ep-test.database.us-east-1.databricks.com";
+    process.env.PGDATABASE = "databricks_postgres";
+    process.env.LAKEBASE_ENDPOINT =
+      "projects/test-project/branches/main/endpoints/primary";
+    process.env.PGUSER = "test-user@example.com";
+
+    // Setup mock for generateDatabaseCredential
+    const utils = await import("../lakebase/utils");
+    mockGenerateCredential = utils.generateDatabaseCredential as any;
+    mockGenerateCredential.mockResolvedValue({
+      token: "test-oauth-token-12345",
+      expire_time: new Date(Date.now() + 3600000).toISOString(), // 1 hour from now
+    });
+  });
+
+  afterEach(() => {
+    // Restore original env vars
+    for (const key of envKeysUsed) {
+      if (originalEnv[key] === undefined) {
+        delete process.env[key];
+      } else {
+        process.env[key] = originalEnv[key];
+      }
+    }
+  });
+
+  describe("configuration", () => {
+    test("should create pool with environment variables", () => {
+      const pool = createLakebasePool({
+        workspaceClient: {} as any,
+      });
+
+      expect(pool).toBeDefined();
+      expect(pool.options.host).toBe(
+        "ep-test.database.us-east-1.databricks.com",
+      );
+      expect(pool.options.database).toBe("databricks_postgres");
expect(pool.options.user).toBe("test-user@example.com"); + expect(pool.options.port).toBe(5432); + }); + + test("should create pool with explicit configuration", () => { + const pool = createLakebasePool({ + workspaceClient: {} as any, + endpoint: "projects/my-project/branches/dev/endpoints/test", + host: "ep-custom.database.us-west-2.databricks.com", + database: "custom_db", + user: "custom-user@example.com", // Explicit user overrides env + port: 5433, + max: 20, + }); + + expect(pool.options.host).toBe( + "ep-custom.database.us-west-2.databricks.com", + ); + expect(pool.options.database).toBe("custom_db"); + expect(pool.options.user).toBe("custom-user@example.com"); + expect(pool.options.port).toBe(5433); + expect(pool.options.max).toBe(20); + }); + + test("should throw error when endpoint is missing", () => { + delete process.env.LAKEBASE_ENDPOINT; + + expect(() => + createLakebasePool({ + workspaceClient: {} as any, + }), + ).toThrow("LAKEBASE_ENDPOINT or config.endpoint"); + }); + + test("should throw error when host is missing", () => { + delete process.env.PGHOST; + + expect(() => + createLakebasePool({ + workspaceClient: {} as any, + }), + ).toThrow("PGHOST or config.host"); + }); + + test("should throw error when database is missing", () => { + delete process.env.PGDATABASE; + + expect(() => + createLakebasePool({ + workspaceClient: {} as any, + }), + ).toThrow("PGDATABASE or config.database"); + }); + + test("should throw error when user is missing", () => { + delete process.env.PGUSER; + delete process.env.DATABRICKS_CLIENT_ID; + + expect(() => + createLakebasePool({ + workspaceClient: {} as any, + }), + ).toThrow("PGUSER, DATABRICKS_CLIENT_ID, or config.user"); + }); + + test("should use DATABRICKS_CLIENT_ID as fallback for user", () => { + delete process.env.PGUSER; + process.env.DATABRICKS_CLIENT_ID = "service-principal-123"; + + const pool = createLakebasePool({ + workspaceClient: {} as any, + }); + + expect(pool.options.user).toBe("service-principal-123"); + }); + + test("should use default values for optional config", () => { + const pool = createLakebasePool({ + workspaceClient: {} as any, + }); + + expect(pool.options.port).toBe(5432); + expect(pool.options.max).toBe(10); + expect(pool.options.idleTimeoutMillis).toBe(30000); + expect(pool.options.connectionTimeoutMillis).toBe(10000); + }); + + test("should configure SSL based on sslMode", () => { + const pool = createLakebasePool({ + workspaceClient: {} as any, + sslMode: "require", + }); + + expect(pool.options.ssl).toEqual({ rejectUnauthorized: true }); + }); + + test("should allow custom SSL configuration", () => { + const customSSL = { rejectUnauthorized: false, ca: "custom-ca" }; + const pool = createLakebasePool({ + workspaceClient: {} as any, + ssl: customSSL, + }); + + expect(pool.options.ssl).toEqual(customSSL); + }); + + test("should throw on invalid PGSSLMODE", () => { + process.env.PGSSLMODE = "verify-full"; + + expect(() => + createLakebasePool({ + workspaceClient: {} as any, + }), + ).toThrow("one of: require, disable, prefer"); + }); + + test("should accept valid PGSSLMODE values", () => { + for (const mode of ["require", "disable", "prefer"]) { + process.env.PGSSLMODE = mode; + + expect(() => + createLakebasePool({ + workspaceClient: {} as any, + }), + ).not.toThrow(); + } + }); + }); + + describe("password callback", () => { + test("should configure password as async function", () => { + const pool = createLakebasePool({ + workspaceClient: {} as any, + }); + + expect(typeof 
pool.options.password).toBe("function");
+    });
+
+    test("should fetch OAuth token when password callback is invoked", async () => {
+      const workspaceClient = {
+        test: "client",
+        config: { host: "test" },
+      } as any;
+      const pool = createLakebasePool({
+        workspaceClient,
+        endpoint: "projects/test/branches/main/endpoints/primary",
+      });
+
+      // Invoke the password callback
+      const passwordFn = pool.options.password as () => Promise<string>;
+      const password = await passwordFn();
+
+      expect(mockGenerateCredential).toHaveBeenCalledWith(workspaceClient, {
+        endpoint: "projects/test/branches/main/endpoints/primary",
+      });
+      expect(password).toBe("test-oauth-token-12345");
+    });
+
+    test("should cache OAuth token for subsequent calls", async () => {
+      const workspaceClient = { config: { host: "test" } } as any;
+      const pool = createLakebasePool({
+        workspaceClient,
+      });
+
+      const passwordFn = pool.options.password as () => Promise<string>;
+
+      // First call - should fetch token
+      const password1 = await passwordFn();
+      expect(mockGenerateCredential).toHaveBeenCalledTimes(1);
+
+      // Second call - should use cached token
+      const password2 = await passwordFn();
+      expect(mockGenerateCredential).toHaveBeenCalledTimes(1); // Still 1
+      expect(password2).toBe(password1);
+    });
+
+    test("should refresh token when it expires", async () => {
+      const workspaceClient = { config: { host: "test" } } as any;
+
+      // First token expires in 1 minute (within buffer)
+      mockGenerateCredential.mockResolvedValueOnce({
+        token: "expiring-token",
+        expire_time: new Date(Date.now() + 60000).toISOString(),
+      });
+
+      // Second token expires in 1 hour
+      mockGenerateCredential.mockResolvedValueOnce({
+        token: "new-token",
+        expire_time: new Date(Date.now() + 3600000).toISOString(),
+      });
+
+      const pool = createLakebasePool({
+        workspaceClient,
+      });
+
+      const passwordFn = pool.options.password as () => Promise<string>;
+
+      // First call - get expiring token
+      const password1 = await passwordFn();
+      expect(password1).toBe("expiring-token");
+      expect(mockGenerateCredential).toHaveBeenCalledTimes(1);
+
+      // Second call - token is expiring, should refresh
+      const password2 = await passwordFn();
+      expect(password2).toBe("new-token");
+      expect(mockGenerateCredential).toHaveBeenCalledTimes(2);
+    });
+
+    test("should handle token fetch errors", async () => {
+      const workspaceClient = { config: { host: "test" } } as any;
+
+      mockGenerateCredential.mockRejectedValue(new Error("Token fetch failed"));
+
+      const pool = createLakebasePool({
+        workspaceClient,
+      });
+
+      const passwordFn = pool.options.password as () => Promise<string>;
+      await expect(passwordFn()).rejects.toThrow("Token fetch failed");
+    });
+
+    test("should deduplicate concurrent token refresh requests", async () => {
+      const workspaceClient = { config: { host: "test" } } as any;
+
+      // Make the credential generation slow
+      mockGenerateCredential.mockImplementation(
+        () =>
+          new Promise((resolve) =>
+            setTimeout(
+              () =>
+                resolve({
+                  token: "deduped-token",
+                  expire_time: new Date(Date.now() + 3600000).toISOString(),
+                }),
+              50,
+            ),
+          ),
+      );
+
+      const pool = createLakebasePool({
+        workspaceClient,
+      });
+
+      const passwordFn = pool.options.password as () => Promise<string>;
+
+      // Fire multiple concurrent calls
+      const [p1, p2, p3] = await Promise.all([
+        passwordFn(),
+        passwordFn(),
+        passwordFn(),
+      ]);
+
+      // Only one API call should have been made
+      expect(mockGenerateCredential).toHaveBeenCalledTimes(1);
+      expect(p1).toBe("deduped-token");
+      expect(p2).toBe("deduped-token");
expect(p3).toBe("deduped-token"); + }); + }); + + describe("workspace client", () => { + test("should use provided workspace client", () => { + const workspaceClient = { config: { host: "test" } } as any; + const pool = createLakebasePool({ + workspaceClient, + }); + + expect(pool).toBeDefined(); + }); + + test("should fallback to SDK default auth when workspace client not provided", async () => { + const pool = createLakebasePool({ + // No workspace client provided - should use SDK default auth chain + }); + + // Pool should be created successfully + expect(pool).toBeDefined(); + expect(pool.options.password).toBeDefined(); + expect(typeof pool.options.password).toBe("function"); + }); + }); + + describe("pool behavior", () => { + test("should register error handler", () => { + const pool = createLakebasePool({ + workspaceClient: {} as any, + }); + + // Pool should have on method for error handling + expect(pool.on).toBeDefined(); + expect(typeof pool.on).toBe("function"); + }); + + test("should return pg.Pool instance", () => { + const pool = createLakebasePool({ + workspaceClient: {} as any, + }); + + // Standard pg.Pool methods should be available + expect(pool.query).toBeDefined(); + expect(pool.connect).toBeDefined(); + expect(pool.end).toBeDefined(); + expect(typeof pool.query).toBe("function"); + expect(typeof pool.connect).toBe("function"); + expect(typeof pool.end).toBe("function"); + }); + }); + + describe("ORM compatibility patterns", () => { + test("should work with Drizzle pattern", () => { + const pool = createLakebasePool({ + workspaceClient: {} as any, + }); + + // Drizzle expects { client: pool } + const drizzleConfig = { client: pool }; + expect(drizzleConfig.client).toBe(pool); + expect(typeof drizzleConfig.client.query).toBe("function"); + }); + + test("should work with Prisma adapter pattern", () => { + const pool = createLakebasePool({ + workspaceClient: {} as any, + }); + + // Prisma expects PrismaPg(pool) + // Mock PrismaPg adapter + const mockPrismaPg = (pgPool: any) => ({ pool: pgPool }); + const adapter = mockPrismaPg(pool); + + expect(adapter.pool).toBe(pool); + }); + + test("should expose standard pg.Pool interface", () => { + const pool = createLakebasePool({ + workspaceClient: {} as any, + }); + + // Standard pg.Pool methods + expect(pool.query).toBeDefined(); + expect(pool.connect).toBeDefined(); + expect(pool.end).toBeDefined(); + expect(pool.on).toBeDefined(); + + // Options should be accessible + expect(pool.options).toBeDefined(); + expect(pool.options.host).toBeDefined(); + expect(pool.options.database).toBeDefined(); + }); + }); + + describe("native password authentication", () => { + test("should use static password when provided", () => { + const pool = createLakebasePool({ + password: "my-static-password", + host: "ep-test.database.us-east-1.databricks.com", + database: "databricks_postgres", + }); + + expect(pool.options.password).toBe("my-static-password"); + }); + + test("should prioritize password over OAuth when both provided", () => { + const pool = createLakebasePool({ + password: "my-password", + workspaceClient: {} as any, + endpoint: "projects/test/branches/main/endpoints/primary", + }); + + expect(pool.options.password).toBe("my-password"); + }); + + test("should support custom password callback function", async () => { + const customCallback = vi.fn(async () => "custom-token"); + + const pool = createLakebasePool({ + password: customCallback, + host: "ep-test.database.us-east-1.databricks.com", + database: "databricks_postgres", + }); + 
+
+      expect(typeof pool.options.password).toBe("function");
+      const passwordFn = pool.options.password as () => Promise<string>;
+      const result = await passwordFn();
+
+      expect(result).toBe("custom-token");
+      expect(customCallback).toHaveBeenCalled();
+    });
+
+    test("should not require endpoint when password is provided", () => {
+      delete process.env.LAKEBASE_ENDPOINT;
+
+      expect(() =>
+        createLakebasePool({
+          password: "my-password",
+          host: "ep-test.database.us-east-1.databricks.com",
+          database: "databricks_postgres",
+        }),
+      ).not.toThrow();
+    });
+
+    test("should not call OAuth token generation when password is provided", async () => {
+      const pool = createLakebasePool({
+        password: "static-password",
+        host: "ep-test.database.us-east-1.databricks.com",
+        database: "databricks_postgres",
+      });
+
+      // Simulate pg calling the password - should return the string directly
+      expect(pool.options.password).toBe("static-password");
+
+      // OAuth credential generation should not have been called
+      expect(mockGenerateCredential).not.toHaveBeenCalled();
+    });
+  });
+
+  describe("telemetry", () => {
+    test("should initialize telemetry provider", async () => {
+      const { TelemetryManager } = await import("@/telemetry");
+
+      createLakebasePool({
+        workspaceClient: {} as any,
+      });
+
+      expect(TelemetryManager.getProvider).toHaveBeenCalledWith(
+        "connectors:lakebase",
+        undefined,
+      );
+    });
+
+    test("should pass telemetry config to provider", async () => {
+      const { TelemetryManager } = await import("@/telemetry");
+      const telemetryConfig = { traces: true, metrics: false };
+
+      createLakebasePool({
+        workspaceClient: {} as any,
+        telemetry: telemetryConfig,
+      });
+
+      expect(TelemetryManager.getProvider).toHaveBeenCalledWith(
+        "connectors:lakebase",
+        telemetryConfig,
+      );
+    });
+
+    test("should record token refresh duration on successful fetch", async () => {
+      const workspaceClient = { config: { host: "test" } } as any;
+      const pool = createLakebasePool({
+        workspaceClient,
+      });
+
+      const passwordFn = pool.options.password as () => Promise<string>;
+      await passwordFn();
+
+      // Token refresh duration should be recorded (histogram captures count implicitly)
+      expect(mockHistogramRecord).toHaveBeenCalledWith(expect.any(Number));
+    });
+
+    test("should set span attributes on token refresh", async () => {
+      const workspaceClient = { config: { host: "test" } } as any;
+      const pool = createLakebasePool({
+        workspaceClient,
+      });
+
+      const passwordFn = pool.options.password as () => Promise<string>;
+      await passwordFn();
+
+      // Span should have token expiration attribute
+      expect(mockSpanSetAttribute).toHaveBeenCalledWith(
+        "lakebase.token.expires_at",
+        expect.any(String),
+      );
+      expect(mockSpanSetStatus).toHaveBeenCalledWith({
+        code: 1, // SpanStatusCode.OK
+      });
+      expect(mockSpanEnd).toHaveBeenCalled();
+    });
+
+    test("should register observable gauge callbacks for pool metrics", () => {
+      createLakebasePool({
+        workspaceClient: {} as any,
+      });
+
+      // Three observable gauges should have callbacks registered
+      // (total, idle, waiting)
+      expect(mockAddCallback).toHaveBeenCalledTimes(3);
+    });
+
+    test("should observe pool counts via gauge callbacks", () => {
+      createLakebasePool({
+        workspaceClient: {} as any,
+      });
+
+      // Get the registered callbacks
+      const callbacks = mockAddCallback.mock.calls.map(
+        (call: unknown[]) => call[0],
+      );
+      expect(callbacks).toHaveLength(3);
+
+      // Simulate OTEL collection by invoking each callback
+      const observeResults: number[] = [];
+      const mockResult = {
+        observe: (value: number) =>
observeResults.push(value),
+      };
+
+      for (const cb of callbacks) {
+        (cb as (result: { observe: (v: number) => void }) => void)(mockResult);
+      }
+
+      // Pool mock returns totalCount=3, idleCount=1, waitingCount=0
+      expect(observeResults).toEqual([3, 1, 0]);
+    });
+
+    test("should increment pool error counter with error code on pool error event", () => {
+      const pool = createLakebasePool({
+        workspaceClient: {} as any,
+      });
+
+      // Find the error handler registered via pool.on("error", ...)
+      const onMock = pool.on as ReturnType<typeof vi.fn>;
+      const errorHandlers = onMock.mock.calls.filter(
+        (call: unknown[]) => call[0] === "error",
+      );
+
+      // Single consolidated error handler (logging + metrics)
+      expect(errorHandlers.length).toBe(1);
+
+      // Invoke the error handler with a PG error that has a code
+      const errorHandler = errorHandlers[0][1];
+      const pgError = Object.assign(new Error("auth failed"), {
+        code: "28P01",
+      });
+      errorHandler(pgError);
+
+      expect(mockCounterAdd).toHaveBeenCalledWith(1, {
+        "error.code": "28P01",
+      });
+    });
+
+    test("should use 'unknown' error code when error has no code", () => {
+      const pool = createLakebasePool({
+        workspaceClient: {} as any,
+      });
+
+      const onMock = pool.on as ReturnType<typeof vi.fn>;
+      const errorHandlers = onMock.mock.calls.filter(
+        (call: unknown[]) => call[0] === "error",
+      );
+
+      const errorHandler = errorHandlers[0][1];
+      errorHandler(new Error("unknown error"));
+
+      expect(mockCounterAdd).toHaveBeenCalledWith(1, {
+        "error.code": "unknown",
+      });
+    });
+
+    test("should wrap pool.query to add metrics tracking", () => {
+      const pool = createLakebasePool({
+        workspaceClient: {} as any,
+      });
+
+      // pool.query should be our wrapped function
+      expect(typeof pool.query).toBe("function");
+      expect(pool.query.name).toBe("queryWithMetrics");
+    });
+  });
+});
diff --git a/packages/appkit/src/index.ts b/packages/appkit/src/index.ts
index b0745592..2dd1c858 100644
--- a/packages/appkit/src/index.ts
+++ b/packages/appkit/src/index.ts
@@ -14,6 +14,23 @@ export type {
 } from "shared";
 export { isSQLTypeMarker, sql } from "shared";
 export { CacheManager } from "./cache";
+export type {
+  DatabaseCredential,
+  GenerateDatabaseCredentialRequest,
+  LakebasePoolConfig,
+  RequestedClaims,
+  RequestedClaimsPermissionSet,
+  RequestedResource,
+} from "./connectors/lakebase";
+// Lakebase Autoscaling connector
+export {
+  createLakebasePool,
+  generateDatabaseCredential,
+  getLakebaseOrmConfig,
+  getLakebasePgConfig,
+  getLakebasePoolConfig,
+  getWorkspaceClient,
+} from "./connectors/lakebase";
 export { getExecutionContext } from "./context";
 export { createApp } from "./core";
 // Errors
diff --git a/pnpm-lock.yaml b/pnpm-lock.yaml
index 4b598b89..d82b0d03 100644
--- a/pnpm-lock.yaml
+++ b/pnpm-lock.yaml
@@ -48,8 +48,8 @@ importers:
       specifier: ^15.5.1
       version: 15.5.2
     pg:
-      specifier: ^8.16.3
-      version: 8.16.3
+      specifier: ^8.18.0
+      version: 8.18.0
    plop:
      specifier: ^4.0.4
      version: 4.0.4(@types/node@24.7.2)
@@ -237,8 +237,8 @@ importers:
   packages/appkit:
     dependencies:
      '@databricks/sdk-experimental':
-        specifier: ^0.15.0
-        version: 0.15.0
+        specifier: ^0.16.0
+        version: 0.16.0
      '@opentelemetry/api':
        specifier: ^1.9.0
        version: 1.9.0
@@ -294,8 +294,8 @@ importers:
        specifier: ^2.1.1
        version: 2.1.1
      pg:
-        specifier: ^8.16.3
-        version: 8.16.3
+        specifier: ^8.18.0
+        version: 8.18.0
      semver:
        specifier: ^7.7.3
        version: 7.7.3
@@ -319,8 +319,8 @@ importers:
        specifier: ^7.0.15
        version: 7.0.15
      '@types/pg':
-        specifier: ^8.15.6
-        version: 8.15.6
+        specifier: ^8.16.0
+        version: 8.16.0
      '@types/ws':
specifier: ^8.18.1 version: 8.18.1 @@ -1761,8 +1761,8 @@ packages: peerDependencies: postcss: ^8.4 - '@databricks/sdk-experimental@0.15.0': - resolution: {integrity: sha512-HkoMiF7dNDt6WRW0xhi7oPlBJQfxJ9suJhEZRFt08VwLMaWcw2PiF8monfHlkD4lkufEYV6CTxi5njQkciqiHA==} + '@databricks/sdk-experimental@0.16.0': + resolution: {integrity: sha512-9c2RxWYoRDFupdt4ZnBc1IPE1XaXgN+/wyV4DVcEqOnIa31ep51OnwAD/3014BImfKdyXg32nmgrB9dwvB6+lg==} engines: {node: '>=22.0', npm: '>=10.0.0'} '@date-fns/tz@1.4.1': @@ -4588,6 +4588,9 @@ packages: '@types/pg@8.15.6': resolution: {integrity: sha512-NoaMtzhxOrubeL/7UZuNTrejB4MPAJ0RpxZqXQf2qXuVlTPuG6Y8p4u9dKRaue4yjmC7ZhzVO2/Yyyn25znrPQ==} + '@types/pg@8.16.0': + resolution: {integrity: sha512-RmhMd/wD+CF8Dfo+cVIy3RR5cl8CyfXQ0tGgW6XBL8L4LM/UTEbNXYRbLwU6w+CgrKBNbrQWt4FUtTfaU5jSYQ==} + '@types/picomatch@4.0.2': resolution: {integrity: sha512-qHHxQ+P9PysNEGbALT8f8YOSHW0KJu6l2xU8DYY0fu/EmGxXdVnuTLvFUvBgPJMSqXq29SYHveejeAha+4AYgA==} @@ -8739,30 +8742,33 @@ packages: perfect-debounce@2.0.0: resolution: {integrity: sha512-fkEH/OBiKrqqI/yIgjR92lMfs2K8105zt/VT6+7eTjNwisrsh47CeIED9z58zI7DfKdH3uHAn25ziRZn3kgAow==} - pg-cloudflare@1.2.7: - resolution: {integrity: sha512-YgCtzMH0ptvZJslLM1ffsY4EuGaU0cx4XSdXLRFae8bPP4dS5xL1tNB3k2o/N64cHJpwU7dxKli/nZ2lUa5fLg==} + pg-cloudflare@1.3.0: + resolution: {integrity: sha512-6lswVVSztmHiRtD6I8hw4qP/nDm1EJbKMRhf3HCYaqud7frGysPv7FYJ5noZQdhQtN2xJnimfMtvQq21pdbzyQ==} - pg-connection-string@2.9.1: - resolution: {integrity: sha512-nkc6NpDcvPVpZXxrreI/FOtX3XemeLl8E0qFr6F2Lrm/I8WOnaWNhIPK2Z7OHpw7gh5XJThi6j6ppgNoaT1w4w==} + pg-connection-string@2.11.0: + resolution: {integrity: sha512-kecgoJwhOpxYU21rZjULrmrBJ698U2RxXofKVzOn5UDj61BPj/qMb7diYUR1nLScCDbrztQFl1TaQZT0t1EtzQ==} pg-int8@1.0.1: resolution: {integrity: sha512-WCtabS6t3c8SkpDBUlb1kjOs7l66xsGdKpIPZsg4wR+B3+u9UAum2odSsF9tnvxg80h4ZxLWMy4pRjOsFIqQpw==} engines: {node: '>=4.0.0'} - pg-pool@3.10.1: - resolution: {integrity: sha512-Tu8jMlcX+9d8+QVzKIvM/uJtp07PKr82IUOYEphaWcoBhIYkoHpLXN3qO59nAI11ripznDsEzEv8nUxBVWajGg==} + pg-pool@3.11.0: + resolution: {integrity: sha512-MJYfvHwtGp870aeusDh+hg9apvOe2zmpZJpyt+BMtzUWlVqbhFmMK6bOBXLBUPd7iRtIF9fZplDc7KrPN3PN7w==} peerDependencies: pg: '>=8.0' pg-protocol@1.10.3: resolution: {integrity: sha512-6DIBgBQaTKDJyxnXaLiLR8wBpQQcGWuAESkRBX/t6OwA8YsqP+iVSiond2EDy6Y/dsGk8rh/jtax3js5NeV7JQ==} + pg-protocol@1.11.0: + resolution: {integrity: sha512-pfsxk2M9M3BuGgDOfuy37VNRRX3jmKgMjcvAcWqNDpZSf4cUmv8HSOl5ViRQFsfARFn0KuUQTgLxVMbNq5NW3g==} + pg-types@2.2.0: resolution: {integrity: sha512-qTAAlrEsl8s4OiEQY69wDvcMIdQN6wdz5ojQiOy6YRMuynxenON0O5oCpJI6lshc6scgAY8qvJ2On/p+CXY0GA==} engines: {node: '>=4'} - pg@8.16.3: - resolution: {integrity: sha512-enxc1h0jA/aq5oSDMvqyW3q89ra6XIIDZgCX9vkMrnz5DFTw/Ny3Li2lFQ+pt3L6MCgm/5o2o8HW9hiJji+xvw==} + pg@8.18.0: + resolution: {integrity: sha512-xqrUDL1b9MbkydY/s+VZ6v+xiMUmOUk7SS9d/1kpyQxoJ6U9AO1oIJyUWVZojbfe5Cc/oluutcgFG4L9RDP1iQ==} engines: {node: '>= 16.0.0'} peerDependencies: pg-native: '>=3.0.1' @@ -12654,7 +12660,7 @@ snapshots: dependencies: postcss: 8.5.6 - '@databricks/sdk-experimental@0.15.0': + '@databricks/sdk-experimental@0.16.0': dependencies: google-auth-library: 10.5.0 ini: 6.0.0 @@ -16249,7 +16255,7 @@ snapshots: '@types/pg-pool@2.0.6': dependencies: - '@types/pg': 8.15.6 + '@types/pg': 8.16.0 '@types/pg@8.15.6': dependencies: @@ -16257,6 +16263,12 @@ snapshots: pg-protocol: 1.10.3 pg-types: 2.2.0 + '@types/pg@8.16.0': + dependencies: + '@types/node': 24.10.1 + pg-protocol: 1.10.3 + pg-types: 2.2.0 + '@types/picomatch@4.0.2': 
{} '@types/prismjs@1.26.5': {} @@ -21198,19 +21210,21 @@ snapshots: perfect-debounce@2.0.0: {} - pg-cloudflare@1.2.7: + pg-cloudflare@1.3.0: optional: true - pg-connection-string@2.9.1: {} + pg-connection-string@2.11.0: {} pg-int8@1.0.1: {} - pg-pool@3.10.1(pg@8.16.3): + pg-pool@3.11.0(pg@8.18.0): dependencies: - pg: 8.16.3 + pg: 8.18.0 pg-protocol@1.10.3: {} + pg-protocol@1.11.0: {} + pg-types@2.2.0: dependencies: pg-int8: 1.0.1 @@ -21219,15 +21233,15 @@ snapshots: postgres-date: 1.0.7 postgres-interval: 1.2.0 - pg@8.16.3: + pg@8.18.0: dependencies: - pg-connection-string: 2.9.1 - pg-pool: 3.10.1(pg@8.16.3) - pg-protocol: 1.10.3 + pg-connection-string: 2.11.0 + pg-pool: 3.11.0(pg@8.18.0) + pg-protocol: 1.11.0 pg-types: 2.2.0 pgpass: 1.0.5 optionalDependencies: - pg-cloudflare: 1.2.7 + pg-cloudflare: 1.3.0 pgpass@1.0.5: dependencies: From c9654c68e1f1b2aa8357de4b1019bc6ff7de8290 Mon Sep 17 00:00:00 2001 From: Pawel Kosiec Date: Wed, 11 Feb 2026 08:59:59 +0100 Subject: [PATCH 02/12] chore: cleanup code --- .../appkit/Function.getLakebasePoolConfig.md | 19 ---- docs/docs/api/appkit/index.md | 1 - docs/docs/api/appkit/typedoc-sidebar.ts | 6 -- .../src/connectors/lakebase/auth-types.ts | 98 ------------------ .../appkit/src/connectors/lakebase/config.ts | 21 ++-- .../lakebase/{utils.ts => credentials.ts} | 28 +----- .../src/connectors/lakebase/defaults.ts | 8 -- .../appkit/src/connectors/lakebase/index.ts | 19 ++-- .../src/connectors/lakebase/pool-config.ts | 30 ++++-- .../src/connectors/lakebase/token-refresh.ts | 2 +- .../appkit/src/connectors/lakebase/types.ts | 99 +++++++++++++++++++ .../connectors/tests/lakebase-auth.test.ts | 4 +- .../connectors/tests/lakebase-pool.test.ts | 7 +- packages/appkit/src/index.ts | 1 - 14 files changed, 151 insertions(+), 192 deletions(-) delete mode 100644 docs/docs/api/appkit/Function.getLakebasePoolConfig.md delete mode 100644 packages/appkit/src/connectors/lakebase/auth-types.ts rename packages/appkit/src/connectors/lakebase/{utils.ts => credentials.ts} (84%) delete mode 100644 packages/appkit/src/connectors/lakebase/defaults.ts diff --git a/docs/docs/api/appkit/Function.getLakebasePoolConfig.md b/docs/docs/api/appkit/Function.getLakebasePoolConfig.md deleted file mode 100644 index c8ee5b7e..00000000 --- a/docs/docs/api/appkit/Function.getLakebasePoolConfig.md +++ /dev/null @@ -1,19 +0,0 @@ -# ~~Function: getLakebasePoolConfig()~~ - -```ts -function getLakebasePoolConfig(config?: Partial): PoolConfig; -``` - -## Parameters - -| Parameter | Type | -| ------ | ------ | -| `config?` | `Partial`\<[`LakebasePoolConfig`](Interface.LakebasePoolConfig.md)\> | - -## Returns - -`PoolConfig` - -## Deprecated - -Use getLakebasePgConfig() instead. This function will be removed in a future version. diff --git a/docs/docs/api/appkit/index.md b/docs/docs/api/appkit/index.md index 00f90768..c85800b3 100644 --- a/docs/docs/api/appkit/index.md +++ b/docs/docs/api/appkit/index.md @@ -62,6 +62,5 @@ plugin architecture, and React integration. | [getExecutionContext](Function.getExecutionContext.md) | Get the current execution context. | | [getLakebaseOrmConfig](Function.getLakebaseOrmConfig.md) | Get Lakebase connection configuration for ORMs that don't accept pg.Pool directly. | | [getLakebasePgConfig](Function.getLakebasePgConfig.md) | Get Lakebase connection configuration for PostgreSQL clients. 
| -| [~~getLakebasePoolConfig~~](Function.getLakebasePoolConfig.md) | - | | [getWorkspaceClient](Function.getWorkspaceClient.md) | Get workspace client from config or execution context | | [isSQLTypeMarker](Function.isSQLTypeMarker.md) | Type guard to check if a value is a SQL type marker | diff --git a/docs/docs/api/appkit/typedoc-sidebar.ts b/docs/docs/api/appkit/typedoc-sidebar.ts index 27367112..d25c3e9a 100644 --- a/docs/docs/api/appkit/typedoc-sidebar.ts +++ b/docs/docs/api/appkit/typedoc-sidebar.ts @@ -185,12 +185,6 @@ const typedocSidebar: SidebarsConfig = { id: "api/appkit/Function.getLakebasePgConfig", label: "getLakebasePgConfig" }, - { - type: "doc", - id: "api/appkit/Function.getLakebasePoolConfig", - label: "getLakebasePoolConfig", - className: "typedoc-sidebar-item-deprecated" - }, { type: "doc", id: "api/appkit/Function.getWorkspaceClient", diff --git a/packages/appkit/src/connectors/lakebase/auth-types.ts b/packages/appkit/src/connectors/lakebase/auth-types.ts deleted file mode 100644 index 9d9ba2d3..00000000 --- a/packages/appkit/src/connectors/lakebase/auth-types.ts +++ /dev/null @@ -1,98 +0,0 @@ -/** - * Authentication types for Lakebase Postgres OAuth token generation - * @see https://docs.databricks.com/aws/en/oltp/projects/authentication - */ - -/** - * Database credentials with OAuth token for Postgres connection - */ -export interface DatabaseCredential { - /** OAuth token to use as the password when connecting to Postgres */ - token: string; - - /** - * Token expiration time in UTC (ISO 8601 format) - * Tokens expire after 1 hour from generation - * @example "2026-02-06T17:07:00Z" - */ - expire_time: string; -} - -/** - * Permission set for Unity Catalog table access - */ -export enum RequestedClaimsPermissionSet { - /** - * Read-only access to specified UC tables - */ - READ_ONLY = "READ_ONLY", -} - -/** - * Resource to request permissions for in Unity Catalog - */ -export interface RequestedResource { - /** - * Unity Catalog table name to request access to - * @example "catalog.schema.table" - */ - table_name?: string; - - /** - * Generic resource name for non-table resources - */ - unspecified_resource_name?: string; -} - -/** - * Optional claims for fine-grained Unity Catalog table permissions - * When specified, the returned token will be scoped to only the requested tables - */ -export interface RequestedClaims { - /** - * Permission level to request - */ - permission_set?: RequestedClaimsPermissionSet; - - /** - * List of UC resources to request access to - */ - resources?: RequestedResource[]; -} - -/** - * Request parameters for generating database OAuth credentials - */ -export interface GenerateDatabaseCredentialRequest { - /** - * Endpoint resource path with IDs assigned by Databricks. - * - * All segments are IDs from Databricks (not names you create): - * - project-id: UUID format (e.g., `a1b2c3d4-e5f6-4789-a012-b3c4d5e6f789`) - * - branch-id: Identifier from Databricks (e.g., `main`, `dev`) - * - endpoint-id: Identifier from Databricks (e.g., `primary`, `analytics`) - * - * Format: `projects/{project-id}/branches/{branch-id}/endpoints/{endpoint-id}` - * - * **Important:** Copy from Databricks Lakebase UI - do not construct manually. - * - * @example "projects/6bef4151-4b5d-4147-b4d0-c2f4fd5b40db/branches/br-sparkling-tree-y17uj7fn/endpoints/ep-restless-pine-y1ldaht0" - */ - endpoint: string; - - /** - * Optional claims for fine-grained UC table permissions. - * When specified, the token will only grant access to the specified tables. 
- * - * @example - * ```typescript - * { - * claims: [{ - * permission_set: RequestedClaimsPermissionSet.READ_ONLY, - * resources: [{ table_name: "catalog.schema.users" }] - * }] - * } - * ``` - */ - claims?: RequestedClaims[]; -} diff --git a/packages/appkit/src/connectors/lakebase/config.ts b/packages/appkit/src/connectors/lakebase/config.ts index 3731b4c1..ed3453c2 100644 --- a/packages/appkit/src/connectors/lakebase/config.ts +++ b/packages/appkit/src/connectors/lakebase/config.ts @@ -1,9 +1,17 @@ import { WorkspaceClient } from "@databricks/sdk-experimental"; import type pg from "pg"; import { ConfigurationError, ValidationError } from "../../errors"; -import { lakebaseDefaults } from "./defaults"; import type { LakebasePoolConfig } from "./types"; +/** Default configuration values for the Lakebase connector */ +const defaults = { + port: 5432, + sslMode: "require" as const, + max: 10, + idleTimeoutMillis: 30_000, + connectionTimeoutMillis: 10_000, +}; + const VALID_SSL_MODES = ["require", "disable", "prefer"] as const; type SslMode = (typeof VALID_SSL_MODES)[number]; @@ -49,7 +57,7 @@ export function parsePoolConfig( const portStr = process.env.PGPORT; const port = userConfig?.port ?? - (portStr ? Number.parseInt(portStr, 10) : lakebaseDefaults.port); + (portStr ? Number.parseInt(portStr, 10) : defaults.port); if (Number.isNaN(port)) { throw ValidationError.invalidValue("port", portStr, "a number"); @@ -58,15 +66,14 @@ export function parsePoolConfig( // Get SSL mode (optional, default from defaults) const rawSslMode = userConfig?.sslMode ?? process.env.PGSSLMODE ?? undefined; - const sslMode = validateSslMode(rawSslMode) ?? lakebaseDefaults.sslMode; + const sslMode = validateSslMode(rawSslMode) ?? defaults.sslMode; // Pool options (with defaults) - const max = userConfig?.max ?? lakebaseDefaults.max; + const max = userConfig?.max ?? defaults.max; const idleTimeoutMillis = - userConfig?.idleTimeoutMillis ?? lakebaseDefaults.idleTimeoutMillis; + userConfig?.idleTimeoutMillis ?? defaults.idleTimeoutMillis; const connectionTimeoutMillis = - userConfig?.connectionTimeoutMillis ?? - lakebaseDefaults.connectionTimeoutMillis; + userConfig?.connectionTimeoutMillis ?? defaults.connectionTimeoutMillis; return { endpoint, diff --git a/packages/appkit/src/connectors/lakebase/utils.ts b/packages/appkit/src/connectors/lakebase/credentials.ts similarity index 84% rename from packages/appkit/src/connectors/lakebase/utils.ts rename to packages/appkit/src/connectors/lakebase/credentials.ts index 4de4a34a..8783d07e 100644 --- a/packages/appkit/src/connectors/lakebase/utils.ts +++ b/packages/appkit/src/connectors/lakebase/credentials.ts @@ -1,36 +1,12 @@ import type { WorkspaceClient } from "@databricks/sdk-experimental"; -import type pg from "pg"; import { ValidationError } from "../../errors"; import { createLogger } from "../../logging/logger"; import type { DatabaseCredential, GenerateDatabaseCredentialRequest, -} from "./auth-types"; +} from "./types"; -const logger = createLogger("connectors:lakebase:utils"); - -/** - * Map an SSL mode string to the corresponding `pg` SSL configuration. 
- * - * - `"require"` -- SSL enabled with certificate verification - * - `"prefer"` -- SSL enabled without certificate verification (try SSL, accept any cert) - * - `"disable"` -- SSL disabled - * - * @param sslMode - The SSL mode to map - * @returns pg-compatible SSL config value - */ -export function mapSslConfig( - sslMode: "require" | "prefer" | "disable", -): pg.PoolConfig["ssl"] { - switch (sslMode) { - case "require": - return { rejectUnauthorized: true }; - case "prefer": - return { rejectUnauthorized: false }; - case "disable": - return false; - } -} +const logger = createLogger("connectors:lakebase:credentials"); /** * Generate OAuth credentials for Postgres database connection using the proper Postgres API. diff --git a/packages/appkit/src/connectors/lakebase/defaults.ts b/packages/appkit/src/connectors/lakebase/defaults.ts deleted file mode 100644 index 3153ce9d..00000000 --- a/packages/appkit/src/connectors/lakebase/defaults.ts +++ /dev/null @@ -1,8 +0,0 @@ -/** Default configuration values for the Lakebase connector */ -export const lakebaseDefaults = { - port: 5432, - sslMode: "require" as const, - max: 10, - idleTimeoutMillis: 30_000, - connectionTimeoutMillis: 10_000, -}; diff --git a/packages/appkit/src/connectors/lakebase/index.ts b/packages/appkit/src/connectors/lakebase/index.ts index f0f060db..70da0be8 100644 --- a/packages/appkit/src/connectors/lakebase/index.ts +++ b/packages/appkit/src/connectors/lakebase/index.ts @@ -1,16 +1,15 @@ -export type { - DatabaseCredential, - GenerateDatabaseCredentialRequest, - RequestedClaims, - RequestedClaimsPermissionSet, - RequestedResource, -} from "./auth-types"; export { getWorkspaceClient } from "./config"; +export { generateDatabaseCredential } from "./credentials"; export { createLakebasePool } from "./pool"; export { getLakebaseOrmConfig, getLakebasePgConfig, - getLakebasePoolConfig, } from "./pool-config"; -export type { LakebasePoolConfig } from "./types"; -export { generateDatabaseCredential } from "./utils"; +export type { + DatabaseCredential, + GenerateDatabaseCredentialRequest, + LakebasePoolConfig, + RequestedClaims, + RequestedClaimsPermissionSet, + RequestedResource, +} from "./types"; diff --git a/packages/appkit/src/connectors/lakebase/pool-config.ts b/packages/appkit/src/connectors/lakebase/pool-config.ts index 1b26ffb0..6b9f473f 100644 --- a/packages/appkit/src/connectors/lakebase/pool-config.ts +++ b/packages/appkit/src/connectors/lakebase/pool-config.ts @@ -3,7 +3,26 @@ import { getUsernameSync, parsePoolConfig } from "./config"; import { type DriverTelemetry, initTelemetry } from "./telemetry"; import { createTokenRefreshCallback } from "./token-refresh"; import type { LakebasePoolConfig } from "./types"; -import { mapSslConfig } from "./utils"; + +/** + * Map an SSL mode string to the corresponding `pg` SSL configuration. + * + * - `"require"` -- SSL enabled with certificate verification + * - `"prefer"` -- SSL enabled without certificate verification (try SSL, accept any cert) + * - `"disable"` -- SSL disabled + */ +function mapSslConfig( + sslMode: "require" | "prefer" | "disable", +): pg.PoolConfig["ssl"] { + switch (sslMode) { + case "require": + return { rejectUnauthorized: true }; + case "prefer": + return { rejectUnauthorized: false }; + case "disable": + return false; + } +} /** * Get Lakebase connection configuration for PostgreSQL clients. @@ -104,12 +123,3 @@ export function getLakebaseOrmConfig(config?: Partial) { : false, }; } - -/** - * @deprecated Use getLakebasePgConfig() instead. 
This function will be removed in a future version. - */ -export function getLakebasePoolConfig( - config?: Partial, -): pg.PoolConfig { - return getLakebasePgConfig(config); -} diff --git a/packages/appkit/src/connectors/lakebase/token-refresh.ts b/packages/appkit/src/connectors/lakebase/token-refresh.ts index ff4e6d5a..b302117a 100644 --- a/packages/appkit/src/connectors/lakebase/token-refresh.ts +++ b/packages/appkit/src/connectors/lakebase/token-refresh.ts @@ -2,9 +2,9 @@ import type { WorkspaceClient } from "@databricks/sdk-experimental"; import { SpanStatusCode } from "@/telemetry"; import { createLogger } from "../../logging/logger"; import { getWorkspaceClient } from "./config"; +import { generateDatabaseCredential } from "./credentials"; import type { DriverTelemetry } from "./telemetry"; import type { LakebasePoolConfig } from "./types"; -import { generateDatabaseCredential } from "./utils"; const logger = createLogger("connectors:lakebase:token"); diff --git a/packages/appkit/src/connectors/lakebase/types.ts b/packages/appkit/src/connectors/lakebase/types.ts index 1c9875d6..2c46d87f 100644 --- a/packages/appkit/src/connectors/lakebase/types.ts +++ b/packages/appkit/src/connectors/lakebase/types.ts @@ -56,3 +56,102 @@ export interface LakebasePoolConfig extends PoolConfig { */ telemetry?: TelemetryOptions; } + +// --------------------------------------------------------------------------- +// Authentication types for Lakebase Postgres OAuth token generation +// @see https://docs.databricks.com/aws/en/oltp/projects/authentication +// --------------------------------------------------------------------------- + +/** + * Database credentials with OAuth token for Postgres connection + */ +export interface DatabaseCredential { + /** OAuth token to use as the password when connecting to Postgres */ + token: string; + + /** + * Token expiration time in UTC (ISO 8601 format) + * Tokens expire after 1 hour from generation + * @example "2026-02-06T17:07:00Z" + */ + expire_time: string; +} + +/** + * Permission set for Unity Catalog table access + */ +export enum RequestedClaimsPermissionSet { + /** + * Read-only access to specified UC tables + */ + READ_ONLY = "READ_ONLY", +} + +/** + * Resource to request permissions for in Unity Catalog + */ +export interface RequestedResource { + /** + * Unity Catalog table name to request access to + * @example "catalog.schema.table" + */ + table_name?: string; + + /** + * Generic resource name for non-table resources + */ + unspecified_resource_name?: string; +} + +/** + * Optional claims for fine-grained Unity Catalog table permissions + * When specified, the returned token will be scoped to only the requested tables + */ +export interface RequestedClaims { + /** + * Permission level to request + */ + permission_set?: RequestedClaimsPermissionSet; + + /** + * List of UC resources to request access to + */ + resources?: RequestedResource[]; +} + +/** + * Request parameters for generating database OAuth credentials + */ +export interface GenerateDatabaseCredentialRequest { + /** + * Endpoint resource path with IDs assigned by Databricks. 
+ * + * All segments are IDs from Databricks (not names you create): + * - project-id: UUID format (e.g., `a1b2c3d4-e5f6-4789-a012-b3c4d5e6f789`) + * - branch-id: Identifier from Databricks (e.g., `main`, `dev`) + * - endpoint-id: Identifier from Databricks (e.g., `primary`, `analytics`) + * + * Format: `projects/{project-id}/branches/{branch-id}/endpoints/{endpoint-id}` + * + * **Important:** Copy from Databricks Lakebase UI - do not construct manually. + * + * @example "projects/6bef4151-4b5d-4147-b4d0-c2f4fd5b40db/branches/br-sparkling-tree-y17uj7fn/endpoints/ep-restless-pine-y1ldaht0" + */ + endpoint: string; + + /** + * Optional claims for fine-grained UC table permissions. + * When specified, the token will only grant access to the specified tables. + * + * @example + * ```typescript + * { + * claims: [{ + * permission_set: RequestedClaimsPermissionSet.READ_ONLY, + * resources: [{ table_name: "catalog.schema.users" }] + * }] + * } + * ``` + */ + claims?: RequestedClaims[]; +} diff --git a/packages/appkit/src/connectors/tests/lakebase-auth.test.ts b/packages/appkit/src/connectors/tests/lakebase-auth.test.ts index edc3f766..9f2c099a 100644 --- a/packages/appkit/src/connectors/tests/lakebase-auth.test.ts +++ b/packages/appkit/src/connectors/tests/lakebase-auth.test.ts @@ -1,11 +1,11 @@ import type { WorkspaceClient } from "@databricks/sdk-experimental"; import { ApiClient, Config } from "@databricks/sdk-experimental"; import { beforeEach, describe, expect, it, vi } from "vitest"; +import { generateDatabaseCredential } from "../lakebase/credentials"; import { type DatabaseCredential, RequestedClaimsPermissionSet, -} from "../lakebase/auth-types"; -import { generateDatabaseCredential } from "../lakebase/utils"; +} from "../lakebase/types"; // Mock the @databricks/sdk-experimental module vi.mock("@databricks/sdk-experimental", () => { diff --git a/packages/appkit/src/connectors/tests/lakebase-pool.test.ts b/packages/appkit/src/connectors/tests/lakebase-pool.test.ts index 66c92111..87728f53 100644 --- a/packages/appkit/src/connectors/tests/lakebase-pool.test.ts +++ b/packages/appkit/src/connectors/tests/lakebase-pool.test.ts @@ -33,8 +33,9 @@ vi.mock("pg", () => { }); // Mock generateDatabaseCredential -vi.mock("../lakebase/utils", async (importOriginal) => { - const actual = await importOriginal(); +vi.mock("../lakebase/credentials", async (importOriginal) => { + const actual = + await importOriginal(); return { ...actual, generateDatabaseCredential: vi.fn(), @@ -112,7 +113,7 @@ describe("createLakebasePool", () => { process.env.PGUSER = "test-user@example.com"; // Setup mock for generateDatabaseCredential - const utils = await import("../lakebase/utils"); + const utils = await import("../lakebase/credentials"); mockGenerateCredential = utils.generateDatabaseCredential as any; mockGenerateCredential.mockResolvedValue({ token: "test-oauth-token-12345", diff --git a/packages/appkit/src/index.ts b/packages/appkit/src/index.ts index 2dd1c858..b5ccd373 100644 --- a/packages/appkit/src/index.ts +++ b/packages/appkit/src/index.ts @@ -28,7 +28,6 @@ export { generateDatabaseCredential, getLakebaseOrmConfig, getLakebasePgConfig, - getLakebasePoolConfig, getWorkspaceClient, } from "./connectors/lakebase"; export { getExecutionContext } from "./context"; From afd898237de37f7fecf42fd1f634c52619af5e98 Mon Sep 17 00:00:00 2001 From: Pawel Kosiec Date: Wed, 11 Feb 2026 16:06:14 +0100 Subject: [PATCH 03/12] chore: remove unused stuff --- .../appkit/src/connectors/lakebase/config.ts | 6 -- 
 .../src/connectors/lakebase/credentials.ts    | 25 ++-----
 .../appkit/src/connectors/lakebase/pool.ts    | 67 ++++++++++++++-----
 .../src/connectors/lakebase/token-refresh.ts  |  9 ---
 4 files changed, 54 insertions(+), 53 deletions(-)

diff --git a/packages/appkit/src/connectors/lakebase/config.ts b/packages/appkit/src/connectors/lakebase/config.ts
index ed3453c2..76689662 100644
--- a/packages/appkit/src/connectors/lakebase/config.ts
+++ b/packages/appkit/src/connectors/lakebase/config.ts
@@ -141,12 +141,6 @@ export function getUsernameSync(config: Partial<LakebasePoolConfig>): string {
     return pgUser;
   }
 
-  // Priority 3: DATABRICKS_CLIENT_ID (service principal)
-  const clientId = process.env.DATABRICKS_CLIENT_ID;
-  if (clientId) {
-    return clientId;
-  }
-
   throw ConfigurationError.missingEnvVar(
     "PGUSER, DATABRICKS_CLIENT_ID, or config.user",
   );
diff --git a/packages/appkit/src/connectors/lakebase/credentials.ts b/packages/appkit/src/connectors/lakebase/credentials.ts
index 8783d07e..3f73392f 100644
--- a/packages/appkit/src/connectors/lakebase/credentials.ts
+++ b/packages/appkit/src/connectors/lakebase/credentials.ts
@@ -54,31 +54,14 @@ export async function generateDatabaseCredential(
 ): Promise<DatabaseCredential> {
   const apiPath = "/api/2.0/postgres/credentials";
 
-  // Get workspace ID from execution context or environment
-  let workspaceId: string | undefined;
   try {
-    const { getWorkspaceId } = await import("../../context");
-    workspaceId = await getWorkspaceId();
-  } catch {
-    workspaceId = process.env.DATABRICKS_WORKSPACE_ID;
-  }
-
-  try {
-    const headers = new Headers({
-      Accept: "application/json",
-      "Content-Type": "application/json",
-    });
-
-    // Manually add X-Databricks-Org-Id header if workspace ID is available
-    // The SDK's automatic header addition doesn't work because config.workspaceId isn't set
-    if (workspaceId) {
-      headers.set("X-Databricks-Org-Id", workspaceId);
-    }
-
     const response = await workspaceClient.apiClient.request({
       path: apiPath,
       method: "POST",
-      headers,
+      headers: new Headers({
+        Accept: "application/json",
+        "Content-Type": "application/json",
+      }),
       raw: false,
       payload: request,
     });
diff --git a/packages/appkit/src/connectors/lakebase/pool.ts b/packages/appkit/src/connectors/lakebase/pool.ts
index 811ed443..126e0f09 100644
--- a/packages/appkit/src/connectors/lakebase/pool.ts
+++ b/packages/appkit/src/connectors/lakebase/pool.ts
@@ -1,4 +1,5 @@
 import pg from "pg";
+import { SpanKind, SpanStatusCode } from "@/telemetry";
 import { createLogger } from "../../logging/logger";
 import { getLakebasePgConfig } from "./pool-config";
 import { attachPoolMetrics, initTelemetry } from "./telemetry";
@@ -69,40 +70,72 @@
   // Attach pool-level telemetry metrics (gauges, error counter, and error logging)
   attachPoolMetrics(pool, telemetry);
 
-  // Wrap pool.query to track query duration.
+  // Wrap pool.query to track query duration and create trace spans.
   // pg.Pool.query has 15+ overloads that are difficult to type-preserve,
   // so we use a loosely-typed wrapper and cast back.
+  // We use the tracer directly (not provider.startActiveSpan) because the
+  // provider wrapper is async-only, while pool.query supports both promise
+  // and callback paths.
   const origQuery = pool.query.bind(pool);
-  pool.query = function queryWithMetrics(
+  const tracer = telemetry.provider.getTracer();
+  pool.query = function queryWithTelemetry(
     ...args: unknown[]
   ): ReturnType<typeof pool.query> {
-    const start = Date.now();
     const firstArg = args[0];
     const sql =
       typeof firstArg === "string"
        ? firstArg
        : (firstArg as { text?: string } | undefined)?.text;
-    const attrs = {
+    const metricAttrs = {
       "db.statement": sql ? sql.substring(0, 100) : "unknown",
     };
 
-    const result = (
-      origQuery as (...a: unknown[]) => Promise<unknown> | undefined
-    )(...args);
+    return tracer.startActiveSpan(
+      "lakebase.query",
+      {
+        kind: SpanKind.CLIENT,
+        attributes: {
+          "db.system": "lakebase",
+          "db.statement": sql ? sql.substring(0, 500) : "unknown",
+        },
+      },
+      (span) => {
+        const start = Date.now();
 
-    // Promise-based query: record duration on completion
-    if (result && typeof result.finally === "function") {
-      return result.finally(() => {
-        telemetry.queryDuration.record(Date.now() - start, attrs);
-      }) as unknown as ReturnType<typeof pool.query>;
-    }
+        const result = (
+          origQuery as (...a: unknown[]) => Promise<unknown> | undefined
+        )(...args);
 
-    // Callback-based query (void return): duration is approximate
-    telemetry.queryDuration.record(Date.now() - start, attrs);
-    return result as ReturnType<typeof pool.query>;
+        // Promise-based query: record duration and end span on completion
+        if (result && typeof result.then === "function") {
+          return (result as Promise<{ rowCount?: number | null }>)
+            .then(
+              (res) => {
+                span.setAttribute("db.rows_affected", res?.rowCount ?? 0);
+                span.setStatus({ code: SpanStatusCode.OK });
+                return res;
+              },
+              (err: Error) => {
+                span.recordException(err);
+                span.setStatus({ code: SpanStatusCode.ERROR });
+                throw err;
+              },
+            )
+            .finally(() => {
+              telemetry.queryDuration.record(Date.now() - start, metricAttrs);
+              span.end();
+            }) as unknown as ReturnType<typeof pool.query>;
+        }
+
+        // Callback-based query (void return): duration is approximate
+        telemetry.queryDuration.record(Date.now() - start, metricAttrs);
+        span.end();
+        return result as ReturnType<typeof pool.query>;
+      },
+    ) as ReturnType<typeof pool.query>;
   } as typeof pool.query;
 
-  logger.info(
+  logger.debug(
     "Created Lakebase connection pool for %s@%s/%s",
     poolConfig.user,
     poolConfig.host,
diff --git a/packages/appkit/src/connectors/lakebase/token-refresh.ts b/packages/appkit/src/connectors/lakebase/token-refresh.ts
index b302117a..f346a3c2 100644
--- a/packages/appkit/src/connectors/lakebase/token-refresh.ts
+++ b/packages/appkit/src/connectors/lakebase/token-refresh.ts
@@ -98,15 +98,6 @@ export function createTokenRefreshCallback(
 
     cachedToken = result.token;
     tokenExpiresAt = result.expiresAt;
-
-    const duration = Date.now() - startTime;
-    const expiresAt = new Date(result.expiresAt).toISOString();
-    logger.info(
-      "OAuth token refreshed successfully in %dms (expires at %s)",
-      duration,
-      expiresAt,
-    );
-
     return cachedToken;
   } catch (error) {
     logger.error("Failed to fetch OAuth token: %O", {

From 7b14aafbb1f49b0fa38303e7e7516d324897e41e Mon Sep 17 00:00:00 2001
From: Pawel Kosiec
Date: Thu, 12 Feb 2026 09:46:34 +0100
Subject: [PATCH 04/12] fix: bring back using DATABRICKS_CLIENT_ID as user

---
 packages/appkit/src/connectors/lakebase/config.ts | 6 ++++++
 1 file changed, 6 insertions(+)

diff --git a/packages/appkit/src/connectors/lakebase/config.ts b/packages/appkit/src/connectors/lakebase/config.ts
index 76689662..48ddf84e 100644
--- a/packages/appkit/src/connectors/lakebase/config.ts
+++ b/packages/appkit/src/connectors/lakebase/config.ts
@@ -141,6 +141,12 @@ export function getUsernameSync(config: Partial<LakebasePoolConfig>): string {
     return pgUser;
   }
 
+  // Priority 3: DATABRICKS_CLIENT_ID (service principal ID)
+  const clientId = process.env.DATABRICKS_CLIENT_ID;
+  if (clientId) {
+    return clientId;
+  }
+
   throw ConfigurationError.missingEnvVar(
     "PGUSER, DATABRICKS_CLIENT_ID, or config.user",
   );

From 
2ba1a304fe4997a77680c1709f6e51b4cef3e3e2 Mon Sep 17 00:00:00 2001 From: Pawel Kosiec Date: Mon, 16 Feb 2026 11:37:06 +0100 Subject: [PATCH 05/12] chore: fix test mock --- .../src/connectors/tests/lakebase-pool.test.ts | 15 ++++++++++++++- 1 file changed, 14 insertions(+), 1 deletion(-) diff --git a/packages/appkit/src/connectors/tests/lakebase-pool.test.ts b/packages/appkit/src/connectors/tests/lakebase-pool.test.ts index 87728f53..1264ed44 100644 --- a/packages/appkit/src/connectors/tests/lakebase-pool.test.ts +++ b/packages/appkit/src/connectors/tests/lakebase-pool.test.ts @@ -54,6 +54,19 @@ vi.mock("@/telemetry", () => ({ SpanStatusCode: { OK: 1, ERROR: 2 }, TelemetryManager: { getProvider: vi.fn(() => ({ + getTracer: vi.fn(() => ({ + startActiveSpan: vi.fn( + (_name: string, _opts: unknown, fn: (span: unknown) => unknown) => { + const span = { + setAttribute: mockSpanSetAttribute, + setStatus: mockSpanSetStatus, + end: mockSpanEnd, + recordException: vi.fn(), + }; + return fn(span); + }, + ), + })), getMeter: vi.fn(() => ({ createCounter: vi.fn(() => ({ add: mockCounterAdd })), createHistogram: vi.fn(() => ({ record: mockHistogramRecord })), @@ -707,7 +720,7 @@ describe("createLakebasePool", () => { // pool.query should be our wrapped function expect(typeof pool.query).toBe("function"); - expect(pool.query.name).toBe("queryWithMetrics"); + expect(pool.query.name).toBe("queryWithTelemetry"); }); }); }); From c893af65a3f3dff4c2b43df09dc93ba535777c6a Mon Sep 17 00:00:00 2001 From: Pawel Kosiec Date: Tue, 17 Feb 2026 11:09:07 +0100 Subject: [PATCH 06/12] chore: extract Lakebase driver as a separate package --- .github/workflows/release-lakebase.yml | 61 +++ .../api/appkit/Function.createLakebasePool.md | 47 +- .../appkit/Function.getLakebasePgConfig.md | 6 +- .../api/appkit/Function.getWorkspaceClient.md | 2 +- .../appkit/Interface.LakebasePoolConfig.md | 24 +- docs/docs/api/appkit/index.md | 4 +- packages/appkit/package.json | 1 + .../appkit/src/connectors/lakebase/index.ts | 49 +- packages/appkit/src/index.ts | 2 +- packages/appkit/tsconfig.json | 3 +- packages/lakebase/.release-it.json | 35 ++ packages/lakebase/README.md | 222 +++++++++ packages/lakebase/package.json | 71 +++ .../src/__tests__/credentials.test.ts} | 4 +- .../src/__tests__/pool.test.ts} | 215 ++++----- .../lakebase => lakebase/src}/config.ts | 14 +- .../lakebase => lakebase/src}/credentials.ts | 36 +- packages/lakebase/src/errors.ts | 66 +++ packages/lakebase/src/index.ts | 19 + .../lakebase => lakebase/src}/pool-config.ts | 7 +- .../lakebase => lakebase/src}/pool.ts | 29 +- .../lakebase => lakebase/src}/telemetry.ts | 59 ++- .../src}/token-refresh.ts | 17 +- .../lakebase => lakebase/src}/types.ts | 40 +- packages/lakebase/tsconfig.json | 12 + packages/lakebase/tsdown.config.ts | 28 ++ pnpm-lock.yaml | 449 ++++++++++++++++++ tools/license-utils.ts | 1 + vitest.config.ts | 8 + 29 files changed, 1245 insertions(+), 286 deletions(-) create mode 100644 .github/workflows/release-lakebase.yml create mode 100644 packages/lakebase/.release-it.json create mode 100644 packages/lakebase/README.md create mode 100644 packages/lakebase/package.json rename packages/{appkit/src/connectors/tests/lakebase-auth.test.ts => lakebase/src/__tests__/credentials.test.ts} (98%) rename packages/{appkit/src/connectors/tests/lakebase-pool.test.ts => lakebase/src/__tests__/pool.test.ts} (80%) rename packages/{appkit/src/connectors/lakebase => lakebase/src}/config.ts (89%) rename packages/{appkit/src/connectors/lakebase => lakebase/src}/credentials.ts 
(79%) create mode 100644 packages/lakebase/src/errors.ts create mode 100644 packages/lakebase/src/index.ts rename packages/{appkit/src/connectors/lakebase => lakebase/src}/pool-config.ts (95%) rename packages/{appkit/src/connectors/lakebase => lakebase/src}/pool.ts (82%) rename packages/{appkit/src/connectors/lakebase => lakebase/src}/telemetry.ts (63%) rename packages/{appkit/src/connectors/lakebase => lakebase/src}/token-refresh.ts (87%) rename packages/{appkit/src/connectors/lakebase => lakebase/src}/types.ts (82%) create mode 100644 packages/lakebase/tsconfig.json create mode 100644 packages/lakebase/tsdown.config.ts diff --git a/.github/workflows/release-lakebase.yml b/.github/workflows/release-lakebase.yml new file mode 100644 index 00000000..639ff8b3 --- /dev/null +++ b/.github/workflows/release-lakebase.yml @@ -0,0 +1,61 @@ +name: Release @databricks/lakebase + +on: + workflow_dispatch: + inputs: + dry-run: + description: "Dry run (no actual release)" + required: false + type: boolean + default: false + +jobs: + release: + runs-on: + group: databricks-protected-runner-group + labels: linux-ubuntu-latest + + environment: release + + permissions: + contents: write + id-token: write + + steps: + - name: Checkout + uses: actions/checkout@v4 + with: + fetch-depth: 0 + token: ${{ secrets.GITHUB_TOKEN }} + + - name: Setup Git + run: | + git config user.name "github-actions[bot]" + git config user.email "github-actions[bot]@users.noreply.github.com" + + - name: Setup pnpm + uses: pnpm/action-setup@v4 + + - name: Setup Node.js + uses: actions/setup-node@v4 + with: + node-version: 20 + registry-url: "https://registry.npmjs.org" + cache: "pnpm" + + - name: Update npm + run: npm install -g npm@latest + + - name: Install dependencies + run: pnpm install --frozen-lockfile + + - name: Release + working-directory: packages/lakebase + run: | + if [ "${{ inputs.dry-run }}" == "true" ]; then + pnpm release:dry + else + pnpm release:ci + fi + env: + GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} diff --git a/docs/docs/api/appkit/Function.createLakebasePool.md b/docs/docs/api/appkit/Function.createLakebasePool.md index e73edbc4..c3b5b010 100644 --- a/docs/docs/api/appkit/Function.createLakebasePool.md +++ b/docs/docs/api/appkit/Function.createLakebasePool.md @@ -4,56 +4,17 @@ function createLakebasePool(config?: Partial): Pool; ``` -Create a PostgreSQL connection pool with automatic OAuth token refresh for Lakebase. - -This function returns a standard `pg.Pool` instance configured with a password callback -that automatically fetches and caches OAuth tokens from Databricks. The returned pool -works with any ORM or library that accepts a `pg.Pool` (Drizzle, Prisma, TypeORM, etc.). +Create a Lakebase pool with appkit's logger integration. +Telemetry automatically uses appkit's OpenTelemetry configuration via global registry. 
## Parameters
 
 | Parameter | Type | Description |
 | ------ | ------ | ------ |
-| `config?` | `Partial`\<[`LakebasePoolConfig`](Interface.LakebasePoolConfig.md)\> | Configuration options (optional, reads from environment if not provided) |
+| `config?` | `Partial`\<[`LakebasePoolConfig`](Interface.LakebasePoolConfig.md)\> | Lakebase pool configuration |
 
 ## Returns
 
 `Pool`
 
-Standard pg.Pool instance with OAuth token refresh
+PostgreSQL pool with appkit integration
diff --git a/docs/docs/api/appkit/Function.getLakebasePgConfig.md b/docs/docs/api/appkit/Function.getLakebasePgConfig.md
index a3c2f7e7..e604f43b 100644
--- a/docs/docs/api/appkit/Function.getLakebasePgConfig.md
+++ b/docs/docs/api/appkit/Function.getLakebasePgConfig.md
@@ -1,7 +1,10 @@
 # Function: getLakebasePgConfig()
 
 ```ts
-function getLakebasePgConfig(config?: Partial<LakebasePoolConfig>, telemetry?: DriverTelemetry): PoolConfig;
+function getLakebasePgConfig(
+  config?: Partial<LakebasePoolConfig>,
+  telemetry?: DriverTelemetry,
+  logger?: Logger): PoolConfig;
 ```
 
 Get Lakebase connection configuration for PostgreSQL clients.
@@ -19,6 +22,7 @@ Used internally by createLakebasePool().
 | ------ | ------ | ------ |
 | `config?` | `Partial`\<[`LakebasePoolConfig`](Interface.LakebasePoolConfig.md)\> | Optional configuration (reads from environment if not provided) |
 | `telemetry?` | `DriverTelemetry` | Optional pre-initialized telemetry (created internally if not provided) |
+| `logger?` | `Logger` | Optional logger (silent if not provided) |
 
 ## Returns
 
diff --git a/docs/docs/api/appkit/Function.getWorkspaceClient.md b/docs/docs/api/appkit/Function.getWorkspaceClient.md
index 5ff9d152..f8b856b2 100644
--- a/docs/docs/api/appkit/Function.getWorkspaceClient.md
+++ b/docs/docs/api/appkit/Function.getWorkspaceClient.md
@@ -4,7 +4,7 @@
 function getWorkspaceClient(config: Partial<LakebasePoolConfig>): Promise<WorkspaceClient>;
 ```
 
-Get workspace client from config or execution context
+Get workspace client from config or SDK default auth chain
 
 ## Parameters
 
diff --git a/docs/docs/api/appkit/Interface.LakebasePoolConfig.md b/docs/docs/api/appkit/Interface.LakebasePoolConfig.md
index 3d19e38a..ce099dbf 100644
--- a/docs/docs/api/appkit/Interface.LakebasePoolConfig.md
+++ b/docs/docs/api/appkit/Interface.LakebasePoolConfig.md
@@ -44,6 +44,26 @@ Can also be set via LAKEBASE_ENDPOINT environment variable
 
 ***
 
+### logger?
+
+```ts
+optional logger: Logger;
+```
+
+Optional logger instance for the driver.
+When not provided, the driver operates silently (no logging).
+ +#### Example + +```typescript +import { createLogger } from '@databricks/appkit'; +const pool = createLakebasePool({ + logger: createLogger('connectors:lakebase') +}); +``` + +*** + ### sslMode? ```ts @@ -69,9 +89,9 @@ optional telemetry: TelemetryOptions; Telemetry configuration -- `true` or omitted: enable all telemetry (traces, metrics, logs) -- no-op when OTEL is not configured +- `true` or omitted: enable all telemetry (traces, metrics) -- no-op when OTEL is not configured - `false`: disable all telemetry -- `{ traces?, metrics?, logs? }`: fine-grained control +- `{ traces?, metrics? }`: fine-grained control *** diff --git a/docs/docs/api/appkit/index.md b/docs/docs/api/appkit/index.md index c85800b3..7786b26a 100644 --- a/docs/docs/api/appkit/index.md +++ b/docs/docs/api/appkit/index.md @@ -57,10 +57,10 @@ plugin architecture, and React integration. | ------ | ------ | | [appKitTypesPlugin](Function.appKitTypesPlugin.md) | Vite plugin to generate types for AppKit queries. Calls generateFromEntryPoint under the hood. | | [createApp](Function.createApp.md) | Bootstraps AppKit with the provided configuration. | -| [createLakebasePool](Function.createLakebasePool.md) | Create a PostgreSQL connection pool with automatic OAuth token refresh for Lakebase. | +| [createLakebasePool](Function.createLakebasePool.md) | Create a Lakebase pool with appkit's logger integration. Telemetry automatically uses appkit's OpenTelemetry configuration via global registry. | | [generateDatabaseCredential](Function.generateDatabaseCredential.md) | Generate OAuth credentials for Postgres database connection using the proper Postgres API. | | [getExecutionContext](Function.getExecutionContext.md) | Get the current execution context. | | [getLakebaseOrmConfig](Function.getLakebaseOrmConfig.md) | Get Lakebase connection configuration for ORMs that don't accept pg.Pool directly. | | [getLakebasePgConfig](Function.getLakebasePgConfig.md) | Get Lakebase connection configuration for PostgreSQL clients. | -| [getWorkspaceClient](Function.getWorkspaceClient.md) | Get workspace client from config or execution context | +| [getWorkspaceClient](Function.getWorkspaceClient.md) | Get workspace client from config or SDK default auth chain | | [isSQLTypeMarker](Function.isSQLTypeMarker.md) | Type guard to check if a value is a SQL type marker | diff --git a/packages/appkit/package.json b/packages/appkit/package.json index 6880425c..2b8ce014 100644 --- a/packages/appkit/package.json +++ b/packages/appkit/package.json @@ -42,6 +42,7 @@ "typecheck": "tsc --noEmit" }, "dependencies": { + "@databricks/lakebase": "workspace:*", "@databricks/sdk-experimental": "^0.16.0", "@opentelemetry/api": "^1.9.0", "@opentelemetry/api-logs": "^0.208.0", diff --git a/packages/appkit/src/connectors/lakebase/index.ts b/packages/appkit/src/connectors/lakebase/index.ts index 70da0be8..e33ac077 100644 --- a/packages/appkit/src/connectors/lakebase/index.ts +++ b/packages/appkit/src/connectors/lakebase/index.ts @@ -1,15 +1,42 @@ -export { getWorkspaceClient } from "./config"; -export { generateDatabaseCredential } from "./credentials"; -export { createLakebasePool } from "./pool"; +import { + createLakebasePool as createLakebasePoolBase, + type LakebasePoolConfig, +} from "@databricks/lakebase"; +import type pg from "pg"; +import { createLogger } from "@/logging/logger"; + +/** + * Create a Lakebase pool with appkit's logger integration. + * Telemetry automatically uses appkit's OpenTelemetry configuration via global registry. 
+ *
+ * @param config - Lakebase pool configuration
+ * @returns PostgreSQL pool with appkit integration
+ */
+export function createLakebasePool(
+  config?: Partial<LakebasePoolConfig>,
+): pg.Pool {
+  const logger = createLogger("connectors:lakebase");
+
+  return createLakebasePoolBase({
+    ...config,
+    logger,
+  });
+}
+
+// Re-export everything else from lakebase
 export {
+  createTokenRefreshCallback,
+  type DatabaseCredential,
+  type DriverTelemetry,
+  type GenerateDatabaseCredentialRequest,
+  generateDatabaseCredential,
   getLakebaseOrmConfig,
   getLakebasePgConfig,
-} from "./pool-config";
-export type {
-  DatabaseCredential,
-  GenerateDatabaseCredentialRequest,
-  LakebasePoolConfig,
-  RequestedClaims,
+  getWorkspaceClient,
+  type LakebasePoolConfig,
+  type Logger,
+  type RequestedClaims,
   RequestedClaimsPermissionSet,
-  RequestedResource,
-} from "./types";
+  type RequestedResource,
+  type TokenRefreshDeps,
+} from "@databricks/lakebase";
diff --git a/packages/appkit/src/index.ts b/packages/appkit/src/index.ts
index b5ccd373..8ba528ef 100644
--- a/packages/appkit/src/index.ts
+++ b/packages/appkit/src/index.ts
@@ -19,7 +19,6 @@ export type {
   GenerateDatabaseCredentialRequest,
   LakebasePoolConfig,
   RequestedClaims,
-  RequestedClaimsPermissionSet,
   RequestedResource,
 } from "./connectors/lakebase";
 // Lakebase Autoscaling connector
@@ -29,6 +28,7 @@ export {
   getLakebaseOrmConfig,
   getLakebasePgConfig,
   getWorkspaceClient,
+  RequestedClaimsPermissionSet,
 } from "./connectors/lakebase";
 export { getExecutionContext } from "./context";
 export { createApp } from "./core";
diff --git a/packages/appkit/tsconfig.json b/packages/appkit/tsconfig.json
index 8d1cbf59..5265a688 100644
--- a/packages/appkit/tsconfig.json
+++ b/packages/appkit/tsconfig.json
@@ -6,7 +6,8 @@
     "paths": {
       "@/*": ["src/*"],
       "@tools/*": ["../../tools/*"],
-      "shared": ["../../packages/shared/src"]
+      "shared": ["../../packages/shared/src"],
+      "@databricks/lakebase": ["../../packages/lakebase/src"]
     }
   },
   "include": ["src/**/*"],
diff --git a/packages/lakebase/.release-it.json b/packages/lakebase/.release-it.json
new file mode 100644
index 00000000..dc8e0b2c
--- /dev/null
+++ b/packages/lakebase/.release-it.json
@@ -0,0 +1,35 @@
+{
+  "$schema": "https://unpkg.com/release-it@19/schema/release-it.json",
+  "git": {
+    "commitMessage": "chore(lakebase): release v${version} [skip ci]",
+    "tagName": "lakebase-v${version}",
+    "tagAnnotation": "Release @databricks/lakebase v${version}",
+    "requireBranch": "main",
+    "requireCleanWorkingDir": true,
+    "push": true,
+    "pushArgs": ["--follow-tags"]
+  },
+  "github": {
+    "release": true,
+    "releaseName": "@databricks/lakebase v${version}",
+    "autoGenerate": false,
+    "draft": false,
+    "preRelease": false,
+    "tokenRef": "GITHUB_TOKEN"
+  },
+  "npm": false,
+  "hooks": {
+    "before:release": "pnpm build && pnpm --filter=@databricks/lakebase dist",
+    "after:release": "npm publish packages/lakebase/tmp --access public --provenance"
+  },
+  "plugins": {
+    "@release-it/conventional-changelog": {
+      "preset": {
+        "name": "conventionalcommits",
+        "bumpStrict": true
+      },
+      "infile": "CHANGELOG.md",
+      "header": "# Changelog\n\nAll notable changes to @databricks/lakebase will be documented in this file."
+    }
+  }
+}
diff --git a/packages/lakebase/README.md b/packages/lakebase/README.md
new file mode 100644
index 00000000..e753739a
--- /dev/null
+++ b/packages/lakebase/README.md
@@ -0,0 +1,222 @@
+# @databricks/lakebase
+
+PostgreSQL driver for Databricks Lakebase Autoscaling with automatic OAuth token refresh.
+
+## Overview
+
+`@databricks/lakebase` provides a drop-in replacement for the standard `pg` connection pool that automatically handles OAuth authentication for Databricks Lakebase Autoscaling (OLTP) databases.
+
+Key features:
+
+- Returns a standard `pg.Pool` - works with any PostgreSQL library or ORM
+- Automatically refreshes OAuth tokens (1-hour lifetime, with 2-minute buffer)
+- Caches tokens to minimize API calls
+- Zero configuration with environment variables
+- Optional OpenTelemetry instrumentation
+
+**NOTE:** This package is NOT compatible with Databricks Lakebase Provisioned instances.
+
+## Installation
+
+```bash
+npm install @databricks/lakebase
+```
+
+## Quick Start
+
+### Using Environment Variables
+
+Set the following environment variables:
+
+```bash
+export PGHOST=your-lakebase-host.databricks.com
+export PGDATABASE=your_database_name
+export LAKEBASE_ENDPOINT=projects/{project-id}/branches/{branch-id}/endpoints/{endpoint-id}
+export PGUSER=your-service-principal-id
+export PGSSLMODE=require
+```
+
+Then use the driver:
+
+```typescript
+import { createLakebasePool } from "@databricks/lakebase";
+
+const pool = createLakebasePool();
+const result = await pool.query("SELECT * FROM users");
+console.log(result.rows);
+```
+
+### With Explicit Configuration
+
+```typescript
+import { createLakebasePool } from "@databricks/lakebase";
+
+const pool = createLakebasePool({
+  host: "your-lakebase-host.databricks.com",
+  database: "your_database_name",
+  endpoint:
+    "projects/{project-id}/branches/{branch-id}/endpoints/{endpoint-id}",
+  user: "service-principal-id", // Optional, defaults to DATABRICKS_CLIENT_ID
+  max: 10, // Connection pool size
+});
+```
+
+## Authentication
+
+The driver supports Databricks authentication via:
+
+1. **Default auth chain** (`.databrickscfg`, environment variables)
+2. **Service principal** (`DATABRICKS_CLIENT_ID` + `DATABRICKS_CLIENT_SECRET`)
+3. **OAuth tokens** (via Databricks SDK)
+
+See [Databricks authentication docs](https://docs.databricks.com/en/dev-tools/auth/index.html) for configuration.
+
+## Configuration
+
+| Option | Environment Variable | Description | Default |
+| ------------------------- | ---------------------------------- | ------------------------------------ | -------------------- |
+| `host` | `PGHOST` | Lakebase host | _Required_ |
+| `database` | `PGDATABASE` | Database name | _Required_ |
+| `endpoint` | `LAKEBASE_ENDPOINT` | Endpoint resource path | _Required_ |
+| `user` | `PGUSER` or `DATABRICKS_CLIENT_ID` | Username or service principal ID | Auto-detected |
+| `port` | `PGPORT` | Port number | `5432` |
+| `sslMode` | `PGSSLMODE` | SSL mode | `require` |
+| `max` | - | Max pool connections | `10` |
+| `idleTimeoutMillis` | - | Idle connection timeout | `30000` |
+| `connectionTimeoutMillis` | - | Connection timeout | `10000` |
+| `logger` | - | Optional logger instance | `undefined` (silent) |
+
+## Logging
+
+By default, the driver operates silently (no logging). You can inject a custom logger for observability:
+
+```typescript
+const logger = {
+  debug: (msg: string, ...args: unknown[]) => console.debug(msg, ...args),
+  info: (msg: string, ...args: unknown[]) => console.log(msg, ...args),
+  warn: (msg: string, ...args: unknown[]) => console.warn(msg, ...args),
+  error: (msg: string, ...args: unknown[]) => console.error(msg, ...args),
+};
+
+const pool = createLakebasePool({ logger });
+```
+
+When used with AppKit, logging is automatically configured - see the [AppKit Integration](#appkit-integration) section.
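+
+## Fine-Grained Credentials (Unity Catalog)
+
+For tokens scoped to specific Unity Catalog tables, you can generate a credential yourself and supply it as a static password. The sketch below is a minimal example built from the driver's exported `getWorkspaceClient()` and `generateDatabaseCredential()` helpers; it assumes the `(workspaceClient, request)` argument order, that `PGHOST`/`PGDATABASE` are set, and uses placeholder endpoint and table values:
+
+```typescript
+import {
+  createLakebasePool,
+  generateDatabaseCredential,
+  getWorkspaceClient,
+  RequestedClaimsPermissionSet,
+} from "@databricks/lakebase";
+
+// Resolve a WorkspaceClient via the SDK default auth chain
+const workspaceClient = await getWorkspaceClient({});
+
+// Request a token limited to read-only access on a single UC table
+const credential = await generateDatabaseCredential(workspaceClient, {
+  endpoint: process.env.LAKEBASE_ENDPOINT!, // copy from the Lakebase UI
+  claims: [
+    {
+      permission_set: RequestedClaimsPermissionSet.READ_ONLY,
+      resources: [{ table_name: "catalog.schema.users" }], // placeholder table
+    },
+  ],
+});
+
+// A static password bypasses the automatic token refresh
+const pool = createLakebasePool({ password: credential.token });
+```
+
+Because the password is static, create a new pool (or supply a password callback) before the credential's `expire_time` (one hour after generation) passes.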
+
+## ORM Examples
+
+### Drizzle ORM
+
+```typescript
+import { drizzle } from "drizzle-orm/node-postgres";
+import { createLakebasePool } from "@databricks/lakebase";
+
+const pool = createLakebasePool();
+const db = drizzle(pool);
+
+const users = await db.select().from(usersTable);
+```
+
+### Prisma
+
+```typescript
+import { PrismaPg } from "@prisma/adapter-pg";
+import { PrismaClient } from "@prisma/client";
+import { createLakebasePool } from "@databricks/lakebase";
+
+const pool = createLakebasePool();
+const adapter = new PrismaPg(pool);
+const prisma = new PrismaClient({ adapter });
+
+const users = await prisma.user.findMany();
+```
+
+### TypeORM
+
+```typescript
+import { DataSource } from "typeorm";
+import { getLakebaseOrmConfig } from "@databricks/lakebase";
+
+const dataSource = new DataSource({
+  type: "postgres",
+  synchronize: true,
+  ...getLakebaseOrmConfig(),
+  entities: [
+    // Your entity classes
+  ],
+});
+
+await dataSource.initialize();
+```
+
+### Sequelize
+
+```typescript
+import { Sequelize } from "sequelize";
+import { getLakebaseOrmConfig } from "@databricks/lakebase";
+
+const sequelize = new Sequelize({
+  dialect: "postgres",
+  ...getLakebaseOrmConfig(),
+});
+```
+
+## OpenTelemetry Integration
+
+The driver automatically uses OpenTelemetry's global registry when available. If your application initializes OpenTelemetry providers, the driver will automatically instrument queries and metrics with no additional configuration needed.
+
+### Setup
+
+Install OpenTelemetry in your application:
+
+```bash
+npm install @opentelemetry/api @opentelemetry/sdk-node
+```
+
+Initialize OpenTelemetry in your application:
+
+```typescript
+import { NodeSDK } from "@opentelemetry/sdk-node";
+
+const sdk = new NodeSDK({
+  // Your OTEL configuration
+});
+
+sdk.start(); // Registers global providers
+
+// Now create your pool - it automatically uses the global providers
+import { createLakebasePool } from "@databricks/lakebase";
+const pool = createLakebasePool();
+```
+
+The driver calls `trace.getTracer('@databricks/lakebase')` and `metrics.getMeter('@databricks/lakebase')` internally. If no global providers are registered, these operations are no-ops.
+
+### Metrics Exported
+
+- `lakebase.token.refresh.duration` - OAuth token refresh duration (histogram, ms)
+- `lakebase.query.duration` - Query execution duration (histogram, ms)
+- `lakebase.pool.connections.total` - Total connections in pool (gauge)
+- `lakebase.pool.connections.idle` - Idle connections (gauge)
+- `lakebase.pool.connections.waiting` - Clients waiting for connection (gauge)
+- `lakebase.pool.errors` - Pool errors by error code (counter)
+
+## AppKit Integration
+
+This driver is also available as part of [@databricks/appkit](https://www.npmjs.com/package/@databricks/appkit):
+
+```typescript
+import { createLakebasePool } from "@databricks/appkit";
+
+const pool = createLakebasePool();
+```
+
+**Differences between standalone and AppKit:**
+
+- **Standalone** (`@databricks/lakebase`): Silent by default - no logger configured.
+- **AppKit** (`@databricks/appkit`): Automatically injects AppKit's logger with scope `appkit:connectors:lakebase`.
+
+## Learn more about Lakebase Autoscaling
+
+For Lakebase Autoscaling documentation, see [docs.databricks.com/aws/en/oltp/projects](https://docs.databricks.com/aws/en/oltp/projects/).
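+
+## Closing the Pool
+
+The pool returned by `createLakebasePool()` is a standard `pg.Pool`, so the usual shutdown semantics apply. A minimal sketch, assuming a typical Node.js signal handler:
+
+```typescript
+process.on("SIGTERM", async () => {
+  // Waits for checked-out clients to be released, then closes all connections
+  await pool.end();
+  process.exit(0);
+});
+```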
diff --git a/packages/lakebase/package.json b/packages/lakebase/package.json new file mode 100644 index 00000000..eb5676ef --- /dev/null +++ b/packages/lakebase/package.json @@ -0,0 +1,71 @@ +{ + "name": "@databricks/lakebase", + "type": "module", + "version": "0.1.0", + "description": "PostgreSQL driver for Databricks Lakebase with automatic OAuth token refresh", + "main": "./dist/index.js", + "types": "./dist/index.d.ts", + "packageManager": "pnpm@10.21.0", + "repository": { + "type": "git", + "url": "git+https://github.com/databricks/appkit.git", + "directory": "packages/lakebase" + }, + "keywords": [ + "databricks", + "lakebase", + "postgres", + "postgresql", + "driver", + "oauth", + "oltp" + ], + "license": "Apache-2.0", + "files": [ + "dist", + "README.md", + "LICENSE" + ], + "exports": { + ".": { + "types": "./dist/index.d.ts", + "development": "./src/index.ts", + "default": "./dist/index.js" + }, + "./package.json": "./package.json" + }, + "scripts": { + "build:package": "tsdown --config tsdown.config.ts", + "build:watch": "tsdown --config tsdown.config.ts --watch", + "clean:full": "rm -rf dist node_modules tmp", + "clean": "rm -rf dist tmp", + "dist": "tsx ../../tools/dist.ts", + "tarball": "rm -rf tmp && pnpm dist && npm pack ./tmp --pack-destination ./tmp", + "typecheck": "tsc --noEmit", + "release": "release-it", + "release:dry": "release-it --dry-run", + "release:ci": "release-it --ci" + }, + "dependencies": { + "@databricks/sdk-experimental": "^0.16.0", + "pg": "^8.18.0" + }, + "peerDependencies": { + "@opentelemetry/api": "^1.0.0" + }, + "peerDependenciesMeta": { + "@opentelemetry/api": { + "optional": true + } + }, + "devDependencies": { + "@types/pg": "^8.16.0" + }, + "module": "./dist/index.js", + "publishConfig": { + "exports": { + ".": "./dist/index.js", + "./package.json": "./package.json" + } + } +} diff --git a/packages/appkit/src/connectors/tests/lakebase-auth.test.ts b/packages/lakebase/src/__tests__/credentials.test.ts similarity index 98% rename from packages/appkit/src/connectors/tests/lakebase-auth.test.ts rename to packages/lakebase/src/__tests__/credentials.test.ts index 9f2c099a..837163b1 100644 --- a/packages/appkit/src/connectors/tests/lakebase-auth.test.ts +++ b/packages/lakebase/src/__tests__/credentials.test.ts @@ -1,11 +1,11 @@ import type { WorkspaceClient } from "@databricks/sdk-experimental"; import { ApiClient, Config } from "@databricks/sdk-experimental"; import { beforeEach, describe, expect, it, vi } from "vitest"; -import { generateDatabaseCredential } from "../lakebase/credentials"; +import { generateDatabaseCredential } from "../credentials"; import { type DatabaseCredential, RequestedClaimsPermissionSet, -} from "../lakebase/types"; +} from "../types"; // Mock the @databricks/sdk-experimental module vi.mock("@databricks/sdk-experimental", () => { diff --git a/packages/appkit/src/connectors/tests/lakebase-pool.test.ts b/packages/lakebase/src/__tests__/pool.test.ts similarity index 80% rename from packages/appkit/src/connectors/tests/lakebase-pool.test.ts rename to packages/lakebase/src/__tests__/pool.test.ts index 1264ed44..b6f65246 100644 --- a/packages/appkit/src/connectors/tests/lakebase-pool.test.ts +++ b/packages/lakebase/src/__tests__/pool.test.ts @@ -1,5 +1,5 @@ import { afterEach, beforeEach, describe, expect, test, vi } from "vitest"; -import { createLakebasePool } from "../lakebase"; +import { createLakebasePool } from "../pool"; // ── Mocks ──────────────────────────────────────────────────────────── @@ -33,9 +33,8 @@ vi.mock("pg", 
() => { }); // Mock generateDatabaseCredential -vi.mock("../lakebase/credentials", async (importOriginal) => { - const actual = - await importOriginal(); +vi.mock("../credentials", async (importOriginal) => { + const actual = await importOriginal(); return { ...actual, generateDatabaseCredential: vi.fn(), @@ -50,47 +49,39 @@ const mockCounterAdd = vi.fn(); const mockHistogramRecord = vi.fn(); const mockAddCallback = vi.fn(); -vi.mock("@/telemetry", () => ({ +const mockTracer = { + startActiveSpan: vi.fn( + (_name: string, _opts: unknown, fn: (span: unknown) => T): T => { + const span = { + setAttribute: mockSpanSetAttribute, + setStatus: mockSpanSetStatus, + end: mockSpanEnd, + recordException: vi.fn(), + }; + return fn(span); + }, + ), +}; + +const mockMeter = { + createCounter: vi.fn(() => ({ add: mockCounterAdd })), + createHistogram: vi.fn(() => ({ record: mockHistogramRecord })), + createObservableGauge: vi.fn(() => ({ + addCallback: mockAddCallback, + })), +}; + +vi.mock("../telemetry", () => ({ SpanStatusCode: { OK: 1, ERROR: 2 }, - TelemetryManager: { - getProvider: vi.fn(() => ({ - getTracer: vi.fn(() => ({ - startActiveSpan: vi.fn( - (_name: string, _opts: unknown, fn: (span: unknown) => unknown) => { - const span = { - setAttribute: mockSpanSetAttribute, - setStatus: mockSpanSetStatus, - end: mockSpanEnd, - recordException: vi.fn(), - }; - return fn(span); - }, - ), - })), - getMeter: vi.fn(() => ({ - createCounter: vi.fn(() => ({ add: mockCounterAdd })), - createHistogram: vi.fn(() => ({ record: mockHistogramRecord })), - createObservableGauge: vi.fn(() => ({ - addCallback: mockAddCallback, - })), - })), - startActiveSpan: vi.fn( - async ( - _name: string, - _opts: unknown, - fn: (span: unknown) => Promise, - ) => { - const span = { - setAttribute: mockSpanSetAttribute, - setStatus: mockSpanSetStatus, - end: mockSpanEnd, - recordException: vi.fn(), - }; - return fn(span); - }, - ), - })), - }, + SpanKind: { CLIENT: 3 }, + initTelemetry: vi.fn(() => ({ + tracer: mockTracer, + meter: mockMeter, + tokenRefreshDuration: { record: mockHistogramRecord }, + queryDuration: { record: mockHistogramRecord }, + poolErrors: { add: mockCounterAdd }, + })), + attachPoolMetrics: vi.fn(), })); // ── Test suite ─────────────────────────────────────────────────────── @@ -126,7 +117,7 @@ describe("createLakebasePool", () => { process.env.PGUSER = "test-user@example.com"; // Setup mock for generateDatabaseCredential - const utils = await import("../lakebase/credentials"); + const utils = await import("../credentials"); mockGenerateCredential = utils.generateDatabaseCredential as any; mockGenerateCredential.mockResolvedValue({ token: "test-oauth-token-12345", @@ -573,32 +564,14 @@ describe("createLakebasePool", () => { }); describe("telemetry", () => { - test("should initialize telemetry provider", async () => { - const { TelemetryManager } = await import("@/telemetry"); + test("should initialize telemetry", async () => { + const { initTelemetry } = await import("../telemetry"); createLakebasePool({ workspaceClient: {} as any, }); - expect(TelemetryManager.getProvider).toHaveBeenCalledWith( - "connectors:lakebase", - undefined, - ); - }); - - test("should pass telemetry config to provider", async () => { - const { TelemetryManager } = await import("@/telemetry"); - const telemetryConfig = { traces: true, metrics: false }; - - createLakebasePool({ - workspaceClient: {} as any, - telemetry: telemetryConfig, - }); - - expect(TelemetryManager.getProvider).toHaveBeenCalledWith( - "connectors:lakebase", - 
telemetryConfig, - ); + expect(initTelemetry).toHaveBeenCalled(); }); test("should record token refresh duration on successful fetch", async () => { @@ -634,93 +607,77 @@ describe("createLakebasePool", () => { expect(mockSpanEnd).toHaveBeenCalled(); }); - test("should register observable gauge callbacks for pool metrics", () => { - createLakebasePool({ + test("should wrap pool.query to add telemetry tracking", () => { + const pool = createLakebasePool({ workspaceClient: {} as any, }); - // Three observable gauges should have callbacks registered - // (total, idle, waiting) - expect(mockAddCallback).toHaveBeenCalledTimes(3); + // pool.query should be our wrapped function + expect(typeof pool.query).toBe("function"); + expect(pool.query.name).toBe("queryWithTelemetry"); }); + }); - test("should observe pool counts via gauge callbacks", () => { - createLakebasePool({ - workspaceClient: {} as any, - }); - - // Get the registered callbacks - const callbacks = mockAddCallback.mock.calls.map( - (call: unknown[]) => call[0], - ); - expect(callbacks).toHaveLength(3); - - // Simulate OTEL collection by invoking each callback - const observeResults: number[] = []; - const mockResult = { - observe: (value: number) => observeResults.push(value), - }; - - for (const cb of callbacks) { - (cb as (result: { observe: (v: number) => void }) => void)(mockResult); - } - - // Pool mock returns totalCount=3, idleCount=1, waitingCount=0 - expect(observeResults).toEqual([3, 1, 0]); - }); + describe("logger injection", () => { + test("should operate silently without logger", () => { + const consoleSpy = vi.spyOn(console, "log"); + const consoleDebugSpy = vi.spyOn(console, "debug"); - test("should increment pool error counter with error code on pool error event", () => { const pool = createLakebasePool({ workspaceClient: {} as any, }); - // Find the error handler registered via pool.on("error", ...) 
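
The rewritten mocks above flatten the old `TelemetryManager` indirection into plain `tracer` and `meter` objects that mirror the `@opentelemetry/api` surface. For reference, a minimal sketch of the real callback-style `startActiveSpan` contract the `mockTracer` imitates (illustrative code, not part of this patch):

```typescript
import { SpanKind, SpanStatusCode, trace } from "@opentelemetry/api";

// Illustrative sketch: startActiveSpan invokes the callback with a live
// span and returns whatever the callback returns, sync or async - which
// is why the mock above can be typed as <T>(...) => T.
const tracer = trace.getTracer("example");

function traced<T>(name: string, fn: () => T): T {
  return tracer.startActiveSpan(name, { kind: SpanKind.CLIENT }, (span) => {
    try {
      const result = fn();
      span.setStatus({ code: SpanStatusCode.OK });
      return result;
    } catch (error) {
      span.recordException(error as Error);
      span.setStatus({ code: SpanStatusCode.ERROR });
      throw error;
    } finally {
      span.end();
    }
  });
}
```
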
- const onMock = pool.on as ReturnType; - const errorHandlers = onMock.mock.calls.filter( - (call: unknown[]) => call[0] === "error", - ); - - // Single consolidated error handler (logging + metrics) - expect(errorHandlers.length).toBe(1); - - // Invoke the error handler with a PG error that has a code - const errorHandler = errorHandlers[0][1]; - const pgError = Object.assign(new Error("auth failed"), { - code: "28P01", - }); - errorHandler(pgError); + expect(pool).toBeDefined(); + expect(consoleSpy).not.toHaveBeenCalled(); + expect(consoleDebugSpy).not.toHaveBeenCalled(); - expect(mockCounterAdd).toHaveBeenCalledWith(1, { - "error.code": "28P01", - }); + consoleSpy.mockRestore(); + consoleDebugSpy.mockRestore(); }); - test("should use 'unknown' error code when error has no code", () => { + test("should use injected logger", () => { + const mockLogger = { + debug: vi.fn(), + info: vi.fn(), + warn: vi.fn(), + error: vi.fn(), + }; + const pool = createLakebasePool({ workspaceClient: {} as any, + logger: mockLogger, }); - const onMock = pool.on as ReturnType; - const errorHandlers = onMock.mock.calls.filter( - (call: unknown[]) => call[0] === "error", + expect(pool).toBeDefined(); + expect(mockLogger.debug).toHaveBeenCalledWith( + expect.stringContaining("Created Lakebase connection pool"), + expect.any(String), + expect.any(String), + expect.any(String), ); + }); - const errorHandler = errorHandlers[0][1]; - errorHandler(new Error("unknown error")); + test("should pass logger to error handlers", async () => { + const mockLogger = { + debug: vi.fn(), + info: vi.fn(), + warn: vi.fn(), + error: vi.fn(), + }; - expect(mockCounterAdd).toHaveBeenCalledWith(1, { - "error.code": "unknown", - }); - }); + const { attachPoolMetrics } = await import("../telemetry"); - test("should wrap pool.query to add metrics tracking", () => { - const pool = createLakebasePool({ + createLakebasePool({ workspaceClient: {} as any, + logger: mockLogger, }); - // pool.query should be our wrapped function - expect(typeof pool.query).toBe("function"); - expect(pool.query.name).toBe("queryWithTelemetry"); + // Verify attachPoolMetrics was called with the logger + expect(attachPoolMetrics).toHaveBeenCalledWith( + expect.anything(), + expect.anything(), + mockLogger, + ); }); }); }); diff --git a/packages/appkit/src/connectors/lakebase/config.ts b/packages/lakebase/src/config.ts similarity index 89% rename from packages/appkit/src/connectors/lakebase/config.ts rename to packages/lakebase/src/config.ts index 48ddf84e..43a44051 100644 --- a/packages/appkit/src/connectors/lakebase/config.ts +++ b/packages/lakebase/src/config.ts @@ -1,6 +1,6 @@ import { WorkspaceClient } from "@databricks/sdk-experimental"; import type pg from "pg"; -import { ConfigurationError, ValidationError } from "../../errors"; +import { ConfigurationError, ValidationError } from "./errors"; import type { LakebasePoolConfig } from "./types"; /** Default configuration values for the Lakebase connector */ @@ -105,7 +105,7 @@ function validateSslMode(value: string | undefined): SslMode | undefined { return value as SslMode; } -/** Get workspace client from config or execution context */ +/** Get workspace client from config or SDK default auth chain */ export async function getWorkspaceClient( config: Partial, ): Promise { @@ -114,15 +114,7 @@ export async function getWorkspaceClient( return config.workspaceClient; } - // Priority 2: ServiceContext (when running in AppKit plugin) - try { - const { getWorkspaceClient: getClient } = await 
import("../../context"); - return getClient(); - } catch (_error) { - // ServiceContext not available - fall through to environment variables - } - - // Priority 3: Create with SDK default auth chain + // Priority 2: Create with SDK default auth chain // Use empty config to let SDK use .databrickscfg, DATABRICKS_HOST, DATABRICKS_TOKEN, etc. // NOTE: config.host is the PostgreSQL host (PGHOST), not the Databricks workspace host return new WorkspaceClient({}); diff --git a/packages/appkit/src/connectors/lakebase/credentials.ts b/packages/lakebase/src/credentials.ts similarity index 79% rename from packages/appkit/src/connectors/lakebase/credentials.ts rename to packages/lakebase/src/credentials.ts index 3f73392f..2e3c84bd 100644 --- a/packages/appkit/src/connectors/lakebase/credentials.ts +++ b/packages/lakebase/src/credentials.ts @@ -1,13 +1,10 @@ import type { WorkspaceClient } from "@databricks/sdk-experimental"; -import { ValidationError } from "../../errors"; -import { createLogger } from "../../logging/logger"; +import { ValidationError } from "./errors"; import type { DatabaseCredential, GenerateDatabaseCredentialRequest, } from "./types"; -const logger = createLogger("connectors:lakebase:credentials"); - /** * Generate OAuth credentials for Postgres database connection using the proper Postgres API. * @@ -54,27 +51,18 @@ export async function generateDatabaseCredential( ): Promise { const apiPath = "/api/2.0/postgres/credentials"; - try { - const response = await workspaceClient.apiClient.request({ - path: apiPath, - method: "POST", - headers: new Headers({ - Accept: "application/json", - "Content-Type": "application/json", - }), - raw: false, - payload: request, - }); + const response = await workspaceClient.apiClient.request({ + path: apiPath, + method: "POST", + headers: new Headers({ + Accept: "application/json", + "Content-Type": "application/json", + }), + raw: false, + payload: request, + }); - return validateCredentialResponse(response); - } catch (error) { - logger.error("Failed to generate database credential: %O", { - error, - message: error instanceof Error ? error.message : String(error), - endpoint: request.endpoint, - }); - throw error; - } + return validateCredentialResponse(response); } /** Validate the API response has the expected shape */ diff --git a/packages/lakebase/src/errors.ts b/packages/lakebase/src/errors.ts new file mode 100644 index 00000000..86c3ecc1 --- /dev/null +++ b/packages/lakebase/src/errors.ts @@ -0,0 +1,66 @@ +/** + * Base error class for Lakebase driver errors. + */ +export abstract class LakebaseError extends Error { + abstract readonly code: string; + readonly cause?: Error; + readonly context?: Record; + + constructor( + message: string, + options?: { cause?: Error; context?: Record }, + ) { + super(message); + this.name = this.constructor.name; + this.cause = options?.cause; + this.context = options?.context; + + if (Error.captureStackTrace) { + Error.captureStackTrace(this, this.constructor); + } + } +} + +/** + * Error thrown when configuration is missing or invalid. + */ +export class ConfigurationError extends LakebaseError { + readonly code = "CONFIGURATION_ERROR"; + + /** + * Create a configuration error for missing environment variable + */ + static missingEnvVar(varName: string): ConfigurationError { + return new ConfigurationError( + `${varName} environment variable is required`, + { context: { envVar: varName } }, + ); + } +} + +/** + * Error thrown when input validation fails. 
+ */ +export class ValidationError extends LakebaseError { + readonly code = "VALIDATION_ERROR"; + + /** + * Create a validation error for an invalid field value + */ + static invalidValue( + fieldName: string, + value: unknown, + expected?: string, + ): ValidationError { + const msg = expected + ? `Invalid value for ${fieldName}: expected ${expected}` + : `Invalid value for ${fieldName}`; + return new ValidationError(msg, { + context: { + field: fieldName, + valueType: value === null ? "null" : typeof value, + expected, + }, + }); + } +} diff --git a/packages/lakebase/src/index.ts b/packages/lakebase/src/index.ts new file mode 100644 index 00000000..7ea59006 --- /dev/null +++ b/packages/lakebase/src/index.ts @@ -0,0 +1,19 @@ +export { getWorkspaceClient } from "./config"; +export { generateDatabaseCredential } from "./credentials"; +export { createLakebasePool } from "./pool"; +export { + getLakebaseOrmConfig, + getLakebasePgConfig, +} from "./pool-config"; +export type { DriverTelemetry } from "./telemetry"; +export type { TokenRefreshDeps } from "./token-refresh"; +export { createTokenRefreshCallback } from "./token-refresh"; +export type { + DatabaseCredential, + GenerateDatabaseCredentialRequest, + LakebasePoolConfig, + Logger, + RequestedClaims, + RequestedResource, +} from "./types"; +export { RequestedClaimsPermissionSet } from "./types"; diff --git a/packages/appkit/src/connectors/lakebase/pool-config.ts b/packages/lakebase/src/pool-config.ts similarity index 95% rename from packages/appkit/src/connectors/lakebase/pool-config.ts rename to packages/lakebase/src/pool-config.ts index 6b9f473f..bba4d663 100644 --- a/packages/appkit/src/connectors/lakebase/pool-config.ts +++ b/packages/lakebase/src/pool-config.ts @@ -2,7 +2,7 @@ import type pg from "pg"; import { getUsernameSync, parsePoolConfig } from "./config"; import { type DriverTelemetry, initTelemetry } from "./telemetry"; import { createTokenRefreshCallback } from "./token-refresh"; -import type { LakebasePoolConfig } from "./types"; +import type { LakebasePoolConfig, Logger } from "./types"; /** * Map an SSL mode string to the corresponding `pg` SSL configuration. @@ -36,11 +36,13 @@ function mapSslConfig( * * @param config - Optional configuration (reads from environment if not provided) * @param telemetry - Optional pre-initialized telemetry (created internally if not provided) + * @param logger - Optional logger (silent if not provided) * @returns PostgreSQL pool configuration with OAuth token refresh */ export function getLakebasePgConfig( config?: Partial, telemetry?: DriverTelemetry, + logger?: Logger, ): pg.PoolConfig { const userConfig = config ?? {}; const poolConfig = parsePoolConfig(userConfig); @@ -56,7 +58,8 @@ export function getLakebasePgConfig( passwordConfig = createTokenRefreshCallback({ userConfig, endpoint: poolConfig.endpoint, - telemetry: telemetry ?? initTelemetry(userConfig), + telemetry: telemetry ?? 
initTelemetry(), + logger, }); } diff --git a/packages/appkit/src/connectors/lakebase/pool.ts b/packages/lakebase/src/pool.ts similarity index 82% rename from packages/appkit/src/connectors/lakebase/pool.ts rename to packages/lakebase/src/pool.ts index 126e0f09..a2d1016a 100644 --- a/packages/appkit/src/connectors/lakebase/pool.ts +++ b/packages/lakebase/src/pool.ts @@ -1,12 +1,13 @@ import pg from "pg"; -import { SpanKind, SpanStatusCode } from "@/telemetry"; -import { createLogger } from "../../logging/logger"; import { getLakebasePgConfig } from "./pool-config"; -import { attachPoolMetrics, initTelemetry } from "./telemetry"; +import { + attachPoolMetrics, + initTelemetry, + SpanKind, + SpanStatusCode, +} from "./telemetry"; import type { LakebasePoolConfig } from "./types"; -const logger = createLogger("connectors:lakebase:pool"); - /** * Create a PostgreSQL connection pool with automatic OAuth token refresh for Lakebase. * @@ -57,27 +58,21 @@ export function createLakebasePool( config?: Partial, ): pg.Pool { const userConfig = config ?? {}; + const logger = userConfig.logger; - // Initialize telemetry once and thread it through to avoid duplicate instruments - const telemetry = initTelemetry(userConfig); + const telemetry = initTelemetry(); - // Get complete pool config (connection + pool settings) - const poolConfig = getLakebasePgConfig(userConfig, telemetry); + const poolConfig = getLakebasePgConfig(userConfig, telemetry, logger); - // Create standard pg.Pool with the config const pool = new pg.Pool(poolConfig); - // Attach pool-level telemetry metrics (gauges, error counter, and error logging) - attachPoolMetrics(pool, telemetry); + attachPoolMetrics(pool, telemetry, logger); // Wrap pool.query to track query duration and create trace spans. // pg.Pool.query has 15+ overloads that are difficult to type-preserve, // so we use a loosely-typed wrapper and cast back. - // We use the tracer directly (not provider.startActiveSpan) because the - // provider wrapper is async-only, while pool.query supports both promise - // and callback paths. 
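
Threading `logger` through `getLakebasePgConfig` means code that builds its own `pg.Pool`, or hands a `PoolConfig` to an ORM, gets the same opt-in logging as `createLakebasePool`. A minimal sketch; note that the global `console` object happens to satisfy the driver's four-method `Logger` interface, which is convenient for local debugging:

```typescript
import pg from "pg";
import { getLakebasePgConfig } from "@databricks/lakebase";

// Sketch: construct the pool manually instead of via createLakebasePool().
// Passing `undefined` for telemetry lets the driver create its own
// instruments from the global OpenTelemetry registry.
const config = getLakebasePgConfig({ max: 5 }, undefined, console);
const pool = new pg.Pool(config);

const { rows } = await pool.query("SELECT 1 AS ok");
console.log(rows);
```
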
const origQuery = pool.query.bind(pool); - const tracer = telemetry.provider.getTracer(); + const tracer = telemetry.tracer; pool.query = function queryWithTelemetry( ...args: unknown[] ): ReturnType { @@ -135,7 +130,7 @@ export function createLakebasePool( ) as ReturnType; } as typeof pool.query; - logger.debug( + logger?.debug( "Created Lakebase connection pool for %s@%s/%s", poolConfig.user, poolConfig.host, diff --git a/packages/appkit/src/connectors/lakebase/telemetry.ts b/packages/lakebase/src/telemetry.ts similarity index 63% rename from packages/appkit/src/connectors/lakebase/telemetry.ts rename to packages/lakebase/src/telemetry.ts index c6c685a9..cced3bf0 100644 --- a/packages/appkit/src/connectors/lakebase/telemetry.ts +++ b/packages/lakebase/src/telemetry.ts @@ -1,35 +1,39 @@ -import type pg from "pg"; +import type { Counter, Histogram, Meter } from "@opentelemetry/api"; import { - type Counter, - type Histogram, - TelemetryManager, - type TelemetryProvider, -} from "@/telemetry"; -import { createLogger } from "../../logging/logger"; -import type { LakebasePoolConfig } from "./types"; + metrics, + SpanKind, + SpanStatusCode, + type Tracer, + trace, +} from "@opentelemetry/api"; +import type pg from "pg"; +import type { Logger } from "./types"; -const logger = createLogger("connectors:lakebase:pool"); +// Re-export OpenTelemetry types for backward compatibility +export { SpanKind, SpanStatusCode }; +export type { Tracer }; -/** Telemetry instruments shared across the driver */ +/** Telemetry instruments for the driver */ export interface DriverTelemetry { - provider: TelemetryProvider; + tracer: Tracer; + meter: Meter; tokenRefreshDuration: Histogram; queryDuration: Histogram; poolErrors: Counter; } -/** Create telemetry provider and metric instruments */ -export function initTelemetry( - config: Partial, -): DriverTelemetry { - const provider = TelemetryManager.getProvider( - "connectors:lakebase", - config.telemetry, - ); - const meter = provider.getMeter(); +/** + * Initialize telemetry using OpenTelemetry's global registry. + * If OTel providers are not initialized, operations will be no-ops automatically. + */ +export function initTelemetry(): DriverTelemetry { + // Use global OTel registry - no injection needed! + const tracer = trace.getTracer("@databricks/lakebase"); + const meter = metrics.getMeter("@databricks/lakebase"); return { - provider, + tracer, + meter, tokenRefreshDuration: meter.createHistogram( "lakebase.token.refresh.duration", { @@ -51,14 +55,18 @@ export function initTelemetry( /** * Attach pool-level metrics collection, error counting, and error logging. * - * Uses observable gauges (pull model) for pool connection stats -- the OTEL SDK - * reads pool counts at collection time, requiring no timers or cleanup. + * Uses observable gauges (pull model) for pool connection stats. 
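
The switch to `trace.getTracer()` / `metrics.getMeter()` is what removes the need for telemetry injection: against an empty global registry every span and metric call is a no-op, and once an application registers providers at startup, the driver's instruments become live without any driver-side changes. A sketch assuming `@opentelemetry/sdk-node` and `@opentelemetry/sdk-metrics` are installed (neither is part of this patch):

```typescript
// Application bootstrap, not driver code.
import { NodeSDK } from "@opentelemetry/sdk-node";
import {
  ConsoleMetricExporter,
  PeriodicExportingMetricReader,
} from "@opentelemetry/sdk-metrics";

const sdk = new NodeSDK({
  metricReader: new PeriodicExportingMetricReader({
    exporter: new ConsoleMetricExporter(),
  }),
});
sdk.start();

// From here on, trace.getTracer("@databricks/lakebase") and
// metrics.getMeter("@databricks/lakebase") inside the driver resolve to
// live instruments; before sdk.start() they were silent no-ops.
```
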
+ * + * @param pool - PostgreSQL connection pool + * @param telemetry - Telemetry instruments + * @param logger - Optional logger for error logging (silent if not provided) */ export function attachPoolMetrics( pool: pg.Pool, telemetry: DriverTelemetry, + logger?: Logger, ): void { - const meter = telemetry.provider.getMeter(); + const meter = telemetry.meter; const poolTotal = meter.createObservableGauge( "lakebase.pool.connections.total", @@ -77,9 +85,8 @@ export function attachPoolMetrics( poolIdle.addCallback((result) => result.observe(pool.idleCount)); poolWaiting.addCallback((result) => result.observe(pool.waitingCount)); - // Single error handler for both logging and metrics pool.on("error", (error: Error & { code?: string }) => { - logger.error( + logger?.error( "Connection pool error: %s (code: %s)", error.message, error.code, diff --git a/packages/appkit/src/connectors/lakebase/token-refresh.ts b/packages/lakebase/src/token-refresh.ts similarity index 87% rename from packages/appkit/src/connectors/lakebase/token-refresh.ts rename to packages/lakebase/src/token-refresh.ts index f346a3c2..d22bc7d4 100644 --- a/packages/appkit/src/connectors/lakebase/token-refresh.ts +++ b/packages/lakebase/src/token-refresh.ts @@ -1,12 +1,8 @@ import type { WorkspaceClient } from "@databricks/sdk-experimental"; -import { SpanStatusCode } from "@/telemetry"; -import { createLogger } from "../../logging/logger"; import { getWorkspaceClient } from "./config"; import { generateDatabaseCredential } from "./credentials"; -import type { DriverTelemetry } from "./telemetry"; -import type { LakebasePoolConfig } from "./types"; - -const logger = createLogger("connectors:lakebase:token"); +import { type DriverTelemetry, SpanStatusCode } from "./telemetry"; +import type { LakebasePoolConfig, Logger } from "./types"; // 2-minute buffer before token expiration to prevent race conditions // Lakebase tokens expire after 1 hour, so we refresh when ~58 minutes remain @@ -16,6 +12,7 @@ export interface TokenRefreshDeps { userConfig: Partial; endpoint: string; telemetry: DriverTelemetry; + logger?: Logger; } /** Fetch a fresh OAuth token from Databricks */ @@ -54,7 +51,7 @@ export function createTokenRefreshCallback( try { workspaceClient = await getWorkspaceClient(deps.userConfig); } catch (error) { - logger.error("Failed to initialize workspace client: %O", error); + deps.logger?.error("Failed to initialize workspace client: %O", error); throw error; } } @@ -64,7 +61,7 @@ export function createTokenRefreshCallback( if (hasValidToken) { // Return cached token if still valid (with buffer) const expiresIn = Math.round((tokenExpiresAt - now) / 1000 / 60); - logger.debug( + deps.logger?.debug( "Using cached OAuth token (expires in %d minutes at %s)", expiresIn, new Date(tokenExpiresAt).toISOString(), @@ -79,7 +76,7 @@ export function createTokenRefreshCallback( refreshPromise = (async () => { const startTime = Date.now(); try { - const result = await deps.telemetry.provider.startActiveSpan( + const result = await deps.telemetry.tracer.startActiveSpan( "lakebase.token.refresh", { attributes: { "lakebase.endpoint": deps.endpoint }, @@ -100,7 +97,7 @@ export function createTokenRefreshCallback( tokenExpiresAt = result.expiresAt; return cachedToken; } catch (error) { - logger.error("Failed to fetch OAuth token: %O", { + deps.logger?.error("Failed to fetch OAuth token: %O", { error, message: error instanceof Error ? 
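
The numbers in the buffer comment above are worth making concrete: Lakebase tokens live for one hour, and the 2-minute buffer means a cached token serves new connections for roughly 58 minutes; concurrent connection attempts during a refresh share one in-flight `refreshPromise` rather than each hitting the credentials API. A condensed sketch of the cache check (variable names follow the surrounding code; the buffer constant's exact name is assumed):

```typescript
// Condensed sketch of the validity check inside the refresh callback.
const TOKEN_EXPIRATION_BUFFER_MS = 2 * 60 * 1000; // assumed name

let cachedToken: string | undefined;
let tokenExpiresAt = 0; // epoch ms, set from the credential's expiresAt

function hasValidToken(now: number): boolean {
  // A token minted at T expires at T + 60 min; treat it as stale from
  // T + 58 min so no connection is handed a token about to lapse.
  return (
    cachedToken !== undefined &&
    now < tokenExpiresAt - TOKEN_EXPIRATION_BUFFER_MS
  );
}
```
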
error.message : String(error), endpoint: deps.endpoint, diff --git a/packages/appkit/src/connectors/lakebase/types.ts b/packages/lakebase/src/types.ts similarity index 82% rename from packages/appkit/src/connectors/lakebase/types.ts rename to packages/lakebase/src/types.ts index 2c46d87f..c3611c54 100644 --- a/packages/appkit/src/connectors/lakebase/types.ts +++ b/packages/lakebase/src/types.ts @@ -1,6 +1,26 @@ import type { WorkspaceClient } from "@databricks/sdk-experimental"; import type { PoolConfig } from "pg"; -import type { TelemetryOptions } from "shared"; + +/** + * Optional logger interface for the Lakebase driver. + * When not provided, the driver operates silently (no logging). + */ +export interface Logger { + debug(message: string, ...args: unknown[]): void; + info(message: string, ...args: unknown[]): void; + warn(message: string, ...args: unknown[]): void; + error(message: string, ...args: unknown[]): void; +} + +/** + * Telemetry configuration options + */ +export type TelemetryOptions = + | boolean + | { + traces?: boolean; + metrics?: boolean; + }; /** * Configuration for creating a Lakebase connection pool @@ -50,11 +70,25 @@ export interface LakebasePoolConfig extends PoolConfig { /** * Telemetry configuration * - * - `true` or omitted: enable all telemetry (traces, metrics, logs) -- no-op when OTEL is not configured + * - `true` or omitted: enable all telemetry (traces, metrics) -- no-op when OTEL is not configured * - `false`: disable all telemetry - * - `{ traces?, metrics?, logs? }`: fine-grained control + * - `{ traces?, metrics? }`: fine-grained control */ telemetry?: TelemetryOptions; + + /** + * Optional logger instance for the driver. + * When not provided, the driver operates silently (no logging). + * + * @example Using appkit logger + * ```typescript + * import { createLogger } from '@databricks/appkit'; + * const pool = createLakebasePool({ + * logger: createLogger('connectors:lakebase') + * }); + * ``` + */ + logger?: Logger; } // --------------------------------------------------------------------------- diff --git a/packages/lakebase/tsconfig.json b/packages/lakebase/tsconfig.json new file mode 100644 index 00000000..4a6e68b3 --- /dev/null +++ b/packages/lakebase/tsconfig.json @@ -0,0 +1,12 @@ +{ + "extends": "../../tsconfig.json", + "compilerOptions": { + "outDir": "dist", + "baseUrl": ".", + "paths": { + "@/*": ["src/*"] + } + }, + "include": ["src/**/*"], + "exclude": ["node_modules", "dist"] +} diff --git a/packages/lakebase/tsdown.config.ts b/packages/lakebase/tsdown.config.ts new file mode 100644 index 00000000..293e2fc3 --- /dev/null +++ b/packages/lakebase/tsdown.config.ts @@ -0,0 +1,28 @@ +import { defineConfig } from "tsdown"; + +export default defineConfig([ + { + publint: true, + name: "@databricks/lakebase", + entry: "src/index.ts", + outDir: "dist", + hash: false, + format: "esm", + platform: "node", + minify: false, + dts: { + resolve: true, + }, + sourcemap: false, + clean: false, + unbundle: true, + noExternal: [], + external: (id) => { + // Bundle all internal modules + if (id.startsWith("@/")) return false; + // Externalize all npm packages + return /^[^./]/.test(id) || id.includes("/node_modules/"); + }, + tsconfig: "./tsconfig.json", + }, +]); diff --git a/pnpm-lock.yaml b/pnpm-lock.yaml index d82b0d03..f8ee73ce 100644 --- a/pnpm-lock.yaml +++ b/pnpm-lock.yaml @@ -132,6 +132,18 @@ importers: '@databricks/appkit': specifier: workspace:* version: link:../../packages/appkit + drizzle-orm: + specifier: ^0.45.1 + version: 
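
Since the `Logger` interface introduced in `types.ts` is just four printf-style methods, most existing loggers adapt in a few lines. Besides the appkit `createLogger` shown in its JSDoc, a pino-style logger fits too; pino here is an assumption for illustration, not a dependency of this patch, and it understands the driver's `%s`/`%d`/`%O` placeholders natively:

```typescript
import pino from "pino"; // assumption: not part of this patch
import { createLakebasePool, type Logger } from "@databricks/lakebase";

const base = pino();

// Adapt pino's level methods to the driver's Logger shape.
const driverLogger: Logger = {
  debug: (msg, ...args) => base.debug(msg, ...args),
  info: (msg, ...args) => base.info(msg, ...args),
  warn: (msg, ...args) => base.warn(msg, ...args),
  error: (msg, ...args) => base.error(msg, ...args),
};

const pool = createLakebasePool({ logger: driverLogger });
```
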
0.45.1(@opentelemetry/api@1.9.0)(@types/pg@8.16.0)(pg@8.18.0) + reflect-metadata: + specifier: ^0.2.0 + version: 0.2.2 + sequelize: + specifier: ^6.37.7 + version: 6.37.7(pg@8.18.0) + typeorm: + specifier: ^0.3.20 + version: 0.3.28(pg@8.18.0) zod: specifier: ^4.1.13 version: 4.1.13 @@ -236,6 +248,9 @@ importers: packages/appkit: dependencies: + '@databricks/lakebase': + specifier: workspace:* + version: link:../lakebase '@databricks/sdk-experimental': specifier: ^0.16.0 version: 0.16.0 @@ -494,6 +509,22 @@ importers: specifier: ^1.4.0 version: 1.4.0 + packages/lakebase: + dependencies: + '@databricks/sdk-experimental': + specifier: ^0.16.0 + version: 0.16.0 + '@opentelemetry/api': + specifier: ^1.0.0 + version: 1.9.0 + pg: + specifier: ^8.18.0 + version: 8.18.0 + devDependencies: + '@types/pg': + specifier: ^8.16.0 + version: 8.16.0 + packages/shared: dependencies: '@ast-grep/napi': @@ -4088,6 +4119,9 @@ packages: '@slorber/remark-comment@1.0.0': resolution: {integrity: sha512-RCE24n7jsOj1M0UPvIQCHTe7fI0sFL4S2nwKVWwHyVr/wI/H8GosgsJGyhnsZoGFnD/P2hLf1mSbrrgSLN93NA==} + '@sqltools/formatter@1.2.5': + resolution: {integrity: sha512-Uy0+khmZqUrUGm5dmMqVlnvufZRSK0FbYzVgp0UMstm+F5+W2/jnEEQyc9vo1ZR/E5ZI/B1WjjoTqBqwJL6Krw==} + '@standard-schema/spec@1.1.0': resolution: {integrity: sha512-l2aFy5jALhniG5HgqrD6jXLi/rUWrKvqN/qJx6yoJsgKhblVd+iqqU4RCXavm/jPityDo5TCvKMnpjKnOriy0w==} @@ -4676,6 +4710,9 @@ packages: '@types/unist@3.0.3': resolution: {integrity: sha512-ko/gIFJRv177XgZsZcBwnqJN5x/Gien8qNOn0D5bQU/zAzVf9Zt3BlcUiLqhV9y4ARk0GbT3tnUiPNgnTXzc/Q==} + '@types/validator@13.15.10': + resolution: {integrity: sha512-T8L6i7wCuyoK8A/ZeLYt1+q0ty3Zb9+qbSSvrIVitzT3YjZqkTZ40IbRsPanlB4h1QB3JVL1SYCdR6ngtFYcuA==} + '@types/ws@8.18.1': resolution: {integrity: sha512-ThVF6DCVhA8kUGy+aazFQ4kXQ7E1Ty7A3ypFOe0IcJV8O/M511G99AW24irKrW56Wt44yG9+ij8FaqoBGkuBXg==} @@ -5009,6 +5046,10 @@ packages: app-module-path@2.2.0: resolution: {integrity: sha512-gkco+qxENJV+8vFcDiiFhuoSvRXb2a/QPqpSoWhVz829VNJfOTnELbBmPmNKFxf3xdNnw4DWCkzkDaavcX/1YQ==} + app-root-path@3.1.0: + resolution: {integrity: sha512-biN3PwB2gUtjaYy/isrU3aNWI5w+fAfvHkSvCKeQGxhmYpwKFUxudR3Yya+KqVRHBmEDYh+/lTozYCFbmzX4nA==} + engines: {node: '>= 6.0.0'} + aproba@2.1.0: resolution: {integrity: sha512-tLIEcj5GuR2RSTnxNKdkK0dJ/GrC7P38sUkiDmDuHfsHmbagTFAxDVIBltoklXEVIQ/f14IL8IMJ5pn9Hez1Ew==} @@ -5083,6 +5124,10 @@ packages: peerDependencies: postcss: ^8.1.0 + available-typed-arrays@1.0.7: + resolution: {integrity: sha512-wvUjBtSGN7+7SjNpq/9M2Tg350UZD3q62IFZLbRAR1bSMlCo1ZaeW+BJ+D090e4hIIZLBcTDWe4Mh4jvUDajzQ==} + engines: {node: '>= 0.4'} + babel-loader@9.2.1: resolution: {integrity: sha512-fqe8naHt46e0yIdkjUZYqddSXfej3AHajX+CSO5X7oy0EmPc6o5Xh+RClNoHjnieWz9AW4kZxW9yyFMhVB1QLA==} engines: {node: '>= 14.15.0'} @@ -5210,6 +5255,9 @@ packages: buffer@5.7.1: resolution: {integrity: sha512-EHcyIPBQ4BSGlvjB16k5KgAJ27CIsHY/2JBmCRReo48y9rQ3MaUzWX3KVlBa4U7MyX02HdVj0K7C3WaB3ju7FQ==} + buffer@6.0.3: + resolution: {integrity: sha512-FTiCpNxtwiZZHEZbcbTIcZjERVICn9yq/pDFkTl95/AxzD1naBctN7YO68riM/gLSDY7sdrMby8hofADYuuqOA==} + bufferutil@4.0.9: resolution: {integrity: sha512-WDtdLmJvAuNNPzByAYpRo2rF1Mmradw6gvWsQKf63476DDXmomT9zUiGypLcG4ibIM67vhAj8jJRdbmEws2Aqw==} engines: {node: '>=6.14.2'} @@ -6029,6 +6077,14 @@ packages: resolution: {integrity: sha512-aW35yZM6Bb/4oJlZncMH2LCoZtJXTRxES17vE3hoRiowU2kWHaJKFkSBDnDR+cm9J+9QhXmREyIfv0pji9ejCQ==} engines: {node: '>=10'} + dedent@1.7.1: + resolution: {integrity: 
sha512-9JmrhGZpOlEgOLdQgSm0zxFaYoQon408V1v49aqTWuXENVlnCuY9JBZcXZiCsZQWDjTm5Qf/nIvAy77mXDAjEg==} + peerDependencies: + babel-plugin-macros: ^3.1.0 + peerDependenciesMeta: + babel-plugin-macros: + optional: true + deep-eql@5.0.2: resolution: {integrity: sha512-h5k/5U50IJJFpzfL6nO9jaaumfjO/f2NjK/oYB2Djzm4p9L+3T9qWpZqZ2hAbLPuuYq9wrU08WQyBTL5GbPk5Q==} engines: {node: '>=6'} @@ -6264,6 +6320,101 @@ packages: resolution: {integrity: sha512-JVUnt+DUIzu87TABbhPmNfVdBDt18BLOWjMUFJMSi/Qqg7NTYtabbvSNJGOJ7afbRuv9D/lngizHtP7QyLQ+9w==} engines: {node: '>=12'} + dottie@2.0.6: + resolution: {integrity: sha512-iGCHkfUc5kFekGiqhe8B/mdaurD+lakO9txNnTvKtA6PISrw86LgqHvRzWYPyoE2Ph5aMIrCw9/uko6XHTKCwA==} + + drizzle-orm@0.45.1: + resolution: {integrity: sha512-Te0FOdKIistGNPMq2jscdqngBRfBpC8uMFVwqjf6gtTVJHIQ/dosgV/CLBU2N4ZJBsXL5savCba9b0YJskKdcA==} + peerDependencies: + '@aws-sdk/client-rds-data': '>=3' + '@cloudflare/workers-types': '>=4' + '@electric-sql/pglite': '>=0.2.0' + '@libsql/client': '>=0.10.0' + '@libsql/client-wasm': '>=0.10.0' + '@neondatabase/serverless': '>=0.10.0' + '@op-engineering/op-sqlite': '>=2' + '@opentelemetry/api': ^1.4.1 + '@planetscale/database': '>=1.13' + '@prisma/client': '*' + '@tidbcloud/serverless': '*' + '@types/better-sqlite3': '*' + '@types/pg': '*' + '@types/sql.js': '*' + '@upstash/redis': '>=1.34.7' + '@vercel/postgres': '>=0.8.0' + '@xata.io/client': '*' + better-sqlite3: '>=7' + bun-types: '*' + expo-sqlite: '>=14.0.0' + gel: '>=2' + knex: '*' + kysely: '*' + mysql2: '>=2' + pg: '>=8' + postgres: '>=3' + prisma: '*' + sql.js: '>=1' + sqlite3: '>=5' + peerDependenciesMeta: + '@aws-sdk/client-rds-data': + optional: true + '@cloudflare/workers-types': + optional: true + '@electric-sql/pglite': + optional: true + '@libsql/client': + optional: true + '@libsql/client-wasm': + optional: true + '@neondatabase/serverless': + optional: true + '@op-engineering/op-sqlite': + optional: true + '@opentelemetry/api': + optional: true + '@planetscale/database': + optional: true + '@prisma/client': + optional: true + '@tidbcloud/serverless': + optional: true + '@types/better-sqlite3': + optional: true + '@types/pg': + optional: true + '@types/sql.js': + optional: true + '@upstash/redis': + optional: true + '@vercel/postgres': + optional: true + '@xata.io/client': + optional: true + better-sqlite3: + optional: true + bun-types: + optional: true + expo-sqlite: + optional: true + gel: + optional: true + knex: + optional: true + kysely: + optional: true + mysql2: + optional: true + pg: + optional: true + postgres: + optional: true + prisma: + optional: true + sql.js: + optional: true + sqlite3: + optional: true + dts-resolver@2.1.2: resolution: {integrity: sha512-xeXHBQkn2ISSXxbJWD828PFjtyg+/UrMDo7W4Ffcs7+YWCquxU8YjV1KoxuiL+eJ5pg3ll+bC6flVv61L3LKZg==} engines: {node: '>=20.18.0'} @@ -6721,6 +6872,10 @@ packages: debug: optional: true + for-each@0.3.5: + resolution: {integrity: sha512-dKx12eRCVIzqCxFGplyFKJMPvLEWgmNtUrpTiJIR5u97zEhRG8ySrtboPHZXx7daLxQVrl643cTzbab2tkQjxg==} + engines: {node: '>= 0.4'} + for-in@1.0.2: resolution: {integrity: sha512-7EwmXrOjyL+ChxMhmG5lnW9MPt1aIeZEwKhQzoBUdTV0N3zuwWDZYVJatDvZ2OyzPUvdIAZDsCetk3coyMfcnQ==} engines: {node: '>=0.10.0'} @@ -6995,6 +7150,10 @@ packages: resolution: {integrity: sha512-1cDNdwJ2Jaohmb3sg4OmKaMBwuC48sYni5HUw2DvsC8LjGTLK9h+eb1X6RyuOHe4hT0ULCW68iomhjUoKUqlPQ==} engines: {node: '>= 0.4'} + has-tostringtag@1.0.2: + resolution: {integrity: sha512-NqADB8VjPFLM2V0VvHUewwwsw0ZWBaIdgo+ieHtK3hasLz4qeCRjYcqfB6AQrBggRKppKF8L52/VqdVsO47Dlw==} + 
engines: {node: '>= 0.4'} + has-unicode@2.0.1: resolution: {integrity: sha512-8Rf9Y83NBReMnx0gFzA8JImQACstCYWUplepDa9xprwwtmgEZUF0h/i5xSA625zB/I37EtrswSST6OXxwaaIJQ==} @@ -7295,6 +7454,10 @@ packages: resolution: {integrity: sha512-uyH0zfr1erU1OohLk0fT4Rrb94AOhguWNOcD9uGrSpRvNB+6gZXUoJX5J0NtvzBO10YZ9PgvA4NFgt+fYg8ojw==} engines: {node: '>=12'} + inflection@1.13.4: + resolution: {integrity: sha512-6I/HUDeYFfuNCVS3td055BaXBwKYuzw7K3ExVMStBowKo9oOAMJIXIHvdyR3iboTCp1b+1i5DSkIZTcwIktuDw==} + engines: {'0': node >= 0.4.0} + inflight@1.0.6: resolution: {integrity: sha512-k92I/b08q4wvFscXCLvqfsHCrjrF7yiXsQuIVvVE7N82W3+aqpzuUdBbfhWcy/FZR3/4IgflMgKLOsvPDrGCJA==} deprecated: This module is not supported, and leaks memory. Do not use it. Check out lru-cache if you want a good and tested way to coalesce async requests by a key value, which is much more comprehensive and powerful. @@ -7389,6 +7552,10 @@ packages: resolution: {integrity: sha512-i2R6zNFDwgEHJyQUtJEk0XFi1i0dPFn/oqjK3/vPCcDeJvW5NQ83V8QbicfF1SupOaB0h8ntgBC2YiE7dfyctQ==} engines: {node: '>=4'} + is-callable@1.2.7: + resolution: {integrity: sha512-1BC0BVFhS/p0qtw6enp8e+8OD0UrK0oFLztSjNzhcKA3WDuJxxAPXzPuPtKkjEY9UUoEWlX/8fgKeu2S8i9JTA==} + engines: {node: '>= 0.4'} + is-ci@3.0.1: resolution: {integrity: sha512-ZYvCgrefwqoQ6yTyYUbQu64HsITZ3NfKX1lzaEYdkTDcfKzzCI/wthRRYKkdjHKFVgNiXKAKm65Zo1pk2as/QQ==} hasBin: true @@ -7524,6 +7691,10 @@ packages: resolution: {integrity: sha512-+oDTluR6WEjdXEJMnC2z6A4FRwFoYuvShVVEGsS7ewc0UTi2QtAKMDJuL4BDEVt+5T7MjFo12RP8ghOM75oKJw==} engines: {node: '>=8'} + is-typed-array@1.1.15: + resolution: {integrity: sha512-p3EcsicXjit7SaskXHs1hA91QxgTw46Fv6EFKKGS5DRFLD8yKnohjF3hxoju94b/OcMZoQukzpPpBE9uLVKzgQ==} + engines: {node: '>= 0.4'} + is-typedarray@1.0.0: resolution: {integrity: sha512-cyA56iCMHAh5CdzjJIa4aohJyeO1YbwLi3Jc35MmRU6poroFjIGZzUzupGiRPOjgHg9TLu43xbpwXk523fMxKA==} @@ -7568,6 +7739,9 @@ packages: isarray@1.0.0: resolution: {integrity: sha512-VLghIWNM6ELQzo7zwmcg0NmTVyWKYjvIeM83yjp0wRDTmUnrM678fQbcKBo6n2CJEF0szoG//ytg+TKla89ALQ==} + isarray@2.0.5: + resolution: {integrity: sha512-xHjhDr3cNBK0BzdUJSPXZntQUx/mwMS5Rw4A7lPJ90XGAO6ISP/ePDNuo0vhqOZU+UD5JoodwCAAoZQd3FeAKw==} + isbinaryfile@5.0.6: resolution: {integrity: sha512-I+NmIfBHUl+r2wcDd6JwE9yWje/PIVY/R5/CmV8dXLZd5K+L9X2klAOwfAHNnondLXkbHyTAleQAWonpTJBTtw==} engines: {node: '>= 18.0.0'} @@ -8331,6 +8505,12 @@ packages: engines: {node: '>=18'} hasBin: true + moment-timezone@0.5.48: + resolution: {integrity: sha512-f22b8LV1gbTO2ms2j2z13MuPogNoh5UzxL3nzNAYKGraILnbGc9NEE6dyiiiLv46DGRb8A4kg8UKWLjPthxBHw==} + + moment@2.30.1: + resolution: {integrity: sha512-uEmtNhbDOrWPFS+hdjFCBfy9f2YoyzRpwcl+DqpC6taX21FzsTLQVbMV/W7PzNSX6x/bhC1zA3c2UQ5NzH6how==} + mri@1.2.0: resolution: {integrity: sha512-tzzskb3bG8LvYGFF/mDTpq3jpI6Q9wc3LEmBaghu+DdCssd1FakN7Bc0hVNmEyGq1bq3RgfkCb3cmQLpNPOroA==} engines: {node: '>=4'} @@ -8826,6 +9006,10 @@ packages: points-on-path@0.2.1: resolution: {integrity: sha512-25ClnWWuw7JbWZcgqY/gJ4FQWadKxGWk+3kR/7kD0tCaDtPPMj7oHu2ToLaVhfpnHrZzYby2w6tUA0eOIuUg8g==} + possible-typed-array-names@1.1.0: + resolution: {integrity: sha512-/+5VFTchJDoVj3bhoqi6UeymcD00DAwb1nJwamzPvHEszJ4FpF6SNNbUbOS8yI56qHzdV8eK0qEfOSiodkTdxg==} + engines: {node: '>= 0.4'} + postcss-attribute-case-insensitive@7.0.1: resolution: {integrity: sha512-Uai+SupNSqzlschRyNx3kbCTWgY/2hcwtHEI/ej2LJWc9JJ77qKgGptd8DHwY1mXtZ7Aoh4z4yxfwMBue9eNgw==} engines: {node: '>=18'} @@ -9688,6 +9872,9 @@ packages: resolution: {integrity: 
sha512-oMA2dcrw6u0YfxJQXm342bFKX/E4sG9rbTzO9ptUcR/e8A33cHuvStiYOwH7fszkZlZ1z/ta9AAoPk2F4qIOHA==} engines: {node: '>=18'} + retry-as-promised@7.1.1: + resolution: {integrity: sha512-hMD7odLOt3LkTjcif8aRZqi/hybjpLNgSk5oF5FCowfCjok6LukpN2bDX7R5wDmbgBQFn7YoBxSagmtXHaJYJw==} + retry@0.13.1: resolution: {integrity: sha512-XQBQ3I8W1Cge0Seh+6gjj03LbmRFWuoszgK9ooCpwYIrhhoO80pfq4cUkU5DkknwfOfFteRwlZ56PYOGYyFWdg==} engines: {node: '>= 4'} @@ -9886,6 +10073,43 @@ packages: resolution: {integrity: sha512-p4rRk4f23ynFEfcD9LA0xRYngj+IyGiEYyqqOak8kaN0TvNmuxC2dcVeBn62GpCeR2CpWqyHCNScTP91QbAVFg==} engines: {node: '>= 0.8.0'} + sequelize-pool@7.1.0: + resolution: {integrity: sha512-G9c0qlIWQSK29pR/5U2JF5dDQeqqHRragoyahj/Nx4KOOQ3CPPfzxnfqFPCSB7x5UgjOgnZ61nSxz+fjDpRlJg==} + engines: {node: '>= 10.0.0'} + + sequelize@6.37.7: + resolution: {integrity: sha512-mCnh83zuz7kQxxJirtFD7q6Huy6liPanI67BSlbzSYgVNl5eXVdE2CN1FuAeZwG1SNpGsNRCV+bJAVVnykZAFA==} + engines: {node: '>=10.0.0'} + peerDependencies: + ibm_db: '*' + mariadb: '*' + mysql2: '*' + oracledb: '*' + pg: '*' + pg-hstore: '*' + snowflake-sdk: '*' + sqlite3: '*' + tedious: '*' + peerDependenciesMeta: + ibm_db: + optional: true + mariadb: + optional: true + mysql2: + optional: true + oracledb: + optional: true + pg: + optional: true + pg-hstore: + optional: true + snowflake-sdk: + optional: true + sqlite3: + optional: true + tedious: + optional: true + serialize-javascript@6.0.2: resolution: {integrity: sha512-Saa1xPByTTq2gdeFZYLLo+RFE35NHZkAbqZeWNd3BpzppeVisAqpDjcp8dyf6uIvEqJRd46jemmyA4iFIeVk8g==} @@ -9910,6 +10134,11 @@ packages: setprototypeof@1.2.0: resolution: {integrity: sha512-E5LDX7Wrp85Kil5bhZv46j8jOeboKq5JMmYM3gVGdGH8xFpPWXUMsNrlODCrkoxMEeNi/XZIwuRvY4XNwYMJpw==} + sha.js@2.4.12: + resolution: {integrity: sha512-8LzC5+bvI45BjpfXU8V5fdU2mfeKiQe1D1gIMn7XUlF3OTUrpdJpPPH4EMAnF0DsHHdSZqCdSss5qCmJKuiO3w==} + engines: {node: '>= 0.10'} + hasBin: true + shallow-clone@3.0.1: resolution: {integrity: sha512-/6KqX+GVUdqPuPPd2LxDDxzX6CAbjJehAAOKlNpqqUpAqPM6HeL8f+o3a+JsyGjn2lv0WY8UsTgUJjU9Ok55NA==} engines: {node: '>=8'} @@ -10065,6 +10294,10 @@ packages: sprintf-js@1.0.3: resolution: {integrity: sha512-D9cPgkvLlV3t3IzL0D0YLvGA9Ahk4PcvVwUbN0dSGr1aP0Nrt4AEnTUbuGvquEC0mA64Gqt1fzirlRs5ibXx8g==} + sql-highlight@6.1.0: + resolution: {integrity: sha512-ed7OK4e9ywpE7pgRMkMQmZDPKSVdm0oX5IEtZiKnFucSF0zu6c80GZBe38UqHuVhTWJ9xsKgSMjCG2bml86KvA==} + engines: {node: '>=14'} + srcset@4.0.0: resolution: {integrity: sha512-wvLeHgcVHKO8Sc/H/5lkGreJQVeYMm9rlmt8PuR1xE31rIuXhuzznUUqAt8MqLhB3MqJdFzlNAfpcWnxiFUcPw==} engines: {node: '>=12'} @@ -10315,6 +10548,10 @@ packages: resolution: {integrity: sha512-Y1KQBgDd/NUc+LfOtKS6mNsC9CCaH+m2P1RoIZy7RAPo3C3/t8X45+zgut31cRZtZ3xKPjfn3TkGTrctC2TQIQ==} hasBin: true + to-buffer@1.2.2: + resolution: {integrity: sha512-db0E3UJjcFhpDhAF4tLo03oli3pwl3dbnzXOUIlRKrp+ldk/VUxzpWYZENsw2SZiuBjHAk7DfB0VU7NKdpb6sw==} + engines: {node: '>= 0.4'} + to-regex-range@5.0.1: resolution: {integrity: sha512-65P7iz6X5yEr1cwcgvQxbbIw7Uk3gOy5dIdtZ4rDveLqhrdJP+Li/Hx6tyK0NEb+2GCyneCMJiGqrADCSNk8sQ==} engines: {node: '>=8.0'} @@ -10326,6 +10563,9 @@ packages: resolution: {integrity: sha512-o5sSPKEkg/DIQNmH43V0/uerLrpzVedkUh8tGNvaeXpfpuwjKenlSox/2O/BTlZUtEe+JG7s5YhEz608PlAHRA==} engines: {node: '>=0.6'} + toposort-class@1.0.1: + resolution: {integrity: sha512-OsLcGGbYF3rMjPUf8oKktyvCiUxSbqMMS39m33MAjLTC1DVIH6x3WSt63/M77ihI09+Sdfk1AXvfhCEeUmC7mg==} + totalist@3.0.1: resolution: {integrity: 
sha512-sf4i37nQ2LBx4m3wB74y+ubopq6W/dIzXg0FDGjsYnZHVa1Da8FH853wlL2gtUhg+xJXjfk3kUZS3BRoQeoQBQ==} engines: {node: '>=6'} @@ -10477,6 +10717,10 @@ packages: resolution: {integrity: sha512-TkRKr9sUTxEH8MdfuCSP7VizJyzRNMjj2J2do2Jr3Kym598JVdEksuzPQCnlFPW4ky9Q+iA+ma9BGm06XQBy8g==} engines: {node: '>= 0.6'} + typed-array-buffer@1.0.3: + resolution: {integrity: sha512-nAYYwfY3qnzX30IkA6AQZjVbtK6duGontcQm1WSG1MD94YLqK0515GNApXkoxKOWMusVssAHWLh9SeaoefYFGw==} + engines: {node: '>= 0.4'} + typedarray-to-buffer@3.1.5: resolution: {integrity: sha512-zdu8XMNEDepKKR+XYOXAVPtWui0ly0NtohUscw+UmaHiAWT8hrV1rr//H6V+0DvJ3OQ19S979M0laLfX8rm82Q==} @@ -10501,6 +10745,61 @@ packages: peerDependencies: typescript: 5.0.x || 5.1.x || 5.2.x || 5.3.x || 5.4.x || 5.5.x || 5.6.x || 5.7.x || 5.8.x || 5.9.x + typeorm@0.3.28: + resolution: {integrity: sha512-6GH7wXhtfq2D33ZuRXYwIsl/qM5685WZcODZb7noOOcRMteM9KF2x2ap3H0EBjnSV0VO4gNAfJT5Ukp0PkOlvg==} + engines: {node: '>=16.13.0'} + hasBin: true + peerDependencies: + '@google-cloud/spanner': ^5.18.0 || ^6.0.0 || ^7.0.0 || ^8.0.0 + '@sap/hana-client': ^2.14.22 + better-sqlite3: ^8.0.0 || ^9.0.0 || ^10.0.0 || ^11.0.0 || ^12.0.0 + ioredis: ^5.0.4 + mongodb: ^5.8.0 || ^6.0.0 + mssql: ^9.1.1 || ^10.0.0 || ^11.0.0 || ^12.0.0 + mysql2: ^2.2.5 || ^3.0.1 + oracledb: ^6.3.0 + pg: ^8.5.1 + pg-native: ^3.0.0 + pg-query-stream: ^4.0.0 + redis: ^3.1.1 || ^4.0.0 || ^5.0.14 + sql.js: ^1.4.0 + sqlite3: ^5.0.3 + ts-node: ^10.7.0 + typeorm-aurora-data-api-driver: ^2.0.0 || ^3.0.0 + peerDependenciesMeta: + '@google-cloud/spanner': + optional: true + '@sap/hana-client': + optional: true + better-sqlite3: + optional: true + ioredis: + optional: true + mongodb: + optional: true + mssql: + optional: true + mysql2: + optional: true + oracledb: + optional: true + pg: + optional: true + pg-native: + optional: true + pg-query-stream: + optional: true + redis: + optional: true + sql.js: + optional: true + sqlite3: + optional: true + ts-node: + optional: true + typeorm-aurora-data-api-driver: + optional: true + typescript-eslint@8.49.0: resolution: {integrity: sha512-zRSVH1WXD0uXczCXw+nsdjGPUdx4dfrs5VQoHnUWmv1U3oNlAKv4FUNdLDhVUg+gYn+a5hUESqch//Rv5wVhrg==} engines: {node: ^18.18.0 || ^20.9.0 || >=21.1.0} @@ -10725,6 +11024,10 @@ packages: validate-npm-package-license@3.0.4: resolution: {integrity: sha512-DpKm2Ui/xN7/HQKCtpZxoRWBhZ9Z0kqtygG8XCgNQ8ZlDnxuQmWhj566j8fN4Cu3/JmbhsDo7fcAJq4s9h27Ew==} + validator@13.15.26: + resolution: {integrity: sha512-spH26xU080ydGggxRyR1Yhcbgx+j3y5jbNXk/8L+iRvdIEQ4uTRH2Sgf2dokud6Q4oAtsbNvJ1Ft+9xmm6IZcA==} + engines: {node: '>= 0.10'} + value-equal@1.0.1: resolution: {integrity: sha512-NOJ6JZCAWr0zlxZt+xqCHNTEKOsrks2HQd4MqhP1qy4z1SkbEP467eNx6TgDKXMvUOb+OENfJCZwM+16n7fRfw==} @@ -10974,6 +11277,10 @@ packages: whatwg-url@5.0.0: resolution: {integrity: sha512-saE57nupxk6v3HY35+jzBwYa0rKSy0XR8JSxZPwgLr7ys0IBzhGviA1/TUGJLmSVqs8pb9AnvICXEuOHLprYTw==} + which-typed-array@1.1.20: + resolution: {integrity: sha512-LYfpUkmqwl0h9A2HL09Mms427Q1RZWuOHsukfVcKRq9q95iQxdw0ix1JQrqbcDR9PH1QDwf5Qo8OZb5lksZ8Xg==} + engines: {node: '>= 0.4'} + which@1.3.1: resolution: {integrity: sha512-HxJdYWq1MTIQbJ3nw0cqssHoTNU267KlrDuGZ1WYlxDStUtKUhOaJmh112/TZmHxxUfuJqPXSOm7tDyas0OSIQ==} hasBin: true @@ -11005,6 +11312,9 @@ packages: resolution: {integrity: sha512-1lOb3qdzw6OFmOzoY0nauhLG72TpWtb5qgYPiSh/62rjc1XidBSDio2qw0pwHh17VINF217ebIkZJdFLZFn9SA==} engines: {node: '>=18'} + wkx@0.5.0: + resolution: {integrity: sha512-Xng/d4Ichh8uN4l0FToV/258EjMGU9MGcA0HV2d9B/ZpZB3lqQm7nkOdZdm5GhKtLLhAE7PiVQwN4eN+2YJJUg==} + 
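
The drizzle-orm, sequelize, and typeorm entries added to this lockfile back the examples workspace at this point in the series, and they exercise the driver's core claim: anything that accepts a `pg.Pool` needs no Lakebase-specific glue. A minimal Drizzle sketch (schema omitted; passing a pool to `drizzle-orm/node-postgres` is standard Drizzle usage, not something this patch adds):

```typescript
import { sql } from "drizzle-orm";
import { drizzle } from "drizzle-orm/node-postgres";
import { createLakebasePool } from "@databricks/lakebase";

// The OAuth-refreshing pool is a standard pg.Pool, so Drizzle takes it
// directly; token refresh stays invisible to the ORM.
const pool = createLakebasePool();
const db = drizzle(pool);

const result = await db.execute(sql`SELECT 1 AS ok`);
console.log(result.rows);
```
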
word-wrap@1.2.5: resolution: {integrity: sha512-BN22B5eaMMI9UMtjrGd5g5eCYPpCPDUy0FJXbYsaT5zYxjFOckS53SQDE3pWkVoWpHXVb3BrYcEN4Twa55B5cA==} engines: {node: '>=0.10.0'} @@ -15708,6 +16018,8 @@ snapshots: micromark-util-character: 1.2.0 micromark-util-symbol: 1.1.0 + '@sqltools/formatter@1.2.5': {} + '@standard-schema/spec@1.1.0': {} '@standard-schema/utils@0.3.0': {} @@ -16371,6 +16683,8 @@ snapshots: '@types/unist@3.0.3': {} + '@types/validator@13.15.10': {} + '@types/ws@8.18.1': dependencies: '@types/node': 24.10.1 @@ -16843,6 +17157,8 @@ snapshots: app-module-path@2.2.0: {} + app-root-path@3.1.0: {} + aproba@2.1.0: {} arg@5.0.2: {} @@ -16905,6 +17221,10 @@ snapshots: postcss: 8.5.6 postcss-value-parser: 4.2.0 + available-typed-arrays@1.0.7: + dependencies: + possible-typed-array-names: 1.1.0 + babel-loader@9.2.1(@babel/core@7.28.5)(webpack@5.103.0): dependencies: '@babel/core': 7.28.5 @@ -17064,6 +17384,11 @@ snapshots: base64-js: 1.5.1 ieee754: 1.2.1 + buffer@6.0.3: + dependencies: + base64-js: 1.5.1 + ieee754: 1.2.1 + bufferutil@4.0.9: dependencies: node-gyp-build: 4.8.4 @@ -17932,6 +18257,8 @@ snapshots: dependencies: mimic-response: 3.1.0 + dedent@1.7.1: {} + deep-eql@5.0.2: {} deep-extend@0.6.0: {} @@ -18182,6 +18509,14 @@ snapshots: dotenv@17.2.3: {} + dottie@2.0.6: {} + + drizzle-orm@0.45.1(@opentelemetry/api@1.9.0)(@types/pg@8.16.0)(pg@8.18.0): + optionalDependencies: + '@opentelemetry/api': 1.9.0 + '@types/pg': 8.16.0 + pg: 8.18.0 + dts-resolver@2.1.2: {} dunder-proto@1.0.1: @@ -18708,6 +19043,10 @@ snapshots: follow-redirects@1.15.11: {} + for-each@0.3.5: + dependencies: + is-callable: 1.2.7 + for-in@1.0.2: {} for-own@1.0.0: @@ -19040,6 +19379,10 @@ snapshots: has-symbols@1.1.0: {} + has-tostringtag@1.0.2: + dependencies: + has-symbols: 1.1.0 + has-unicode@2.0.1: {} has-yarn@3.0.0: {} @@ -19515,6 +19858,8 @@ snapshots: infima@0.2.0-alpha.45: {} + inflection@1.13.4: {} + inflight@1.0.6: dependencies: once: 1.4.0 @@ -19604,6 +19949,8 @@ snapshots: is-buffer@2.0.5: {} + is-callable@1.2.7: {} + is-ci@3.0.1: dependencies: ci-info: 3.9.0 @@ -19693,6 +20040,10 @@ snapshots: dependencies: text-extensions: 2.4.0 + is-typed-array@1.1.15: + dependencies: + which-typed-array: 1.1.20 + is-typedarray@1.0.0: {} is-unc-path@1.0.0: @@ -19723,6 +20074,8 @@ snapshots: isarray@1.0.0: {} + isarray@2.0.5: {} + isbinaryfile@5.0.6: {} isexe@2.0.0: {} @@ -20789,6 +21142,12 @@ snapshots: requirejs: 2.3.8 requirejs-config-file: 4.0.0 + moment-timezone@0.5.48: + dependencies: + moment: 2.30.1 + + moment@2.30.1: {} + mri@1.2.0: {} mrmime@2.0.1: {} @@ -21299,6 +21658,8 @@ snapshots: path-data-parser: 0.1.0 points-on-curve: 0.2.0 + possible-typed-array-names@1.1.0: {} + postcss-attribute-case-insensitive@7.0.1(postcss@8.5.6): dependencies: postcss: 8.5.6 @@ -22344,6 +22705,8 @@ snapshots: onetime: 7.0.0 signal-exit: 4.1.0 + retry-as-promised@7.1.1: {} + retry@0.13.1: {} reusify@1.1.0: {} @@ -22606,6 +22969,31 @@ snapshots: transitivePeerDependencies: - supports-color + sequelize-pool@7.1.0: {} + + sequelize@6.37.7(pg@8.18.0): + dependencies: + '@types/debug': 4.1.12 + '@types/validator': 13.15.10 + debug: 4.4.3 + dottie: 2.0.6 + inflection: 1.13.4 + lodash: 4.17.21 + moment: 2.30.1 + moment-timezone: 0.5.48 + pg-connection-string: 2.11.0 + retry-as-promised: 7.1.1 + semver: 7.7.3 + sequelize-pool: 7.1.0 + toposort-class: 1.0.1 + uuid: 8.3.2 + validator: 13.15.26 + wkx: 0.5.0 + optionalDependencies: + pg: 8.18.0 + transitivePeerDependencies: + - supports-color + serialize-javascript@6.0.2: dependencies: 
randombytes: 2.1.0 @@ -22654,6 +23042,12 @@ snapshots: setprototypeof@1.2.0: {} + sha.js@2.4.12: + dependencies: + inherits: 2.0.4 + safe-buffer: 5.2.1 + to-buffer: 1.2.2 + shallow-clone@3.0.1: dependencies: kind-of: 6.0.3 @@ -22824,6 +23218,8 @@ snapshots: sprintf-js@1.0.3: {} + sql-highlight@6.1.0: {} + srcset@4.0.0: {} stackback@0.0.2: {} @@ -23036,6 +23432,12 @@ snapshots: dependencies: tldts-core: 7.0.17 + to-buffer@1.2.2: + dependencies: + isarray: 2.0.5 + safe-buffer: 5.2.1 + typed-array-buffer: 1.0.3 + to-regex-range@5.0.1: dependencies: is-number: 7.0.0 @@ -23047,6 +23449,8 @@ snapshots: toidentifier@1.0.1: {} + toposort-class@1.0.1: {} + totalist@3.0.1: {} tough-cookie@6.0.0: @@ -23170,6 +23574,12 @@ snapshots: media-typer: 0.3.0 mime-types: 2.1.35 + typed-array-buffer@1.0.3: + dependencies: + call-bound: 1.0.4 + es-errors: 1.3.0 + is-typed-array: 1.1.15 + typedarray-to-buffer@3.1.5: dependencies: is-typedarray: 1.0.0 @@ -23193,6 +23603,29 @@ snapshots: typescript: 5.6.3 yaml: 2.8.1 + typeorm@0.3.28(pg@8.18.0): + dependencies: + '@sqltools/formatter': 1.2.5 + ansis: 4.2.0 + app-root-path: 3.1.0 + buffer: 6.0.3 + dayjs: 1.11.19 + debug: 4.4.3 + dedent: 1.7.1 + dotenv: 16.6.1 + glob: 10.5.0 + reflect-metadata: 0.2.2 + sha.js: 2.4.12 + sql-highlight: 6.1.0 + tslib: 2.8.1 + uuid: 11.1.0 + yargs: 17.7.2 + optionalDependencies: + pg: 8.18.0 + transitivePeerDependencies: + - babel-plugin-macros + - supports-color + typescript-eslint@8.49.0(eslint@9.39.1(jiti@2.6.1))(typescript@5.9.3): dependencies: '@typescript-eslint/eslint-plugin': 8.49.0(@typescript-eslint/parser@8.49.0(eslint@9.39.1(jiti@2.6.1))(typescript@5.9.3))(eslint@9.39.1(jiti@2.6.1))(typescript@5.9.3) @@ -23416,6 +23849,8 @@ snapshots: spdx-correct: 3.2.0 spdx-expression-parse: 3.0.1 + validator@13.15.26: {} + value-equal@1.0.1: {} vary@1.1.2: {} @@ -23779,6 +24214,16 @@ snapshots: tr46: 0.0.3 webidl-conversions: 3.0.1 + which-typed-array@1.1.20: + dependencies: + available-typed-arrays: 1.0.7 + call-bind: 1.0.8 + call-bound: 1.0.4 + for-each: 0.3.5 + get-proto: 1.0.1 + gopd: 1.2.0 + has-tostringtag: 1.0.2 + which@1.3.1: dependencies: isexe: 2.0.0 @@ -23808,6 +24253,10 @@ snapshots: dependencies: execa: 8.0.1 + wkx@0.5.0: + dependencies: + '@types/node': 24.10.1 + word-wrap@1.2.5: {} wordwrap@1.0.0: {} diff --git a/tools/license-utils.ts b/tools/license-utils.ts index f8a7e479..43247f05 100644 --- a/tools/license-utils.ts +++ b/tools/license-utils.ts @@ -18,6 +18,7 @@ type PackageJson = { export const PUBLISHED_PACKAGES = [ "packages/appkit", "packages/appkit-ui", + "packages/lakebase", "packages/shared", ]; diff --git a/vitest.config.ts b/vitest.config.ts index da6ca040..8c2893b0 100644 --- a/vitest.config.ts +++ b/vitest.config.ts @@ -42,6 +42,14 @@ export default defineConfig({ environment: "node", }, }, + { + plugins: [tsconfigPaths()], + test: { + name: "lakebase", + root: "./packages/lakebase", + environment: "node", + }, + }, { plugins: [tsconfigPaths()], test: { From 374583c72588a8e1f9708482cb64727447ce4251 Mon Sep 17 00:00:00 2001 From: Pawel Kosiec Date: Tue, 17 Feb 2026 11:17:42 +0100 Subject: [PATCH 07/12] chore: regenerate docs after rebase --- docs/docs/api/appkit/index.md | 13 +- docs/docs/api/appkit/typedoc-sidebar.ts | 55 +++ pnpm-lock.yaml | 430 ------------------------ 3 files changed, 67 insertions(+), 431 deletions(-) diff --git a/docs/docs/api/appkit/index.md b/docs/docs/api/appkit/index.md index 7786b26a..91f9a66e 100644 --- a/docs/docs/api/appkit/index.md +++ b/docs/docs/api/appkit/index.md @@ -8,6 
+8,7 @@ plugin architecture, and React integration. | Enumeration | Description | | ------ | ------ | | [RequestedClaimsPermissionSet](Enumeration.RequestedClaimsPermissionSet.md) | Permission set for Unity Catalog table access | +| [ResourceType](Enumeration.ResourceType.md) | Supported resource types that plugins can depend on. Each type has its own set of valid permissions. | ## Classes @@ -19,7 +20,8 @@ plugin architecture, and React integration. | [ConnectionError](Class.ConnectionError.md) | Error thrown when a connection or network operation fails. Use for database pool errors, API failures, timeouts, etc. | | [ExecutionError](Class.ExecutionError.md) | Error thrown when an operation execution fails. Use for statement failures, canceled operations, or unexpected states. | | [InitializationError](Class.InitializationError.md) | Error thrown when a service or component is not properly initialized. Use when accessing services before they are ready. | -| [Plugin](Class.Plugin.md) | Base abstract class for creating AppKit plugins | +| [Plugin](Class.Plugin.md) | Base abstract class for creating AppKit plugins. | +| [ResourceRegistry](Class.ResourceRegistry.md) | Central registry for tracking plugin resource requirements. Deduplication uses type + resourceKey (machine-stable); alias is for display only. | | [ServerError](Class.ServerError.md) | Error thrown when server lifecycle operations fail. Use for server start/stop issues, configuration conflicts, etc. | | [TunnelError](Class.TunnelError.md) | Error thrown when remote tunnel operations fail. Use for tunnel connection issues, message parsing failures, etc. | | [ValidationError](Class.ValidationError.md) | Error thrown when input validation fails. Use for invalid parameters, missing required fields, or type mismatches. | @@ -34,16 +36,23 @@ plugin architecture, and React integration. | [GenerateDatabaseCredentialRequest](Interface.GenerateDatabaseCredentialRequest.md) | Request parameters for generating database OAuth credentials | | [ITelemetry](Interface.ITelemetry.md) | Plugin-facing interface for OpenTelemetry instrumentation. Provides a thin abstraction over OpenTelemetry APIs for plugins. | | [LakebasePoolConfig](Interface.LakebasePoolConfig.md) | Configuration for creating a Lakebase connection pool | +| [PluginManifest](Interface.PluginManifest.md) | Plugin manifest that declares metadata and resource requirements. Attached to plugin classes as a static property. | | [RequestedClaims](Interface.RequestedClaims.md) | Optional claims for fine-grained Unity Catalog table permissions When specified, the returned token will be scoped to only the requested tables | | [RequestedResource](Interface.RequestedResource.md) | Resource to request permissions for in Unity Catalog | +| [ResourceEntry](Interface.ResourceEntry.md) | Internal representation of a resource in the registry. Extends ResourceRequirement with resolution state and plugin ownership. | +| [ResourceFieldEntry](Interface.ResourceFieldEntry.md) | Defines a single field for a resource. Each field has its own environment variable and optional description. Single-value types use one key (e.g. id); multi-value types (database, secret) use multiple (e.g. instance_name, database_name or scope, key). | +| [ResourceRequirement](Interface.ResourceRequirement.md) | Declares a resource requirement for a plugin. Can be defined statically in a manifest or dynamically via getResourceRequirements(). 
| | [StreamExecutionSettings](Interface.StreamExecutionSettings.md) | Configuration for streaming execution with default and user-scoped settings | | [TelemetryConfig](Interface.TelemetryConfig.md) | OpenTelemetry configuration for AppKit applications | +| [ValidationResult](Interface.ValidationResult.md) | Result of validating all registered resources against the environment. | ## Type Aliases | Type Alias | Description | | ------ | ------ | +| [ConfigSchema](TypeAlias.ConfigSchema.md) | Configuration schema definition for plugin config. Re-exported from the standard JSON Schema Draft 7 types. | | [IAppRouter](TypeAlias.IAppRouter.md) | Express router type for plugin route registration | +| [ResourcePermission](TypeAlias.ResourcePermission.md) | Union of all possible permission levels across all resource types. | ## Variables @@ -62,5 +71,7 @@ plugin architecture, and React integration. | [getExecutionContext](Function.getExecutionContext.md) | Get the current execution context. | | [getLakebaseOrmConfig](Function.getLakebaseOrmConfig.md) | Get Lakebase connection configuration for ORMs that don't accept pg.Pool directly. | | [getLakebasePgConfig](Function.getLakebasePgConfig.md) | Get Lakebase connection configuration for PostgreSQL clients. | +| [getPluginManifest](Function.getPluginManifest.md) | Loads and validates the manifest from a plugin constructor. Normalizes string type/permission to strict ResourceType/ResourcePermission. | +| [getResourceRequirements](Function.getResourceRequirements.md) | Gets the resource requirements from a plugin's manifest. | | [getWorkspaceClient](Function.getWorkspaceClient.md) | Get workspace client from config or SDK default auth chain | | [isSQLTypeMarker](Function.isSQLTypeMarker.md) | Type guard to check if a value is a SQL type marker | diff --git a/docs/docs/api/appkit/typedoc-sidebar.ts b/docs/docs/api/appkit/typedoc-sidebar.ts index d25c3e9a..3421d7ee 100644 --- a/docs/docs/api/appkit/typedoc-sidebar.ts +++ b/docs/docs/api/appkit/typedoc-sidebar.ts @@ -9,6 +9,11 @@ const typedocSidebar: SidebarsConfig = { type: "doc", id: "api/appkit/Enumeration.RequestedClaimsPermissionSet", label: "RequestedClaimsPermissionSet" + }, + { + type: "doc", + id: "api/appkit/Enumeration.ResourceType", + label: "ResourceType" } ] }, @@ -51,6 +56,11 @@ const typedocSidebar: SidebarsConfig = { id: "api/appkit/Class.Plugin", label: "Plugin" }, + { + type: "doc", + id: "api/appkit/Class.ResourceRegistry", + label: "ResourceRegistry" + }, { type: "doc", id: "api/appkit/Class.ServerError", @@ -102,6 +112,11 @@ const typedocSidebar: SidebarsConfig = { id: "api/appkit/Interface.LakebasePoolConfig", label: "LakebasePoolConfig" }, + { + type: "doc", + id: "api/appkit/Interface.PluginManifest", + label: "PluginManifest" + }, { type: "doc", id: "api/appkit/Interface.RequestedClaims", @@ -112,6 +127,21 @@ const typedocSidebar: SidebarsConfig = { id: "api/appkit/Interface.RequestedResource", label: "RequestedResource" }, + { + type: "doc", + id: "api/appkit/Interface.ResourceEntry", + label: "ResourceEntry" + }, + { + type: "doc", + id: "api/appkit/Interface.ResourceFieldEntry", + label: "ResourceFieldEntry" + }, + { + type: "doc", + id: "api/appkit/Interface.ResourceRequirement", + label: "ResourceRequirement" + }, { type: "doc", id: "api/appkit/Interface.StreamExecutionSettings", @@ -121,6 +151,11 @@ const typedocSidebar: SidebarsConfig = { type: "doc", id: "api/appkit/Interface.TelemetryConfig", label: "TelemetryConfig" + }, + { + type: "doc", + id: 
"api/appkit/Interface.ValidationResult", + label: "ValidationResult" } ] }, @@ -128,10 +163,20 @@ const typedocSidebar: SidebarsConfig = { type: "category", label: "Type Aliases", items: [ + { + type: "doc", + id: "api/appkit/TypeAlias.ConfigSchema", + label: "ConfigSchema" + }, { type: "doc", id: "api/appkit/TypeAlias.IAppRouter", label: "IAppRouter" + }, + { + type: "doc", + id: "api/appkit/TypeAlias.ResourcePermission", + label: "ResourcePermission" } ] }, @@ -185,6 +230,16 @@ const typedocSidebar: SidebarsConfig = { id: "api/appkit/Function.getLakebasePgConfig", label: "getLakebasePgConfig" }, + { + type: "doc", + id: "api/appkit/Function.getPluginManifest", + label: "getPluginManifest" + }, + { + type: "doc", + id: "api/appkit/Function.getResourceRequirements", + label: "getResourceRequirements" + }, { type: "doc", id: "api/appkit/Function.getWorkspaceClient", diff --git a/pnpm-lock.yaml b/pnpm-lock.yaml index f8ee73ce..0108e4d6 100644 --- a/pnpm-lock.yaml +++ b/pnpm-lock.yaml @@ -132,18 +132,6 @@ importers: '@databricks/appkit': specifier: workspace:* version: link:../../packages/appkit - drizzle-orm: - specifier: ^0.45.1 - version: 0.45.1(@opentelemetry/api@1.9.0)(@types/pg@8.16.0)(pg@8.18.0) - reflect-metadata: - specifier: ^0.2.0 - version: 0.2.2 - sequelize: - specifier: ^6.37.7 - version: 6.37.7(pg@8.18.0) - typeorm: - specifier: ^0.3.20 - version: 0.3.28(pg@8.18.0) zod: specifier: ^4.1.13 version: 4.1.13 @@ -4119,9 +4107,6 @@ packages: '@slorber/remark-comment@1.0.0': resolution: {integrity: sha512-RCE24n7jsOj1M0UPvIQCHTe7fI0sFL4S2nwKVWwHyVr/wI/H8GosgsJGyhnsZoGFnD/P2hLf1mSbrrgSLN93NA==} - '@sqltools/formatter@1.2.5': - resolution: {integrity: sha512-Uy0+khmZqUrUGm5dmMqVlnvufZRSK0FbYzVgp0UMstm+F5+W2/jnEEQyc9vo1ZR/E5ZI/B1WjjoTqBqwJL6Krw==} - '@standard-schema/spec@1.1.0': resolution: {integrity: sha512-l2aFy5jALhniG5HgqrD6jXLi/rUWrKvqN/qJx6yoJsgKhblVd+iqqU4RCXavm/jPityDo5TCvKMnpjKnOriy0w==} @@ -4710,9 +4695,6 @@ packages: '@types/unist@3.0.3': resolution: {integrity: sha512-ko/gIFJRv177XgZsZcBwnqJN5x/Gien8qNOn0D5bQU/zAzVf9Zt3BlcUiLqhV9y4ARk0GbT3tnUiPNgnTXzc/Q==} - '@types/validator@13.15.10': - resolution: {integrity: sha512-T8L6i7wCuyoK8A/ZeLYt1+q0ty3Zb9+qbSSvrIVitzT3YjZqkTZ40IbRsPanlB4h1QB3JVL1SYCdR6ngtFYcuA==} - '@types/ws@8.18.1': resolution: {integrity: sha512-ThVF6DCVhA8kUGy+aazFQ4kXQ7E1Ty7A3ypFOe0IcJV8O/M511G99AW24irKrW56Wt44yG9+ij8FaqoBGkuBXg==} @@ -5046,10 +5028,6 @@ packages: app-module-path@2.2.0: resolution: {integrity: sha512-gkco+qxENJV+8vFcDiiFhuoSvRXb2a/QPqpSoWhVz829VNJfOTnELbBmPmNKFxf3xdNnw4DWCkzkDaavcX/1YQ==} - app-root-path@3.1.0: - resolution: {integrity: sha512-biN3PwB2gUtjaYy/isrU3aNWI5w+fAfvHkSvCKeQGxhmYpwKFUxudR3Yya+KqVRHBmEDYh+/lTozYCFbmzX4nA==} - engines: {node: '>= 6.0.0'} - aproba@2.1.0: resolution: {integrity: sha512-tLIEcj5GuR2RSTnxNKdkK0dJ/GrC7P38sUkiDmDuHfsHmbagTFAxDVIBltoklXEVIQ/f14IL8IMJ5pn9Hez1Ew==} @@ -5124,10 +5102,6 @@ packages: peerDependencies: postcss: ^8.1.0 - available-typed-arrays@1.0.7: - resolution: {integrity: sha512-wvUjBtSGN7+7SjNpq/9M2Tg350UZD3q62IFZLbRAR1bSMlCo1ZaeW+BJ+D090e4hIIZLBcTDWe4Mh4jvUDajzQ==} - engines: {node: '>= 0.4'} - babel-loader@9.2.1: resolution: {integrity: sha512-fqe8naHt46e0yIdkjUZYqddSXfej3AHajX+CSO5X7oy0EmPc6o5Xh+RClNoHjnieWz9AW4kZxW9yyFMhVB1QLA==} engines: {node: '>= 14.15.0'} @@ -5255,9 +5229,6 @@ packages: buffer@5.7.1: resolution: {integrity: sha512-EHcyIPBQ4BSGlvjB16k5KgAJ27CIsHY/2JBmCRReo48y9rQ3MaUzWX3KVlBa4U7MyX02HdVj0K7C3WaB3ju7FQ==} - buffer@6.0.3: - resolution: {integrity: 
sha512-FTiCpNxtwiZZHEZbcbTIcZjERVICn9yq/pDFkTl95/AxzD1naBctN7YO68riM/gLSDY7sdrMby8hofADYuuqOA==} - bufferutil@4.0.9: resolution: {integrity: sha512-WDtdLmJvAuNNPzByAYpRo2rF1Mmradw6gvWsQKf63476DDXmomT9zUiGypLcG4ibIM67vhAj8jJRdbmEws2Aqw==} engines: {node: '>=6.14.2'} @@ -6077,14 +6048,6 @@ packages: resolution: {integrity: sha512-aW35yZM6Bb/4oJlZncMH2LCoZtJXTRxES17vE3hoRiowU2kWHaJKFkSBDnDR+cm9J+9QhXmREyIfv0pji9ejCQ==} engines: {node: '>=10'} - dedent@1.7.1: - resolution: {integrity: sha512-9JmrhGZpOlEgOLdQgSm0zxFaYoQon408V1v49aqTWuXENVlnCuY9JBZcXZiCsZQWDjTm5Qf/nIvAy77mXDAjEg==} - peerDependencies: - babel-plugin-macros: ^3.1.0 - peerDependenciesMeta: - babel-plugin-macros: - optional: true - deep-eql@5.0.2: resolution: {integrity: sha512-h5k/5U50IJJFpzfL6nO9jaaumfjO/f2NjK/oYB2Djzm4p9L+3T9qWpZqZ2hAbLPuuYq9wrU08WQyBTL5GbPk5Q==} engines: {node: '>=6'} @@ -6320,101 +6283,6 @@ packages: resolution: {integrity: sha512-JVUnt+DUIzu87TABbhPmNfVdBDt18BLOWjMUFJMSi/Qqg7NTYtabbvSNJGOJ7afbRuv9D/lngizHtP7QyLQ+9w==} engines: {node: '>=12'} - dottie@2.0.6: - resolution: {integrity: sha512-iGCHkfUc5kFekGiqhe8B/mdaurD+lakO9txNnTvKtA6PISrw86LgqHvRzWYPyoE2Ph5aMIrCw9/uko6XHTKCwA==} - - drizzle-orm@0.45.1: - resolution: {integrity: sha512-Te0FOdKIistGNPMq2jscdqngBRfBpC8uMFVwqjf6gtTVJHIQ/dosgV/CLBU2N4ZJBsXL5savCba9b0YJskKdcA==} - peerDependencies: - '@aws-sdk/client-rds-data': '>=3' - '@cloudflare/workers-types': '>=4' - '@electric-sql/pglite': '>=0.2.0' - '@libsql/client': '>=0.10.0' - '@libsql/client-wasm': '>=0.10.0' - '@neondatabase/serverless': '>=0.10.0' - '@op-engineering/op-sqlite': '>=2' - '@opentelemetry/api': ^1.4.1 - '@planetscale/database': '>=1.13' - '@prisma/client': '*' - '@tidbcloud/serverless': '*' - '@types/better-sqlite3': '*' - '@types/pg': '*' - '@types/sql.js': '*' - '@upstash/redis': '>=1.34.7' - '@vercel/postgres': '>=0.8.0' - '@xata.io/client': '*' - better-sqlite3: '>=7' - bun-types: '*' - expo-sqlite: '>=14.0.0' - gel: '>=2' - knex: '*' - kysely: '*' - mysql2: '>=2' - pg: '>=8' - postgres: '>=3' - prisma: '*' - sql.js: '>=1' - sqlite3: '>=5' - peerDependenciesMeta: - '@aws-sdk/client-rds-data': - optional: true - '@cloudflare/workers-types': - optional: true - '@electric-sql/pglite': - optional: true - '@libsql/client': - optional: true - '@libsql/client-wasm': - optional: true - '@neondatabase/serverless': - optional: true - '@op-engineering/op-sqlite': - optional: true - '@opentelemetry/api': - optional: true - '@planetscale/database': - optional: true - '@prisma/client': - optional: true - '@tidbcloud/serverless': - optional: true - '@types/better-sqlite3': - optional: true - '@types/pg': - optional: true - '@types/sql.js': - optional: true - '@upstash/redis': - optional: true - '@vercel/postgres': - optional: true - '@xata.io/client': - optional: true - better-sqlite3: - optional: true - bun-types: - optional: true - expo-sqlite: - optional: true - gel: - optional: true - knex: - optional: true - kysely: - optional: true - mysql2: - optional: true - pg: - optional: true - postgres: - optional: true - prisma: - optional: true - sql.js: - optional: true - sqlite3: - optional: true - dts-resolver@2.1.2: resolution: {integrity: sha512-xeXHBQkn2ISSXxbJWD828PFjtyg+/UrMDo7W4Ffcs7+YWCquxU8YjV1KoxuiL+eJ5pg3ll+bC6flVv61L3LKZg==} engines: {node: '>=20.18.0'} @@ -6872,10 +6740,6 @@ packages: debug: optional: true - for-each@0.3.5: - resolution: {integrity: sha512-dKx12eRCVIzqCxFGplyFKJMPvLEWgmNtUrpTiJIR5u97zEhRG8ySrtboPHZXx7daLxQVrl643cTzbab2tkQjxg==} - engines: {node: '>= 0.4'} - for-in@1.0.2: 
resolution: {integrity: sha512-7EwmXrOjyL+ChxMhmG5lnW9MPt1aIeZEwKhQzoBUdTV0N3zuwWDZYVJatDvZ2OyzPUvdIAZDsCetk3coyMfcnQ==} engines: {node: '>=0.10.0'} @@ -7150,10 +7014,6 @@ packages: resolution: {integrity: sha512-1cDNdwJ2Jaohmb3sg4OmKaMBwuC48sYni5HUw2DvsC8LjGTLK9h+eb1X6RyuOHe4hT0ULCW68iomhjUoKUqlPQ==} engines: {node: '>= 0.4'} - has-tostringtag@1.0.2: - resolution: {integrity: sha512-NqADB8VjPFLM2V0VvHUewwwsw0ZWBaIdgo+ieHtK3hasLz4qeCRjYcqfB6AQrBggRKppKF8L52/VqdVsO47Dlw==} - engines: {node: '>= 0.4'} - has-unicode@2.0.1: resolution: {integrity: sha512-8Rf9Y83NBReMnx0gFzA8JImQACstCYWUplepDa9xprwwtmgEZUF0h/i5xSA625zB/I37EtrswSST6OXxwaaIJQ==} @@ -7454,10 +7314,6 @@ packages: resolution: {integrity: sha512-uyH0zfr1erU1OohLk0fT4Rrb94AOhguWNOcD9uGrSpRvNB+6gZXUoJX5J0NtvzBO10YZ9PgvA4NFgt+fYg8ojw==} engines: {node: '>=12'} - inflection@1.13.4: - resolution: {integrity: sha512-6I/HUDeYFfuNCVS3td055BaXBwKYuzw7K3ExVMStBowKo9oOAMJIXIHvdyR3iboTCp1b+1i5DSkIZTcwIktuDw==} - engines: {'0': node >= 0.4.0} - inflight@1.0.6: resolution: {integrity: sha512-k92I/b08q4wvFscXCLvqfsHCrjrF7yiXsQuIVvVE7N82W3+aqpzuUdBbfhWcy/FZR3/4IgflMgKLOsvPDrGCJA==} deprecated: This module is not supported, and leaks memory. Do not use it. Check out lru-cache if you want a good and tested way to coalesce async requests by a key value, which is much more comprehensive and powerful. @@ -7552,10 +7408,6 @@ packages: resolution: {integrity: sha512-i2R6zNFDwgEHJyQUtJEk0XFi1i0dPFn/oqjK3/vPCcDeJvW5NQ83V8QbicfF1SupOaB0h8ntgBC2YiE7dfyctQ==} engines: {node: '>=4'} - is-callable@1.2.7: - resolution: {integrity: sha512-1BC0BVFhS/p0qtw6enp8e+8OD0UrK0oFLztSjNzhcKA3WDuJxxAPXzPuPtKkjEY9UUoEWlX/8fgKeu2S8i9JTA==} - engines: {node: '>= 0.4'} - is-ci@3.0.1: resolution: {integrity: sha512-ZYvCgrefwqoQ6yTyYUbQu64HsITZ3NfKX1lzaEYdkTDcfKzzCI/wthRRYKkdjHKFVgNiXKAKm65Zo1pk2as/QQ==} hasBin: true @@ -7691,10 +7543,6 @@ packages: resolution: {integrity: sha512-+oDTluR6WEjdXEJMnC2z6A4FRwFoYuvShVVEGsS7ewc0UTi2QtAKMDJuL4BDEVt+5T7MjFo12RP8ghOM75oKJw==} engines: {node: '>=8'} - is-typed-array@1.1.15: - resolution: {integrity: sha512-p3EcsicXjit7SaskXHs1hA91QxgTw46Fv6EFKKGS5DRFLD8yKnohjF3hxoju94b/OcMZoQukzpPpBE9uLVKzgQ==} - engines: {node: '>= 0.4'} - is-typedarray@1.0.0: resolution: {integrity: sha512-cyA56iCMHAh5CdzjJIa4aohJyeO1YbwLi3Jc35MmRU6poroFjIGZzUzupGiRPOjgHg9TLu43xbpwXk523fMxKA==} @@ -7739,9 +7587,6 @@ packages: isarray@1.0.0: resolution: {integrity: sha512-VLghIWNM6ELQzo7zwmcg0NmTVyWKYjvIeM83yjp0wRDTmUnrM678fQbcKBo6n2CJEF0szoG//ytg+TKla89ALQ==} - isarray@2.0.5: - resolution: {integrity: sha512-xHjhDr3cNBK0BzdUJSPXZntQUx/mwMS5Rw4A7lPJ90XGAO6ISP/ePDNuo0vhqOZU+UD5JoodwCAAoZQd3FeAKw==} - isbinaryfile@5.0.6: resolution: {integrity: sha512-I+NmIfBHUl+r2wcDd6JwE9yWje/PIVY/R5/CmV8dXLZd5K+L9X2klAOwfAHNnondLXkbHyTAleQAWonpTJBTtw==} engines: {node: '>= 18.0.0'} @@ -8505,12 +8350,6 @@ packages: engines: {node: '>=18'} hasBin: true - moment-timezone@0.5.48: - resolution: {integrity: sha512-f22b8LV1gbTO2ms2j2z13MuPogNoh5UzxL3nzNAYKGraILnbGc9NEE6dyiiiLv46DGRb8A4kg8UKWLjPthxBHw==} - - moment@2.30.1: - resolution: {integrity: sha512-uEmtNhbDOrWPFS+hdjFCBfy9f2YoyzRpwcl+DqpC6taX21FzsTLQVbMV/W7PzNSX6x/bhC1zA3c2UQ5NzH6how==} - mri@1.2.0: resolution: {integrity: sha512-tzzskb3bG8LvYGFF/mDTpq3jpI6Q9wc3LEmBaghu+DdCssd1FakN7Bc0hVNmEyGq1bq3RgfkCb3cmQLpNPOroA==} engines: {node: '>=4'} @@ -9006,10 +8845,6 @@ packages: points-on-path@0.2.1: resolution: {integrity: sha512-25ClnWWuw7JbWZcgqY/gJ4FQWadKxGWk+3kR/7kD0tCaDtPPMj7oHu2ToLaVhfpnHrZzYby2w6tUA0eOIuUg8g==} - 
possible-typed-array-names@1.1.0: - resolution: {integrity: sha512-/+5VFTchJDoVj3bhoqi6UeymcD00DAwb1nJwamzPvHEszJ4FpF6SNNbUbOS8yI56qHzdV8eK0qEfOSiodkTdxg==} - engines: {node: '>= 0.4'} - postcss-attribute-case-insensitive@7.0.1: resolution: {integrity: sha512-Uai+SupNSqzlschRyNx3kbCTWgY/2hcwtHEI/ej2LJWc9JJ77qKgGptd8DHwY1mXtZ7Aoh4z4yxfwMBue9eNgw==} engines: {node: '>=18'} @@ -9872,9 +9707,6 @@ packages: resolution: {integrity: sha512-oMA2dcrw6u0YfxJQXm342bFKX/E4sG9rbTzO9ptUcR/e8A33cHuvStiYOwH7fszkZlZ1z/ta9AAoPk2F4qIOHA==} engines: {node: '>=18'} - retry-as-promised@7.1.1: - resolution: {integrity: sha512-hMD7odLOt3LkTjcif8aRZqi/hybjpLNgSk5oF5FCowfCjok6LukpN2bDX7R5wDmbgBQFn7YoBxSagmtXHaJYJw==} - retry@0.13.1: resolution: {integrity: sha512-XQBQ3I8W1Cge0Seh+6gjj03LbmRFWuoszgK9ooCpwYIrhhoO80pfq4cUkU5DkknwfOfFteRwlZ56PYOGYyFWdg==} engines: {node: '>= 4'} @@ -10073,43 +9905,6 @@ packages: resolution: {integrity: sha512-p4rRk4f23ynFEfcD9LA0xRYngj+IyGiEYyqqOak8kaN0TvNmuxC2dcVeBn62GpCeR2CpWqyHCNScTP91QbAVFg==} engines: {node: '>= 0.8.0'} - sequelize-pool@7.1.0: - resolution: {integrity: sha512-G9c0qlIWQSK29pR/5U2JF5dDQeqqHRragoyahj/Nx4KOOQ3CPPfzxnfqFPCSB7x5UgjOgnZ61nSxz+fjDpRlJg==} - engines: {node: '>= 10.0.0'} - - sequelize@6.37.7: - resolution: {integrity: sha512-mCnh83zuz7kQxxJirtFD7q6Huy6liPanI67BSlbzSYgVNl5eXVdE2CN1FuAeZwG1SNpGsNRCV+bJAVVnykZAFA==} - engines: {node: '>=10.0.0'} - peerDependencies: - ibm_db: '*' - mariadb: '*' - mysql2: '*' - oracledb: '*' - pg: '*' - pg-hstore: '*' - snowflake-sdk: '*' - sqlite3: '*' - tedious: '*' - peerDependenciesMeta: - ibm_db: - optional: true - mariadb: - optional: true - mysql2: - optional: true - oracledb: - optional: true - pg: - optional: true - pg-hstore: - optional: true - snowflake-sdk: - optional: true - sqlite3: - optional: true - tedious: - optional: true - serialize-javascript@6.0.2: resolution: {integrity: sha512-Saa1xPByTTq2gdeFZYLLo+RFE35NHZkAbqZeWNd3BpzppeVisAqpDjcp8dyf6uIvEqJRd46jemmyA4iFIeVk8g==} @@ -10134,11 +9929,6 @@ packages: setprototypeof@1.2.0: resolution: {integrity: sha512-E5LDX7Wrp85Kil5bhZv46j8jOeboKq5JMmYM3gVGdGH8xFpPWXUMsNrlODCrkoxMEeNi/XZIwuRvY4XNwYMJpw==} - sha.js@2.4.12: - resolution: {integrity: sha512-8LzC5+bvI45BjpfXU8V5fdU2mfeKiQe1D1gIMn7XUlF3OTUrpdJpPPH4EMAnF0DsHHdSZqCdSss5qCmJKuiO3w==} - engines: {node: '>= 0.10'} - hasBin: true - shallow-clone@3.0.1: resolution: {integrity: sha512-/6KqX+GVUdqPuPPd2LxDDxzX6CAbjJehAAOKlNpqqUpAqPM6HeL8f+o3a+JsyGjn2lv0WY8UsTgUJjU9Ok55NA==} engines: {node: '>=8'} @@ -10294,10 +10084,6 @@ packages: sprintf-js@1.0.3: resolution: {integrity: sha512-D9cPgkvLlV3t3IzL0D0YLvGA9Ahk4PcvVwUbN0dSGr1aP0Nrt4AEnTUbuGvquEC0mA64Gqt1fzirlRs5ibXx8g==} - sql-highlight@6.1.0: - resolution: {integrity: sha512-ed7OK4e9ywpE7pgRMkMQmZDPKSVdm0oX5IEtZiKnFucSF0zu6c80GZBe38UqHuVhTWJ9xsKgSMjCG2bml86KvA==} - engines: {node: '>=14'} - srcset@4.0.0: resolution: {integrity: sha512-wvLeHgcVHKO8Sc/H/5lkGreJQVeYMm9rlmt8PuR1xE31rIuXhuzznUUqAt8MqLhB3MqJdFzlNAfpcWnxiFUcPw==} engines: {node: '>=12'} @@ -10548,10 +10334,6 @@ packages: resolution: {integrity: sha512-Y1KQBgDd/NUc+LfOtKS6mNsC9CCaH+m2P1RoIZy7RAPo3C3/t8X45+zgut31cRZtZ3xKPjfn3TkGTrctC2TQIQ==} hasBin: true - to-buffer@1.2.2: - resolution: {integrity: sha512-db0E3UJjcFhpDhAF4tLo03oli3pwl3dbnzXOUIlRKrp+ldk/VUxzpWYZENsw2SZiuBjHAk7DfB0VU7NKdpb6sw==} - engines: {node: '>= 0.4'} - to-regex-range@5.0.1: resolution: {integrity: sha512-65P7iz6X5yEr1cwcgvQxbbIw7Uk3gOy5dIdtZ4rDveLqhrdJP+Li/Hx6tyK0NEb+2GCyneCMJiGqrADCSNk8sQ==} engines: {node: '>=8.0'} @@ -10563,9 +10345,6 @@ 
packages: resolution: {integrity: sha512-o5sSPKEkg/DIQNmH43V0/uerLrpzVedkUh8tGNvaeXpfpuwjKenlSox/2O/BTlZUtEe+JG7s5YhEz608PlAHRA==} engines: {node: '>=0.6'} - toposort-class@1.0.1: - resolution: {integrity: sha512-OsLcGGbYF3rMjPUf8oKktyvCiUxSbqMMS39m33MAjLTC1DVIH6x3WSt63/M77ihI09+Sdfk1AXvfhCEeUmC7mg==} - totalist@3.0.1: resolution: {integrity: sha512-sf4i37nQ2LBx4m3wB74y+ubopq6W/dIzXg0FDGjsYnZHVa1Da8FH853wlL2gtUhg+xJXjfk3kUZS3BRoQeoQBQ==} engines: {node: '>=6'} @@ -10717,10 +10496,6 @@ packages: resolution: {integrity: sha512-TkRKr9sUTxEH8MdfuCSP7VizJyzRNMjj2J2do2Jr3Kym598JVdEksuzPQCnlFPW4ky9Q+iA+ma9BGm06XQBy8g==} engines: {node: '>= 0.6'} - typed-array-buffer@1.0.3: - resolution: {integrity: sha512-nAYYwfY3qnzX30IkA6AQZjVbtK6duGontcQm1WSG1MD94YLqK0515GNApXkoxKOWMusVssAHWLh9SeaoefYFGw==} - engines: {node: '>= 0.4'} - typedarray-to-buffer@3.1.5: resolution: {integrity: sha512-zdu8XMNEDepKKR+XYOXAVPtWui0ly0NtohUscw+UmaHiAWT8hrV1rr//H6V+0DvJ3OQ19S979M0laLfX8rm82Q==} @@ -10745,61 +10520,6 @@ packages: peerDependencies: typescript: 5.0.x || 5.1.x || 5.2.x || 5.3.x || 5.4.x || 5.5.x || 5.6.x || 5.7.x || 5.8.x || 5.9.x - typeorm@0.3.28: - resolution: {integrity: sha512-6GH7wXhtfq2D33ZuRXYwIsl/qM5685WZcODZb7noOOcRMteM9KF2x2ap3H0EBjnSV0VO4gNAfJT5Ukp0PkOlvg==} - engines: {node: '>=16.13.0'} - hasBin: true - peerDependencies: - '@google-cloud/spanner': ^5.18.0 || ^6.0.0 || ^7.0.0 || ^8.0.0 - '@sap/hana-client': ^2.14.22 - better-sqlite3: ^8.0.0 || ^9.0.0 || ^10.0.0 || ^11.0.0 || ^12.0.0 - ioredis: ^5.0.4 - mongodb: ^5.8.0 || ^6.0.0 - mssql: ^9.1.1 || ^10.0.0 || ^11.0.0 || ^12.0.0 - mysql2: ^2.2.5 || ^3.0.1 - oracledb: ^6.3.0 - pg: ^8.5.1 - pg-native: ^3.0.0 - pg-query-stream: ^4.0.0 - redis: ^3.1.1 || ^4.0.0 || ^5.0.14 - sql.js: ^1.4.0 - sqlite3: ^5.0.3 - ts-node: ^10.7.0 - typeorm-aurora-data-api-driver: ^2.0.0 || ^3.0.0 - peerDependenciesMeta: - '@google-cloud/spanner': - optional: true - '@sap/hana-client': - optional: true - better-sqlite3: - optional: true - ioredis: - optional: true - mongodb: - optional: true - mssql: - optional: true - mysql2: - optional: true - oracledb: - optional: true - pg: - optional: true - pg-native: - optional: true - pg-query-stream: - optional: true - redis: - optional: true - sql.js: - optional: true - sqlite3: - optional: true - ts-node: - optional: true - typeorm-aurora-data-api-driver: - optional: true - typescript-eslint@8.49.0: resolution: {integrity: sha512-zRSVH1WXD0uXczCXw+nsdjGPUdx4dfrs5VQoHnUWmv1U3oNlAKv4FUNdLDhVUg+gYn+a5hUESqch//Rv5wVhrg==} engines: {node: ^18.18.0 || ^20.9.0 || >=21.1.0} @@ -11024,10 +10744,6 @@ packages: validate-npm-package-license@3.0.4: resolution: {integrity: sha512-DpKm2Ui/xN7/HQKCtpZxoRWBhZ9Z0kqtygG8XCgNQ8ZlDnxuQmWhj566j8fN4Cu3/JmbhsDo7fcAJq4s9h27Ew==} - validator@13.15.26: - resolution: {integrity: sha512-spH26xU080ydGggxRyR1Yhcbgx+j3y5jbNXk/8L+iRvdIEQ4uTRH2Sgf2dokud6Q4oAtsbNvJ1Ft+9xmm6IZcA==} - engines: {node: '>= 0.10'} - value-equal@1.0.1: resolution: {integrity: sha512-NOJ6JZCAWr0zlxZt+xqCHNTEKOsrks2HQd4MqhP1qy4z1SkbEP467eNx6TgDKXMvUOb+OENfJCZwM+16n7fRfw==} @@ -11277,10 +10993,6 @@ packages: whatwg-url@5.0.0: resolution: {integrity: sha512-saE57nupxk6v3HY35+jzBwYa0rKSy0XR8JSxZPwgLr7ys0IBzhGviA1/TUGJLmSVqs8pb9AnvICXEuOHLprYTw==} - which-typed-array@1.1.20: - resolution: {integrity: sha512-LYfpUkmqwl0h9A2HL09Mms427Q1RZWuOHsukfVcKRq9q95iQxdw0ix1JQrqbcDR9PH1QDwf5Qo8OZb5lksZ8Xg==} - engines: {node: '>= 0.4'} - which@1.3.1: resolution: {integrity: 
sha512-HxJdYWq1MTIQbJ3nw0cqssHoTNU267KlrDuGZ1WYlxDStUtKUhOaJmh112/TZmHxxUfuJqPXSOm7tDyas0OSIQ==} hasBin: true @@ -11312,9 +11024,6 @@ packages: resolution: {integrity: sha512-1lOb3qdzw6OFmOzoY0nauhLG72TpWtb5qgYPiSh/62rjc1XidBSDio2qw0pwHh17VINF217ebIkZJdFLZFn9SA==} engines: {node: '>=18'} - wkx@0.5.0: - resolution: {integrity: sha512-Xng/d4Ichh8uN4l0FToV/258EjMGU9MGcA0HV2d9B/ZpZB3lqQm7nkOdZdm5GhKtLLhAE7PiVQwN4eN+2YJJUg==} - word-wrap@1.2.5: resolution: {integrity: sha512-BN22B5eaMMI9UMtjrGd5g5eCYPpCPDUy0FJXbYsaT5zYxjFOckS53SQDE3pWkVoWpHXVb3BrYcEN4Twa55B5cA==} engines: {node: '>=0.10.0'} @@ -16018,8 +15727,6 @@ snapshots: micromark-util-character: 1.2.0 micromark-util-symbol: 1.1.0 - '@sqltools/formatter@1.2.5': {} - '@standard-schema/spec@1.1.0': {} '@standard-schema/utils@0.3.0': {} @@ -16683,8 +16390,6 @@ snapshots: '@types/unist@3.0.3': {} - '@types/validator@13.15.10': {} - '@types/ws@8.18.1': dependencies: '@types/node': 24.10.1 @@ -17157,8 +16862,6 @@ snapshots: app-module-path@2.2.0: {} - app-root-path@3.1.0: {} - aproba@2.1.0: {} arg@5.0.2: {} @@ -17221,10 +16924,6 @@ snapshots: postcss: 8.5.6 postcss-value-parser: 4.2.0 - available-typed-arrays@1.0.7: - dependencies: - possible-typed-array-names: 1.1.0 - babel-loader@9.2.1(@babel/core@7.28.5)(webpack@5.103.0): dependencies: '@babel/core': 7.28.5 @@ -17384,11 +17083,6 @@ snapshots: base64-js: 1.5.1 ieee754: 1.2.1 - buffer@6.0.3: - dependencies: - base64-js: 1.5.1 - ieee754: 1.2.1 - bufferutil@4.0.9: dependencies: node-gyp-build: 4.8.4 @@ -18257,8 +17951,6 @@ snapshots: dependencies: mimic-response: 3.1.0 - dedent@1.7.1: {} - deep-eql@5.0.2: {} deep-extend@0.6.0: {} @@ -18509,14 +18201,6 @@ snapshots: dotenv@17.2.3: {} - dottie@2.0.6: {} - - drizzle-orm@0.45.1(@opentelemetry/api@1.9.0)(@types/pg@8.16.0)(pg@8.18.0): - optionalDependencies: - '@opentelemetry/api': 1.9.0 - '@types/pg': 8.16.0 - pg: 8.18.0 - dts-resolver@2.1.2: {} dunder-proto@1.0.1: @@ -19043,10 +18727,6 @@ snapshots: follow-redirects@1.15.11: {} - for-each@0.3.5: - dependencies: - is-callable: 1.2.7 - for-in@1.0.2: {} for-own@1.0.0: @@ -19379,10 +19059,6 @@ snapshots: has-symbols@1.1.0: {} - has-tostringtag@1.0.2: - dependencies: - has-symbols: 1.1.0 - has-unicode@2.0.1: {} has-yarn@3.0.0: {} @@ -19858,8 +19534,6 @@ snapshots: infima@0.2.0-alpha.45: {} - inflection@1.13.4: {} - inflight@1.0.6: dependencies: once: 1.4.0 @@ -19949,8 +19623,6 @@ snapshots: is-buffer@2.0.5: {} - is-callable@1.2.7: {} - is-ci@3.0.1: dependencies: ci-info: 3.9.0 @@ -20040,10 +19712,6 @@ snapshots: dependencies: text-extensions: 2.4.0 - is-typed-array@1.1.15: - dependencies: - which-typed-array: 1.1.20 - is-typedarray@1.0.0: {} is-unc-path@1.0.0: @@ -20074,8 +19742,6 @@ snapshots: isarray@1.0.0: {} - isarray@2.0.5: {} - isbinaryfile@5.0.6: {} isexe@2.0.0: {} @@ -21142,12 +20808,6 @@ snapshots: requirejs: 2.3.8 requirejs-config-file: 4.0.0 - moment-timezone@0.5.48: - dependencies: - moment: 2.30.1 - - moment@2.30.1: {} - mri@1.2.0: {} mrmime@2.0.1: {} @@ -21658,8 +21318,6 @@ snapshots: path-data-parser: 0.1.0 points-on-curve: 0.2.0 - possible-typed-array-names@1.1.0: {} - postcss-attribute-case-insensitive@7.0.1(postcss@8.5.6): dependencies: postcss: 8.5.6 @@ -22705,8 +22363,6 @@ snapshots: onetime: 7.0.0 signal-exit: 4.1.0 - retry-as-promised@7.1.1: {} - retry@0.13.1: {} reusify@1.1.0: {} @@ -22969,31 +22625,6 @@ snapshots: transitivePeerDependencies: - supports-color - sequelize-pool@7.1.0: {} - - sequelize@6.37.7(pg@8.18.0): - dependencies: - '@types/debug': 4.1.12 - '@types/validator': 
13.15.10 - debug: 4.4.3 - dottie: 2.0.6 - inflection: 1.13.4 - lodash: 4.17.21 - moment: 2.30.1 - moment-timezone: 0.5.48 - pg-connection-string: 2.11.0 - retry-as-promised: 7.1.1 - semver: 7.7.3 - sequelize-pool: 7.1.0 - toposort-class: 1.0.1 - uuid: 8.3.2 - validator: 13.15.26 - wkx: 0.5.0 - optionalDependencies: - pg: 8.18.0 - transitivePeerDependencies: - - supports-color - serialize-javascript@6.0.2: dependencies: randombytes: 2.1.0 @@ -23042,12 +22673,6 @@ snapshots: setprototypeof@1.2.0: {} - sha.js@2.4.12: - dependencies: - inherits: 2.0.4 - safe-buffer: 5.2.1 - to-buffer: 1.2.2 - shallow-clone@3.0.1: dependencies: kind-of: 6.0.3 @@ -23218,8 +22843,6 @@ snapshots: sprintf-js@1.0.3: {} - sql-highlight@6.1.0: {} - srcset@4.0.0: {} stackback@0.0.2: {} @@ -23432,12 +23055,6 @@ snapshots: dependencies: tldts-core: 7.0.17 - to-buffer@1.2.2: - dependencies: - isarray: 2.0.5 - safe-buffer: 5.2.1 - typed-array-buffer: 1.0.3 - to-regex-range@5.0.1: dependencies: is-number: 7.0.0 @@ -23449,8 +23066,6 @@ snapshots: toidentifier@1.0.1: {} - toposort-class@1.0.1: {} - totalist@3.0.1: {} tough-cookie@6.0.0: @@ -23574,12 +23189,6 @@ snapshots: media-typer: 0.3.0 mime-types: 2.1.35 - typed-array-buffer@1.0.3: - dependencies: - call-bound: 1.0.4 - es-errors: 1.3.0 - is-typed-array: 1.1.15 - typedarray-to-buffer@3.1.5: dependencies: is-typedarray: 1.0.0 @@ -23603,29 +23212,6 @@ snapshots: typescript: 5.6.3 yaml: 2.8.1 - typeorm@0.3.28(pg@8.18.0): - dependencies: - '@sqltools/formatter': 1.2.5 - ansis: 4.2.0 - app-root-path: 3.1.0 - buffer: 6.0.3 - dayjs: 1.11.19 - debug: 4.4.3 - dedent: 1.7.1 - dotenv: 16.6.1 - glob: 10.5.0 - reflect-metadata: 0.2.2 - sha.js: 2.4.12 - sql-highlight: 6.1.0 - tslib: 2.8.1 - uuid: 11.1.0 - yargs: 17.7.2 - optionalDependencies: - pg: 8.18.0 - transitivePeerDependencies: - - babel-plugin-macros - - supports-color - typescript-eslint@8.49.0(eslint@9.39.1(jiti@2.6.1))(typescript@5.9.3): dependencies: '@typescript-eslint/eslint-plugin': 8.49.0(@typescript-eslint/parser@8.49.0(eslint@9.39.1(jiti@2.6.1))(typescript@5.9.3))(eslint@9.39.1(jiti@2.6.1))(typescript@5.9.3) @@ -23849,8 +23435,6 @@ snapshots: spdx-correct: 3.2.0 spdx-expression-parse: 3.0.1 - validator@13.15.26: {} - value-equal@1.0.1: {} vary@1.1.2: {} @@ -24214,16 +23798,6 @@ snapshots: tr46: 0.0.3 webidl-conversions: 3.0.1 - which-typed-array@1.1.20: - dependencies: - available-typed-arrays: 1.0.7 - call-bind: 1.0.8 - call-bound: 1.0.4 - for-each: 0.3.5 - get-proto: 1.0.1 - gopd: 1.2.0 - has-tostringtag: 1.0.2 - which@1.3.1: dependencies: isexe: 2.0.0 @@ -24253,10 +23827,6 @@ snapshots: dependencies: execa: 8.0.1 - wkx@0.5.0: - dependencies: - '@types/node': 24.10.1 - word-wrap@1.2.5: {} wordwrap@1.0.0: {} From 40b6c1b2ae93b904674d3f5fed97c4a467ea5922 Mon Sep 17 00:00:00 2001 From: Pawel Kosiec Date: Tue, 17 Feb 2026 11:34:49 +0100 Subject: [PATCH 08/12] fix: include `@opentelemetry/api` in the packages --- packages/appkit/package.json | 2 +- packages/lakebase/package.json | 11 ++--------- pnpm-lock.yaml | 2 +- 3 files changed, 4 insertions(+), 11 deletions(-) diff --git a/packages/appkit/package.json b/packages/appkit/package.json index 2b8ce014..7b6373d4 100644 --- a/packages/appkit/package.json +++ b/packages/appkit/package.json @@ -72,7 +72,7 @@ "devDependencies": { "@types/express": "^4.17.25", "@types/json-schema": "^7.0.15", - "@types/pg": "^8.16.0", + "@types/pg": "^8.16.0", "@types/ws": "^8.18.1", "@vitejs/plugin-react": "^5.1.1" }, diff --git a/packages/lakebase/package.json 
b/packages/lakebase/package.json index eb5676ef..82fa1ef4 100644 --- a/packages/lakebase/package.json +++ b/packages/lakebase/package.json @@ -48,15 +48,8 @@ }, "dependencies": { "@databricks/sdk-experimental": "^0.16.0", - "pg": "^8.18.0" - }, - "peerDependencies": { - "@opentelemetry/api": "^1.0.0" - }, - "peerDependenciesMeta": { - "@opentelemetry/api": { - "optional": true - } + "pg": "^8.18.0", + "@opentelemetry/api": "^1.9.0" }, "devDependencies": { "@types/pg": "^8.16.0" diff --git a/pnpm-lock.yaml b/pnpm-lock.yaml index 0108e4d6..fb45926a 100644 --- a/pnpm-lock.yaml +++ b/pnpm-lock.yaml @@ -503,7 +503,7 @@ importers: specifier: ^0.16.0 version: 0.16.0 '@opentelemetry/api': - specifier: ^1.0.0 + specifier: ^1.9.0 version: 1.9.0 pg: specifier: ^8.18.0 From 62aecaeb8192469f1551eb2930d55b6edda080ab Mon Sep 17 00:00:00 2001 From: Pawel Kosiec Date: Tue, 17 Feb 2026 11:47:16 +0100 Subject: [PATCH 09/12] feat: add default logger configuration --- .../appkit/Interface.LakebasePoolConfig.md | 18 +- packages/lakebase/README.md | 68 +++++-- .../lakebase/src/__tests__/logger.test.ts | 184 ++++++++++++++++++ packages/lakebase/src/__tests__/pool.test.ts | 107 ++++++++-- packages/lakebase/src/index.ts | 1 + packages/lakebase/src/logger.ts | 54 +++++ packages/lakebase/src/pool.ts | 3 +- packages/lakebase/src/types.ts | 34 +++- 8 files changed, 432 insertions(+), 37 deletions(-) create mode 100644 packages/lakebase/src/__tests__/logger.test.ts create mode 100644 packages/lakebase/src/logger.ts diff --git a/docs/docs/api/appkit/Interface.LakebasePoolConfig.md b/docs/docs/api/appkit/Interface.LakebasePoolConfig.md index ce099dbf..19610a9a 100644 --- a/docs/docs/api/appkit/Interface.LakebasePoolConfig.md +++ b/docs/docs/api/appkit/Interface.LakebasePoolConfig.md @@ -47,13 +47,17 @@ Can also be set via LAKEBASE_ENDPOINT environment variable ### logger? ```ts -optional logger: Logger; +optional logger: Logger | LoggerConfig; ``` -Optional logger instance for the driver. -When not provided, the driver operates silently (no logging). +Optional logger configuration. -#### Example +Supports three modes: +1. Logger instance - Use your own logger implementation +2. LoggerConfig - Enable/disable specific log levels (uses console) +3. Undefined - Defaults to error logging only + +#### Examples ```typescript import { createLogger } from '@databricks/appkit'; @@ -62,6 +66,12 @@ const pool = createLakebasePool({ }); ``` +```typescript +const pool = createLakebasePool({ + logger: { debug: true, info: true, error: true } +}); +``` + *** ### sslMode? 
diff --git a/packages/lakebase/README.md b/packages/lakebase/README.md index e753739a..4db37421 100644 --- a/packages/lakebase/README.md +++ b/packages/lakebase/README.md @@ -73,22 +73,54 @@ See [Databricks authentication docs](https://docs.databricks.com/en/dev-tools/au ## Configuration -| Option | Environment Variable | Description | Default | -| ------------------------- | ---------------------------------- | ------------------------------------ | -------------------- | -| `host` | `PGHOST` | Lakebase host | _Required_ | -| `database` | `PGDATABASE` | Database name | _Required_ | -| `endpoint` | `LAKEBASE_ENDPOINT` | Endpoint resource path | _Required_ | -| `user` | `PGUSER` or `DATABRICKS_CLIENT_ID` | Username or service principal ID | Auto-detected | -| `port` | `PGPORT` | Port number | `5432` | -| `sslMode` | `PGSSLMODE` | SSL mode | `require` | -| `max` | - | Max pool connections | `10` | -| `idleTimeoutMillis` | - | Idle connection timeout | `30000` | -| `connectionTimeoutMillis` | - | Connection timeout | `10000` | -| `logger` | - | Optional logger instance | `undefined` (silent) | +| Option | Environment Variable | Description | Default | +| ------------------------- | ---------------------------------- | --------------------------------------- | ----------------------- | +| `host` | `PGHOST` | Lakebase host | _Required_ | +| `database` | `PGDATABASE` | Database name | _Required_ | +| `endpoint` | `LAKEBASE_ENDPOINT` | Endpoint resource path | _Required_ | +| `user` | `PGUSER` or `DATABRICKS_CLIENT_ID` | Username or service principal ID | Auto-detected | +| `port` | `PGPORT` | Port number | `5432` | +| `sslMode` | `PGSSLMODE` | SSL mode | `require` | +| `max` | - | Max pool connections | `10` | +| `idleTimeoutMillis` | - | Idle connection timeout | `30000` | +| `connectionTimeoutMillis` | - | Connection timeout | `10000` | +| `logger` | - | Logger instance or config | `{ error: true }` | ## Logging -By default, the driver operates silently (no logging). You can inject a custom logger for observability: +By default, the driver logs errors only. You can configure logging in three ways: + +### 1. Config-Based Logger (Simple) + +Enable/disable specific log levels using boolean flags: + +```typescript +import { createLakebasePool } from "@databricks/lakebase"; + +// Development mode: enable debug and error logs +const pool = createLakebasePool({ + logger: { debug: true, error: true }, +}); + +// Production mode: errors only (same as default) +const pool = createLakebasePool({ + logger: { error: true }, +}); + +// Verbose mode: all logs enabled +const pool = createLakebasePool({ + logger: { debug: true, info: true, warn: true, error: true }, +}); + +// Silent mode: all logs disabled +const pool = createLakebasePool({ + logger: { debug: false, info: false, warn: false, error: false }, +}); +``` + +### 2. Custom Logger (Advanced) + +Inject your own logger implementation for custom formatting or integrations: ```typescript const logger = { @@ -101,6 +133,16 @@ const logger = { const pool = createLakebasePool({ logger }); ``` +### 3. Default Behavior + +If no logger is provided, the driver defaults to error-only logging: + +```typescript +// These are equivalent: +const pool1 = createLakebasePool(); +const pool2 = createLakebasePool({ logger: { error: true } }); +``` + When used with AppKit, logging is automatically configured - see the [AppKit Integration](#appkit-integration) section. 
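+
+### Adapting a Structured Logger
+
+The `Logger` interface is just four printf-style methods, so an adapter for a structured logging library takes only a few lines. This is a sketch, assuming `pino` (which is not a dependency of this package); any library exposing `debug`/`info`/`warn`/`error` methods works the same way:
+
+```typescript
+import pino from "pino";
+import { createLakebasePool, type Logger } from "@databricks/lakebase";
+
+const base = pino({ level: "debug" });
+
+// Map the driver's (message, ...args) calls onto pino, which also
+// understands printf-style placeholders such as %s and %d.
+const logger: Logger = {
+  debug: (msg, ...args) => base.debug(msg, ...args),
+  info: (msg, ...args) => base.info(msg, ...args),
+  warn: (msg, ...args) => base.warn(msg, ...args),
+  error: (msg, ...args) => base.error(msg, ...args),
+};
+
+const pool = createLakebasePool({ logger });
+```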
## ORM Examples diff --git a/packages/lakebase/src/__tests__/logger.test.ts b/packages/lakebase/src/__tests__/logger.test.ts new file mode 100644 index 00000000..0de3a208 --- /dev/null +++ b/packages/lakebase/src/__tests__/logger.test.ts @@ -0,0 +1,184 @@ +import { describe, expect, test, vi } from "vitest"; +import { resolveLogger } from "../logger"; +import type { Logger, LoggerConfig } from "../types"; + +describe("resolveLogger", () => { + describe("Logger instance passthrough", () => { + test("should return Logger instance as-is", () => { + const mockLogger: Logger = { + debug: vi.fn(), + info: vi.fn(), + warn: vi.fn(), + error: vi.fn(), + }; + + const result = resolveLogger(mockLogger); + + expect(result).toBe(mockLogger); + }); + }); + + describe("LoggerConfig resolution", () => { + test("should create console logger with all levels enabled", () => { + const config: LoggerConfig = { + debug: true, + info: true, + warn: true, + error: true, + }; + + const logger = resolveLogger(config); + + expect(typeof logger.debug).toBe("function"); + expect(typeof logger.info).toBe("function"); + expect(typeof logger.warn).toBe("function"); + expect(typeof logger.error).toBe("function"); + }); + + test("should create console logger with selective levels", () => { + const consoleDebugSpy = vi.spyOn(console, "debug"); + const consoleInfoSpy = vi.spyOn(console, "info"); + const consoleWarnSpy = vi.spyOn(console, "warn"); + const consoleErrorSpy = vi.spyOn(console, "error"); + + const config: LoggerConfig = { + debug: true, + info: false, + warn: true, + error: false, + }; + + const logger = resolveLogger(config); + + // Test enabled levels + logger.debug("test debug"); + expect(consoleDebugSpy).toHaveBeenCalledWith("test debug"); + + logger.warn("test warn"); + expect(consoleWarnSpy).toHaveBeenCalledWith("test warn"); + + // Test disabled levels (should be noop) + logger.info("test info"); + expect(consoleInfoSpy).not.toHaveBeenCalled(); + + logger.error("test error"); + expect(consoleErrorSpy).not.toHaveBeenCalled(); + + consoleDebugSpy.mockRestore(); + consoleInfoSpy.mockRestore(); + consoleWarnSpy.mockRestore(); + consoleErrorSpy.mockRestore(); + }); + + test("should create noop logger when all levels disabled", () => { + const consoleDebugSpy = vi.spyOn(console, "debug"); + const consoleInfoSpy = vi.spyOn(console, "info"); + const consoleWarnSpy = vi.spyOn(console, "warn"); + const consoleErrorSpy = vi.spyOn(console, "error"); + + const config: LoggerConfig = { + debug: false, + info: false, + warn: false, + error: false, + }; + + const logger = resolveLogger(config); + + logger.debug("test"); + logger.info("test"); + logger.warn("test"); + logger.error("test"); + + expect(consoleDebugSpy).not.toHaveBeenCalled(); + expect(consoleInfoSpy).not.toHaveBeenCalled(); + expect(consoleWarnSpy).not.toHaveBeenCalled(); + expect(consoleErrorSpy).not.toHaveBeenCalled(); + + consoleDebugSpy.mockRestore(); + consoleInfoSpy.mockRestore(); + consoleWarnSpy.mockRestore(); + consoleErrorSpy.mockRestore(); + }); + + test("should handle empty LoggerConfig", () => { + const consoleDebugSpy = vi.spyOn(console, "debug"); + const consoleInfoSpy = vi.spyOn(console, "info"); + const consoleWarnSpy = vi.spyOn(console, "warn"); + const consoleErrorSpy = vi.spyOn(console, "error"); + + const config: LoggerConfig = {}; + + const logger = resolveLogger(config); + + logger.debug("test"); + logger.info("test"); + logger.warn("test"); + logger.error("test"); + + expect(consoleDebugSpy).not.toHaveBeenCalled(); + 
expect(consoleInfoSpy).not.toHaveBeenCalled(); + expect(consoleWarnSpy).not.toHaveBeenCalled(); + expect(consoleErrorSpy).not.toHaveBeenCalled(); + + consoleDebugSpy.mockRestore(); + consoleInfoSpy.mockRestore(); + consoleWarnSpy.mockRestore(); + consoleErrorSpy.mockRestore(); + }); + + test("should support format strings and args", () => { + const consoleErrorSpy = vi.spyOn(console, "error"); + + const config: LoggerConfig = { + error: true, + }; + + const logger = resolveLogger(config); + + logger.error("Error: %s %d", "test", 123); + + expect(consoleErrorSpy).toHaveBeenCalledWith("Error: %s %d", "test", 123); + + consoleErrorSpy.mockRestore(); + }); + }); + + describe("Default behavior", () => { + test("should create error-only logger when undefined", () => { + const consoleDebugSpy = vi.spyOn(console, "debug"); + const consoleInfoSpy = vi.spyOn(console, "info"); + const consoleWarnSpy = vi.spyOn(console, "warn"); + const consoleErrorSpy = vi.spyOn(console, "error"); + + const logger = resolveLogger(undefined); + + logger.debug("test"); + logger.info("test"); + logger.warn("test"); + logger.error("test error"); + + expect(consoleDebugSpy).not.toHaveBeenCalled(); + expect(consoleInfoSpy).not.toHaveBeenCalled(); + expect(consoleWarnSpy).not.toHaveBeenCalled(); + expect(consoleErrorSpy).toHaveBeenCalledWith("test error"); + + consoleDebugSpy.mockRestore(); + consoleInfoSpy.mockRestore(); + consoleWarnSpy.mockRestore(); + consoleErrorSpy.mockRestore(); + }); + + test("should create error-only logger when no argument provided", () => { + const consoleErrorSpy = vi.spyOn(console, "error"); + + const logger = resolveLogger(); + + logger.error("test error"); + + expect(consoleErrorSpy).toHaveBeenCalledWith("test error"); + + consoleErrorSpy.mockRestore(); + }); + }); +}); diff --git a/packages/lakebase/src/__tests__/pool.test.ts b/packages/lakebase/src/__tests__/pool.test.ts index b6f65246..5cd3db30 100644 --- a/packages/lakebase/src/__tests__/pool.test.ts +++ b/packages/lakebase/src/__tests__/pool.test.ts @@ -619,23 +619,30 @@ describe("createLakebasePool", () => { }); describe("logger injection", () => { - test("should operate silently without logger", () => { - const consoleSpy = vi.spyOn(console, "log"); + test("should default to error-only logging when no logger provided", () => { const consoleDebugSpy = vi.spyOn(console, "debug"); + const consoleInfoSpy = vi.spyOn(console, "info"); + const consoleWarnSpy = vi.spyOn(console, "warn"); + const consoleErrorSpy = vi.spyOn(console, "error"); const pool = createLakebasePool({ workspaceClient: {} as any, }); expect(pool).toBeDefined(); - expect(consoleSpy).not.toHaveBeenCalled(); + // Default behavior: only errors are logged expect(consoleDebugSpy).not.toHaveBeenCalled(); + expect(consoleInfoSpy).not.toHaveBeenCalled(); + expect(consoleWarnSpy).not.toHaveBeenCalled(); + // Error logging would happen on actual errors - consoleSpy.mockRestore(); consoleDebugSpy.mockRestore(); + consoleInfoSpy.mockRestore(); + consoleWarnSpy.mockRestore(); + consoleErrorSpy.mockRestore(); }); - test("should use injected logger", () => { + test("should use injected Logger instance", () => { const mockLogger = { debug: vi.fn(), info: vi.fn(), @@ -657,26 +664,96 @@ describe("createLakebasePool", () => { ); }); - test("should pass logger to error handlers", async () => { - const mockLogger = { - debug: vi.fn(), - info: vi.fn(), - warn: vi.fn(), - error: vi.fn(), - }; + test("should use LoggerConfig with selective levels", () => { + const consoleDebugSpy = 
vi.spyOn(console, "debug"); + const consoleInfoSpy = vi.spyOn(console, "info"); + const consoleWarnSpy = vi.spyOn(console, "warn"); + const consoleErrorSpy = vi.spyOn(console, "error"); + + const pool = createLakebasePool({ + workspaceClient: {} as any, + logger: { debug: true, info: false, warn: false, error: true }, + }); + + expect(pool).toBeDefined(); + // Debug should be logged + expect(consoleDebugSpy).toHaveBeenCalledWith( + expect.stringContaining("Created Lakebase connection pool"), + expect.any(String), + expect.any(String), + expect.any(String), + ); + // Info and warn should not be called + expect(consoleInfoSpy).not.toHaveBeenCalled(); + expect(consoleWarnSpy).not.toHaveBeenCalled(); + + consoleDebugSpy.mockRestore(); + consoleInfoSpy.mockRestore(); + consoleWarnSpy.mockRestore(); + consoleErrorSpy.mockRestore(); + }); + + test("should use LoggerConfig with all levels enabled", () => { + const consoleDebugSpy = vi.spyOn(console, "debug"); + const consoleInfoSpy = vi.spyOn(console, "info"); + const consoleWarnSpy = vi.spyOn(console, "warn"); + const consoleErrorSpy = vi.spyOn(console, "error"); + const pool = createLakebasePool({ + workspaceClient: {} as any, + logger: { debug: true, info: true, warn: true, error: true }, + }); + + expect(pool).toBeDefined(); + expect(consoleDebugSpy).toHaveBeenCalled(); + + consoleDebugSpy.mockRestore(); + consoleInfoSpy.mockRestore(); + consoleWarnSpy.mockRestore(); + consoleErrorSpy.mockRestore(); + }); + + test("should use LoggerConfig with all levels disabled", () => { + const consoleDebugSpy = vi.spyOn(console, "debug"); + const consoleInfoSpy = vi.spyOn(console, "info"); + const consoleWarnSpy = vi.spyOn(console, "warn"); + const consoleErrorSpy = vi.spyOn(console, "error"); + + const pool = createLakebasePool({ + workspaceClient: {} as any, + logger: { debug: false, info: false, warn: false, error: false }, + }); + + expect(pool).toBeDefined(); + expect(consoleDebugSpy).not.toHaveBeenCalled(); + expect(consoleInfoSpy).not.toHaveBeenCalled(); + expect(consoleWarnSpy).not.toHaveBeenCalled(); + expect(consoleErrorSpy).not.toHaveBeenCalled(); + + consoleDebugSpy.mockRestore(); + consoleInfoSpy.mockRestore(); + consoleWarnSpy.mockRestore(); + consoleErrorSpy.mockRestore(); + }); + + test("should pass resolved logger to error handlers", async () => { const { attachPoolMetrics } = await import("../telemetry"); createLakebasePool({ workspaceClient: {} as any, - logger: mockLogger, + logger: { debug: true, error: true }, }); - // Verify attachPoolMetrics was called with the logger + // Verify attachPoolMetrics was called with a resolved logger expect(attachPoolMetrics).toHaveBeenCalledWith( expect.anything(), expect.anything(), - mockLogger, + expect.objectContaining({ + debug: expect.any(Function), + info: expect.any(Function), + warn: expect.any(Function), + error: expect.any(Function), + }), ); }); }); diff --git a/packages/lakebase/src/index.ts b/packages/lakebase/src/index.ts index 7ea59006..b3c5a288 100644 --- a/packages/lakebase/src/index.ts +++ b/packages/lakebase/src/index.ts @@ -13,6 +13,7 @@ export type { GenerateDatabaseCredentialRequest, LakebasePoolConfig, Logger, + LoggerConfig, RequestedClaims, RequestedResource, } from "./types"; diff --git a/packages/lakebase/src/logger.ts b/packages/lakebase/src/logger.ts new file mode 100644 index 00000000..0e1ca271 --- /dev/null +++ b/packages/lakebase/src/logger.ts @@ -0,0 +1,54 @@ +import type { Logger, LoggerConfig } from "./types"; + +const LOGGER_METHODS = ["debug", "info", "warn", 
"error"] as const; + +/** + * Check if the provided value is a Logger instance + */ +function isLogger(value: unknown): value is Logger { + if (typeof value !== "object" || value === null) { + return false; + } + + return LOGGER_METHODS.every( + (method) => + method in value && + typeof (value as Record)[method] === "function", + ); +} + +/** + * Create a console-based logger from configuration + */ +function createConsoleLogger(config: LoggerConfig): Logger { + const noop = () => {}; + + return { + debug: config.debug ? console.debug.bind(console) : noop, + info: config.info ? console.info.bind(console) : noop, + warn: config.warn ? console.warn.bind(console) : noop, + error: config.error ? console.error.bind(console) : noop, + }; +} + +/** + * Resolve logger configuration to a Logger instance + * + * - If Logger instance provided, return as-is + * - If LoggerConfig provided, create console-based logger + * - If undefined, create error-only logger (default) + */ +export function resolveLogger(loggerConfig?: Logger | LoggerConfig): Logger { + // Already a Logger instance - use as-is + if (isLogger(loggerConfig)) { + return loggerConfig; + } + + // LoggerConfig provided - create console logger + if (loggerConfig && typeof loggerConfig === "object") { + return createConsoleLogger(loggerConfig); + } + + // Default: error-only logging + return createConsoleLogger({ error: true }); +} diff --git a/packages/lakebase/src/pool.ts b/packages/lakebase/src/pool.ts index a2d1016a..a586a74b 100644 --- a/packages/lakebase/src/pool.ts +++ b/packages/lakebase/src/pool.ts @@ -1,4 +1,5 @@ import pg from "pg"; +import { resolveLogger } from "./logger"; import { getLakebasePgConfig } from "./pool-config"; import { attachPoolMetrics, @@ -58,7 +59,7 @@ export function createLakebasePool( config?: Partial, ): pg.Pool { const userConfig = config ?? {}; - const logger = userConfig.logger; + const logger = resolveLogger(userConfig.logger); const telemetry = initTelemetry(); diff --git a/packages/lakebase/src/types.ts b/packages/lakebase/src/types.ts index c3611c54..0e930397 100644 --- a/packages/lakebase/src/types.ts +++ b/packages/lakebase/src/types.ts @@ -12,6 +12,21 @@ export interface Logger { error(message: string, ...args: unknown[]): void; } +/** + * Configuration for console-based logger. + * Specify which log levels should be enabled. + */ +export interface LoggerConfig { + /** Enable debug level logging */ + debug?: boolean; + /** Enable info level logging */ + info?: boolean; + /** Enable warning level logging */ + warn?: boolean; + /** Enable error level logging */ + error?: boolean; +} + /** * Telemetry configuration options */ @@ -77,18 +92,29 @@ export interface LakebasePoolConfig extends PoolConfig { telemetry?: TelemetryOptions; /** - * Optional logger instance for the driver. - * When not provided, the driver operates silently (no logging). + * Optional logger configuration. + * + * Supports three modes: + * 1. Logger instance - Use your own logger implementation + * 2. LoggerConfig - Enable/disable specific log levels (uses console) + * 3. 
+   *
-   * @example Using appkit logger
+   * @example Using custom logger
    * ```typescript
    * import { createLogger } from '@databricks/appkit';
    * const pool = createLakebasePool({
    *   logger: createLogger('connectors:lakebase')
    * });
    * ```
+   *
+   * @example Using config-based logger
+   * ```typescript
+   * const pool = createLakebasePool({
+   *   logger: { debug: true, info: true, error: true }
+   * });
+   * ```
    */
-  logger?: Logger;
+  logger?: Logger | LoggerConfig;
 }
 
 // ---------------------------------------------------------------------------

From 24dee9232e157fad02426cc0222d2b335e0a1936 Mon Sep 17 00:00:00 2001
From: Pawel Kosiec
Date: Tue, 17 Feb 2026 15:02:58 +0100
Subject: [PATCH 10/12] ci: fix Node version for Lakebase driver release
 pipeline

---
 .github/workflows/release-lakebase.yml | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/.github/workflows/release-lakebase.yml b/.github/workflows/release-lakebase.yml
index 639ff8b3..9bd910f0 100644
--- a/.github/workflows/release-lakebase.yml
+++ b/.github/workflows/release-lakebase.yml
@@ -39,7 +39,7 @@ jobs:
       - name: Setup Node.js
         uses: actions/setup-node@v4
         with:
-          node-version: 20
+          node-version: 24
           registry-url: "https://registry.npmjs.org"
           cache: "pnpm"

From c17d0b9321ef3494a6bf0985c2b16f77f1493419 Mon Sep 17 00:00:00 2001
From: Pawel Kosiec
Date: Tue, 17 Feb 2026 15:42:17 +0100
Subject: [PATCH 11/12] chore: make the otel package optional

---
 packages/lakebase/package.json               |  12 +-
 packages/lakebase/src/__tests__/pool.test.ts | 119 +++--
 packages/lakebase/src/logger.ts              |  18 +
 packages/lakebase/src/pool-config.ts         |   7 +-
 packages/lakebase/src/pool.ts                |  87 +---
 packages/lakebase/src/telemetry-noop.ts      | 101 +++++
 packages/lakebase/src/telemetry.ts           | 167 +++++--
 packages/lakebase/src/token-refresh.ts       |   9 +-
 pnpm-lock.yaml                               | 436 ++++++++++++++++++-
 9 files changed, 810 insertions(+), 146 deletions(-)
 create mode 100644 packages/lakebase/src/telemetry-noop.ts

diff --git a/packages/lakebase/package.json b/packages/lakebase/package.json
index 82fa1ef4..09c186c2 100644
--- a/packages/lakebase/package.json
+++ b/packages/lakebase/package.json
@@ -48,11 +48,19 @@
   },
   "dependencies": {
     "@databricks/sdk-experimental": "^0.16.0",
-    "pg": "^8.18.0",
+    "pg": "^8.18.0"
+  },
+  "peerDependencies": {
     "@opentelemetry/api": "^1.9.0"
   },
+  "peerDependenciesMeta": {
+    "@opentelemetry/api": {
+      "optional": true
+    }
+  },
   "devDependencies": {
-    "@types/pg": "^8.16.0"
+    "@types/pg": "^8.16.0",
+    "@opentelemetry/api": "^1.9.0"
   },
   "module": "./dist/index.js",
   "publishConfig": {
diff --git a/packages/lakebase/src/__tests__/pool.test.ts b/packages/lakebase/src/__tests__/pool.test.ts
index 5cd3db30..879080e5 100644
--- a/packages/lakebase/src/__tests__/pool.test.ts
+++ b/packages/lakebase/src/__tests__/pool.test.ts
@@ -10,16 +10,29 @@ vi.mock("pg", () => {
   const mockEnd = vi.fn().mockResolvedValue(undefined);
   const mockOn = vi.fn();
 
-  const MockPool = vi.fn((config) => ({
-    query: mockQuery,
-    connect: mockConnect,
-    end: mockEnd,
-    on: mockOn,
-    options: config, // Store config for inspection
-    totalCount: 3,
-    idleCount: 1,
-    waitingCount: 0,
-  }));
+  const MockPool = vi.fn((config) => {
+    const listeners: Map<string, Array<(...args: any[]) => void>> = new Map();
+    return {
+      query: mockQuery,
+      connect: mockConnect,
+      end: mockEnd,
+      on: vi.fn((event: string, handler: (...args: any[]) => void) => {
+        if (!listeners.has(event)) {
+          listeners.set(event, []);
+        }
+        listeners.get(event)?.push(handler);
+      }),
+      emit: vi.fn((event: string, ...args: any[]) => {
+        listeners.get(event)?.forEach((handler) => {
+          handler(...args);
+        });
+      }),
+      options: config, // Store config for inspection
+      totalCount: 3,
+      idleCount: 1,
+      waitingCount: 0,
+    };
+  });
 
   return {
     default: { Pool: MockPool },
@@ -74,16 +87,44 @@ const mockMeter = {
 vi.mock("../telemetry", () => ({
   SpanStatusCode: { OK: 1, ERROR: 2 },
   SpanKind: { CLIENT: 3 },
-  initTelemetry: vi.fn(() => ({
-    tracer: mockTracer,
-    meter: mockMeter,
-    tokenRefreshDuration: { record: mockHistogramRecord },
-    queryDuration: { record: mockHistogramRecord },
-    poolErrors: { add: mockCounterAdd },
-  })),
+  initTelemetry: vi.fn(() =>
+    Promise.resolve({
+      tracer: mockTracer,
+      meter: mockMeter,
+      tokenRefreshDuration: { record: mockHistogramRecord },
+      queryDuration: { record: mockHistogramRecord },
+      poolErrors: { add: mockCounterAdd },
+    }),
+  ),
   attachPoolMetrics: vi.fn(),
+  wrapPoolQuery: vi.fn((pool, telemetry) => {
+    // Simulate wrapping pool.query with telemetry
+    const origQuery = pool.query;
+    pool.query = function queryWithTelemetry(...args: unknown[]) {
+      return telemetry.tracer.startActiveSpan(
+        "lakebase.query",
+        { kind: 2, attributes: {} },
+        (span: any) => {
+          const result = origQuery.apply(pool, args);
+          if (result && typeof result.then === "function") {
+            return result.finally(() => span.end());
+          }
+          span.end();
+          return result;
+        },
+      );
+    } as any;
+  }),
 }));
 
+vi.mock("../logger", async (importOriginal) => {
+  const actual = await importOriginal<typeof import("../logger")>();
+  return {
+    ...actual,
+    // Keep the real resolveLogger implementation
+  };
+});
+
 // ── Test suite ───────────────────────────────────────────────────────
 
 describe("createLakebasePool", () => {
@@ -580,6 +621,9 @@ describe("createLakebasePool", () => {
       workspaceClient,
     });
 
+    // Wait for telemetry initialization
+    await new Promise((resolve) => setImmediate(resolve));
+
     const passwordFn = pool.options.password as () => Promise<string>;
     await passwordFn();
 
@@ -593,6 +637,9 @@
       workspaceClient,
     });
 
+    // Wait for telemetry initialization
+    await new Promise((resolve) => setImmediate(resolve));
+
     const passwordFn = pool.options.password as () => Promise<string>;
     await passwordFn();
 
@@ -601,17 +648,19 @@
       "lakebase.token.expires_at",
       expect.any(String),
     );
-    expect(mockSpanSetStatus).toHaveBeenCalledWith({
-      code: 1, // SpanStatusCode.OK
-    });
+    // Note: We don't explicitly set span status to OK for successful operations
+    // (default UNSET status is interpreted as success)
     expect(mockSpanEnd).toHaveBeenCalled();
   });
 
-  test("should wrap pool.query to add telemetry tracking", () => {
+  test("should wrap pool.query to add telemetry tracking", async () => {
     const pool = createLakebasePool({
       workspaceClient: {} as any,
     });
 
+    // Wait for telemetry initialization
+    await new Promise((resolve) => setImmediate(resolve));
+
     // pool.query should be our wrapped function
     expect(typeof pool.query).toBe("function");
     expect(pool.query.name).toBe("queryWithTelemetry");
@@ -736,25 +785,27 @@
       consoleErrorSpy.mockRestore();
     });
 
-    test("should pass resolved logger to error handlers", async () => {
-      const { attachPoolMetrics } = await import("../telemetry");
+    test("should pass resolved logger to error handlers", () => {
+      const consoleErrorSpy = vi.spyOn(console, "error");
 
-      createLakebasePool({
+      const pool = createLakebasePool({
        workspaceClient: {} as any,
        logger: { debug: true, error: true },
      });
 
-      // Verify attachPoolMetrics was called with a resolved logger
-
expect(attachPoolMetrics).toHaveBeenCalledWith( - expect.anything(), - expect.anything(), - expect.objectContaining({ - debug: expect.any(Function), - info: expect.any(Function), - warn: expect.any(Function), - error: expect.any(Function), - }), + // Trigger a pool error to verify logger is attached + const testError = new Error("Test error") as Error & { code?: string }; + testError.code = "TEST_CODE"; + pool.emit("error", testError); + + // Verify error was logged with correct format + expect(consoleErrorSpy).toHaveBeenCalledWith( + expect.stringContaining("Connection pool error"), + expect.stringContaining("Test error"), + expect.stringContaining("TEST_CODE"), ); + + consoleErrorSpy.mockRestore(); }); }); }); diff --git a/packages/lakebase/src/logger.ts b/packages/lakebase/src/logger.ts index 0e1ca271..b6d80ce5 100644 --- a/packages/lakebase/src/logger.ts +++ b/packages/lakebase/src/logger.ts @@ -1,3 +1,4 @@ +import type pg from "pg"; import type { Logger, LoggerConfig } from "./types"; const LOGGER_METHODS = ["debug", "info", "warn", "error"] as const; @@ -52,3 +53,20 @@ export function resolveLogger(loggerConfig?: Logger | LoggerConfig): Logger { // Default: error-only logging return createConsoleLogger({ error: true }); } + +/** + * Attach error logging to the pool. + * This has no OpenTelemetry dependency and should always be called. + * + * @param pool - PostgreSQL connection pool + * @param logger - Logger for error logging + */ +export function attachPoolErrorLogging(pool: pg.Pool, logger: Logger): void { + pool.on("error", (error: Error & { code?: string }) => { + logger.error( + "Connection pool error: %s (code: %s)", + error.message, + error.code, + ); + }); +} diff --git a/packages/lakebase/src/pool-config.ts b/packages/lakebase/src/pool-config.ts index bba4d663..7ff2ce72 100644 --- a/packages/lakebase/src/pool-config.ts +++ b/packages/lakebase/src/pool-config.ts @@ -1,6 +1,7 @@ import type pg from "pg"; import { getUsernameSync, parsePoolConfig } from "./config"; -import { type DriverTelemetry, initTelemetry } from "./telemetry"; +import type { DriverTelemetry } from "./telemetry"; +import { createNoopTelemetry } from "./telemetry-noop"; import { createTokenRefreshCallback } from "./token-refresh"; import type { LakebasePoolConfig, Logger } from "./types"; @@ -53,12 +54,10 @@ export function getLakebasePgConfig( if (userConfig.password !== undefined) { passwordConfig = userConfig.password; } else if (poolConfig.endpoint) { - // endpoint is guaranteed here -- parsePoolConfig() throws if - // neither endpoint nor password is provided passwordConfig = createTokenRefreshCallback({ userConfig, endpoint: poolConfig.endpoint, - telemetry: telemetry ?? initTelemetry(), + telemetry: telemetry ?? createNoopTelemetry(), logger, }); } diff --git a/packages/lakebase/src/pool.ts b/packages/lakebase/src/pool.ts index a586a74b..085719f8 100644 --- a/packages/lakebase/src/pool.ts +++ b/packages/lakebase/src/pool.ts @@ -1,12 +1,8 @@ import pg from "pg"; -import { resolveLogger } from "./logger"; +import { attachPoolErrorLogging, resolveLogger } from "./logger"; import { getLakebasePgConfig } from "./pool-config"; -import { - attachPoolMetrics, - initTelemetry, - SpanKind, - SpanStatusCode, -} from "./telemetry"; +import { attachPoolMetrics, initTelemetry, wrapPoolQuery } from "./telemetry"; +import { createNoopTelemetry } from "./telemetry-noop"; import type { LakebasePoolConfig } from "./types"; /** @@ -61,75 +57,24 @@ export function createLakebasePool( const userConfig = config ?? 
{};
   const logger = resolveLogger(userConfig.logger);
 
-  const telemetry = initTelemetry();
+  // Start with noop telemetry for token refresh
+  const telemetry = createNoopTelemetry();
 
   const poolConfig = getLakebasePgConfig(userConfig, telemetry, logger);
-
   const pool = new pg.Pool(poolConfig);
 
-  attachPoolMetrics(pool, telemetry, logger);
-
-  // Wrap pool.query to track query duration and create trace spans.
-  // pg.Pool.query has 15+ overloads that are difficult to type-preserve,
-  // so we use a loosely-typed wrapper and cast back.
-  const origQuery = pool.query.bind(pool);
-  const tracer = telemetry.tracer;
-  pool.query = function queryWithTelemetry(
-    ...args: unknown[]
-  ): ReturnType<typeof pool.query> {
-    const firstArg = args[0];
-    const sql =
-      typeof firstArg === "string"
-        ? firstArg
-        : (firstArg as { text?: string } | undefined)?.text;
-    const metricAttrs = {
-      "db.statement": sql ? sql.substring(0, 100) : "unknown",
-    };
-
-    return tracer.startActiveSpan(
-      "lakebase.query",
-      {
-        kind: SpanKind.CLIENT,
-        attributes: {
-          "db.system": "lakebase",
-          "db.statement": sql ? sql.substring(0, 500) : "unknown",
-        },
-      },
-      (span) => {
-        const start = Date.now();
-
-        const result = (
-          origQuery as (...a: unknown[]) => Promise<unknown> | undefined
-        )(...args);
-
-        // Promise-based query: record duration and end span on completion
-        if (result && typeof result.then === "function") {
-          return (result as Promise<{ rowCount?: number | null }>)
-            .then(
-              (res) => {
-                span.setAttribute("db.rows_affected", res?.rowCount ?? 0);
-                span.setStatus({ code: SpanStatusCode.OK });
-                return res;
-              },
-              (err: Error) => {
-                span.recordException(err);
-                span.setStatus({ code: SpanStatusCode.ERROR });
-                throw err;
-              },
-            )
-            .finally(() => {
-              telemetry.queryDuration.record(Date.now() - start, metricAttrs);
-              span.end();
-            }) as unknown as ReturnType<typeof pool.query>;
-        }
+  initTelemetry()
+    .then((t) => {
+      // Mutate telemetry object in place (token refresh callback has this reference)
+      Object.assign(telemetry, t);
+      attachPoolMetrics(pool, telemetry);
+      wrapPoolQuery(pool, telemetry);
+    })
+    .catch((err) => {
+      logger.error("Failed to initialize telemetry:", err);
+    });
 
-        // Callback-based query (void return): duration is approximate
-        telemetry.queryDuration.record(Date.now() - start, metricAttrs);
-        span.end();
-        return result as ReturnType<typeof pool.query>;
-      },
-    ) as ReturnType<typeof pool.query>;
-  } as typeof pool.query;
+  attachPoolErrorLogging(pool, logger);
 
   logger?.debug(
     "Created Lakebase connection pool for %s@%s/%s",
diff --git a/packages/lakebase/src/telemetry-noop.ts b/packages/lakebase/src/telemetry-noop.ts
new file mode 100644
index 00000000..6d5fc26c
--- /dev/null
+++ b/packages/lakebase/src/telemetry-noop.ts
@@ -0,0 +1,101 @@
+import type {
+  Counter,
+  Histogram,
+  Meter,
+  ObservableResult,
+  Span,
+  Tracer,
+} from "@opentelemetry/api";
+import type { DriverTelemetry } from "./telemetry";
+
+// Re-export types for convenience
+export type { Counter, Histogram, Meter, ObservableResult, Span, Tracer };
+
+/**
+ * Invalid span context used for noop spans.
+ * Matches OpenTelemetry's INVALID_SPAN_CONTEXT format.
+ */
+export const INVALID_SPAN_CONTEXT = {
+  traceId: "00000000000000000000000000000000",
+  spanId: "0000000000000000",
+  traceFlags: 0,
+  isRemote: false,
+};
+
+/**
+ * Creates a universal noop proxy that handles all telemetry operations.
diff --git a/packages/lakebase/src/telemetry-noop.ts b/packages/lakebase/src/telemetry-noop.ts
new file mode 100644
index 00000000..6d5fc26c
--- /dev/null
+++ b/packages/lakebase/src/telemetry-noop.ts
@@ -0,0 +1,101 @@
+import type {
+  Counter,
+  Histogram,
+  Meter,
+  ObservableResult,
+  Span,
+  Tracer,
+} from "@opentelemetry/api";
+import type { DriverTelemetry } from "./telemetry";
+
+// Re-export types for convenience
+export type { Counter, Histogram, Meter, ObservableResult, Span, Tracer };
+
+/**
+ * Invalid span context used for noop spans.
+ * Matches OpenTelemetry's INVALID_SPAN_CONTEXT format.
+ */
+export const INVALID_SPAN_CONTEXT = {
+  traceId: "00000000000000000000000000000000",
+  spanId: "0000000000000000",
+  traceFlags: 0,
+  isRemote: false,
+};
+
+/**
+ * Creates a universal noop proxy that handles all telemetry operations.
+ *
+ * This single proxy function handles spans, tracers, meters, histograms,
+ * counters, and observable gauges with intelligent behavior for special cases:
+ *
+ * Special behaviors:
+ * - `isRecording()` → returns `false` (not proxy)
+ * - `spanContext()` → returns `INVALID_SPAN_CONTEXT` object
+ * - `startActiveSpan()` → executes callback with noop span and returns result (critical for control flow)
+ * - Everything else → returns proxy for chaining
+ *
+ * @returns A self-referential proxy that handles all telemetry operations as no-ops
+ *
+ * @example
+ * ```typescript
+ * const noop = createNoopProxy();
+ * const span = noop.startSpan("operation"); // Returns noop
+ * span.setAttribute("key", "value"); // Returns noop (chainable)
+ * span.isRecording(); // Returns false
+ * span.end(); // Returns noop
+ * ```
+ */
+export function createNoopProxy(): any {
+  let proxy: any;
+
+  const noop = () => proxy;
+
+  proxy = new Proxy(noop, {
+    get(_target, prop) {
+      // Span
+      if (prop === "isRecording") return () => false;
+      if (prop === "spanContext") return () => INVALID_SPAN_CONTEXT;
+
+      // Tracer
+      if (prop === "startActiveSpan") {
+        // Critical: Execute callback with noop span and return its result
+        // This ensures code like `tracer.startActiveSpan('name', span => doWork())` works correctly
+        return (_name: string, ...args: any[]) => {
+          const fn = args[args.length - 1];
+          return typeof fn === "function" ? fn(proxy) : undefined;
+        };
+      }
+
+      // Everything else returns the proxy for chaining
+      return proxy;
+    },
+    apply() {
+      return proxy;
+    },
+    construct() {
+      return proxy;
+    },
+  });
+
+  return proxy;
+}
+
+/**
+ * Creates a noop telemetry object for use before OpenTelemetry is initialized.
+ *
+ * Returns a DriverTelemetry object with all properties set to noop proxies.
+ * This allows telemetry-dependent code (like token refresh) to work immediately
+ * while telemetry initialization happens in the background.
+ *
+ * @returns A DriverTelemetry object with noop implementations
+ */
+export function createNoopTelemetry(): DriverTelemetry {
+  const noop = createNoopProxy();
+  return {
+    tracer: noop,
+    meter: noop,
+    tokenRefreshDuration: noop,
+    queryDuration: noop,
+    poolErrors: noop,
+  } as unknown as DriverTelemetry;
+}
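The `startActiveSpan` special case above is easy to miss but critical: a plain chainable proxy would return itself instead of the callback's result, silently dropping return values. A small sketch of the behavior, using the patch's `createNoopProxy` (the `demo()` function is hypothetical):

```typescript
import { createNoopProxy } from "./telemetry-noop";

const tracer = createNoopProxy();

async function demo(): Promise<number> {
  // Resolves to 42 even with no OpenTelemetry installed, because the proxy
  // invokes the callback with a noop span and returns its result.
  return tracer.startActiveSpan("work", async (_span: unknown) => 42);
}

// tracer.startSpan("x").setAttribute("k", "v").end() also works: every other
// member access just returns the proxy for chaining.
```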
diff --git a/packages/lakebase/src/telemetry.ts b/packages/lakebase/src/telemetry.ts
index cced3bf0..425406f0 100644
--- a/packages/lakebase/src/telemetry.ts
+++ b/packages/lakebase/src/telemetry.ts
@@ -1,17 +1,47 @@
-import type { Counter, Histogram, Meter } from "@opentelemetry/api";
+import type pg from "pg";
 import {
-  metrics,
-  SpanKind,
-  SpanStatusCode,
+  type Counter,
+  createNoopTelemetry,
+  type Histogram,
+  type Meter,
+  type ObservableResult,
+  type Span,
   type Tracer,
-  trace,
-} from "@opentelemetry/api";
-import type pg from "pg";
-import type { Logger } from "./types";
+} from "./telemetry-noop";
+
+export type { Span, Tracer };
+
+// Three states: undefined = not loaded, null = load failed, object = loaded
+let otelApi: typeof import("@opentelemetry/api") | null | undefined;
+let loadPromise: Promise<void> | null = null;
+
+/**
+ * Ensures OpenTelemetry API is loaded (or load attempt has been made).
+ * Safe to call multiple times - will only attempt load once.
+ */
+async function ensureOtelLoaded(): Promise<void> {
+  if (otelApi !== undefined) return; // Already loaded or failed
+
+  if (!loadPromise) {
+    loadPromise = (async () => {
+      try {
+        otelApi = await import("@opentelemetry/api");
+      } catch {
+        otelApi = null; // Mark as failed
+      }
+    })();
+  }
+
+  await loadPromise;
+}
 
-// Re-export OpenTelemetry types for backward compatibility
-export { SpanKind, SpanStatusCode };
-export type { Tracer };
+/**
+ * Check if OpenTelemetry API is available.
+ * Returns false if not yet loaded or if load failed.
+ */
+export function isOtelAvailable(): boolean {
+  return otelApi !== null && otelApi !== undefined;
+}
 
 /** Telemetry instruments for the driver */
 export interface DriverTelemetry {
@@ -24,12 +54,17 @@
 
 /**
  * Initialize telemetry using OpenTelemetry's global registry.
- * If OTel providers are not initialized, operations will be no-ops automatically.
+ * If OpenTelemetry is not installed, returns noop implementations.
  */
-export function initTelemetry(): DriverTelemetry {
-  // Use global OTel registry - no injection needed!
-  const tracer = trace.getTracer("@databricks/lakebase");
-  const meter = metrics.getMeter("@databricks/lakebase");
+export async function initTelemetry(): Promise<DriverTelemetry> {
+  await ensureOtelLoaded();
+
+  if (!otelApi) {
+    return createNoopTelemetry();
+  }
+
+  const tracer = otelApi.trace.getTracer("@databricks/lakebase");
+  const meter = otelApi.metrics.getMeter("@databricks/lakebase");
 
   return {
     tracer,
@@ -53,19 +88,96 @@
 }
 
 /**
- * Attach pool-level metrics collection, error counting, and error logging.
+ * Wraps pool.query with telemetry tracing if OpenTelemetry is available.
+ * If OpenTelemetry is not available, does nothing (no overhead).
+ *
+ * @param pool - PostgreSQL connection pool
+ * @param telemetry - Telemetry instruments
+ */
+export function wrapPoolQuery(pool: pg.Pool, telemetry: DriverTelemetry): void {
+  if (!otelApi) {
+    return;
+  }
+
+  const { SpanKind, SpanStatusCode } = otelApi;
+  const origQuery = pool.query.bind(pool);
+  const tracer = telemetry.tracer;
+
+  pool.query = function queryWithTelemetry(
+    ...args: unknown[]
+  ): ReturnType<typeof origQuery> {
+    const firstArg = args[0];
+    const sql =
+      typeof firstArg === "string"
+        ? firstArg
+        : (firstArg as { text?: string } | undefined)?.text;
+    const metricAttrs = {
+      "db.statement": sql ? sql.substring(0, 100) : "unknown",
+    };
+
+    return tracer.startActiveSpan(
+      "lakebase.query",
+      {
+        kind: SpanKind.CLIENT,
+        attributes: {
+          "db.system": "lakebase",
+          "db.statement": sql ? sql.substring(0, 500) : "unknown",
+        },
+      },
+      (span: Span) => {
+        const start = Date.now();
+
+        const result = (
+          origQuery as (...a: unknown[]) => Promise<unknown> | undefined
+        )(...args);
+
+        // Promise-based query: record duration and end span on completion
+        if (result && typeof result.then === "function") {
+          return (result as Promise<{ rowCount?: number | null }>)
+            .then(
+              (res) => {
+                span.setAttribute("db.rows_affected", res?.rowCount ?? 0);
+                span.setStatus({ code: SpanStatusCode.OK });
+                return res;
+              },
+              (err: Error) => {
+                span.recordException(err);
+                span.setStatus({ code: SpanStatusCode.ERROR });
+                throw err;
+              },
+            )
+            .finally(() => {
+              telemetry.queryDuration.record(Date.now() - start, metricAttrs);
+              span.end();
+            }) as unknown as ReturnType<typeof origQuery>;
+        }
+
+        // Callback-based query (void return): duration is approximate
+        telemetry.queryDuration.record(Date.now() - start, metricAttrs);
+        span.end();
+        return result as ReturnType<typeof origQuery>;
+      },
+    ) as ReturnType<typeof origQuery>;
+  } as typeof pool.query;
+}
+
+/**
+ * Attach pool-level metrics collection using OpenTelemetry.
+ * Returns early if OpenTelemetry is not available (zero overhead).
  *
  * Uses observable gauges (pull model) for pool connection stats.
  *
  * @param pool - PostgreSQL connection pool
  * @param telemetry - Telemetry instruments
- * @param logger - Optional logger for error logging (silent if not provided)
  */
 export function attachPoolMetrics(
   pool: pg.Pool,
   telemetry: DriverTelemetry,
-  logger?: Logger,
 ): void {
+  if (!otelApi) {
+    return;
+  }
+
   const meter = telemetry.meter;
 
   const poolTotal = meter.createObservableGauge(
@@ -81,16 +193,17 @@
     { description: "Clients waiting for a connection" },
   );
 
-  poolTotal.addCallback((result) => result.observe(pool.totalCount));
-  poolIdle.addCallback((result) => result.observe(pool.idleCount));
-  poolWaiting.addCallback((result) => result.observe(pool.waitingCount));
+  poolTotal.addCallback((result: ObservableResult) =>
+    result.observe(pool.totalCount),
+  );
+  poolIdle.addCallback((result: ObservableResult) =>
+    result.observe(pool.idleCount),
+  );
+  poolWaiting.addCallback((result: ObservableResult) =>
+    result.observe(pool.waitingCount),
+  );
 
   pool.on("error", (error: Error & { code?: string }) => {
-    logger?.error(
-      "Connection pool error: %s (code: %s)",
-      error.message,
-      error.code,
-    );
     telemetry.poolErrors.add(1, {
       "error.code": error.code ?? "unknown",
     });
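The tri-state `otelApi` reference plus single-flight `loadPromise` used in `telemetry.ts` is a general recipe for optional peer dependencies. A standalone sketch, using a Node built-in as the stand-in module so the sketch runs anywhere (the real code imports `@opentelemetry/api`):

```typescript
// undefined = not attempted, null = load failed, object = loaded
let mod: typeof import("node:zlib") | null | undefined;
let loading: Promise<void> | null = null;

async function ensureLoaded(): Promise<void> {
  if (mod !== undefined) return; // already loaded or already failed
  loading ??= import("node:zlib").then(
    (m) => {
      mod = m; // success
    },
    () => {
      mod = null; // failure is remembered, never retried
    },
  );
  await loading;
}

// Call sites then guard with `if (!mod) return;` for a zero-cost fallback,
// exactly as wrapPoolQuery and attachPoolMetrics do above.
```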
"unknown", }); diff --git a/packages/lakebase/src/token-refresh.ts b/packages/lakebase/src/token-refresh.ts index d22bc7d4..4e0cd292 100644 --- a/packages/lakebase/src/token-refresh.ts +++ b/packages/lakebase/src/token-refresh.ts @@ -1,7 +1,7 @@ import type { WorkspaceClient } from "@databricks/sdk-experimental"; import { getWorkspaceClient } from "./config"; import { generateDatabaseCredential } from "./credentials"; -import { type DriverTelemetry, SpanStatusCode } from "./telemetry"; +import type { DriverTelemetry, Span } from "./telemetry"; import type { LakebasePoolConfig, Logger } from "./types"; // 2-minute buffer before token expiration to prevent race conditions @@ -81,13 +81,12 @@ export function createTokenRefreshCallback( { attributes: { "lakebase.endpoint": deps.endpoint }, }, - async (span) => { + async (span: Span) => { const tokenResult = await refreshToken(client, deps.endpoint); span.setAttribute( "lakebase.token.expires_at", new Date(tokenResult.expiresAt).toISOString(), ); - span.setStatus({ code: SpanStatusCode.OK }); span.end(); return tokenResult; }, @@ -95,7 +94,7 @@ export function createTokenRefreshCallback( cachedToken = result.token; tokenExpiresAt = result.expiresAt; - return cachedToken; + return result.token; } catch (error) { deps.logger?.error("Failed to fetch OAuth token: %O", { error, @@ -110,6 +109,6 @@ export function createTokenRefreshCallback( })(); } - return refreshPromise; + return refreshPromise as Promise; }; } diff --git a/pnpm-lock.yaml b/pnpm-lock.yaml index fb45926a..7db49f1b 100644 --- a/pnpm-lock.yaml +++ b/pnpm-lock.yaml @@ -132,6 +132,18 @@ importers: '@databricks/appkit': specifier: workspace:* version: link:../../packages/appkit + drizzle-orm: + specifier: ^0.45.1 + version: 0.45.1(@opentelemetry/api@1.9.0)(@types/pg@8.16.0)(pg@8.18.0) + reflect-metadata: + specifier: ^0.2.0 + version: 0.2.2 + sequelize: + specifier: ^6.37.7 + version: 6.37.7(pg@8.18.0) + typeorm: + specifier: ^0.3.20 + version: 0.3.28(pg@8.18.0) zod: specifier: ^4.1.13 version: 4.1.13 @@ -502,13 +514,13 @@ importers: '@databricks/sdk-experimental': specifier: ^0.16.0 version: 0.16.0 - '@opentelemetry/api': - specifier: ^1.9.0 - version: 1.9.0 pg: specifier: ^8.18.0 version: 8.18.0 devDependencies: + '@opentelemetry/api': + specifier: ^1.9.0 + version: 1.9.0 '@types/pg': specifier: ^8.16.0 version: 8.16.0 @@ -4107,6 +4119,9 @@ packages: '@slorber/remark-comment@1.0.0': resolution: {integrity: sha512-RCE24n7jsOj1M0UPvIQCHTe7fI0sFL4S2nwKVWwHyVr/wI/H8GosgsJGyhnsZoGFnD/P2hLf1mSbrrgSLN93NA==} + '@sqltools/formatter@1.2.5': + resolution: {integrity: sha512-Uy0+khmZqUrUGm5dmMqVlnvufZRSK0FbYzVgp0UMstm+F5+W2/jnEEQyc9vo1ZR/E5ZI/B1WjjoTqBqwJL6Krw==} + '@standard-schema/spec@1.1.0': resolution: {integrity: sha512-l2aFy5jALhniG5HgqrD6jXLi/rUWrKvqN/qJx6yoJsgKhblVd+iqqU4RCXavm/jPityDo5TCvKMnpjKnOriy0w==} @@ -4695,6 +4710,9 @@ packages: '@types/unist@3.0.3': resolution: {integrity: sha512-ko/gIFJRv177XgZsZcBwnqJN5x/Gien8qNOn0D5bQU/zAzVf9Zt3BlcUiLqhV9y4ARk0GbT3tnUiPNgnTXzc/Q==} + '@types/validator@13.15.10': + resolution: {integrity: sha512-T8L6i7wCuyoK8A/ZeLYt1+q0ty3Zb9+qbSSvrIVitzT3YjZqkTZ40IbRsPanlB4h1QB3JVL1SYCdR6ngtFYcuA==} + '@types/ws@8.18.1': resolution: {integrity: sha512-ThVF6DCVhA8kUGy+aazFQ4kXQ7E1Ty7A3ypFOe0IcJV8O/M511G99AW24irKrW56Wt44yG9+ij8FaqoBGkuBXg==} @@ -5028,6 +5046,10 @@ packages: app-module-path@2.2.0: resolution: {integrity: sha512-gkco+qxENJV+8vFcDiiFhuoSvRXb2a/QPqpSoWhVz829VNJfOTnELbBmPmNKFxf3xdNnw4DWCkzkDaavcX/1YQ==} + app-root-path@3.1.0: + resolution: 
diff --git a/pnpm-lock.yaml b/pnpm-lock.yaml
index fb45926a..7db49f1b 100644
--- a/pnpm-lock.yaml
+++ b/pnpm-lock.yaml
[auto-generated lockfile hunks elided: adds example-app dependencies drizzle-orm ^0.45.1, reflect-metadata ^0.2.0, sequelize ^6.37.7, and typeorm ^0.3.20, together with their transitive package and snapshot entries, and moves @opentelemetry/api ^1.9.0 from dependencies to devDependencies of the lakebase package.]
From bb8f929b4e82a0b072827c4a895676e25a370210 Mon Sep 17 00:00:00 2001
From: Pawel Kosiec
Date: Tue, 17 Feb 2026 15:49:59 +0100
Subject: [PATCH 12/12] fix: update package-lock

---
 pnpm-lock.yaml | 430 ------------------------------------------------
 1 file changed, 430 deletions(-)

diff --git a/pnpm-lock.yaml b/pnpm-lock.yaml
index 7db49f1b..f1c9010b 100644
--- a/pnpm-lock.yaml
+++ b/pnpm-lock.yaml
[auto-generated lockfile hunks elided: removes the 430 lines added in PATCH 01/12, dropping the drizzle-orm, reflect-metadata, sequelize, and typeorm entries and their transitive packages and snapshots.]