diff --git a/apps/docs/components/HomePageCover.tsx b/apps/docs/components/HomePageCover.tsx index fae8094b3c085..a6f7ef1ef5541 100644 --- a/apps/docs/components/HomePageCover.tsx +++ b/apps/docs/components/HomePageCover.tsx @@ -60,7 +60,7 @@ const HomePageCover = (props) => { tooltip: 'TanStack Start', icon: '/docs/img/icons/tanstack-icon', href: '/guides/getting-started/quickstarts/tanstack', - hasLightIcon: false, + hasLightIcon: true, }, { tooltip: 'Vue', diff --git a/apps/docs/components/Navigation/NavigationMenu/NavigationMenu.constants.ts b/apps/docs/components/Navigation/NavigationMenu/NavigationMenu.constants.ts index a57f010ade708..dc336db3fb88e 100644 --- a/apps/docs/components/Navigation/NavigationMenu/NavigationMenu.constants.ts +++ b/apps/docs/components/Navigation/NavigationMenu/NavigationMenu.constants.ts @@ -2827,6 +2827,7 @@ export const self_hosting: NavMenuConstant = { items: [ { name: 'Overview', url: '/guides/self-hosting' }, { name: 'Self-Hosting with Docker', url: '/guides/self-hosting/docker' }, + { name: 'Restore from Platform', url: '/guides/self-hosting/restore-from-platform' }, { name: 'Configuration', items: [{ name: 'Enabling MCP server', url: '/guides/self-hosting/enable-mcp' }], diff --git a/apps/docs/content/errorCodes/realtimeErrorCodes.toml b/apps/docs/content/errorCodes/realtimeErrorCodes.toml index d594f4f1da830..1daa1e73becf3 100644 --- a/apps/docs/content/errorCodes/realtimeErrorCodes.toml +++ b/apps/docs/content/errorCodes/realtimeErrorCodes.toml @@ -61,6 +61,10 @@ description = "The rate of joins per second from your clients has reached the ch [RealtimeDisabledForTenant] description = "Realtime has been disabled for the tenant." +resolution = "Your project may have been suspended for exceeding usage quotas. Contact support with your project reference ID and a description of your Realtime use case." +[[RealtimeDisabledForTenant.references]] +href = "https://supabase.com/docs/troubleshooting/realtime-project-suspended-for-exceeding-quotas" +description = "Troubleshooting guide for suspended projects" [UnableToConnectToTenantDatabase] description = "Realtime was not able to connect to the tenant's database." diff --git a/apps/docs/content/guides/getting-started.mdx b/apps/docs/content/guides/getting-started.mdx index 43578f9c3c3d8..3509c2a3c9a9c 100644 --- a/apps/docs/content/guides/getting-started.mdx +++ b/apps/docs/content/guides/getting-started.mdx @@ -186,6 +186,15 @@ hideToc: true icon: '/docs/img/icons/vuejs-icon', enabled: isFeatureEnabled('docs:framework_quickstarts'), }, + { + title: 'TanStack Start', + href: '/guides/getting-started/quickstarts/tanstack', + description: + 'Learn how to create a Supabase project, add some sample data to your database, and query the data from a TanStack Start app.', + icon: '/docs/img/icons/tanstack-icon', + hasLightIcon: true, + enabled: isFeatureEnabled('docs:framework_quickstarts'), + }, { title: 'Refine', href: '/guides/getting-started/quickstarts/refine', diff --git a/apps/docs/content/guides/platform/sso.mdx b/apps/docs/content/guides/platform/sso.mdx index 2a32952fc1fec..4315e7ddeb0d0 100644 --- a/apps/docs/content/guides/platform/sso.mdx +++ b/apps/docs/content/guides/platform/sso.mdx @@ -45,6 +45,20 @@ When SSO is enabled for an organization: - If an SSO user with the following email of `alice@foocorp.com` attempts to sign in with a GitHub account that uses the same email, a separate Supabase account is created and will not be linked to the SSO user's account. 
- SSO users will only see organizations/projects they've been invited to or auto-joined into. See [access control](/docs/guides/platform/access-control) for more details.

+## Enabling SSO for an organization
+
+- Review the steps above to configure your setup.
+- Invite users to the organization and ensure they join with their SSO-linked account.
+- If a user is already a member of the organization under a non-SSO account, they will need to be removed and invited again so that they join under their SSO account.
+
+
+
+**No automatic linking:** Each user account verified using an SSO identity provider will not be automatically linked to existing user accounts in the system. That is, if a user `valid.email@supabase.io` had signed up with a password, and then uses their company SSO login with your project, there will be two `valid.email@supabase.io` user accounts in the system.
+
+Users will need to ensure they are logged in with the correct account when accepting invites or accessing organizations/projects.
+
+

## Disabling SSO for an organization

If you disable the SSO provider for an organization, **all SSO users will immediately be unable to sign in**. Before disabling SSO, ensure you have at least one non-SSO owner account to prevent being locked out.
diff --git a/apps/docs/content/guides/self-hosting.mdx b/apps/docs/content/guides/self-hosting.mdx
index 37ac3e9b57190..6802d6b3dfe88 100644
--- a/apps/docs/content/guides/self-hosting.mdx
+++ b/apps/docs/content/guides/self-hosting.mdx
@@ -28,10 +28,10 @@ The fastest and recommended way to self-host Supabase is using Docker.

-## Other deployment options
+## Community-driven projects

{/* supa-mdx-lint-disable-next-line Rule004ExcludeWords */}
-There are several other ways to deploy Supabase with the help of community-driven projects. These projects may be outdated and are seeking active maintainers. If you're interested in maintaining one of these projects, [contact the Community team](/open-source/contributing/supasquad).
+There are several other options to deploy Supabase. If you're interested in helping these projects, visit our [Community](/contribute) page.
{selfHostingCommunity.map((x) => (
@@ -41,7 +41,6 @@ There are several other ways to deploy Supabase with the help of community-drive
title={
{x.name}
- Maintainer needed
}
>
diff --git a/apps/docs/content/guides/self-hosting/docker.mdx b/apps/docs/content/guides/self-hosting/docker.mdx
index 365055a18b627..d1f39a54d5c18 100644
--- a/apps/docs/content/guides/self-hosting/docker.mdx
+++ b/apps/docs/content/guides/self-hosting/docker.mdx
@@ -27,7 +27,7 @@ This guide assumes you're comfortable with:
- Docker and Docker Compose
- Networking fundamentals (ports, DNS, firewalls)

-If you're new to these topics, consider starting with [managed Supabase](/dashboard) for free, or try [local development with the CLI](/docs/guides/local-development).
+If you're new to these topics, consider starting with the managed [Supabase platform](/dashboard) for free.

You need the following installed on your system:
@@ -147,7 +147,7 @@ sh ./utils/generate-keys.sh

The script is experimental, so review the output before proceeding and also check `.env` after it's updated by the script.

-Alternatively, configure all secrets manually as follows.
+**Alternatively, configure all secrets manually as follows.**

### Configure database password
diff --git a/apps/docs/content/guides/self-hosting/restore-from-platform.mdx b/apps/docs/content/guides/self-hosting/restore-from-platform.mdx
new file mode 100644
index 0000000000000..05ae9cbc32bde
--- /dev/null
+++ b/apps/docs/content/guides/self-hosting/restore-from-platform.mdx
@@ -0,0 +1,180 @@
+---
+title: 'Restore a Platform Project to Self-Hosted'
+description: 'Restore your database from the Supabase platform to a self-hosted instance.'
+subtitle: 'Restore your database from the Supabase platform to a self-hosted instance.'
+---
+
+This guide walks you through restoring your database from a Supabase platform project to a [self-hosted Docker instance](/docs/guides/self-hosting/docker). Transferring storage objects and redeploying edge functions are not covered here.
+
+## Before you begin
+
+You need:
+
+- A new self-hosted Supabase instance ([Docker setup guide](/docs/guides/self-hosting/docker))
+- [Supabase CLI](/docs/guides/local-development/cli/getting-started) installed (or use `npx supabase`)
+- [Docker Desktop](https://docs.docker.com/get-started/get-docker/) installed (required by the CLI)
+- `psql` installed ([official installation guide](https://www.postgresql.org/download/))
+- Your Supabase database passwords (for platform and self-hosted)
+
+## Step 1: Get your platform connection string
+
+On your managed Supabase project dashboard, click [**Connect**](/dashboard/project/_?showConnect=true) and copy the connection string (use the session pooler or direct connection).
+
+## Step 2: Back up your platform database
+
+Export roles, schema, and data as three separate SQL files:
+
+```bash
+supabase db dump --db-url "[CONNECTION_STRING]" -f roles.sql --role-only
+```
+
+```bash
+supabase db dump --db-url "[CONNECTION_STRING]" -f schema.sql
+```
+
+```bash
+supabase db dump --db-url "[CONNECTION_STRING]" -f data.sql --use-copy --data-only
+```
+
+This produces SQL files that are compatible across Postgres versions.
+
+
+
+Using `supabase db dump` executes `pg_dump` under the hood but applies Supabase-specific filtering: it excludes internal schemas, strips reserved roles, and adds idempotent `IF NOT EXISTS` clauses. Using raw `pg_dump` directly will include Supabase internals and cause permission errors during restore.
CLI requires Docker because it runs `pg_dump` inside a container from the Supabase Postgres image rather than requiring a local Postgres installation. + + + +## Step 3: Prepare your self-hosted instance + +Before restoring, check the following on your self-hosted instance: + +- **Extensions**: Enable any non-default extensions your Supabase project uses. You can check which extensions are active by querying `select * from pg_extension;` on your managed database (or check Database Extensions in Dashboard). + +## Step 4: Restore to your self-hosted database + +Connect to your self-hosted Postgres and restore the dump files. The [default](/docs/guides/self-hosting/docker#accessing-postgres) connection string for self-hosted Supabase is: + +``` +postgres://postgres.your-tenant-id:[POSTGRES_PASSWORD]@[your-domain]:5432/postgres +``` + +Where `[POSTGRES_PASSWORD]` is the value of `POSTGRES_PASSWORD` in your self-hosted `.env` file. + +Use your domain name, your server IP, or localhost for `[your-domain]` depending on whether you are running self-hosted Supabase on a VPS, or locally. + +Run `psql` to restore: + +```bash +psql \ + --single-transaction \ + --variable ON_ERROR_STOP=1 \ + --file roles.sql \ + --file schema.sql \ + --command 'SET session_replication_role = replica' \ + --file data.sql \ + --dbname "postgres://postgres.your-tenant-id:[POSTGRES_PASSWORD]@[your-domain]:5432/postgres" +``` + +Setting `session_replication_role` to `replica` disables triggers during the data import, preventing issues like double-encryption of columns. + +## Step 5: Verify the restore + +Connect to your self-hosted database and run a few checks: + +```bash +psql "postgres://postgres.your-tenant-id:[POSTGRES_PASSWORD]@[your-domain]:5432/postgres" +``` + +```sql +-- Check your tables are present +\dt public.* + +-- Verify row counts on key tables +SELECT count(*) FROM auth.users; + +-- Check extensions +SELECT * FROM pg_extension; +``` + +## What's included and what's not + +The database dump includes your schema, data, roles, RLS policies, database functions, triggers, and `auth.users`. However, several things require separate configuration on your self-hosted instance: + +| Requires manual setup | How to configure | +| --- | --- | +| JWT secrets and API keys | Generate new ones and update `.env` | +| Auth provider settings (OAuth, Apple, etc.) | Configure `GOTRUE_EXTERNAL_*` variables in `.env` | +| Edge functions | Manually copy to your self-hosted instance | +| Storage objects | Transfer separately (not covered in this guide) | +| SMTP / email settings | Configure `SMTP_*` variables in `.env` | +| Custom domains and DNS | Point your DNS to the self-hosted server | + +## Auth considerations + +Your `auth.users` table and related data are included in the database dump, so user accounts are preserved. However: + +- **JWT secrets differ** between your platform and self-hosted instances. Existing tokens issued by the platform project will not be valid. Users will need to re-authenticate. +- **Social auth providers** (Apple, Google, GitHub, etc.) need to be configured in your self-hosted `.env` file. Set the relevant `GOTRUE_EXTERNAL_*` variables. See the Auth repository [README](https://github.com/supabase/auth) for all available options. +- **Redirect URLs** in your OAuth provider consoles (Apple Developer, Google Cloud Console, etc.) must be updated to point to your self-hosted hostname instead of `*.supabase.co`. 
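+
+For example, re-enabling GitHub sign-in on a self-hosted instance is done through `GOTRUE_EXTERNAL_*` variables in `.env`, roughly as sketched below. The exact variable set for each provider is listed in the Auth README mentioned above, and every value shown here is a placeholder.
+
+```bash
+# Illustrative .env entries for GitHub login on a self-hosted instance (placeholder values)
+GOTRUE_EXTERNAL_GITHUB_ENABLED=true
+GOTRUE_EXTERNAL_GITHUB_CLIENT_ID=your-github-oauth-client-id
+GOTRUE_EXTERNAL_GITHUB_SECRET=your-github-oauth-client-secret
+# Callback path assumes the default API gateway routing; adjust it to match your setup
+GOTRUE_EXTERNAL_GITHUB_REDIRECT_URI=https://your-domain/auth/v1/callback
+```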
+ +## Postgres version compatibility + +Managed Supabase may run a newer Postgres version (Postgres 17) than the self-hosted Docker image (currently Postgres 15). The `supabase db dump` command produces plain SQL files that work across major Postgres versions. + +Keep in mind: + +- The data dump may include Postgres 17-only settings or reference tables/columns from newer Auth and Storage versions that don't exist on self-hosted yet. See [Version mismatches](#version-mismatches-between-platform-and-self-hosted) in the troubleshooting section. +- Run the restore on a test self-hosted instance first to identify any incompatibilities. +- Check that all extensions you use are available on the self-hosted Postgres version. + +## Troubleshooting + +### Version mismatches between platform and self-hosted + +The platform may run a newer Postgres version (17 vs 15) and newer Auth service versions than self-hosted. The data dump can contain settings, tables, or columns that don't exist on your new self-hosted instance. + +**Common issues in `data.sql`:** + +- `SET transaction_timeout = 0` - a Postgres 17-only setting that fails on Postgres 15 +- `COPY` statements for tables that don't exist on self-hosted (e.g., `auth.oauth_clients`, `storage.buckets_vectors`, `storage.vector_indexes`) +- `COPY` statements with columns added in newer Auth versions (e.g., `auth.flow_state` with `oauth_client_state_id`, `linking_target_id`) + +**Workaround:** Edit `data.sql` before restoring: + +```bash +# Comment out PG17-only transaction_timeout +sed -i 's/^SET transaction_timeout/-- &/' data.sql +``` + +For missing tables or column mismatches, comment out the relevant `COPY ... FROM stdin;` line and its corresponding `\.` terminator. Run the restore without `--single-transaction` first to identify all failures, then fix them and run the final restore with `--single-transaction`. + +Keeping your self-hosted configuration [up to date](https://github.com/supabase/supabase/blob/master/docker/CHANGELOG.md) will minimize these gaps. + +### Extension not available + +If the restore fails because an extension isn't available, check whether it's supported on your self-hosted Postgres version. You can list available extensions with: + +```sql +SELECT * FROM pg_available_extensions; +``` + +### Connection refused + +Make sure your self-hosted Postgres port is accessible. In the default [self-hosted Supabase](/docs/guides/self-hosting/docker#accessing-postgres) setup, the user is `postgres.your-tenant-id` with Supavisor on port `5432`. + +### Legacy Studio configuration + +Studio in self-hosted Supabase historically used `supabase_admin` role (superuser) instead of `postgres`. Objects created via Studio UI were owned by `supabase_admin`. Check your `docker-compose.yml` [configuration](https://github.com/supabase/supabase/blob/2cb5befaa377a42b6d6ca152b98105b59054f2f4/docker/docker-compose.yml#L30) to see if `POSTGRES_USER_READ_WRITE` is set to `postgres`. + +### Custom roles missing passwords + +If you created custom database roles with the `LOGIN` attribute on your platform project, their passwords are not included in the dump. 
Set them manually after restore: + +```sql +ALTER ROLE your_custom_role WITH PASSWORD 'new-password'; +``` + +### Additional resources + +- [Backup and Restore using the CLI](/docs/guides/platform/migrating-within-supabase/backup-restore) +- [Restore Dashboard backup](/docs/guides/platform/migrating-within-supabase/dashboard-restore) diff --git a/apps/docs/content/troubleshooting/app-store-rejection-tls-error-in-ipv6-only-environments-4e9c62.mdx b/apps/docs/content/troubleshooting/app-store-rejection-tls-error-in-ipv6-only-environments-4e9c62.mdx index 719890ba60f4b..1bee3196363eb 100644 --- a/apps/docs/content/troubleshooting/app-store-rejection-tls-error-in-ipv6-only-environments-4e9c62.mdx +++ b/apps/docs/content/troubleshooting/app-store-rejection-tls-error-in-ipv6-only-environments-4e9c62.mdx @@ -2,6 +2,7 @@ title = "App Store Rejection: 'TLS error' in IPv6-only environments" topics = [ "platform" ] keywords = [] +database_id = "ce8c04e4-d493-4e15-832a-e59bdcf2b093" --- If your App Store submission is rejected with a 'TLS error' when tested in an IPv6-only environment, often citing a lack of AAAA records, it typically indicates application-level issues rather than a Supabase configuration problem. diff --git a/apps/docs/content/troubleshooting/auth-hooks-invalid-payload-when-anonymous-users-attempt-phone-changes-022c47.mdx b/apps/docs/content/troubleshooting/auth-hooks-invalid-payload-when-anonymous-users-attempt-phone-changes-022c47.mdx index 672aff0c2cc81..dc0918ea46810 100644 --- a/apps/docs/content/troubleshooting/auth-hooks-invalid-payload-when-anonymous-users-attempt-phone-changes-022c47.mdx +++ b/apps/docs/content/troubleshooting/auth-hooks-invalid-payload-when-anonymous-users-attempt-phone-changes-022c47.mdx @@ -2,10 +2,11 @@ title = "Auth Hooks: 'Invalid payload' when anonymous users attempt phone changes" topics = [ "auth", "cli" ] keywords = [] +database_id = "c1de4561-e95f-41e3-b298-fac9ae331a54" + [[errors]] http_status_code = 500 message = "Invalid payload sent to hook" - --- An 'Invalid payload sent to hook' error (500) occurs in Auth hooks when the payload includes `new_phone` for an anonymous user. diff --git a/apps/docs/content/troubleshooting/autovacuum-stalled-due-to-inactive-replication-slot-d55aa2.mdx b/apps/docs/content/troubleshooting/autovacuum-stalled-due-to-inactive-replication-slot-d55aa2.mdx index dc812890e83b7..97295762a134f 100644 --- a/apps/docs/content/troubleshooting/autovacuum-stalled-due-to-inactive-replication-slot-d55aa2.mdx +++ b/apps/docs/content/troubleshooting/autovacuum-stalled-due-to-inactive-replication-slot-d55aa2.mdx @@ -2,6 +2,7 @@ title = "Autovacuum Stalled Due to Inactive Replication Slot" topics = [ "database" ] keywords = [] +database_id = "a931b8af-3210-4188-bb03-87452923a498" --- If you observe that `supabase inspect db vacuum-stats` reports "Expect autovacuum? yes" for your tables, but autovacuum activity has been inactive for an extended period, leading to increasing database RAM usage, this typically indicates a stalled autovacuum process. One of the reasons for autovacuum to get stalled is an inactive replication slot for which this guide talks about. 
diff --git a/apps/docs/content/troubleshooting/cloudflare-origin-error-1016-on-custom-domain-a57af4.mdx b/apps/docs/content/troubleshooting/cloudflare-origin-error-1016-on-custom-domain-a57af4.mdx index d10be1232fa71..e7f64a04736ee 100644 --- a/apps/docs/content/troubleshooting/cloudflare-origin-error-1016-on-custom-domain-a57af4.mdx +++ b/apps/docs/content/troubleshooting/cloudflare-origin-error-1016-on-custom-domain-a57af4.mdx @@ -2,6 +2,8 @@ title = "'Cloudflare Origin Error 1016' on Custom Domain" topics = [ "platform" ] keywords = [] +database_id = "a60ee728-3add-438d-b8bf-433f1746cb3e" + [[errors]] code = "1016" message = "Cloudflare Origin Error" diff --git a/apps/docs/content/troubleshooting/deprecated-rls-features-Pm77Zs.mdx b/apps/docs/content/troubleshooting/deprecated-rls-features-Pm77Zs.mdx index b27677b816be1..6c4c8fdd41ae6 100644 --- a/apps/docs/content/troubleshooting/deprecated-rls-features-Pm77Zs.mdx +++ b/apps/docs/content/troubleshooting/deprecated-rls-features-Pm77Zs.mdx @@ -4,6 +4,7 @@ github_url = "https://github.com/orgs/supabase/discussions/16703" date_created = "2023-08-22T13:17:50+00:00" topics = ["database"] keywords = ["rls", "deprecated", "auth", "policy"] +database_id = "58c0cb7c-50a0-4a96-9551-bc97c28b7393" --- ## The `auth.role()` function is now deprecated diff --git a/apps/docs/content/troubleshooting/do-i-need-to-expose-security-definer-functions-in-row-level-security-policies-iI0uOw.mdx b/apps/docs/content/troubleshooting/do-i-need-to-expose-security-definer-functions-in-row-level-security-policies-iI0uOw.mdx index 6c515857f4647..a7afdafff70e3 100644 --- a/apps/docs/content/troubleshooting/do-i-need-to-expose-security-definer-functions-in-row-level-security-policies-iI0uOw.mdx +++ b/apps/docs/content/troubleshooting/do-i-need-to-expose-security-definer-functions-in-row-level-security-policies-iI0uOw.mdx @@ -4,6 +4,7 @@ github_url = "https://github.com/orgs/supabase/discussions/16784" date_created = "2023-08-24T13:45:01+00:00" topics = ["database"] keywords = ["security", "function", "schema", "policy", "definer"] +database_id = "1e02e989-fc12-4838-b304-c7d1356f6d2c" --- PostgREST supports 2 config parameters: diff --git a/apps/docs/content/troubleshooting/edge-function-546-error-response.mdx b/apps/docs/content/troubleshooting/edge-function-546-error-response.mdx index b5271bad1d91f..e2cb48c0ca96f 100644 --- a/apps/docs/content/troubleshooting/edge-function-546-error-response.mdx +++ b/apps/docs/content/troubleshooting/edge-function-546-error-response.mdx @@ -2,6 +2,7 @@ title = "Edge Function 546 error response" topics = [ "functions" ] keywords = [ "546", "error", "resource", "memory", "cpu", "event loop", "edge function" ] +database_id = "4e6ff2e4-2abb-4fba-8233-5883b3d56fb0" [[errors]] http_status_code = 546 diff --git a/apps/docs/content/troubleshooting/edge-function-bundle-size-issues.mdx b/apps/docs/content/troubleshooting/edge-function-bundle-size-issues.mdx index cb3104a2d0399..cd35811b0da60 100644 --- a/apps/docs/content/troubleshooting/edge-function-bundle-size-issues.mdx +++ b/apps/docs/content/troubleshooting/edge-function-bundle-size-issues.mdx @@ -2,6 +2,7 @@ title = "Edge Function bundle size issues" topics = [ "functions" ] keywords = [ "bundle", "size", "limit", "dependencies", "edge function", "10MB" ] +database_id = "aaf9e673-64ae-460a-88e0-b83ea4963382" [api] cli = ["supabase-functions-deploy"] diff --git a/apps/docs/content/troubleshooting/edge-function-cpu-limits.mdx b/apps/docs/content/troubleshooting/edge-function-cpu-limits.mdx 
index bef2d93305722..3ff348531e2d2 100644 --- a/apps/docs/content/troubleshooting/edge-function-cpu-limits.mdx +++ b/apps/docs/content/troubleshooting/edge-function-cpu-limits.mdx @@ -2,6 +2,7 @@ title = "Understanding Edge Function CPU limits" topics = [ "functions" ] keywords = [ "CPU", "limit", "isolate", "soft limit", "hard limit", "edge function" ] +database_id = "1765884f-81d6-415a-a78f-7085f7b7ddbf" --- Learn how Edge Functions manage CPU resources and what happens when limits are reached. diff --git a/apps/docs/content/troubleshooting/edge-function-dependency-analysis.mdx b/apps/docs/content/troubleshooting/edge-function-dependency-analysis.mdx index 271ab44020a54..9d1f742018b19 100644 --- a/apps/docs/content/troubleshooting/edge-function-dependency-analysis.mdx +++ b/apps/docs/content/troubleshooting/edge-function-dependency-analysis.mdx @@ -2,6 +2,7 @@ title = "Edge Function dependency analysis" topics = [ "functions" ] keywords = [ "dependencies", "npm", "deno", "imports", "bundle", "optimization", "edge function" ] +database_id = "e079a9d0-419a-4e31-b7ec-1206d9012d0b" --- Optimize your Edge Function dependencies for better performance. Large or unnecessary dependencies can significantly impact bundle size, boot time, and memory usage. diff --git a/apps/docs/content/troubleshooting/edge-function-monitoring-resource-usage.mdx b/apps/docs/content/troubleshooting/edge-function-monitoring-resource-usage.mdx index 424d2898b393d..38a8b415c054a 100644 --- a/apps/docs/content/troubleshooting/edge-function-monitoring-resource-usage.mdx +++ b/apps/docs/content/troubleshooting/edge-function-monitoring-resource-usage.mdx @@ -2,6 +2,7 @@ title = "Monitoring Edge Function resource usage" topics = [ "functions" ] keywords = [ "monitoring", "metrics", "CPU", "memory", "performance", "edge function" ] +database_id = "33e8e407-951b-4f7d-b8b4-8e9085cd4d10" --- Learn how to track your Edge Function's performance and identify potential resource issues. diff --git a/apps/docs/content/troubleshooting/edge-function-shutdown-reasons-explained.mdx b/apps/docs/content/troubleshooting/edge-function-shutdown-reasons-explained.mdx index 9b7a4a16cf670..5e917995a23a3 100644 --- a/apps/docs/content/troubleshooting/edge-function-shutdown-reasons-explained.mdx +++ b/apps/docs/content/troubleshooting/edge-function-shutdown-reasons-explained.mdx @@ -2,6 +2,7 @@ title = "Edge Function shutdown reasons explained" topics = [ "functions" ] keywords = [ "shutdown", "termination", "event loop", "wall clock", "cpu time", "memory", "early drop" ] +database_id = "109b964f-c28c-4554-b059-cd8b165c63f8" [[errors]] http_status_code = 546 diff --git a/apps/docs/content/troubleshooting/edge-function-takes-too-long-to-respond.mdx b/apps/docs/content/troubleshooting/edge-function-takes-too-long-to-respond.mdx index 75597f52f89f9..e47714f1608bf 100644 --- a/apps/docs/content/troubleshooting/edge-function-takes-too-long-to-respond.mdx +++ b/apps/docs/content/troubleshooting/edge-function-takes-too-long-to-respond.mdx @@ -2,6 +2,7 @@ title = "Edge Function takes too long to respond" topics = [ "functions" ] keywords = [ "slow", "timeout", "performance", "boot", "response time", "edge function" ] +database_id = "89b868a9-17fe-4c6d-86f6-0b04e5794678" --- Edge Functions have a 60-second execution limit. If your function is taking too long to respond, follow these steps to diagnose and optimize performance. 
diff --git a/apps/docs/content/troubleshooting/email-password-login-disabled-supabase-vercel-marketplace-a7dd36.mdx b/apps/docs/content/troubleshooting/email-password-login-disabled-supabase-vercel-marketplace-a7dd36.mdx index 1b54fa0a54fcb..4684ead253fd3 100644 --- a/apps/docs/content/troubleshooting/email-password-login-disabled-supabase-vercel-marketplace-a7dd36.mdx +++ b/apps/docs/content/troubleshooting/email-password-login-disabled-supabase-vercel-marketplace-a7dd36.mdx @@ -2,6 +2,7 @@ title = "Email/Password Login Disabled for Supabase Accounts Created via Vercel Marketplace" topics = [ "auth" ] keywords = [ "Vercel"] +database_id = "f988e832-b0c3-4b30-8e20-8d5854943b32" --- If your Supabase account was initiated via the Vercel Marketplace, you will not be able to log in directly using email/password or reset your password. This is because such accounts are tightly coupled to Vercel's authentication system, restricting login to Vercel's integrated methods. diff --git a/apps/docs/content/troubleshooting/enabling-ipv4-addon.mdx b/apps/docs/content/troubleshooting/enabling-ipv4-addon.mdx index 9d32a135a95e6..8f7013891791d 100644 --- a/apps/docs/content/troubleshooting/enabling-ipv4-addon.mdx +++ b/apps/docs/content/troubleshooting/enabling-ipv4-addon.mdx @@ -1,6 +1,7 @@ --- title = "Enabling the IPv4 add-on FAQ" topics = [ "database", "platform", "supavisor" ] +database_id = "02f5f200-3c1c-425e-945f-b80b3fce3ec0" --- Enabling the IPv4 add-on will attach an IPv4 address to your project's compute instance, while preserving the existing IPv6 address. Both DNS records `ref.supabase.co` and `db.ref.supabase.co` will be updated to point to the IPv4 address. diff --git a/apps/docs/content/troubleshooting/error-invalid-byte-sequence-for-encoding-utf8-0x00-when-accessing-triggers-or-webhooks-e78cf8.mdx b/apps/docs/content/troubleshooting/error-invalid-byte-sequence-for-encoding-utf8-0x00-when-accessing-triggers-or-webhooks-e78cf8.mdx index cc55733f3ecdb..71907b1b6552b 100644 --- a/apps/docs/content/troubleshooting/error-invalid-byte-sequence-for-encoding-utf8-0x00-when-accessing-triggers-or-webhooks-e78cf8.mdx +++ b/apps/docs/content/troubleshooting/error-invalid-byte-sequence-for-encoding-utf8-0x00-when-accessing-triggers-or-webhooks-e78cf8.mdx @@ -2,6 +2,7 @@ title = "Error: 'invalid byte sequence for encoding 'UTF8': 0x00' when accessing Triggers or Webhooks" topics = [ "cli", "database" ] keywords = [] +database_id = "12128731-0af6-4c57-9aa2-66e177f0c3f4" --- If you encounter the error: `'invalid byte sequence for encoding "UTF8": 0x00'` when attempting to access your project's [Triggers](/dashboard/project/_/database/triggers) or [Webhooks](/dashboard/project/_/database/webhooks) via the dashboard, it indicates that the `standard_conforming_strings` database setting is currently `off`. 
diff --git a/apps/docs/content/troubleshooting/error-target-organization-is-not-managed-by-vercel-marketplace-during-project-transfer-5262c3.mdx b/apps/docs/content/troubleshooting/error-target-organization-is-not-managed-by-vercel-marketplace-during-project-transfer-5262c3.mdx index 4188ee80bd6df..bee1e46a14e98 100644 --- a/apps/docs/content/troubleshooting/error-target-organization-is-not-managed-by-vercel-marketplace-during-project-transfer-5262c3.mdx +++ b/apps/docs/content/troubleshooting/error-target-organization-is-not-managed-by-vercel-marketplace-during-project-transfer-5262c3.mdx @@ -2,6 +2,7 @@ title = "'Error: 'Target organization is not managed by Vercel Marketplace' during project transfer'" topics = [ "platform" ] keywords = [] +database_id = "6083c9eb-b9fc-4d19-bc64-bb4b7efeecdd" --- If you encounter the error "Target organization is not managed by Vercel Marketplace (currently unsupported)" when attempting a project transfer, it indicates an attempt to move a project from a Supabase-managed organization to a Vercel Marketplace-managed organization. This transfer direction is currently not supported due to existing Vercel Marketplace API limitations. diff --git a/apps/docs/content/troubleshooting/get-detailed-storage-metrics-with-the-aws-cli-587a7d.mdx b/apps/docs/content/troubleshooting/get-detailed-storage-metrics-with-the-aws-cli-587a7d.mdx index e7ff31288db56..3d053f7ff55b9 100644 --- a/apps/docs/content/troubleshooting/get-detailed-storage-metrics-with-the-aws-cli-587a7d.mdx +++ b/apps/docs/content/troubleshooting/get-detailed-storage-metrics-with-the-aws-cli-587a7d.mdx @@ -2,6 +2,7 @@ title = "'Get detailed Storage metrics with the AWS CLI'" topics = [ "cli", "storage", "studio" ] keywords = [] +database_id = "2c33968e-26c0-4613-aee1-5de36d068394" --- Supabase Studio primarily lists the current objects within your buckets. You can use standard S3 tooling such as the AWS CLI to review your Supabase project's Storage usage, or perform operations on the bucket contents. diff --git a/apps/docs/content/troubleshooting/how-to-bypass-cooldown-period.mdx b/apps/docs/content/troubleshooting/how-to-bypass-cooldown-period.mdx index f666c533d6816..a67de5dfa99cc 100644 --- a/apps/docs/content/troubleshooting/how-to-bypass-cooldown-period.mdx +++ b/apps/docs/content/troubleshooting/how-to-bypass-cooldown-period.mdx @@ -4,6 +4,7 @@ topics = [ "platform" ] keywords = ["cooldown", "disk resize"] +database_id = "ab90c813-6152-4dad-86f7-937d799ca123" --- This cooldown period isn't a Supabase limitation. It's rooted in how Amazon EBS (the underlying storage instance for our databases) manages volume modifications. After modifying a volume (e.g. increasing size, changing type, or IOPS), AWS enforces a mandatory 6-hour cooldown before allowing another modification on the same volume. This is to ensure data integrity and stability of the volume under load. 
diff --git a/apps/docs/content/troubleshooting/issues-serving-edge-functions-locally.mdx b/apps/docs/content/troubleshooting/issues-serving-edge-functions-locally.mdx index 2a2da6b188472..5ba3f8251bc2e 100644 --- a/apps/docs/content/troubleshooting/issues-serving-edge-functions-locally.mdx +++ b/apps/docs/content/troubleshooting/issues-serving-edge-functions-locally.mdx @@ -2,6 +2,7 @@ title = "Issues serving Edge Functions locally" topics = [ "functions", "cli" ] keywords = [ "local", "serve", "development", "debug", "port", "edge function" ] +database_id = "1cff12df-7ad6-48c5-b518-5ea468b54bab" [api] cli = ["supabase-functions-serve"] diff --git a/apps/docs/content/troubleshooting/keeping-free-projects-after-pro-upgrade-Kf9Xm2.mdx b/apps/docs/content/troubleshooting/keeping-free-projects-after-pro-upgrade-Kf9Xm2.mdx index afde3c90e8a1c..642457a9adc69 100644 --- a/apps/docs/content/troubleshooting/keeping-free-projects-after-pro-upgrade-Kf9Xm2.mdx +++ b/apps/docs/content/troubleshooting/keeping-free-projects-after-pro-upgrade-Kf9Xm2.mdx @@ -3,6 +3,7 @@ title = "Keeping your 2 Free projects after upgrading to Pro" date_created = "2026-01-23T00:00:00+00:00" topics = [ "platform" ] keywords = [ "free tier", "pro plan", "billing", "organizations", "project transfer" ] +database_id = "449482fb-6e21-4a25-99ee-1ce6fffbc975" --- ## Can you keep your 2 free projects after upgrading to pro? diff --git a/apps/docs/content/troubleshooting/manually-created-databases-are-not-visible-in-the-supabase-dashboard-4415aa.mdx b/apps/docs/content/troubleshooting/manually-created-databases-are-not-visible-in-the-supabase-dashboard-4415aa.mdx index 8497ac1d1390e..0c1fee2f9e6bd 100644 --- a/apps/docs/content/troubleshooting/manually-created-databases-are-not-visible-in-the-supabase-dashboard-4415aa.mdx +++ b/apps/docs/content/troubleshooting/manually-created-databases-are-not-visible-in-the-supabase-dashboard-4415aa.mdx @@ -2,6 +2,7 @@ title = "'Manually created databases are not visible in the Supabase Dashboard'" topics = [ "auth", "cli", "database", "functions", "platform", "storage" ] keywords = [] +database_id = "f6420e72-ea67-4825-b3f7-e722ea5c0d96" --- If you've manually created an additional database within your Supabase project, such as `example_database`, you might observe that it's accessible via external database tools but is not visible in the Supabase Dashboard. This guide explains the underlying reasons for this behavior and how Supabase is designed to handle databases. diff --git a/apps/docs/content/troubleshooting/otp-verification-failures-token-has-expired-or-otp_expired-errors-5ee4d0.mdx b/apps/docs/content/troubleshooting/otp-verification-failures-token-has-expired-or-otp_expired-errors-5ee4d0.mdx index 4108652cd17a0..98e8ca34353f6 100644 --- a/apps/docs/content/troubleshooting/otp-verification-failures-token-has-expired-or-otp_expired-errors-5ee4d0.mdx +++ b/apps/docs/content/troubleshooting/otp-verification-failures-token-has-expired-or-otp_expired-errors-5ee4d0.mdx @@ -2,6 +2,8 @@ title = "'OTP Verification Failures: 'token has expired' or 'otp_expired' errors'" topics = [ "auth", "cli" ] keywords = [] +database_id = "25eddb73-3cca-485b-b87f-7279dd46b7a7" + [[errors]] http_status_code = 403 message = "Forbidden" @@ -9,7 +11,6 @@ message = "Forbidden" [[errors]] code = "otp_expired" message = "OTP expired" - --- When users attempt to exchange One-Time Passwords (OTPs), they may encounter various errors indicating that the token is no longer valid. 
These include messages like "token has expired or is invalid," "Email link is invalid or has expired," or they might receive '403 Forbidden' HTTP responses on the verification endpoint. Authentication logs often show specific `otp_expired` error codes. diff --git a/apps/docs/content/troubleshooting/pg_cron-launcher-crashes-with-duplicate-key-value-violates-unique-constraint-cc6472.mdx b/apps/docs/content/troubleshooting/pg_cron-launcher-crashes-with-duplicate-key-value-violates-unique-constraint-cc6472.mdx index d465120d62afd..fdbacf98077e6 100644 --- a/apps/docs/content/troubleshooting/pg_cron-launcher-crashes-with-duplicate-key-value-violates-unique-constraint-cc6472.mdx +++ b/apps/docs/content/troubleshooting/pg_cron-launcher-crashes-with-duplicate-key-value-violates-unique-constraint-cc6472.mdx @@ -2,6 +2,7 @@ title = "`pg_cron launcher crashes with 'duplicate key value violates unique constraint'`" topics = [ "platform" ] keywords = [] +database_id = "c000b4ea-23e2-4e19-af54-63ccd00c4904" --- The `pg_cron` launcher process crashes approximately every minute, displaying the error `'duplicate key value violates unique constraint "job_run_details_pkey"'`. diff --git a/apps/docs/content/troubleshooting/pkce-flow-errors-cannot-parse-response-or-zgotmplz-in-magic-link-emails-433665.mdx b/apps/docs/content/troubleshooting/pkce-flow-errors-cannot-parse-response-or-zgotmplz-in-magic-link-emails-433665.mdx index 946549c8ee04f..4aad6fcfb3788 100644 --- a/apps/docs/content/troubleshooting/pkce-flow-errors-cannot-parse-response-or-zgotmplz-in-magic-link-emails-433665.mdx +++ b/apps/docs/content/troubleshooting/pkce-flow-errors-cannot-parse-response-or-zgotmplz-in-magic-link-emails-433665.mdx @@ -2,6 +2,8 @@ title = "PKCE Flow errors: 'cannot parse response' or '#ZgotmplZ' in magic link emails" topics = [ "auth", "cli" ] keywords = [] +database_id = "f9f9842b-e66c-4ea3-a45b-713895d8b7c6" + [[errors]] code = "#ZgotmplZ" message = "Go template sanitization of unsafe URL scheme" @@ -9,7 +11,6 @@ message = "Go template sanitization of unsafe URL scheme" [[errors]] code = "cannot parse response" message = "PKCE flow interrupted by email client or token consumed by link scanner" - --- When setting up authentication with magic links and mobile deep linking, you might encounter specific errors like `#ZgotmplZ` in your email templates or a 'cannot parse response' error during the login flow. This guide explains the underlying causes and provides a robust solution. 
diff --git a/apps/docs/content/troubleshooting/postgrest-not-recognizing-new-columns-or-functions-bd75f5.mdx b/apps/docs/content/troubleshooting/postgrest-not-recognizing-new-columns-or-functions-bd75f5.mdx index 051323c67f0b3..d603975c4d709 100644 --- a/apps/docs/content/troubleshooting/postgrest-not-recognizing-new-columns-or-functions-bd75f5.mdx +++ b/apps/docs/content/troubleshooting/postgrest-not-recognizing-new-columns-or-functions-bd75f5.mdx @@ -2,6 +2,7 @@ title = "PostgREST not recognizing new columns, tables, views or functions" topics = [ "cli", "platform", "database", "functions" ] keywords = [] +database_id = "c3481097-1a32-4e94-b4f6-7aeb88132a41" --- If PostgREST is returning errors by not recognizing new database columns, tables, views or functions, and logging errors similar to: diff --git a/apps/docs/content/troubleshooting/rclone-error-s3-protocol-error-received-listing-v1-with-istruncated-set-no-nextmarker-and-no-contents-e64d34.mdx b/apps/docs/content/troubleshooting/rclone-error-s3-protocol-error-received-listing-v1-with-istruncated-set-no-nextmarker-and-no-contents-e64d34.mdx index 7300fdc01261e..594101b4b4b3b 100644 --- a/apps/docs/content/troubleshooting/rclone-error-s3-protocol-error-received-listing-v1-with-istruncated-set-no-nextmarker-and-no-contents-e64d34.mdx +++ b/apps/docs/content/troubleshooting/rclone-error-s3-protocol-error-received-listing-v1-with-istruncated-set-no-nextmarker-and-no-contents-e64d34.mdx @@ -2,6 +2,7 @@ title = "rclone error: 's3 protocol error: received listing v1 with IsTruncated set, no NextMarker and no Contents'" topics = [ "storage" ] keywords = [] +database_id = "0dab3e43-2ba9-4421-9af3-b97e45123069" --- When attempting to list objects from a Supabase bucket using `rclone lsf`, you might encounter the error: `'s3 protocol error: received listing v1 with IsTruncated set, no NextMarker and no Contents'`. diff --git a/apps/docs/content/troubleshooting/realtime-handling-silent-disconnections-in-backgrounded-applications-592794.mdx b/apps/docs/content/troubleshooting/realtime-handling-silent-disconnections-in-backgrounded-applications-592794.mdx index 949dc21939f5f..7941565055d70 100644 --- a/apps/docs/content/troubleshooting/realtime-handling-silent-disconnections-in-backgrounded-applications-592794.mdx +++ b/apps/docs/content/troubleshooting/realtime-handling-silent-disconnections-in-backgrounded-applications-592794.mdx @@ -2,6 +2,7 @@ title = "Realtime: Handling Silent Disconnections in Background Applications" topics = [ "cli", "database", "realtime" ] keywords = [] +database_id = "b826b34a-f7c0-405e-a836-54c543198964" --- If your Supabase Realtime subscriptions stop receiving events after some time without any explicit error messages, you might be experiencing a silent disconnection. This guide explains why this occurs and provides robust solutions to maintain connection stability. 
diff --git a/apps/docs/content/troubleshooting/realtime-project-suspended-for-exceeding-quotas.mdx b/apps/docs/content/troubleshooting/realtime-project-suspended-for-exceeding-quotas.mdx new file mode 100644 index 0000000000000..a3ff143175482 --- /dev/null +++ b/apps/docs/content/troubleshooting/realtime-project-suspended-for-exceeding-quotas.mdx @@ -0,0 +1,69 @@ +--- +title = "Realtime: Project suspended for exceeding quotas" +date_created = "2026-02-06T00:00:00+00:00" +topics = [ "realtime" ] +keywords = [ "suspended", "quota", "abuse", "limits", "connections", "messages", "ban", "RealtimeDisabledForTenant", "disabled" ] +--- + +If your project has been suspended due to Realtime usage, it means your project exceeded the quotas for your plan and was flagged for unusually high consumption of Realtime resources. + +## How to know if your project was suspended + +When Realtime is disabled for your project, you will see the error code `RealtimeDisabledForTenant` in your [Realtime logs](/dashboard/project/_/database/realtime-logs). On the client side, connections will fail to establish and existing subscriptions will stop receiving events. + +If you encounter this error, it means Realtime has been explicitly disabled for your project and you should [contact support](/dashboard/support/new) to understand why and resolve the issue. + +## Why projects get suspended + +Supabase monitors Realtime usage across all projects to ensure platform stability for everyone. When a project consistently exceeds its plan limits by a significant margin, it may be manually suspended. This can happen when: + +- Your project far exceeds the [concurrent connections limit](/docs/guides/realtime/limits#limits-by-plan) for your plan +- Your project sends or receives messages well beyond the [messages per second limit](/docs/guides/realtime/limits#limits-by-plan) +- Usage patterns suggest unintended or runaway behavior, such as a client reconnection loop or uncontrolled channel creation + +Suspension is not automatic and is applied after review. The goal is to protect shared infrastructure while giving you the opportunity to explain and resolve the situation. + +## Common causes of excessive usage + +In most cases, quota overages are accidental rather than intentional: + +- **Reconnection loops**: A client that fails to authenticate or subscribe may retry rapidly, creating thousands of short-lived connections +- **Uncontrolled channels**: Creating channels without cleaning them up leads to resource exhaustion (see [Fixing the TooManyChannels Error](/docs/troubleshooting/realtime-too-many-channels-error)) +- **Missing cleanup on component tear down**: Single-page applications that don't unsubscribe when components are removed can accumulate connections over time +- **Unexpected traffic spikes**: A viral event or bot traffic can push usage well beyond normal levels +- **Development or testing misconfiguration**: Load tests or staging environments accidentally pointed at a production project + +## What to do if your project is suspended + +1. **Open a support ticket**: [Contact support](/dashboard/support/new) and include: + - Your project reference ID + - A description of your Realtime use case (what features use Broadcast, Presence, or Postgres Changes) + - An estimate of your expected concurrent connections and message throughput + - Any recent changes to your application that may have caused the spike + +2. 
**Review your usage**: While waiting for a response, check the [Realtime reports](/dashboard/project/_/database/realtime-logs) in your project dashboard to understand what drove the elevated usage. + +3. **Identify and fix the root cause**: Common fixes include: + - Adding proper channel cleanup in your client code + - Implementing exponential backoff for reconnection logic + - Upgrading your plan to match your actual usage needs + - Separating development and production environments + +The Supabase team will review your case to understand whether the usage was accidental or expected. If the usage is legitimate, the team can work with you to find the right plan or adjust limits for your project. If it was accidental, once you've resolved the underlying issue, the suspension can be lifted. + +## How to avoid suspension + +- Monitor your usage regularly through the [Realtime reports](/dashboard/project/_/database/realtime-logs) page +- Set up alerts if your connections or messages approach your plan limits +- Follow the [channel management best practices](/docs/troubleshooting/realtime-too-many-channels-error#best-practices-for-channel-management) to avoid resource leaks +- Review the [Realtime limits](/docs/guides/realtime/limits) for your plan and upgrade before you outgrow them +- Use the [Realtime Inspector](https://realtime.supabase.com/inspector/new) to test and debug connections before deploying + +## Related troubleshooting guides + +- [Fixing the TooManyChannels Error](/docs/troubleshooting/realtime-too-many-channels-error) — channel lifecycle management and cleanup best practices +- [Concurrent Peak Connections quota](/docs/troubleshooting/realtime-concurrent-peak-connections-quota-jdDqcp) — understanding the concurrent connections quota and how to adjust it +- [Handling Silent Disconnections in Background Applications](/docs/troubleshooting/realtime-handling-silent-disconnections-in-backgrounded-applications-592794) — fixing WebSocket drops caused by browser or OS background mode +- [TIMED_OUT connection errors](/docs/troubleshooting/realtime-connections-timed_out-status) — resolving Node.js version incompatibilities +- [Debug Realtime with Logger and Log Levels](/docs/troubleshooting/realtime-debugging-with-logger) — enabling client-side logging to diagnose connection and message issues +- [Realtime Heartbeat Messages](/docs/troubleshooting/realtime-heartbeat-messages) — monitoring connection health with heartbeat callbacks diff --git a/apps/docs/content/troubleshooting/running-explain-analyze-on-functions.mdx b/apps/docs/content/troubleshooting/running-explain-analyze-on-functions.mdx index f851933943a2c..9e7696711e26f 100644 --- a/apps/docs/content/troubleshooting/running-explain-analyze-on-functions.mdx +++ b/apps/docs/content/troubleshooting/running-explain-analyze-on-functions.mdx @@ -2,6 +2,7 @@ title = "Running EXPLAIN ANALYZE on functions" topics = ["database", "functions"] keywords = [] # any strings (topics are automatically added so no need to duplicate) +database_id = "1d62cace-c0f6-47a0-8690-002a797da33b" [api] sdk = ["rpc"] diff --git a/apps/docs/content/troubleshooting/scan-error-on-column-confirmation_token-converting-null-to-string-is-unsupported-during-auth-login-a0c686.mdx b/apps/docs/content/troubleshooting/scan-error-on-column-confirmation_token-converting-null-to-string-is-unsupported-during-auth-login-a0c686.mdx index 268e22e2ab1a3..8e0f51128fd65 100644 --- 
a/apps/docs/content/troubleshooting/scan-error-on-column-confirmation_token-converting-null-to-string-is-unsupported-during-auth-login-a0c686.mdx +++ b/apps/docs/content/troubleshooting/scan-error-on-column-confirmation_token-converting-null-to-string-is-unsupported-during-auth-login-a0c686.mdx @@ -2,10 +2,11 @@ title = "'Scan error on column confirmation_token: converting NULL to string is unsupported' during Auth login" topics = [ "auth" ] keywords = [] +database_id = "02a26b95-3029-49a1-9319-161cf1c4c21d" + [[errors]] http_status_code = 500 message = "error finding user: sql: Scan error on column 'confirmation_token': converting NULL to string is unsupported" - --- If you encounter a HTTP 500 error during authentication with the message `error finding user: sql: Scan error on column "confirmation_token": converting NULL to string is unsupported`, this typically indicates that the GoTrue Auth service found a `NULL` value in the `auth.users.confirmation_token` column, where a non-nullable string is expected. diff --git a/apps/docs/content/troubleshooting/supabase-project-provisioned-via-bolt-not-visible-in-dashboard-7188fc.mdx b/apps/docs/content/troubleshooting/supabase-project-provisioned-via-bolt-not-visible-in-dashboard-7188fc.mdx index 4ea43656a396f..0151c181373cc 100644 --- a/apps/docs/content/troubleshooting/supabase-project-provisioned-via-bolt-not-visible-in-dashboard-7188fc.mdx +++ b/apps/docs/content/troubleshooting/supabase-project-provisioned-via-bolt-not-visible-in-dashboard-7188fc.mdx @@ -4,6 +4,7 @@ topics = [ "database" ] keywords = [ "Bolt" ] +database_id = "d64f5f5d-ef80-4423-ba24-1a79b4e69ce6" --- If your Supabase project, provisioned through Bolt's Claude Agent, isn't appearing in your Supabase dashboard, it indicates an ownership difference. diff --git a/apps/docs/content/troubleshooting/supabase-storage-inefficient-folder-operations-and-hierarchical-rls-challenges-b05a4d.mdx b/apps/docs/content/troubleshooting/supabase-storage-inefficient-folder-operations-and-hierarchical-rls-challenges-b05a4d.mdx index 186aa5fa1b376..c4fdf40594260 100644 --- a/apps/docs/content/troubleshooting/supabase-storage-inefficient-folder-operations-and-hierarchical-rls-challenges-b05a4d.mdx +++ b/apps/docs/content/troubleshooting/supabase-storage-inefficient-folder-operations-and-hierarchical-rls-challenges-b05a4d.mdx @@ -2,6 +2,7 @@ title = "'Supabase Storage: Inefficient folder operations and hierarchical RLS challenges'" topics = [ "functions", "storage" ] keywords = [] +database_id = "3b52daf2-d78d-4630-8e9f-8bf5d90208bf" --- Supabase Storage lacks native folder concepts or APIs for batch folder operations, which can lead to inefficient folder operations (move, rename, delete) and difficulties in implementing hierarchical access controls for objects. 
diff --git a/apps/docs/content/troubleshooting/transfer-edge-function-from-one-project-to-another.mdx b/apps/docs/content/troubleshooting/transfer-edge-function-from-one-project-to-another.mdx index f4d25b74433b2..769cb245814ea 100644 --- a/apps/docs/content/troubleshooting/transfer-edge-function-from-one-project-to-another.mdx +++ b/apps/docs/content/troubleshooting/transfer-edge-function-from-one-project-to-another.mdx @@ -2,6 +2,7 @@ title = "Transfer edge functions from one project to another" topics = ["cli", "database", "functions"] keywords = ["functions", "typescript", "deno"] +database_id = "99b787e0-9ec5-4268-a07d-4d6c377cb1ac" --- This guide shows how you can transfer your Edge Functions from one project to another using the Supabase CLI or the Supabase Dashboard. diff --git a/apps/docs/content/troubleshooting/transferring-from-cloud-to-self-host-in-supabase-2oWNvW.mdx b/apps/docs/content/troubleshooting/transferring-from-cloud-to-self-host-in-supabase-2oWNvW.mdx index 0d682cc00bf1b..3ee20895f7f29 100644 --- a/apps/docs/content/troubleshooting/transferring-from-cloud-to-self-host-in-supabase-2oWNvW.mdx +++ b/apps/docs/content/troubleshooting/transferring-from-cloud-to-self-host-in-supabase-2oWNvW.mdx @@ -1,5 +1,5 @@ --- -title = "Transferring from cloud to self-host in Supabase" +title = "Transferring from platform to self-hosted Supabase" github_url = "https://github.com/orgs/supabase/discussions/22712" date_created = "2024-04-14T15:19:10+00:00" topics = [ "database", "self-hosting" ] @@ -7,10 +7,33 @@ keywords = [ "migrate", "pg_dump", "psql", "self-host" ] database_id = "c6b6ae3c-1b5b-4ba8-a40f-5f2ca1f007e2" --- -To migrate from cloud to self-hosting, you can use the [pg_dump](https://www.postgresql.org/docs/9.6/app-pgdump.html) command to export your database to an SQL file, which then you can run on any database to load the same data in. +For a detailed, step-by-step guide on restoring your database from the Supabase platform to a [self-hosted Supabase](/docs/guides/self-hosting) instance, see [Restore a Platform Project to Self-Hosted](/docs/guides/self-hosting/restore-from-platform). -You can then try to import your SQL files using psql from the terminal: +### Quick reference -`psql -h 127.0.0.1 -p 5432 -d postgres -U postgres -f .sql` +Back up your cloud database: -You can also find some useful information about self-hosting here: https://supabase.com/docs/guides/self-hosting. 
+```bash +supabase db dump --db-url "[CONNECTION_STRING]" -f roles.sql --role-only +``` + +```bash +supabase db dump --db-url "[CONNECTION_STRING]" -f schema.sql +``` + +```bash +supabase db dump --db-url "[CONNECTION_STRING]" -f data.sql --use-copy --data-only +``` + +Restore to your self-hosted instance: + +```bash +psql \ + --single-transaction \ + --variable ON_ERROR_STOP=1 \ + --file roles.sql \ + --file schema.sql \ + --command 'SET session_replication_role = replica' \ + --file data.sql \ + --dbname "postgres://postgres.your-tenant-id:[POSTGRES_PASSWORD]@[your-domain]:5432/postgres" +``` diff --git a/apps/docs/content/troubleshooting/unable-to-call-edge-function.mdx b/apps/docs/content/troubleshooting/unable-to-call-edge-function.mdx index 2cfa16cb57054..81743b46b22f5 100644 --- a/apps/docs/content/troubleshooting/unable-to-call-edge-function.mdx +++ b/apps/docs/content/troubleshooting/unable-to-call-edge-function.mdx @@ -2,6 +2,7 @@ title = "Unable to call Edge Function" topics = [ "functions" ] keywords = [ "invoke", "call", "CORS", "authentication", "JWT", "edge function" ] +database_id = "cd298838-6f13-4b70-a198-a8bd26b2c0bd" --- If you're having trouble invoking an Edge Function or experiencing CORS issues, follow these steps to diagnose and resolve the problem. diff --git a/apps/docs/content/troubleshooting/unable-to-deploy-edge-function.mdx b/apps/docs/content/troubleshooting/unable-to-deploy-edge-function.mdx index a125946930dff..f026a5ff606f6 100644 --- a/apps/docs/content/troubleshooting/unable-to-deploy-edge-function.mdx +++ b/apps/docs/content/troubleshooting/unable-to-deploy-edge-function.mdx @@ -2,6 +2,7 @@ title = "Unable to deploy Edge Function" topics = [ "functions" ] keywords = [ "deploy", "deployment", "edge function", "deno", "syntax", "bundle" ] +database_id = "68bb6df6-6a07-40fa-ad4d-dfa5e21d7e9c" [api] cli = ["supabase-functions-deploy"] diff --git a/apps/docs/content/troubleshooting/why-supabase-edge-functions-cannot-provide-static-egress-ips-for-whitelisting-3d78b0.mdx b/apps/docs/content/troubleshooting/why-supabase-edge-functions-cannot-provide-static-egress-ips-for-whitelisting-3d78b0.mdx index 00f8b9e19f47e..6526e5892f082 100644 --- a/apps/docs/content/troubleshooting/why-supabase-edge-functions-cannot-provide-static-egress-ips-for-whitelisting-3d78b0.mdx +++ b/apps/docs/content/troubleshooting/why-supabase-edge-functions-cannot-provide-static-egress-ips-for-whitelisting-3d78b0.mdx @@ -2,6 +2,7 @@ title = "'Why Supabase Edge Functions cannot provide static egress IPs for allow listing'" topics = [ "auth", "functions", "platform", "self-hosting" ] keywords = [] +database_id = "99b56e92-62f2-4602-8e13-d38caf7aff4c" --- When trying to establish secure connections from Supabase Edge Functions to external services, you might encounter difficulties with traditional IP allow listing. This guide explains why this problem occurs and provides various solutions to address it. 
diff --git a/apps/docs/public/img/icons/tanstack-icon-light.svg b/apps/docs/public/img/icons/tanstack-icon-light.svg new file mode 100644 index 0000000000000..b6acd4cf6f3f1 --- /dev/null +++ b/apps/docs/public/img/icons/tanstack-icon-light.svg @@ -0,0 +1 @@ + diff --git a/apps/docs/public/img/icons/tanstack-icon.svg b/apps/docs/public/img/icons/tanstack-icon.svg index 0905a07a20a05..be6eed002243c 100644 --- a/apps/docs/public/img/icons/tanstack-icon.svg +++ b/apps/docs/public/img/icons/tanstack-icon.svg @@ -1,125 +1 @@ - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - + diff --git a/apps/studio/components/interfaces/Database/Hooks/EditHookPanel.tsx b/apps/studio/components/interfaces/Database/Hooks/EditHookPanel.tsx index 1222c74c94fcc..9c078d71c8892 100644 --- a/apps/studio/components/interfaces/Database/Hooks/EditHookPanel.tsx +++ b/apps/studio/components/interfaces/Database/Hooks/EditHookPanel.tsx @@ -4,7 +4,7 @@ import type { PostgresTrigger } from '@supabase/postgres-meta' import { useQueryClient } from '@tanstack/react-query' import { useParams } from 'common' import { parseAsBoolean, parseAsString, useQueryState } from 'nuqs' -import { useEffect, useState } from 'react' +import { useEffect, useRef, useState } from 'react' import { SubmitHandler, useForm } from 'react-hook-form' import { toast } from 'sonner' import { Button, Form_Shadcn_, SidePanel } from 'ui' @@ -97,12 +97,25 @@ export const EditHookPanel = () => { ) const selectedHook = hooks.find((hook) => hook.id.toString() === selectedHookIdToEdit) + // Webhook IDs aren't stable across edits because the update mutation drops and recreates the + // trigger, assigning a new ID. This causes a brief window where the old selectedHookIdToEdit + // no longer matches any hook, incorrectly triggering the "Webhook not found" toast. 
Since this + // is an edge case, we use an ad-hoc ref to suppress the toast when the panel is closing rather + // than a more involved solution + const isClosingRef = useRef(false) + const visible = showCreateHookForm || !!selectedHook + const onClose = () => { + isClosingRef.current = true + setShowCreateHookForm(false) + setSelectedHookIdToEdit(null) + } + const { mutate: createDatabaseTrigger, isPending: isCreating } = useDatabaseTriggerCreateMutation( { - onSuccess: (res) => { - toast.success(`Successfully created new webhook "${res.name}"`) + onSuccess: (_, variables) => { + toast.success(`Successfully created new webhook "${variables.payload.name}"`) onClose() }, onError: (error) => { @@ -149,18 +162,20 @@ export const EditHookPanel = () => { }, }) - const onClose = () => { - setShowCreateHookForm(false) - setSelectedHookIdToEdit(null) - } - useEffect(() => { - if (isSuccess && !!selectedHookIdToEdit && !selectedHook) { + if (isSuccess && !!selectedHookIdToEdit && !selectedHook && !isClosingRef.current) { toast('Webhook not found') setSelectedHookIdToEdit(null) } }, [isSuccess, selectedHook, selectedHookIdToEdit, setSelectedHookIdToEdit]) + // Reset the closing ref when the panel fully closes + useEffect(() => { + if (!visible) { + isClosingRef.current = false + } + }, [visible]) + // Reset form when panel opens with new selectedHook useEffect(() => { if (visible) { diff --git a/apps/studio/components/interfaces/QueryPerformance/QueryCosts.tsx b/apps/studio/components/interfaces/QueryPerformance/QueryCosts.tsx deleted file mode 100644 index 197ab6ea489fd..0000000000000 --- a/apps/studio/components/interfaces/QueryPerformance/QueryCosts.tsx +++ /dev/null @@ -1,56 +0,0 @@ -import { cn } from 'ui' - -interface QueryCostsProps { - currentCost?: number - improvedCost?: number - improvement?: number - className?: string -} - -export const QueryCosts = ({ - currentCost, - improvedCost, - improvement, - className, -}: QueryCostsProps) => { - if (!currentCost) return null - - return ( -
-    Query costs
-    Total cost of query
-    Currently:
-    {typeof currentCost === 'number' && !isNaN(currentCost) && isFinite(currentCost)
-      ? currentCost.toFixed(2)
-      : 'N/A'}
-    {improvedCost &&
-      typeof improvedCost === 'number' &&
-      !isNaN(improvedCost) &&
-      isFinite(improvedCost) && (
-        With index:
-        {improvedCost.toFixed(2)}
-        {improvement &&
-          typeof improvement === 'number' &&
-          !isNaN(improvement) &&
-          isFinite(improvement) && (
-            ↓ {improvement.toFixed(1)}%
-        )}
-    )}
- ) -} diff --git a/apps/studio/components/interfaces/QueryPerformance/QueryDetail.tsx b/apps/studio/components/interfaces/QueryPerformance/QueryDetail.tsx index 0780903e036c3..4aa6696c21e2d 100644 --- a/apps/studio/components/interfaces/QueryPerformance/QueryDetail.tsx +++ b/apps/studio/components/interfaces/QueryPerformance/QueryDetail.tsx @@ -12,10 +12,7 @@ import { Alert_Shadcn_, AlertDescription_Shadcn_, AlertTitle_Shadcn_, Button, cn import { QueryPanelContainer, QueryPanelSection } from './QueryPanel' import { buildQueryExplanationPrompt } from './QueryPerformance.ai' -import { - QUERY_PERFORMANCE_COLUMNS, - QUERY_PERFORMANCE_REPORT_TYPES, -} from './QueryPerformance.constants' +import { QUERY_PERFORMANCE_COLUMNS } from './QueryPerformance.constants' import { QueryPerformanceRow } from './QueryPerformance.types' import { formatDuration } from './QueryPerformance.utils' diff --git a/apps/studio/components/interfaces/QueryPerformance/QueryPerformance.constants.ts b/apps/studio/components/interfaces/QueryPerformance/QueryPerformance.constants.ts index 601ef33ca07ba..ecbaa3ea46760 100644 --- a/apps/studio/components/interfaces/QueryPerformance/QueryPerformance.constants.ts +++ b/apps/studio/components/interfaces/QueryPerformance/QueryPerformance.constants.ts @@ -22,6 +22,7 @@ export const QUERY_PERFORMANCE_COLUMNS = [ { id: 'rows_read', name: 'Rows processed', description: undefined, minWidth: 130 }, { id: 'cache_hit_rate', name: 'Cache hit rate', description: undefined, minWidth: 130 }, { id: 'rolname', name: 'Role', description: undefined, minWidth: 200 }, + { id: 'application_name', name: 'Application', description: undefined, minWidth: 150 }, ] as const export const QUERY_PERFORMANCE_ROLE_DESCRIPTION = [ @@ -88,46 +89,40 @@ export const QUERY_PERFORMANCE_CHART_TABS = [ }, ] -export const QUERY_PERFORMANCE_TIME_RANGES = [ - { - id: 'last_60_minutes', - label: 'Last 60 minutes', - }, - { - id: 'last_3_hours', - label: 'Last 3 hours', - }, - { - id: 'last_24_hours', - label: 'Last 24 hours', - }, -] - -export const getPgStatMonitorLogsQuery = (startTime: string, endTime: string) => +export const getSupamonitorLogsQuery = (startTime: string, endTime: string) => ` -select - id, - pgl.timestamp as timestamp, - 'postgres' as log_type, - CAST(pgl_parsed.sql_state_code AS STRING) as status, - CASE - WHEN pgl_parsed.error_severity = 'LOG' THEN 'success' - WHEN pgl_parsed.error_severity = 'WARNING' THEN 'warning' - WHEN pgl_parsed.error_severity = 'FATAL' THEN 'error' - WHEN pgl_parsed.error_severity = 'ERROR' THEN 'error' - ELSE null - END as level, - event_message as event_message -from postgres_logs as pgl -cross join unnest(pgl.metadata) as pgl_metadata -cross join unnest(pgl_metadata.parsed) as pgl_parsed -WHERE pgl.event_message LIKE '%[pg_stat_monitor]%' - AND pgl.timestamp >= CAST('${startTime}' AS TIMESTAMP) - AND pgl.timestamp <= CAST('${endTime}' AS TIMESTAMP) +select + TIMESTAMP_TRUNC(sml.timestamp, MINUTE) as timestamp, + CAST(sml_parsed.application_name AS STRING) as application_name, + SUM(sml_parsed.calls) as calls, + CAST(sml_parsed.database_name AS STRING) as database_name, + CAST(sml_parsed.query AS STRING) as query, + sml_parsed.query_id as query_id, + SUM(sml_parsed.total_exec_time) as total_exec_time, + SUM(sml_parsed.total_plan_time) as total_plan_time, + CAST(sml_parsed.user_name AS STRING) as user_name, + CASE WHEN SUM(sml_parsed.calls) > 0 + THEN SUM(sml_parsed.total_exec_time) / SUM(sml_parsed.calls) + ELSE 0 + END as mean_exec_time, + 
MIN(NULLIF(sml_parsed.total_exec_time, 0)) as min_exec_time, + MAX(sml_parsed.total_exec_time) as max_exec_time, + CASE WHEN SUM(sml_parsed.calls) > 0 + THEN SUM(sml_parsed.total_plan_time) / SUM(sml_parsed.calls) + ELSE 0 + END as mean_plan_time, + MIN(NULLIF(sml_parsed.total_plan_time, 0)) as min_plan_time, + MAX(sml_parsed.total_plan_time) as max_plan_time, + APPROX_QUANTILES(sml_parsed.total_exec_time, 100)[OFFSET(50)] as p50_exec_time, + APPROX_QUANTILES(sml_parsed.total_exec_time, 100)[OFFSET(95)] as p95_exec_time, + APPROX_QUANTILES(sml_parsed.total_plan_time, 100)[OFFSET(50)] as p50_plan_time, + APPROX_QUANTILES(sml_parsed.total_plan_time, 100)[OFFSET(95)] as p95_plan_time +from supamonitor_logs as sml +cross join unnest(sml.metadata) as sml_metadata +cross join unnest(sml_metadata.supamonitor) as sml_parsed +WHERE sml.event_message = 'log' + AND sml.timestamp >= CAST('${startTime}' AS TIMESTAMP) + AND sml.timestamp <= CAST('${endTime}' AS TIMESTAMP) +GROUP BY timestamp, user_name, database_name, application_name, query_id, query ORDER BY timestamp DESC `.trim() - -export const PG_STAT_MONITOR_LOGS_QUERY = getPgStatMonitorLogsQuery( - new Date(Date.now() - 24 * 60 * 60 * 1000).toISOString(), - new Date().toISOString() -) diff --git a/apps/studio/components/interfaces/QueryPerformance/QueryPerformance.tsx b/apps/studio/components/interfaces/QueryPerformance/QueryPerformance.tsx index 82844f1ca1900..7ff8243739153 100644 --- a/apps/studio/components/interfaces/QueryPerformance/QueryPerformance.tsx +++ b/apps/studio/components/interfaces/QueryPerformance/QueryPerformance.tsx @@ -1,7 +1,7 @@ import { useEffect } from 'react' -import { WithMonitor } from './WithMonitor/WithMonitor' import { WithStatements } from './WithStatements/WithStatements' +import { WithSupamonitor } from './WithSupamonitor/WithSupamonitor' import { useParams } from 'common' import { DbQueryHook } from 'hooks/analytics/useDbQuery' import { useDatabaseSelectorStateSnapshot } from 'state/database-selector' @@ -11,7 +11,7 @@ interface QueryPerformanceProps { queryHitRate: PresetHookResult queryPerformanceQuery: DbQueryHook queryMetrics: PresetHookResult - isPgStatMonitorEnabled: boolean + isSupamonitorEnabled: boolean dateRange?: { period_start: { date: string; time_period: string } period_end: { date: string; time_period: string } @@ -24,7 +24,7 @@ export const QueryPerformance = ({ queryHitRate, queryPerformanceQuery, queryMetrics, - isPgStatMonitorEnabled, + isSupamonitorEnabled, dateRange, onDateRangeChange, }: QueryPerformanceProps) => { @@ -36,8 +36,8 @@ export const QueryPerformance = ({ // eslint-disable-next-line react-hooks/exhaustive-deps }, [ref]) - if (isPgStatMonitorEnabled) { - return + if (isSupamonitorEnabled) { + return } return ( diff --git a/apps/studio/components/interfaces/QueryPerformance/QueryPerformance.types.ts b/apps/studio/components/interfaces/QueryPerformance/QueryPerformance.types.ts index e7499b46d9a9f..a684c9bedd7e6 100644 --- a/apps/studio/components/interfaces/QueryPerformance/QueryPerformance.types.ts +++ b/apps/studio/components/interfaces/QueryPerformance/QueryPerformance.types.ts @@ -11,8 +11,48 @@ export interface QueryPerformanceRow { rows_read: number cache_hit_rate: number rolname: string + application_name?: string index_advisor_result?: GetIndexAdvisorResultResponse | null _total_cache_hits?: number _total_cache_misses?: number _count?: number } + +export interface ChartDataPoint { + period_start: number + timestamp: string + query_latency: number + mean_time: number + 
min_time: number + max_time: number + stddev_time: number + p50_time: number + p95_time: number + rows_read: number + calls: number + cache_hits: number + cache_misses: number +} + +export interface ParsedLogEntry { + timestamp?: string + application_name?: string + calls?: number + database_name?: string + query?: string + query_id?: number + total_exec_time?: number + total_plan_time?: number + user_name?: string + mean_exec_time?: number + mean_plan_time?: number + min_exec_time?: number + max_exec_time?: number + min_plan_time?: number + max_plan_time?: number + p50_exec_time?: number + p95_exec_time?: number + p50_plan_time?: number + p95_plan_time?: number + [key: string]: any +} diff --git a/apps/studio/components/interfaces/QueryPerformance/QueryPerformance.utils.test.ts b/apps/studio/components/interfaces/QueryPerformance/QueryPerformance.utils.test.ts index a2dc1994abf23..fe898f94d90bd 100644 --- a/apps/studio/components/interfaces/QueryPerformance/QueryPerformance.utils.test.ts +++ b/apps/studio/components/interfaces/QueryPerformance/QueryPerformance.utils.test.ts @@ -1,6 +1,5 @@ import { describe, it, expect } from 'vitest' import { formatDuration } from './QueryPerformance.utils' -import { calculatePercentilesFromHistogram } from './WithMonitor/WithMonitor.utils' describe('formatDuration', () => { it('should format seconds', () => { @@ -23,23 +22,3 @@ describe('formatDuration', () => { expect(formatDuration(90061000)).toBe('1d 1h 1m 1s') }) }) - -describe('calculatePercentilesFromHistogram', () => { - it('should return zero for empty histogram', () => { - const result = calculatePercentilesFromHistogram([]) - expect(result.p95).toBe(0) - }) - - it('should return valid p95 for typical distribution', () => { - const result = calculatePercentilesFromHistogram([10, 20, 30, 20, 10, 10]) - expect(result.p95).toBeGreaterThan(0) - expect(result.p95).toBeGreaterThanOrEqual(result.p50) - }) - - it('should return consistent p95 for same input', () => { - const histogram = [10, 20, 30, 20, 10, 10] - const result1 = calculatePercentilesFromHistogram(histogram) - const result2 = calculatePercentilesFromHistogram(histogram) - expect(result1.p95).toBe(result2.p95) - }) -}) diff --git a/apps/studio/components/interfaces/QueryPerformance/QueryPerformance.utils.ts b/apps/studio/components/interfaces/QueryPerformance/QueryPerformance.utils.ts index 2d4dbff2c1752..386c94fa946d2 100644 --- a/apps/studio/components/interfaces/QueryPerformance/QueryPerformance.utils.ts +++ b/apps/studio/components/interfaces/QueryPerformance/QueryPerformance.utils.ts @@ -28,22 +28,11 @@ export const formatDuration = (milliseconds: number) => { return parts.length > 0 ? 
parts.join(' ') : '0s' } -export const transformLogsToJSON = (log: string) => { - try { - let jsonString = log.replace('[pg_stat_monitor] ', '') - jsonString = jsonString.replace(/""/g, '","') - const jsonObject = JSON.parse(jsonString) - return jsonObject - } catch (error) { - return null - } -} - export type QueryPerformanceErrorContext = { projectRef?: string databaseIdentifier?: string queryPreset?: string - queryType?: 'hitRate' | 'metrics' | 'mainQuery' | 'monitor' | 'slowQueriesCount' + queryType?: 'hitRate' | 'metrics' | 'mainQuery' | 'slowQueriesCount' | 'supamonitor' sql?: string errorMessage?: string postgresVersion?: string diff --git a/apps/studio/components/interfaces/QueryPerformance/QueryPerformanceChart.tsx b/apps/studio/components/interfaces/QueryPerformance/QueryPerformanceChart.tsx index a8dc5314b8353..4fcc479a7df23 100644 --- a/apps/studio/components/interfaces/QueryPerformance/QueryPerformanceChart.tsx +++ b/apps/studio/components/interfaces/QueryPerformance/QueryPerformanceChart.tsx @@ -4,8 +4,7 @@ import { QUERY_PERFORMANCE_CHART_TABS } from './QueryPerformance.constants' import { Loader2 } from 'lucide-react' import { ComposedChart } from 'components/ui/Charts/ComposedChart' import type { MultiAttribute } from 'components/ui/Charts/ComposedChart.utils' -import type { ChartDataPoint } from './WithMonitor/WithMonitor.utils' -import { calculatePercentilesFromHistogram } from './WithMonitor/WithMonitor.utils' +import type { ChartDataPoint } from './QueryPerformance.types' interface QueryPerformanceChartProps { dateRange?: { @@ -62,33 +61,11 @@ export const QueryPerformanceChart = ({ switch (selectedMetric) { case 'query_latency': { - let trueP95: number = 0 - - if (parsedLogs && parsedLogs.length > 0) { - const bucketCount = parsedLogs[0]?.resp_calls?.length || 50 - const combinedHistogram = new Array(bucketCount).fill(0) - - parsedLogs.forEach((log) => { - if (log.resp_calls && Array.isArray(log.resp_calls)) { - log.resp_calls.forEach((count: number, index: number) => { - if (index < combinedHistogram.length) { - combinedHistogram[index] += count - } - }) - } - }) - - // [kemal]: this might need a revisit - const percentiles = calculatePercentilesFromHistogram(combinedHistogram) - trueP95 = percentiles.p95 - } else { - // [kemal]: fallback to weighted average - const totalCalls = chartData.reduce((sum, d) => sum + d.calls, 0) - trueP95 = - totalCalls > 0 - ? chartData.reduce((sum, d) => sum + d.p95_time * d.calls, 0) / totalCalls - : 0 - } + const totalCalls = chartData.reduce((sum, d) => sum + d.calls, 0) + const trueP95 = + totalCalls > 0 + ? chartData.reduce((sum, d) => sum + d.p95_time * d.calls, 0) / totalCalls + : 0 return [ { @@ -167,12 +144,7 @@ export const QueryPerformanceChart = ({ >() queryLogs.forEach((log) => { - const timestamps = [log.bucket_start_time, log.bucket, log.timestamp, log.ts] - const validTimestamp = timestamps.find((t) => t && !isNaN(new Date(t).getTime())) - - if (!validTimestamp) return - - const time = new Date(validTimestamp).getTime() + const time = new Date(log.timestamp).getTime() const meanTime = log.mean_time ?? log.mean_exec_time ?? log.mean_query_time ?? 0 const rowsRead = log.rows_read ?? log.rows ?? 0 const calls = log.calls ?? 
0 @@ -258,8 +230,14 @@ export const QueryPerformanceChart = ({ const baseAttributes = attributeMap[selectedMetric] || [] - // Add selected query line based on current metric if (currentSelectedQuery && querySpecificData) { + const dimmedBaseAttributes = baseAttributes.map((attr) => ({ + ...attr, + color: attr.color + ? { light: attr.color.light + '4D', dark: attr.color.dark + '4D' } + : attr.color, + })) + const selectedQueryAttributes: Record = { query_latency: { attribute: 'selected_query_time', @@ -297,7 +275,7 @@ export const QueryPerformanceChart = ({ const selectedQueryAttr = selectedQueryAttributes[selectedMetric] if (selectedQueryAttr) { - return [...baseAttributes, selectedQueryAttr] + return [...dimmedBaseAttributes, selectedQueryAttr] } } @@ -360,12 +338,7 @@ export const QueryPerformanceChart = ({ hideHighlightArea={true} showTooltip={true} showGrid={true} - showLegend={ - selectedMetric === 'query_latency' || - selectedMetric === 'cache_hits' || - selectedMetric === 'rows_read' || - selectedMetric === 'calls' - } + showLegend={true} showTotal={false} showMaxValue={false} updateDateRange={updateDateRange} diff --git a/apps/studio/components/interfaces/QueryPerformance/QueryPerformanceGrid.tsx b/apps/studio/components/interfaces/QueryPerformance/QueryPerformanceGrid.tsx index 4dbeada04b8a8..21524688097f1 100644 --- a/apps/studio/components/interfaces/QueryPerformance/QueryPerformanceGrid.tsx +++ b/apps/studio/components/interfaces/QueryPerformance/QueryPerformanceGrid.tsx @@ -34,7 +34,6 @@ import { QueryDetail } from './QueryDetail' import { QueryIndexes } from './QueryIndexes' import { QUERY_PERFORMANCE_COLUMNS, - QUERY_PERFORMANCE_REPORT_TYPES, QUERY_PERFORMANCE_ROLE_DESCRIPTION, } from './QueryPerformance.constants' import { QueryPerformanceRow } from './QueryPerformance.types' @@ -98,7 +97,6 @@ export const QueryPerformanceGrid = ({ const [view, setView] = useState<'details' | 'suggestion'>('details') const [selectedRow, setSelectedRow] = useState() - const reportType = QUERY_PERFORMANCE_REPORT_TYPES.UNIFIED const columns = QUERY_PERFORMANCE_COLUMNS.map((col) => { const nonSortableColumns = ['query'] @@ -341,6 +339,18 @@ export const QueryPerformanceGrid = ({ ) } + if (col.id === 'application_name') { + return ( +
+        {value ? (
+          {value}
+        ) : (
+        )}
+      )
+    }
     return (
       {formattedValue}
diff --git a/apps/studio/components/interfaces/QueryPerformance/WithMonitor/WithMonitor.utils.ts b/apps/studio/components/interfaces/QueryPerformance/WithMonitor/WithMonitor.utils.ts deleted file mode 100644 index f65cfb30930e8..0000000000000 --- a/apps/studio/components/interfaces/QueryPerformance/WithMonitor/WithMonitor.utils.ts +++ /dev/null @@ -1,301 +0,0 @@ -import dayjs from 'dayjs' -import utc from 'dayjs/plugin/utc' -import { transformLogsToJSON } from '../QueryPerformance.utils' -import { QueryPerformanceRow } from '../QueryPerformance.types' - -dayjs.extend(utc) - -export interface ParsedLogEntry { - bucket_start_time?: string - bucket?: string - timestamp?: string - ts?: string - mean_time?: number - mean_exec_time?: number - mean_query_time?: number - min_time?: number - min_exec_time?: number - min_query_time?: number - max_time?: number - max_exec_time?: number - max_query_time?: number - stddev_time?: number - stddev_exec_time?: number - stddev_query_time?: number - rows?: number - calls?: number - shared_blks_hit?: number - shared_blks_read?: number - query?: string - userid?: string - rolname?: string - resp_calls?: number[] - [key: string]: any -} - -export interface ChartDataPoint { - period_start: number - timestamp: string - query_latency: number - mean_time: number - min_time: number - max_time: number - stddev_time: number - p50_time: number - p95_time: number - rows_read: number - calls: number - cache_hits: number - cache_misses: number -} - -export const parsePgStatMonitorLogs = (logData: any[]): ParsedLogEntry[] => { - if (!logData || logData.length === 0) return [] - - const validParsedLogs = logData - .map((log) => ({ - ...log, - parsedEventMessage: transformLogsToJSON(log.event_message), - })) - .filter((log) => log.parsedEventMessage !== null) - .filter((log) => log.parsedEventMessage?.event === 'bucket_query') - - return validParsedLogs.map((log) => log.parsedEventMessage) -} - -export const transformLogsToChartData = (parsedLogs: ParsedLogEntry[]): ChartDataPoint[] => { - if (!parsedLogs || parsedLogs.length === 0) return [] - - // [kemal]: here for debugging - // if (parsedLogs.length > 0) { - // console.log('🟡 Parsed logs:', parsedLogs) - // } - - return parsedLogs - .map((log: ParsedLogEntry) => { - const possibleTimestamps = [log.bucket_start_time, log.bucket, log.timestamp, log.ts] - - let periodStart: number | null = null - - for (const ts of possibleTimestamps) { - if (ts) { - const date = new Date(ts) - const time = date.getTime() - if (!isNaN(time) && time > 0 && time > 946684800000) { - periodStart = time - break - } - } - } - - if (!periodStart) { - return null - } - - const percentiles = - log.resp_calls && Array.isArray(log.resp_calls) - ? calculatePercentilesFromHistogram(log.resp_calls) - : { p50: 0, p95: 0 } - - return { - period_start: periodStart, - timestamp: possibleTimestamps.find((t) => t) || '', - query_latency: parseFloat( - String(log.mean_time ?? log.mean_exec_time ?? log.mean_query_time ?? 0) - ), - mean_time: parseFloat( - String(log.mean_time ?? log.mean_exec_time ?? log.mean_query_time ?? 0) - ), - min_time: parseFloat(String(log.min_time ?? log.min_exec_time ?? log.min_query_time ?? 0)), - max_time: parseFloat(String(log.max_time ?? log.max_exec_time ?? log.max_query_time ?? 0)), - stddev_time: parseFloat( - String(log.stddev_time ?? log.stddev_exec_time ?? log.stddev_query_time ?? 0) - ), - p50_time: percentiles.p50, - p95_time: percentiles.p95, - rows_read: parseInt(String(log.rows ?? 
0), 10), - calls: parseInt(String(log.calls ?? 0), 10), - cache_hits: parseFloat(String(log.shared_blks_hit ?? 0)), - cache_misses: parseFloat(String(log.shared_blks_read ?? 0)), - } - }) - .filter((item): item is NonNullable => item !== null) - .sort((a, b) => a.period_start - b.period_start) -} - -const normalizeQuery = (query: string): string => { - return query.replace(/\s+/g, ' ').trim() -} - -export const aggregateLogsByQuery = (parsedLogs: ParsedLogEntry[]): QueryPerformanceRow[] => { - if (!parsedLogs || parsedLogs.length === 0) return [] - - const queryGroups = new Map() - - parsedLogs.forEach((log) => { - const query = normalizeQuery(log.query || '') - if (!query) return - - if (!queryGroups.has(query)) { - queryGroups.set(query, []) - } - queryGroups.get(query)!.push(log) - }) - - const aggregatedData: QueryPerformanceRow[] = [] - let totalExecutionTime = 0 - - const queryStats = Array.from(queryGroups.entries()).map(([query, logs]) => { - const count = logs.length - let totalCalls = 0 - let totalRowsRead = 0 - let totalCacheHits = 0 - let totalCacheMisses = 0 - let rolname = logs[0].username - let minTime = Infinity - let maxTime = -Infinity - let totalExecutionTimeForQuery = 0 - - logs.forEach((log) => { - const logMeanTime = parseFloat( - String(log.mean_time ?? log.mean_exec_time ?? log.mean_query_time ?? 0) - ) - const logMinTime = parseFloat( - String(log.min_time ?? log.min_exec_time ?? log.min_query_time ?? 0) - ) - const logMaxTime = parseFloat( - String(log.max_time ?? log.max_exec_time ?? log.max_query_time ?? 0) - ) - const logCalls = parseInt(String(log.calls ?? 0), 10) - const logRows = parseInt(String(log.rows ?? 0), 10) - const logCacheHits = parseFloat(String(log.shared_blks_hit ?? 0)) - const logCacheMisses = parseFloat(String(log.shared_blks_read ?? 0)) - - minTime = Math.min(minTime, logMinTime) - maxTime = Math.max(maxTime, logMaxTime) - totalCalls += logCalls - totalRowsRead += logRows - totalCacheHits += logCacheHits - totalCacheMisses += logCacheMisses - totalExecutionTimeForQuery += logMeanTime * logCalls - }) - - // Overall mean time is the weighted average - const avgMeanTime = totalCalls > 0 ? totalExecutionTimeForQuery / totalCalls : 0 - const finalMinTime = minTime === Infinity ? 0 : minTime - const finalMaxTime = maxTime === -Infinity ? 0 : maxTime - - totalExecutionTime += totalExecutionTimeForQuery - - return { - query, - rolname, - count, - avgMeanTime, - minTime: finalMinTime, - maxTime: finalMaxTime, - totalCalls, - totalRowsRead, - totalTime: totalExecutionTimeForQuery, - totalCacheHits, - totalCacheMisses, - } - }) - - queryStats.forEach((stats) => { - const totalCacheAccess = stats.totalCacheHits + stats.totalCacheMisses - const cacheHitRate = totalCacheAccess > 0 ? (stats.totalCacheHits / totalCacheAccess) * 100 : 0 - - const propTotalTime = totalExecutionTime > 0 ? 
(stats.totalTime / totalExecutionTime) * 100 : 0 - - aggregatedData.push({ - query: stats.query, - rolname: stats.rolname, - calls: stats.totalCalls, - mean_time: stats.avgMeanTime, - min_time: stats.minTime, - max_time: stats.maxTime, - total_time: stats.totalTime, - rows_read: stats.totalRowsRead, - cache_hit_rate: cacheHitRate, - prop_total_time: propTotalTime, - index_advisor_result: null, - _total_cache_hits: stats.totalCacheHits, - _total_cache_misses: stats.totalCacheMisses, - _count: stats.count, - }) - }) - - return aggregatedData.sort((a, b) => b.total_time - a.total_time) -} - -export const calculatePercentilesFromHistogram = ( - respCalls: number[] -): { - p50: number - p95: number -} => { - const bucketBoundaries = [ - { min: 0, max: 1 }, - { min: 1, max: 10 }, - { min: 10, max: 100 }, - { min: 100, max: 1000 }, - { min: 1000, max: 10000 }, - { min: 10000, max: 100000 }, - ] - - const totalCalls = respCalls.reduce((sum, count) => sum + count, 0) - - if (totalCalls === 0) { - return { p50: 0, p95: 0 } - } - - const distribution: { - minValue: number - maxValue: number - cumulativeCount: number - count: number - }[] = [] - let cumulativeCount = 0 - - respCalls.forEach((count, index) => { - if (count > 0 && index < bucketBoundaries.length) { - const bucket = bucketBoundaries[index] - cumulativeCount += count - distribution.push({ - minValue: bucket.min, - maxValue: bucket.max, - cumulativeCount, - count, - }) - } - }) - - const getPercentile = (percentile: number): number => { - const targetCount = totalCalls * percentile - - for (let i = 0; i < distribution.length; i++) { - const prevCumulativeCount = i > 0 ? distribution[i - 1].cumulativeCount : 0 - - if (distribution[i].cumulativeCount >= targetCount) { - const positionInBucket = (targetCount - prevCumulativeCount) / distribution[i].count - const bucketMin = distribution[i].minValue - const bucketMax = distribution[i].maxValue - const logMin = Math.log10(Math.max(bucketMin, 0.1)) - const logMax = Math.log10(bucketMax) - const logValue = logMin + positionInBucket * (logMax - logMin) - - return Math.pow(10, logValue) - } - } - - return distribution[distribution.length - 1]?.maxValue || 0 - } - - const result = { - p50: getPercentile(0.5), - p95: getPercentile(0.95), - } - - return result -} diff --git a/apps/studio/components/interfaces/QueryPerformance/WithStatements/WithStatements.utils.test.ts b/apps/studio/components/interfaces/QueryPerformance/WithStatements/WithStatements.utils.test.ts new file mode 100644 index 0000000000000..26e0d4c4575b1 --- /dev/null +++ b/apps/studio/components/interfaces/QueryPerformance/WithStatements/WithStatements.utils.test.ts @@ -0,0 +1,148 @@ +import { describe, it, expect, vi } from 'vitest' +import { transformStatementDataToRows } from './WithStatements.utils' + +vi.mock('../IndexAdvisor/index-advisor.utils', () => ({ + filterProtectedSchemaIndexAdvisorResult: vi.fn((result) => { + if (result?._mock_filter_null) return null + return result + }), + queryInvolvesProtectedSchemas: vi.fn((query: string) => { + return query?.toLowerCase().includes('auth.') + }), +})) + +const makeRow = (overrides: Record = {}) => ({ + query: 'SELECT 1', + rolname: 'postgres', + calls: 10, + mean_time: 5.0, + min_time: 1.0, + max_time: 20.0, + total_time: 50.0, + rows_read: 100, + cache_hit_rate: 0.95, + index_advisor_result: null, + ...overrides, +}) + +describe('transformStatementDataToRows', () => { + it('returns empty array for null or empty input', () => { + expect(transformStatementDataToRows(null as 
any)).toEqual([]) + expect(transformStatementDataToRows([])).toEqual([]) + }) + + it('transforms basic rows correctly', () => { + const data = [makeRow()] + const result = transformStatementDataToRows(data) + + expect(result).toHaveLength(1) + expect(result[0]).toMatchObject({ + query: 'SELECT 1', + rolname: 'postgres', + calls: 10, + mean_time: 5.0, + min_time: 1.0, + max_time: 20.0, + total_time: 50.0, + rows_read: 100, + cache_hit_rate: 0.95, + }) + }) + + it('defaults missing numeric fields to 0', () => { + const data = [{ query: 'SELECT 1' }] + const result = transformStatementDataToRows(data) + + expect(result).toHaveLength(1) + expect(result[0].calls).toBe(0) + expect(result[0].mean_time).toBe(0) + expect(result[0].min_time).toBe(0) + expect(result[0].max_time).toBe(0) + expect(result[0].total_time).toBe(0) + expect(result[0].rows_read).toBe(0) + expect(result[0].cache_hit_rate).toBe(0) + }) + + it('sets rolname to undefined when missing', () => { + const data = [makeRow({ rolname: undefined })] + const result = transformStatementDataToRows(data) + expect(result[0].rolname).toBeUndefined() + }) + + it('calculates prop_total_time as percentage of total time', () => { + const data = [ + makeRow({ query: 'Q1', total_time: 75 }), + makeRow({ query: 'Q2', total_time: 25 }), + ] + const result = transformStatementDataToRows(data) + + expect(result[0].prop_total_time).toBe(75) + expect(result[1].prop_total_time).toBe(25) + }) + + it('handles prop_total_time when total is zero', () => { + const data = [makeRow({ total_time: 0 })] + const result = transformStatementDataToRows(data) + expect(result[0].prop_total_time).toBe(0) + }) + + it('applies index_advisor_result filtering', () => { + const data = [ + makeRow({ + index_advisor_result: { index_statements: ['CREATE INDEX ON public.users (id)'] }, + }), + ] + const result = transformStatementDataToRows(data) + + expect(result[0].index_advisor_result).toEqual({ + index_statements: ['CREATE INDEX ON public.users (id)'], + }) + }) + + it('sets index_advisor_result to null when source is null', () => { + const data = [makeRow({ index_advisor_result: null })] + const result = transformStatementDataToRows(data) + expect(result[0].index_advisor_result).toBeNull() + }) + + describe('filterIndexAdvisor mode', () => { + it('keeps rows for non-protected schema queries', () => { + const data = [makeRow({ query: 'SELECT * FROM public.users' })] + const result = transformStatementDataToRows(data, true) + expect(result).toHaveLength(1) + }) + + it('keeps protected-schema rows that have valid recommendations', () => { + const data = [ + makeRow({ + query: 'SELECT * FROM auth.users', + index_advisor_result: { index_statements: ['CREATE INDEX ON auth.users (id)'] }, + }), + ] + const result = transformStatementDataToRows(data, true) + expect(result).toHaveLength(1) + }) + + it('filters out protected-schema rows with no valid recommendations', () => { + const data = [ + makeRow({ + query: 'SELECT * FROM auth.users', + index_advisor_result: { _mock_filter_null: true }, + }), + ] + const result = transformStatementDataToRows(data, true) + expect(result).toHaveLength(0) + }) + + it('does not filter protected-schema rows when filterIndexAdvisor is false', () => { + const data = [ + makeRow({ + query: 'SELECT * FROM auth.users', + index_advisor_result: { _mock_filter_null: true }, + }), + ] + const result = transformStatementDataToRows(data, false) + expect(result).toHaveLength(1) + }) + }) +}) diff --git 
a/apps/studio/components/interfaces/QueryPerformance/WithMonitor/WithMonitor.tsx b/apps/studio/components/interfaces/QueryPerformance/WithSupamonitor/WithSupamonitor.tsx similarity index 82% rename from apps/studio/components/interfaces/QueryPerformance/WithMonitor/WithMonitor.tsx rename to apps/studio/components/interfaces/QueryPerformance/WithSupamonitor/WithSupamonitor.tsx index dce6acb7bb8fc..30bcd6925805a 100644 --- a/apps/studio/components/interfaces/QueryPerformance/WithMonitor/WithMonitor.tsx +++ b/apps/studio/components/interfaces/QueryPerformance/WithSupamonitor/WithSupamonitor.tsx @@ -6,12 +6,12 @@ import { useMemo, useState, useEffect } from 'react' import dayjs from 'dayjs' import utc from 'dayjs/plugin/utc' import useLogsQuery from 'hooks/analytics/useLogsQuery' -import { getPgStatMonitorLogsQuery } from '../QueryPerformance.constants' +import { getSupamonitorLogsQuery } from '../QueryPerformance.constants' import { - parsePgStatMonitorLogs, + parseSupamonitorLogs, transformLogsToChartData, aggregateLogsByQuery, -} from './WithMonitor.utils' +} from './WithSupamonitor.utils' import { useParams } from 'common' import { DownloadResultsButton } from 'components/ui/DownloadResultsButton' import { captureQueryPerformanceError } from '../QueryPerformance.utils' @@ -21,7 +21,7 @@ import { getErrorMessage } from 'lib/get-error-message' dayjs.extend(utc) -interface WithMonitorProps { +interface WithSupamonitorProps { dateRange?: { period_start: { date: string; time_period: string } period_end: { date: string; time_period: string } @@ -30,13 +30,12 @@ interface WithMonitorProps { onDateRangeChange?: (from: string, to: string) => void } -export const WithMonitor = ({ dateRange, onDateRangeChange }: WithMonitorProps) => { +export const WithSupamonitor = ({ dateRange, onDateRangeChange }: WithSupamonitorProps) => { const { ref } = useParams() const { data: project } = useSelectedProjectQuery() const state = useDatabaseSelectorStateSnapshot() const [selectedQuery, setSelectedQuery] = useState(null) - // [kemal]: Fetch pg_stat_monitor logs. This will need to change when we move to the actual extension. 
const effectiveDateRange = useMemo(() => { if (dateRange) { return { @@ -45,7 +44,6 @@ export const WithMonitor = ({ dateRange, onDateRangeChange }: WithMonitorProps) } } - // [kemal]: Fallback to default 24 hours const end = dayjs.utc() const start = end.subtract(24, 'hours') return { @@ -55,30 +53,33 @@ export const WithMonitor = ({ dateRange, onDateRangeChange }: WithMonitorProps) }, [dateRange]) const queryWithTimeRange = useMemo(() => { - return getPgStatMonitorLogsQuery( + return getSupamonitorLogsQuery( effectiveDateRange.iso_timestamp_start, effectiveDateRange.iso_timestamp_end ) }, [effectiveDateRange]) - const pgStatMonitorLogs = useLogsQuery(ref as string, { + const supamonitorLogs = useLogsQuery(ref as string, { sql: queryWithTimeRange, iso_timestamp_start: effectiveDateRange.iso_timestamp_start, iso_timestamp_end: effectiveDateRange.iso_timestamp_end, }) - const { logData, isLoading: isLogsLoading, error: logsError } = pgStatMonitorLogs + const { logData, isLoading: isLogsLoading, error: logsError } = supamonitorLogs const parsedLogs = useMemo(() => { - return parsePgStatMonitorLogs(logData || []) + const result = parseSupamonitorLogs(logData || []) + return result }, [logData]) const chartData = useMemo(() => { - return transformLogsToChartData(parsedLogs) + const result = transformLogsToChartData(parsedLogs) + return result }, [parsedLogs]) const aggregatedGridData = useMemo(() => { - return aggregateLogsByQuery(parsedLogs) + const result = aggregateLogsByQuery(parsedLogs) + return result }, [parsedLogs]) const handleSelectQuery = (query: string) => { @@ -86,7 +87,7 @@ export const WithMonitor = ({ dateRange, onDateRangeChange }: WithMonitorProps) } const handleRetry = () => { - pgStatMonitorLogs.runQuery() + supamonitorLogs.runQuery() } useEffect(() => { @@ -95,8 +96,8 @@ export const WithMonitor = ({ dateRange, onDateRangeChange }: WithMonitorProps) captureQueryPerformanceError(logsError, { projectRef: ref, databaseIdentifier: state.selectedDatabaseId, - queryPreset: 'pg_stat_monitor', - queryType: 'monitor', + queryPreset: 'supamonitor', + queryType: 'supamonitor', postgresVersion: project?.dbVersion, databaseType: state.selectedDatabaseId === ref ? 
'primary' : 'read-replica', sql: queryWithTimeRange, @@ -120,7 +121,7 @@ export const WithMonitor = ({ dateRange, onDateRangeChange }: WithMonitorProps) actions={ } diff --git a/apps/studio/components/interfaces/QueryPerformance/WithSupamonitor/WithSupamonitor.utils.test.ts b/apps/studio/components/interfaces/QueryPerformance/WithSupamonitor/WithSupamonitor.utils.test.ts new file mode 100644 index 0000000000000..802f65f722ad8 --- /dev/null +++ b/apps/studio/components/interfaces/QueryPerformance/WithSupamonitor/WithSupamonitor.utils.test.ts @@ -0,0 +1,306 @@ +import { describe, it, expect } from 'vitest' +import { + parseSupamonitorLogs, + transformLogsToChartData, + aggregateLogsByQuery, +} from './WithSupamonitor.utils' +import { ParsedLogEntry } from '../QueryPerformance.types' + +const makeSampleLog = (overrides: Partial = {}): any => ({ + timestamp: '2025-01-01T00:00:00Z', + application_name: 'test_app', + calls: 10, + database_name: 'test_db', + query: 'SELECT 1', + query_id: 1, + total_exec_time: 100, + total_plan_time: 20, + user_name: 'postgres', + mean_exec_time: 10, + mean_plan_time: 2, + min_exec_time: 1, + max_exec_time: 50, + min_plan_time: 0.5, + max_plan_time: 5, + p50_exec_time: 8, + p95_exec_time: 40, + p50_plan_time: 1.5, + p95_plan_time: 4, + ...overrides, +}) + +describe('parseSupamonitorLogs', () => { + it('returns empty array for null or empty input', () => { + expect(parseSupamonitorLogs(null as any)).toEqual([]) + expect(parseSupamonitorLogs([])).toEqual([]) + }) + + it('parses log entries preserving all fields', () => { + const raw = [makeSampleLog()] + const result = parseSupamonitorLogs(raw) + + expect(result).toHaveLength(1) + expect(result[0]).toEqual({ + timestamp: '2025-01-01T00:00:00Z', + application_name: 'test_app', + calls: 10, + database_name: 'test_db', + query: 'SELECT 1', + query_id: 1, + total_exec_time: 100, + total_plan_time: 20, + user_name: 'postgres', + mean_exec_time: 10, + mean_plan_time: 2, + min_exec_time: 1, + max_exec_time: 50, + min_plan_time: 0.5, + max_plan_time: 5, + p50_exec_time: 8, + p95_exec_time: 40, + p50_plan_time: 1.5, + p95_plan_time: 4, + }) + }) + + it('handles multiple log entries', () => { + const raw = [makeSampleLog(), makeSampleLog({ query: 'SELECT 2', query_id: 2 })] + const result = parseSupamonitorLogs(raw) + expect(result).toHaveLength(2) + }) +}) + +describe('transformLogsToChartData', () => { + it('returns empty array for null or empty input', () => { + expect(transformLogsToChartData(null as any)).toEqual([]) + expect(transformLogsToChartData([])).toEqual([]) + }) + + it('filters out entries with no timestamp', () => { + const logs: ParsedLogEntry[] = [{ query: 'SELECT 1', calls: 5 }] + const result = transformLogsToChartData(logs) + expect(result).toEqual([]) + }) + + it('filters out entries with invalid timestamp', () => { + const logs: ParsedLogEntry[] = [{ timestamp: 'not-a-date', calls: 5 }] + const result = transformLogsToChartData(logs) + expect(result).toEqual([]) + }) + + it('transforms a valid log entry into a chart data point', () => { + const logs: ParsedLogEntry[] = [ + { + timestamp: '2025-01-01T00:00:00Z', + mean_exec_time: 10, + mean_plan_time: 2, + min_exec_time: 1, + max_exec_time: 50, + min_plan_time: 0.5, + max_plan_time: 5, + p50_exec_time: 8, + p95_exec_time: 40, + p50_plan_time: 1.5, + p95_plan_time: 4, + calls: 10, + }, + ] + + const result = transformLogsToChartData(logs) + + expect(result).toHaveLength(1) + expect(result[0]).toEqual({ + period_start: new 
Date('2025-01-01T00:00:00Z').getTime(), + timestamp: '2025-01-01T00:00:00Z', + query_latency: 12, // 10 + 2 + mean_time: 10, + min_time: 1.5, // 1 + 0.5 + max_time: 55, // 50 + 5 + stddev_time: 0, + p50_time: 9.5, // 8 + 1.5 + p95_time: 44, // 40 + 4 + rows_read: 0, + calls: 10, + cache_hits: 0, + cache_misses: 0, + }) + }) + + it('defaults missing numeric fields to 0', () => { + const logs: ParsedLogEntry[] = [ + { + timestamp: '2025-06-01T12:00:00Z', + }, + ] + + const result = transformLogsToChartData(logs) + + expect(result).toHaveLength(1) + expect(result[0].query_latency).toBe(0) + expect(result[0].calls).toBe(0) + expect(result[0].min_time).toBe(0) + expect(result[0].max_time).toBe(0) + }) + + it('sorts results by period_start ascending', () => { + const logs: ParsedLogEntry[] = [ + { timestamp: '2025-01-03T00:00:00Z', mean_exec_time: 1 }, + { timestamp: '2025-01-01T00:00:00Z', mean_exec_time: 2 }, + { timestamp: '2025-01-02T00:00:00Z', mean_exec_time: 3 }, + ] + + const result = transformLogsToChartData(logs) + + expect(result).toHaveLength(3) + expect(result[0].timestamp).toBe('2025-01-01T00:00:00Z') + expect(result[1].timestamp).toBe('2025-01-02T00:00:00Z') + expect(result[2].timestamp).toBe('2025-01-03T00:00:00Z') + }) +}) + +describe('aggregateLogsByQuery', () => { + it('returns empty array for null or empty input', () => { + expect(aggregateLogsByQuery(null as any)).toEqual([]) + expect(aggregateLogsByQuery([])).toEqual([]) + }) + + it('skips entries with empty or whitespace-only queries', () => { + const logs: ParsedLogEntry[] = [ + { query: '', calls: 5 }, + { query: ' ', calls: 3 }, + ] + const result = aggregateLogsByQuery(logs) + expect(result).toEqual([]) + }) + + it('aggregates a single log entry correctly', () => { + const logs: ParsedLogEntry[] = [ + { + query: 'SELECT 1', + user_name: 'postgres', + application_name: 'app', + calls: 10, + total_exec_time: 100, + total_plan_time: 20, + min_exec_time: 1, + max_exec_time: 50, + min_plan_time: 0.5, + max_plan_time: 5, + }, + ] + + const result = aggregateLogsByQuery(logs) + + expect(result).toHaveLength(1) + expect(result[0].query).toBe('SELECT 1') + expect(result[0].rolname).toBe('postgres') + expect(result[0].application_name).toBe('app') + expect(result[0].calls).toBe(10) + expect(result[0].total_time).toBe(120) + expect(result[0].mean_time).toBe(12) + expect(result[0].min_time).toBe(1.5) + expect(result[0].max_time).toBe(55) + expect(result[0].prop_total_time).toBe(100) + }) + + it('aggregates multiple entries for the same query', () => { + const logs: ParsedLogEntry[] = [ + { + query: 'SELECT 1', + user_name: 'postgres', + calls: 5, + total_exec_time: 50, + total_plan_time: 10, + min_exec_time: 2, + max_exec_time: 20, + min_plan_time: 1, + max_plan_time: 3, + }, + { + query: 'SELECT 1', + user_name: 'postgres', + calls: 10, + total_exec_time: 100, + total_plan_time: 20, + min_exec_time: 1, + max_exec_time: 50, + min_plan_time: 0.5, + max_plan_time: 5, + }, + ] + + const result = aggregateLogsByQuery(logs) + + expect(result).toHaveLength(1) + expect(result[0].calls).toBe(15) // 5 + 10 + expect(result[0].total_time).toBe(180) // (50+10) + (100+20) + expect(result[0].mean_time).toBe(12) // 180 / 15 + expect(result[0].min_time).toBe(1.5) // min(2+1, 1+0.5) = 1.5 + expect(result[0].max_time).toBe(55) // max(20+3, 50+5) = 55 + expect(result[0]._count).toBe(2) // 2 log entries + }) + + it('normalizes whitespace differences in queries', () => { + const logs: ParsedLogEntry[] = [ + { query: 'SELECT 1', calls: 5, 
total_exec_time: 50, total_plan_time: 0 }, + { query: 'SELECT 1', calls: 3, total_exec_time: 30, total_plan_time: 0 }, + ] + + const result = aggregateLogsByQuery(logs) + + expect(result).toHaveLength(1) + expect(result[0].calls).toBe(8) + }) + + it('sorts results by total_time descending', () => { + const logs: ParsedLogEntry[] = [ + { query: 'SELECT 1', calls: 1, total_exec_time: 10, total_plan_time: 0 }, + { query: 'SELECT 2', calls: 1, total_exec_time: 100, total_plan_time: 0 }, + { query: 'SELECT 3', calls: 1, total_exec_time: 50, total_plan_time: 0 }, + ] + + const result = aggregateLogsByQuery(logs) + + expect(result).toHaveLength(3) + expect(result[0].query).toBe('SELECT 2') + expect(result[1].query).toBe('SELECT 3') + expect(result[2].query).toBe('SELECT 1') + }) + + it('calculates prop_total_time as percentage of total execution', () => { + const logs: ParsedLogEntry[] = [ + { query: 'SELECT 1', calls: 1, total_exec_time: 75, total_plan_time: 0 }, + { query: 'SELECT 2', calls: 1, total_exec_time: 25, total_plan_time: 0 }, + ] + + const result = aggregateLogsByQuery(logs) + + expect(result[0].prop_total_time).toBe(75) + expect(result[1].prop_total_time).toBe(25) + }) + + it('handles zero calls gracefully (mean_time defaults to 0)', () => { + const logs: ParsedLogEntry[] = [ + { query: 'SELECT 1', calls: 0, total_exec_time: 100, total_plan_time: 0 }, + ] + + const result = aggregateLogsByQuery(logs) + + expect(result).toHaveLength(1) + expect(result[0].mean_time).toBe(0) + }) + + it('sets static fields correctly', () => { + const logs: ParsedLogEntry[] = [ + { query: 'SELECT 1', calls: 1, total_exec_time: 10, total_plan_time: 0 }, + ] + + const result = aggregateLogsByQuery(logs) + + expect(result[0].rows_read).toBe(0) + expect(result[0].cache_hit_rate).toBe(0) + expect(result[0].index_advisor_result).toBeNull() + expect(result[0]._total_cache_hits).toBe(0) + expect(result[0]._total_cache_misses).toBe(0) + }) +}) diff --git a/apps/studio/components/interfaces/QueryPerformance/WithSupamonitor/WithSupamonitor.utils.ts b/apps/studio/components/interfaces/QueryPerformance/WithSupamonitor/WithSupamonitor.utils.ts new file mode 100644 index 0000000000000..73711e9be8e93 --- /dev/null +++ b/apps/studio/components/interfaces/QueryPerformance/WithSupamonitor/WithSupamonitor.utils.ts @@ -0,0 +1,147 @@ +import { QueryPerformanceRow, ChartDataPoint, ParsedLogEntry } from '../QueryPerformance.types' + +export function parseSupamonitorLogs(logData: any[]): ParsedLogEntry[] { + if (!logData || logData.length === 0) return [] + + return logData.map((log) => ({ + timestamp: log.timestamp, + application_name: log.application_name, + calls: log.calls, + database_name: log.database_name, + query: log.query, + query_id: log.query_id, + total_exec_time: log.total_exec_time, + total_plan_time: log.total_plan_time, + user_name: log.user_name, + mean_exec_time: log.mean_exec_time, + mean_plan_time: log.mean_plan_time, + min_exec_time: log.min_exec_time, + max_exec_time: log.max_exec_time, + min_plan_time: log.min_plan_time, + max_plan_time: log.max_plan_time, + p50_exec_time: log.p50_exec_time, + p95_exec_time: log.p95_exec_time, + p50_plan_time: log.p50_plan_time, + p95_plan_time: log.p95_plan_time, + })) +} + +export function transformLogsToChartData(parsedLogs: ParsedLogEntry[]): ChartDataPoint[] { + if (!parsedLogs || parsedLogs.length === 0) return [] + + return parsedLogs + .map((log: ParsedLogEntry) => { + if (!log.timestamp) return null + + const periodStart = new Date(log.timestamp).getTime() + if 
(isNaN(periodStart)) return null + + const meanExecTime = parseFloat(String(log.mean_exec_time ?? 0)) + const meanPlanTime = parseFloat(String(log.mean_plan_time ?? 0)) + const calls = parseInt(String(log.calls ?? 0), 10) + + return { + period_start: periodStart, + timestamp: log.timestamp, + query_latency: meanExecTime + meanPlanTime, + mean_time: meanExecTime, + min_time: (log.min_exec_time ?? 0) + (log.min_plan_time ?? 0), + max_time: (log.max_exec_time ?? 0) + (log.max_plan_time ?? 0), + stddev_time: 0, + p50_time: (log.p50_exec_time ?? 0) + (log.p50_plan_time ?? 0), + p95_time: (log.p95_exec_time ?? 0) + (log.p95_plan_time ?? 0), + rows_read: 0, + calls, + cache_hits: 0, + cache_misses: 0, + } + }) + .filter((item): item is NonNullable => item !== null) + .sort((a, b) => a.period_start - b.period_start) +} + +function normalizeQuery(query: string): string { + return query.replace(/\s+/g, ' ').trim() +} + +export function aggregateLogsByQuery(parsedLogs: ParsedLogEntry[]): QueryPerformanceRow[] { + if (!parsedLogs || parsedLogs.length === 0) return [] + + const queryGroups = new Map() + + parsedLogs.forEach((log) => { + const query = normalizeQuery(log.query || '') + if (!query) return + + if (!queryGroups.has(query)) { + queryGroups.set(query, []) + } + queryGroups.get(query)!.push(log) + }) + + const aggregatedData: QueryPerformanceRow[] = [] + let totalExecutionTime = 0 + + const queryStats = Array.from(queryGroups.entries()).map(([query, logs]) => { + const count = logs.length + let totalCalls = 0 + let totalExecTime = 0 + let totalPlanTime = 0 + let minTime = Infinity + let maxTime = -Infinity + const rolname = logs[0]?.user_name || '' + const applicationName = logs[0]?.application_name || '' + + logs.forEach((log) => { + const logCalls = parseInt(String(log.calls ?? 0), 10) + totalCalls += logCalls + totalExecTime += parseFloat(String(log.total_exec_time ?? 0)) + totalPlanTime += parseFloat(String(log.total_plan_time ?? 0)) + minTime = Math.min(minTime, (log.min_exec_time ?? 0) + (log.min_plan_time ?? 0)) + maxTime = Math.max(maxTime, (log.max_exec_time ?? 0) + (log.max_plan_time ?? 0)) + }) + + const totalTime = totalExecTime + totalPlanTime + const avgMeanTime = totalCalls > 0 ? totalTime / totalCalls : 0 + const finalMinTime = minTime === Infinity ? 0 : minTime + const finalMaxTime = maxTime === -Infinity ? 0 : maxTime + + totalExecutionTime += totalTime + + return { + query, + rolname, + applicationName, + count, + avgMeanTime, + minTime: finalMinTime, + maxTime: finalMaxTime, + totalCalls, + totalTime, + } + }) + + queryStats.forEach((stats) => { + const propTotalTime = totalExecutionTime > 0 ? 
(stats.totalTime / totalExecutionTime) * 100 : 0 + + aggregatedData.push({ + query: stats.query, + rolname: stats.rolname, + application_name: stats.applicationName, + calls: stats.totalCalls, + mean_time: stats.avgMeanTime, + min_time: stats.minTime, + max_time: stats.maxTime, + total_time: stats.totalTime, + rows_read: 0, + cache_hit_rate: 0, + prop_total_time: propTotalTime, + index_advisor_result: null, + _total_cache_hits: 0, + _total_cache_misses: 0, + _count: stats.count, + }) + }) + + return aggregatedData.sort((a, b) => b.total_time - a.total_time) +} diff --git a/apps/studio/components/interfaces/QueryPerformance/hooks/useSupamonitorStatus.ts b/apps/studio/components/interfaces/QueryPerformance/hooks/useSupamonitorStatus.ts new file mode 100644 index 0000000000000..e525668a31aa0 --- /dev/null +++ b/apps/studio/components/interfaces/QueryPerformance/hooks/useSupamonitorStatus.ts @@ -0,0 +1,18 @@ +import { useSupamonitorEnabledQuery } from 'data/database/supamonitor-enabled-query' +import { useSelectedProjectQuery } from 'hooks/misc/useSelectedProject' + +/** + * Hook to check if supamonitor is enabled in shared_preload_libraries + */ +export function useSupamonitorStatus() { + const { data: project } = useSelectedProjectQuery() + const { data: isSupamonitorEnabled, isLoading } = useSupamonitorEnabledQuery({ + projectRef: project?.ref, + connectionString: project?.connectionString, + }) + + return { + isSupamonitorEnabled: isSupamonitorEnabled ?? false, + isLoading, + } +} diff --git a/apps/studio/data/database/keys.ts b/apps/studio/data/database/keys.ts index 29dc8b52da2df..941609fe0f0cf 100644 --- a/apps/studio/data/database/keys.ts +++ b/apps/studio/data/database/keys.ts @@ -46,4 +46,6 @@ export const databaseKeys = { schema: string | undefined, table: string | undefined ) => ['projects', projectRef, 'table-index-advisor', schema, table] as const, + supamonitorEnabled: (projectRef: string | undefined) => + ['projects', projectRef, 'supamonitor-enabled'] as const, } diff --git a/apps/studio/data/database/supamonitor-enabled-query.ts b/apps/studio/data/database/supamonitor-enabled-query.ts new file mode 100644 index 0000000000000..0d537e449ede5 --- /dev/null +++ b/apps/studio/data/database/supamonitor-enabled-query.ts @@ -0,0 +1,47 @@ +import { useQuery } from '@tanstack/react-query' + +import { executeSql } from 'data/sql/execute-sql-query' +import { useSelectedProjectQuery } from 'hooks/misc/useSelectedProject' +import { PROJECT_STATUS } from 'lib/constants' +import type { ResponseError, UseCustomQueryOptions } from 'types' +import { databaseKeys } from './keys' + +export type SupamonitorEnabledVariables = { + projectRef?: string + connectionString?: string | null +} + +export async function getSupamonitorEnabled({ + projectRef, + connectionString, +}: SupamonitorEnabledVariables) { + const { result } = await executeSql<{ libraries: string }[]>({ + projectRef, + connectionString, + sql: `SELECT current_setting('shared_preload_libraries', true) AS libraries`, + }) + + const libraries = result[0]?.libraries ?? 
'' + return libraries.split(',').some((lib) => lib.trim() === 'supamonitor') +} + +export type SupamonitorEnabledData = Awaited> +export type SupamonitorEnabledError = ResponseError + +export const useSupamonitorEnabledQuery = ( + { projectRef, connectionString }: SupamonitorEnabledVariables, + { + enabled = true, + ...options + }: UseCustomQueryOptions = {} +) => { + const { data: project } = useSelectedProjectQuery() + const isActive = project?.status === PROJECT_STATUS.ACTIVE_HEALTHY + + return useQuery({ + queryKey: databaseKeys.supamonitorEnabled(projectRef), + queryFn: () => getSupamonitorEnabled({ projectRef, connectionString }), + enabled: enabled && typeof projectRef !== 'undefined' && isActive, + ...options, + }) +} diff --git a/apps/studio/pages/project/[ref]/observability/query-performance.tsx b/apps/studio/pages/project/[ref]/observability/query-performance.tsx index e51dec02a3a4b..22a5b54b6bebe 100644 --- a/apps/studio/pages/project/[ref]/observability/query-performance.tsx +++ b/apps/studio/pages/project/[ref]/observability/query-performance.tsx @@ -3,6 +3,7 @@ import { NumericFilter } from 'components/interfaces/Reports/v2/ReportsNumericFi import { useParams } from 'common' import { useIndexAdvisorStatus } from 'components/interfaces/QueryPerformance/hooks/useIsIndexAdvisorStatus' +import { useSupamonitorStatus } from 'components/interfaces/QueryPerformance/hooks/useSupamonitorStatus' import { useQueryPerformanceSort } from 'components/interfaces/QueryPerformance/hooks/useQueryPerformanceSort' import { QueryPerformance } from 'components/interfaces/QueryPerformance/QueryPerformance' import { @@ -27,6 +28,7 @@ const QueryPerformanceReport: NextPageWithLayout = () => { const { ref } = useParams() const { data: project, isLoading: isLoadingProject } = useSelectedProjectQuery() const { isIndexAdvisorEnabled } = useIndexAdvisorStatus() + const { isSupamonitorEnabled } = useSupamonitorStatus() const { sort: sortConfig } = useQueryPerformanceSort() const { @@ -76,8 +78,6 @@ const QueryPerformanceReport: NextPageWithLayout = () => { filterIndexAdvisor: indexAdvisor === 'true', }) - const isPgStatMonitorEnabled = project?.dbVersion === '17.4.1.076-psml-1' - if (!isLoadingProject && !project) { return (
@@ -99,7 +99,7 @@ const QueryPerformanceReport: NextPageWithLayout = () => { href={`${DOCS_URL}/guides/platform/performance#examining-query-performance`} /> - {isPgStatMonitorEnabled && ( + {isSupamonitorEnabled && ( { queryHitRate={queryHitRate} queryPerformanceQuery={queryPerformanceQuery} queryMetrics={queryMetrics} - isPgStatMonitorEnabled={isPgStatMonitorEnabled} + isSupamonitorEnabled={isSupamonitorEnabled} dateRange={selectedDateRange} onDateRangeChange={updateDateRange} /> diff --git a/supabase/functions/common/database-types.ts b/supabase/functions/common/database-types.ts deleted file mode 100644 index e69de29bb2d1d..0000000000000