diff --git a/apps/docs/components/Navigation/NavigationMenu/NavigationMenu.constants.ts b/apps/docs/components/Navigation/NavigationMenu/NavigationMenu.constants.ts index 69dd8d4d5d53f..dfbd3ef9d71e9 100644 --- a/apps/docs/components/Navigation/NavigationMenu/NavigationMenu.constants.ts +++ b/apps/docs/components/Navigation/NavigationMenu/NavigationMenu.constants.ts @@ -2480,7 +2480,17 @@ export const platform: NavMenuConstant = { { name: 'Custom Domains', url: '/guides/platform/custom-domains' }, { name: 'Database Backups', url: '/guides/platform/backups' }, { name: 'IPv4 Address', url: '/guides/platform/ipv4-address' }, - { name: 'Read Replicas', url: '/guides/platform/read-replicas' }, + { + name: 'Read Replicas', + url: '/guides/platform/read-replicas', + items: [ + { name: 'Overview', url: '/guides/platform/read-replicas' as `/${string}` }, + { + name: 'Getting started', + url: '/guides/platform/read-replicas/getting-started' as `/${string}`, + }, + ], + }, ], }, { @@ -2832,6 +2842,8 @@ export const self_hosting: NavMenuConstant = { items: [ { name: 'Enabling MCP server', url: '/guides/self-hosting/enable-mcp' }, { name: 'Restore from Platform', url: '/guides/self-hosting/restore-from-platform' }, + { name: 'Configure S3 Storage', url: '/guides/self-hosting/self-hosted-s3' }, + { name: 'Copy Storage from Platform', url: '/guides/self-hosting/copy-from-platform-s3' }, ], }, { diff --git a/apps/docs/content/guides/auth/auth-email-templates.mdx b/apps/docs/content/guides/auth/auth-email-templates.mdx index 291ab4e0aef56..b548594f7fc3b 100644 --- a/apps/docs/content/guides/auth/auth-email-templates.mdx +++ b/apps/docs/content/guides/auth/auth-email-templates.mdx @@ -39,7 +39,7 @@ The templating system provides the following variables for use: | `{{ .SiteURL }}` | Contains your application's Site URL. This can be configured in your project's [authentication settings](/dashboard/project/_/auth/url-configuration). | | `{{ .RedirectTo }}` | Contains the redirect URL passed when `signUp`, `signInWithOtp`, `signInWithOAuth`, `resetPasswordForEmail` or `inviteUserByEmail` is called. The redirect URL allow list can be configured in your project's [authentication settings](/dashboard/project/_/auth/url-configuration). | | `{{ .Data }}` | Contains metadata from `auth.users.user_metadata`. Use this to personalize the email message. | -| `{{ .Email }}` | Contains the original email address of the user. Empty when when trying to [link an email address to an anonymous user](/docs/guides/auth/auth-anonymous#link-an-email--phone-identity). | +| `{{ .Email }}` | Contains the original email address of the user. Empty when trying to [link an email address to an anonymous user](/docs/guides/auth/auth-anonymous#link-an-email--phone-identity). | | `{{ .NewEmail }}` | Contains the new email address of the user. This variable is only supported in the "Change email address" template. | | `{{ .OldEmail }}` | Contains the old email address of the user. This variable is only supported in the "Email address changed notification" template. | | `{{ .Phone }}` | Contains the new phone number of the user. This variable is only supported in the "Phone number changed notification" template. 
| diff --git a/apps/docs/content/guides/database/custom-postgres-config.mdx b/apps/docs/content/guides/database/custom-postgres-config.mdx index 2d9d2b6363d14..22c1dceae6629 100644 --- a/apps/docs/content/guides/database/custom-postgres-config.mdx +++ b/apps/docs/content/guides/database/custom-postgres-config.mdx @@ -55,7 +55,7 @@ Some settings can only be modified by a superuser. Supabase pre-enables the [`su | `log_lock_waits` | Controls whether a log message is produced when a session waits longer than [deadlock_timeout](https://www.postgresql.org/docs/current/runtime-config-locks.html#GUC-DEADLOCK-TIMEOUT) to acquire a lock. | | `log_min_duration_statement` | Causes the duration of each completed statement to be logged if the statement ran for at least the specified amount of time. | | `log_min_messages` | Minimum severity level of messages to log. | -| `log_parameter_max_length` | Sets the maximum length in bytes of data logged for bind parameter values when logging statements. | +| `log_parameter_max_length` | Sets the maximum length in bytes of data logged for bind parameter values when logging statements. | | `log_replication_commands` | Logs all replication commands | | `log_statement` | Controls which SQL statements are logged. Valid values are `none` (off), `ddl`, `mod`, and `all` (all statements). | | `log_temp_files` | Controls logging of temporary file names and sizes. | @@ -65,9 +65,9 @@ Some settings can only be modified by a superuser. Supabase pre-enables the [`su | `pgaudit.*` | Configures the [PGAudit extension](/docs/guides/database/extensions/pgaudit). The `log_parameter` is still restricted to protect secrets | | `pgrst.*` | [`PostgREST` settings](https://docs.postgrest.org/en/stable/references/configuration.html#db-aggregates-enabled) | | `plan_filter.*` | Configures the [pg_plan_filter extension](/docs/guides/database/extensions/pg_plan_filter) | -| `safeupdate.enabled` | Enables the [safeupdate extension](https://github.com/eradman/pg-safeupdate), which requires a `WHERE` clause on `UPDATE` and `DELETE` statements. | +| `safeupdate.enabled` | Enables the [safeupdate extension](https://github.com/eradman/pg-safeupdate), which requires a `WHERE` clause on `UPDATE` and `DELETE` statements. | | `session_replication_role` | Sets the session's behavior for triggers and rewrite rules. | -| `track_functions` | Controls whether function call counts and timing are tracked. Valid values are `none`, `pl` (only procedural-language functions), and `all`. | +| `track_functions` | Controls whether function call counts and timing are tracked. Valid values are `none`, `pl` (only procedural-language functions), and `all`. | | `track_io_timing` | Collects timing statistics for database I/O activity. | | `wal_compression` | This parameter enables compression of WAL using the specified compression method. | diff --git a/apps/docs/content/guides/platform/manage-your-usage/read-replicas.mdx b/apps/docs/content/guides/platform/manage-your-usage/read-replicas.mdx index 503f697ac016c..b62147c9ed3be 100644 --- a/apps/docs/content/guides/platform/manage-your-usage/read-replicas.mdx +++ b/apps/docs/content/guides/platform/manage-your-usage/read-replicas.mdx @@ -5,7 +5,13 @@ title: 'Manage Read Replica usage' ## What you are charged for -Each [Read Replica](/docs/guides/platform/read-replicas) is a dedicated database. 
You are charged for its resources: [Compute](/docs/guides/platform/compute-and-disk#compute), [Disk Size](/docs/guides/platform/database-size#disk-size), provisioned [Disk IOPS](/docs/guides/platform/compute-and-disk#provisioned-disk-throughput-and-iops), provisioned [Disk Throughput](/docs/guides/platform/compute-and-disk#provisioned-disk-throughput-and-iops), and [IPv4](/docs/guides/platform/ipv4-address). +Each [Read Replica](/docs/guides/platform/read-replicas) is a dedicated database. You are charged for its resources, which are the following, and mirrored from the primary database: + +- [Compute](/docs/guides/platform/compute-and-disk#compute) +- [Disk Size](/docs/guides/platform/database-size#disk-size) +- Provisioned [Disk IOPS](/docs/guides/platform/compute-and-disk#provisioned-disk-throughput-and-iops) +- Provisioned [Disk Throughput](/docs/guides/platform/compute-and-disk#provisioned-disk-throughput-and-iops) +- [IPv4](/docs/guides/platform/ipv4-address). @@ -13,26 +19,37 @@ Read Replicas are **not** covered by the [Spend Cap](/docs/guides/platform/cost- -## How charges are calculated +## How we calculate charges Read Replica charges are the total of the charges listed below. -**Compute** +### Compute + Compute is charged by the hour, meaning you are charged for the exact number of hours that a Read Replica is running and, therefore, incurring Compute usage. If a Read Replica runs for part of an hour, you are still charged for the full hour. Read Replicas run on the same Compute size as the primary database. -**Disk Size** -Refer to [Manage Disk Size usage](/docs/guides/platform/manage-your-usage/disk-size) for details on how charges are calculated. The disk size of a Read Replica is 1.25x the size of the primary disk to account for WAL archives. With a Read Replica you go beyond your subscription plan's quota for Disk Size. +### Disk size + +Read [the Manage Disk Size usage guide](/docs/guides/platform/manage-your-usage/disk-size) for details on how we calculate charges. The disk size of a Read Replica is 1.25x the size of the primary disk to account for WAL archives. With a Read Replica you go beyond your subscription plan's quota for Disk Size. + +{/* supa-mdx-lint-disable-next-line Rule001HeadingCase */} + +### Provisioned Disk IOPS (optional) + +Read Replicas inherit any additional provisioned Disk IOPS from the primary database. Read the [Manage Disk IOPS usage guide](/docs/guides/platform/manage-your-usage/disk-iops) for details on how we calculate charges. + +{/* supa-mdx-lint-disable-next-line Rule001HeadingCase */} + +### Provisioned Disk Throughput (optional) + +Read Replicas inherit any additional provisioned Disk Throughput from the primary database. Read the [Manage Disk Throughput usage guide](/docs/guides/platform/manage-your-usage/disk-throughput) for details on how we calculate charges. -**Provisioned Disk IOPS (optional)** -Read Replicas inherit any additional provisioned Disk IOPS from the primary database. Refer to [Manage Disk IOPS usage](/docs/guides/platform/manage-your-usage/disk-iops) for details on how charges are calculated. +{/* supa-mdx-lint-enable-next-line Rule001HeadingCase */} -**Provisioned Disk Throughput (optional)** -Read Replicas inherit any additional provisioned Disk Throughput from the primary database. Refer to [Manage Disk Throughput usage](/docs/guides/platform/manage-your-usage/disk-throughput) for details on how charges are calculated. 
+### IPv4 (optional) -**IPv4 (optional)** -If the primary database has a configured IPv4 address, its Read Replicas are also assigned one, with charges for each. Refer to [Manage IPv4 usage](/docs/guides/platform/manage-your-usage/ipv4) for details on how charges are calculated. +If the primary database has configured an IPv4 address add-on, its Read Replicas are also assigned one, with charges for each. Read the [Manage IPv4 usage guide](/docs/guides/platform/manage-your-usage/ipv4) for details on how we calculate charges. ### Usage on your invoice @@ -42,7 +59,7 @@ Compute incurred by Read Replicas is shown as "Replica Compute Hours" on your in ### No additional resources configured -The project has one Read Replica and no IPv4 and no additional Disk IOPS and Disk Throughput configured. +The project has one Read Replica, no IPv4, and no additional Disk IOPS and Disk Throughput configured. | Line Item | Units | Costs | | ----------------------------- | --------- | --------------------------- | @@ -60,7 +77,7 @@ The project has one Read Replica and no IPv4 and no additional Disk IOPS and Dis ### Additional resources configured -The project has two Read Replicas and IPv4 and additional Disk IOPS and Disk Throughput configured. +The project has two Read Replicas, IPv4, and additional Disk IOPS and Disk Throughput configured. | Line Item | Units | Costs | | ----------------------------- | --------- | ---------------------------- | diff --git a/apps/docs/content/guides/platform/multi-factor-authentication.mdx b/apps/docs/content/guides/platform/multi-factor-authentication.mdx index 5d59ed655aaa4..b1ef06cc409f3 100644 --- a/apps/docs/content/guides/platform/multi-factor-authentication.mdx +++ b/apps/docs/content/guides/platform/multi-factor-authentication.mdx @@ -40,7 +40,7 @@ If you are an organization owner and on the Pro, Team or Enterprise plan, you ca ## Disable MFA -You can disable MFA for your user account under your [Supabase account settings](/dashboard/account/security). On subsequent login attempts, you will not be prompted to enter a MFA code. +You can disable MFA for your user account under your [Supabase account settings](/dashboard/account/security). On subsequent login attempts, you will not be prompted to enter an MFA code. diff --git a/apps/docs/content/guides/platform/read-replicas.mdx b/apps/docs/content/guides/platform/read-replicas.mdx index 3c2eefe23b2f7..93764c7378497 100644 --- a/apps/docs/content/guides/platform/read-replicas.mdx +++ b/apps/docs/content/guides/platform/read-replicas.mdx @@ -4,7 +4,7 @@ description: 'Deploy read-only databases across multiple regions, for lower late subtitle: 'Deploy read-only databases across multiple regions, for lower latency and better resource management.' --- -Read Replicas are additional databases that are kept in sync with your Primary database. You can read your data from a Read Replica, which helps with: +Read Replicas are additional databases kept in sync with your Primary database. You can read your data from a Read Replica, which helps with: - **Load balancing:** Read Replicas reduce load on the Primary database. For example, you can use a Read Replica for complex analytical queries and reserve the Primary for user-facing create, update, and delete operations. - **Improved latency:** For projects with a global user base, additional databases can be deployed closer to users to reduce latency. 
@@ -19,7 +19,7 @@ Read Replicas are additional databases that are kept in sync with your Primary d ## About Read Replicas -The database you start with when launching a Supabase project is your Primary database. Read Replicas are kept in sync with the Primary through a process called "replication." Replication is asynchronous to ensure that transactions on the Primary aren't blocked. There is a delay between an update on the Primary and the time that a Read Replica receives the change. This delay is called "replication lag." +The database you start with when launching a Supabase project is your Primary database. A process called "replication" keeps Read Replicas in sync with the Primary. Replication is asynchronous to ensure that transactions on the Primary aren't blocked. There is a delay between an update on the Primary and the time that a Read Replica receives the change. This delay is called "replication lag." You can only read data from a Read Replica. This is in contrast to a Primary database, where you can both read and write: @@ -28,73 +28,33 @@ You can only read data from a Read Replica. This is in contrast to a Primary dat | Primary | ✅ | ✅ | ✅ | ✅ | | Read Replica | ✅ | - | - | - | -## Prerequisites + +
+ - - -Read Replicas are available for all projects on the Pro, Team and Enterprise plans. Spin one up now over at the [Infrastructure Settings page](/dashboard/project/_/settings/infrastructure). - - - -Projects must meet these requirements to use Read Replicas: - -1. Running on AWS. -1. Running on at least a [Small compute add-on](/docs/guides/platform/compute-add-ons). - - Read Replicas are started on the same compute instance as the Primary to keep up with changes. -1. Running on Postgres 15+. - - For projects running on older versions of Postgres, you will need to [upgrade to the latest platform version](/docs/guides/platform/migrating-and-upgrading-projects#pgupgrade). -1. Using [physical backups](/docs/guides/platform/backups#point-in-time-recovery) - - Physical backups are automatically enabled if using [PITR](/docs/guides/platform/backups#point-in-time-recovery) - - If you're not using PITR, you'll be able to switch to physical backups as part of the Read Replica setup process. Note that physical backups can't be downloaded from the dashboard in the way logical backups can. - -## Getting started - -To add a Read Replica, go to the [Infrastructure Settings page](/dashboard/project/_/settings/infrastructure) in your dashboard. - -You can also manage Read Replicas using the Management API (beta functionality): - -```bash -# Get your access token from https://supabase.com/dashboard/account/tokens -export SUPABASE_ACCESS_TOKEN="your-access-token" -export PROJECT_REF="your-project-ref" - -# Create a new Read Replica -curl -X POST "https://api.supabase.com/v1/projects/$PROJECT_REF/read-replicas/setup" \ - -H "Authorization: Bearer $SUPABASE_ACCESS_TOKEN" \ - -H "Content-Type: application/json" \ - -d '{ - "region": "us-east-1" - }' - -# Delete a Read Replica -curl -X POST "https://api.supabase.com/v1/projects/$PROJECT_REF/read-replicas/remove" \ - -H "Authorization: Bearer $SUPABASE_ACCESS_TOKEN" \ - -H "Content-Type: application/json" \ - -d '{ - "database_identifier": "abcdefghijklmnopqrst" - }' -``` - -Projects on an XL compute add-on or larger can create up to five Read Replicas. Projects on compute add-ons smaller than XL can create up to two Read Replicas. All Read Replicas inherit the compute size of their Primary database. - -### Deploying a Read Replica + When your database starts slowing down, you face a choice: make your existing database bigger (scale vertically), or spread the load across multiple databases (scale horizontally). Both approaches work. Neither is universally correct. The right answer depends on your workload, your budget, and where the bottleneck actually is. -A Read Replica is deployed by using a physical backup as a starting point, and a combination of WAL file archives and direct replication from the Primary database to catch up. Both components may take significant time to complete. The duration of restoring from a physical backup is roughly dependent and directly related to the database size of your project. The time taken to catch up to the primary using WAL archives and direct replication is dependent on the level of activity on the Primary database; a more active database will produce a larger number of WAL files that will need to be processed. + Read Replicas decision flowchart -Along with the progress of the deployment, the dashboard displays rough estimates for each component. + -{/* supa-mdx-lint-disable-next-line Rule001HeadingCase */} +
-### What does it mean when "Init failed" is observed? - -The status `Init failed` indicates that the Read Replica has failed to deploy. Some possible scenarios as to why a Read Replica may have failed to be deployed: - -- Underlying instance failed to come up. -- Network issue leading to inability to connect to the Primary database. -- Possible incompatible database settings between the Primary and Read Replica databases. -- Platform issues. - -It is safe to drop this failed Read Replica, and in the event of a transient issue, attempt to spin up another one. If however spinning up Read Replicas for your project consistently fails, do check out our [status page](https://status.supabase.com) for any ongoing incidents, or open a support ticket [here](/dashboard/support/new). To aid the investigation, do not bring down the recently failed Read Replica. +
## Features @@ -104,14 +64,18 @@ Read Replicas offer the following features: Each Read Replica has its own dedicated database and API endpoints. -- Find the database endpoint on the projects [**Connect** panel](/dashboard/project/_?showConnect=true) -- Find the API endpoint on the [API Settings page](/dashboard/project/_/settings/api) under **Project URL** +- Find the database endpoint on the project's [**Connect** panel](/dashboard/project/_?showConnect=true). Toggle between Primary and Read Replicas using the **Source** dropdown. +- Find the API endpoint on the [API Settings page](/dashboard/project/_/settings/api) under **Project URL**. Toggle between Primary and Read Replicas using the **Source** dropdown. + +If you use an [IPv4 add-on](/docs/guides/platform/ipv4-address#read-replicas), the database endpoints for your Read Replicas also use an IPv4 add-on. Read Replicas only support `GET` requests from the [REST API](/docs/guides/api). If you are calling a read-only Postgres function through the REST API, make sure to set the `get: true` [option](/docs/reference/javascript/rpc?queryGroups=example&example=call-a-read-only-postgres-function). + + Requests to other Supabase products, such as Auth, Storage, and Realtime, aren't able to use a Read Replica or its API endpoint. Support for more products will be added in the future. -If you're using an [IPv4 add-on](/docs/guides/platform/ipv4-address#read-replicas), the database endpoints for your Read Replicas will also use an IPv4 add-on. + ### Dedicated connection pool @@ -119,11 +83,15 @@ A connection pool through Supavisor is also available for each Read Replica. Fin ### API load balancer -A load balancer is deployed to automatically balance requests between your Primary database and Read Replicas. Find its endpoint on the [API Settings page](/dashboard/project/_/settings/api). +A load balancer automatically balances requests between your Primary database and Read Replicas. Find its endpoint on the [**API Settings page**](/dashboard/project/_/settings/api). -The load balancer enables geo-routing for Data API requests so that `GET` requests will automatically be routed to the database that is closest to your user ensuring the lowest latency. Non-`GET` requests can also be sent through this endpoint, and will be routed to the Primary database. +The load balancer enables geo-routing for Data API requests to automatically route `GET` requests to the database closest to your user ensuring the lowest latency. You can also send Non-`GET` requests through this endpoint, and they are routed to the Primary database automatically. -You can also interact with Supabase services (Auth, Edge Functions, Realtime, and Storage) through this load balancer so there's no need to worry about which endpoint to use and in which situations. However, geo-routing for these services are not yet available but is coming soon. + + +You can also interact with other Supabase services (Auth, Edge Functions, Realtime, and Storage) through this load balancer so there's no need to worry about which endpoint to use and in which situations. Geo-routing for Auth, Realtime, and Storage aren't yet available but are coming soon. 
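+
+As a rough sketch of how this routing works, assume a table named `todos` and a read-only (`STABLE`) Postgres function named `get_todo_count`, both placeholders for your own schema, and substitute the load balancer endpoint from the [API Settings page](/dashboard/project/_/settings/api):
+
+```bash
+# Placeholder endpoint and key; use your load balancer URL and your project's anon key.
+export SUPABASE_URL="https://your-load-balancer-endpoint"
+export SUPABASE_ANON_KEY="your-anon-key"
+
+# A plain read is a GET request, so it can be routed to the closest database
+# (a Read Replica or the Primary).
+curl "$SUPABASE_URL/rest/v1/todos?select=id,title&limit=10" \
+  -H "apikey: $SUPABASE_ANON_KEY" \
+  -H "Authorization: Bearer $SUPABASE_ANON_KEY"
+
+# A read-only Postgres function can also be called with GET.
+curl "$SUPABASE_URL/rest/v1/rpc/get_todo_count" \
+  -H "apikey: $SUPABASE_ANON_KEY" \
+  -H "Authorization: Bearer $SUPABASE_ANON_KEY"
+```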
+ + @@ -137,10 +105,10 @@ If you remove all Read Replicas from your project, the load balancer and its end -Starting on April 4th, 2025, we will be changing the routing behavior for eligible Data API requests: +From April 4th, 2025, the routing behavior for eligible Data API requests changed: -- Old behavior: Round-Robin distribution among all databases (all read replicas + primary) of your project, regardless of location -- New behavior: Geo-routing, that directs requests to the closest available database (all read replicas + primary) +- **Old behavior**: Round-Robin distribution among all databases (all read replicas + primary) of your project, regardless of location +- **New behavior**: Geo-routing, that directs requests to the closest available database (all read replicas + primary) The new behavior delivers a better experience for your users by minimizing the latency to your project. You can take full advantage of this by placing Read Replicas close to your major customer bases. @@ -186,74 +154,6 @@ We recommend ingesting your [project's metrics](/docs/guides/platform/metrics#ac All settings configured through the dashboard will be propagated across all databases of a project. This ensures that no Read Replica get out of sync with the Primary database or with other Read Replicas. -## Operations blocked by Read Replicas - -### Project upgrades and data restorations - -The following procedures require all Read Replicas for a project to be brought down before they can be performed: - -1. [Project upgrades](/docs/guides/platform/migrating-and-upgrading-projects#pgupgrade) -1. [Data restorations](/docs/guides/platform/backups#pitr-restoration-process) - -These operations need to be completed before Read Replicas can be re-deployed. - -## About replication - -We use a hybrid approach to replicate data from a Primary to its Read Replicas, combining the native methods of streaming replication and file-based log shipping. - -### Streaming replication - -Postgres generates a Write Ahead Log (WAL) as database changes occur. With streaming replication, these changes stream from the Primary to the Read Replica server. The WAL alone is sufficient to reconstruct the database to its current state. - -This replication method is fast, since changes are streamed directly from the Primary to the Read Replica. On the other hand, it faces challenges when the Read Replica can't keep up with the WAL changes from its Primary. This can happen when the Read Replica is too small, running on degraded hardware, or has a heavier workload running. - -To address this, Postgres does provide tunable configuration, like `wal_keep_size`, to adjust the WAL retained by the Primary. If the Read Replica fails to “catch up” before the WAL surpasses the `wal_keep_size` setting, the replication is terminated. Tuning is a bit of an art - the amount of WAL required is variable for every situation. - -### File-based log shipping - -In this replication method, the Primary continuously buffers WAL changes to a local file and then sends the file to the Read Replica. If multiple Read Replicas are present, files could also be sent to an intermediary location accessible by all. The Read Replica then reads the WAL files and applies those changes. There is higher replication lag than streaming replication since the Primary buffers the changes locally first. It also means there is a small chance that WAL changes do not reach Read Replicas if the Primary goes down before the file is transferred. 
In these cases, if the Primary fails a Replica using streaming replication would (in most cases) be more up-to-date than a Replica using file-based log shipping. - -### File-based log shipping 🤝 streaming replication - -Map view of Primary and Read Replica databases - -We bring these two methods together to achieve quick, stable, and reliable replication. Each method addresses the limitations of the other. Streaming replication minimizes replication lag, while file-based log shipping provides a fallback. For file-based log shipping, we use our existing Point In Time Recovery (PITR) infrastructure. We regularly archive files from the Primary using [WAL-G](https://github.com/wal-g/wal-g), an open source archival and restoration tool, and ship the WAL files to S3. - -We combine it with streaming replication to reduce replication lag. Once WAL-G files have been synced from S3, Read Replicas connect to the Primary and stream the WAL directly. - -### Monitoring replication lag - -Replication lag for a specific Read Replica can be monitored through the dashboard. On the [Database Reports page](/dashboard/project/_/observability/database) Read Replicas will have an additional chart under `Replica Information` displaying historical replication lag in seconds. Realtime replication lag in seconds can be observed on the [Infrastructure Settings page](/dashboard/project/_/settings/infrastructure). This is the value on top of the Read Replica. Do note that there is no single threshold to indicate when replication lag should be addressed. It would be fully dependent on the requirements of your project. - -If you are already ingesting your [project's metrics](/docs/guides/platform/metrics#accessing-the-metrics-endpoint) into your own environment, you can also keep track of replication lag and set alarms accordingly with the metric: `physical_replication_lag_physical_replica_lag_seconds`. - -Some common sources of high replication lag include: - -1. Exclusive locks on tables on the Primary. - Operations such as `drop table`, `reindex` (amongst others) take an Access Exclusive lock on the table. This can result in increasing replication lag for the duration of the lock. -1. Resource Constraints on the database - Heavy utilization on the primary or the replica, if run on an under-resourced project, can result in high replication lag. This includes the characteristics of the disk being utilized (IOPS, Throughput). -1. Long-running transactions on the Primary. - Transactions that run for a long-time on the primary can also result in high replication lag. You can use the `pg_stat_activity` view to identify and terminate such transactions if needed. `pg_stat_activity` is a live view, and does not offer historical data on transactions that might have been active for a long time in the past. - -High replication lag can result in stale data being returned for queries being executed against the affected read replicas. - -You can [consult](https://cloud.google.com/sql/docs/postgres/replication/replication-lag) [additional](https://repost.aws/knowledge-center/rds-postgresql-replication-lag) [resources](https://severalnines.com/blog/what-look-if-your-postgresql-replication-lagging/) on the subject as well. - -## Misc - -### Restart or compute add-on change behaviour - -When a project that utilizes Read Replicas is restarted, or the compute add-on size is changed, the Primary database gets restarted first. During this period, the Read Replicas remain available. 
- -Once the Primary database has completed restarting (or resizing, in case of a compute add-on change) and become available for usage, all the Read Replicas are restarted (and resized, if needed) concurrently. - ## Pricing -For a detailed breakdown of how charges are calculated, refer to [Manage Read Replica usage](/docs/guides/platform/manage-your-usage/read-replicas). +For a detailed breakdown of how we calculate charges, read the [Manage Read Replica usage guide](/docs/guides/platform/manage-your-usage/read-replicas). diff --git a/apps/docs/content/guides/platform/read-replicas/getting-started.mdx b/apps/docs/content/guides/platform/read-replicas/getting-started.mdx new file mode 100644 index 0000000000000..42dac54514855 --- /dev/null +++ b/apps/docs/content/guides/platform/read-replicas/getting-started.mdx @@ -0,0 +1,170 @@ +--- +title: 'Getting started with Read Replicas' +description: 'Deploy read-only databases across multiple regions, for lower latency.' +subtitle: 'Deploy read-only databases across multiple regions, for lower latency and better resource management.' +--- + +## Prerequisites + + + +Read Replicas are available for all projects on the Pro, Team and Enterprise plans. Spin one up now over at the [Infrastructure Settings page](/dashboard/project/_/settings/infrastructure). + + + +Projects must meet these requirements to use Read Replicas: + +1. Running on AWS. +2. Running on at least a [Small compute add-on](/docs/guides/platform/compute-add-ons). + + - Read Replicas are started on the same compute instance as the Primary to keep up with changes. + +3. Running on Postgres 15+. + + - For projects running on older versions of Postgres, you need to [upgrade to the latest platform version](/docs/guides/platform/migrating-and-upgrading-projects#pgupgrade). + +4. Not using [legacy logical backups](/docs/guides/platform/backups#point-in-time-recovery) + + - Physical backups are automatically enabled if using [Point in time recovery (PITR)](/docs/guides/platform/backups#point-in-time-recovery) + +## Creating a Read Replica + +To add a Read Replica, go to the [Database Replication page](/dashboard/project/_/database/replication) in your project dashboard. + +You can also manage Read Replicas using the Management API (beta functionality): + +```bash +# Get your access token from https://supabase.com/dashboard/account/tokens +export SUPABASE_ACCESS_TOKEN="your-access-token" +export PROJECT_REF="your-project-ref" + +# Create a new Read Replica +curl -X POST "https://api.supabase.com/v1/projects/$PROJECT_REF/read-replicas/setup" \ + -H "Authorization: Bearer $SUPABASE_ACCESS_TOKEN" \ + -H "Content-Type: application/json" \ + -d '{ + "region": "us-east-1" + }' + +# Delete a Read Replica +curl -X POST "https://api.supabase.com/v1/projects/$PROJECT_REF/read-replicas/remove" \ + -H "Authorization: Bearer $SUPABASE_ACCESS_TOKEN" \ + -H "Content-Type: application/json" \ + -d '{ + "database_identifier": "abcdefghijklmnopqrst" + }' +``` + + + +Projects on an XL compute add-on or larger can create up to five Read Replicas. Projects on compute add-ons smaller than XL can create up to two Read Replicas. All Read Replicas inherit the compute size of their Primary database. + + + +### Deploying a Read Replica + +We deploy a Read Replica using a physical backup as a starting point, and a combination of write ahead logging (WAL) file archives and direct replication from the Primary database to catch up. Both components may take significant time to complete, depending on your specific workload. 
+ +The time to restore from a physical backup is dependent and directly related to the database size of your project. The time taken to catch up to the primary using WAL archives and direct replication is dependent on the level of activity on the Primary database. A more active database produces a larger number of WAL files that need to be processed. + +Along with the progress of the deployment, the dashboard displays rough estimates for each component. + +## Replication method details + +We use a hybrid approach to replicate data from a Primary to its Read Replicas, combining the native methods of streaming replication and file-based log shipping. + +### Streaming replication + +Postgres generates a Write Ahead Log (WAL) as database changes occur. With streaming replication, these changes stream from the Primary to the Read Replica server. The WAL alone is sufficient to reconstruct the database to its current state. + +This replication method is fast, since the Primary streams changes directly to the Read Replica. However, it faces challenges when the Read Replica can't keep up with the WAL changes from its Primary. This can happen when the Read Replica is too small, running on degraded hardware, or has a heavier workload running. + +To address this, Postgres provides tunable configuration, like `wal_keep_size`, to adjust the WAL retained by the Primary. If the Read Replica fails to "catch up" before the WAL surpasses the `wal_keep_size` setting, it terminates the replication. Tuning is an art - the amount of WAL required varies for every situation. + +### File-based log shipping + +In this replication method, the Primary continuously buffers WAL changes to a local file and then sends the file to the Read Replica. If multiple Read Replicas are present, files could also be sent to an intermediary location accessible by all replicas. + +The Read Replica then reads the WAL files and applies those changes. There is higher replication lag than streaming replication since the Primary buffers the changes locally first. It also means there is a small chance that WAL changes do not reach Read Replicas if the Primary goes down before the file is transferred. In these cases, if the Primary fails a Replica using streaming replication would (in most cases) be more up-to-date than a Replica using file-based log shipping. + +### File-based log shipping meets streaming replication + +Map view of Primary and Read Replica databases + +We bring these two methods together to achieve quick, stable, and reliable replication. Each method addresses the limitations of the other. Streaming replication minimizes replication lag, while file-based log shipping provides a fallback. For file-based log shipping, we use our existing Point In Time Recovery (PITR) infrastructure. We regularly archive files from the Primary using [WAL-G](https://github.com/wal-g/wal-g), an open source archival and restoration tool, and ship the WAL files to off-site, durable cloud storage, such as S3. + +We combine it with streaming replication to reduce replication lag. Once WAL-G files have been synced from S3, Read Replicas connect to the Primary and stream the WAL directly. + +### Restart or compute add-on change behaviour + +When you restart a project that utilizes Read Replicas, or change the compute add-on size, the Primary database gets restarted first. During this period, the Read Replicas remain available. 
+ +Once the Primary database has completed restarting (or resizing, in case of a compute add-on change) and become available for usage, all the Read Replicas are restarted (and resized, if needed) concurrently. + +## Operations blocked by Read Replicas + +### Project upgrades and data restorations + +The following procedures require all Read Replicas for a project to be brought down before performing them: + +1. [Project upgrades](/docs/guides/platform/migrating-and-upgrading-projects#pgupgrade) +2. [Data restorations](/docs/guides/platform/backups#pitr-restoration-process) + +These operations need to complete before you can re-deploy Read Replicas. + +### Monitoring replication lag + +You can monitor replication lag for a specific Read Replica through a project dashboard on the [**Database Reports page**](/dashboard/project/_/observability/database). Read Replicas have an additional chart under **Replica Information** displaying historical replication lag in seconds. + +You can see realtime replication lag in seconds on the [**Infrastructure Settings** page](/dashboard/project/_/settings/infrastructure). This is the value on top of the Read Replica. + + + +There is no single threshold to indicate when you should address replication lag. It is dependent on the requirements of your project. + + + + + +If you are already ingesting your [project's metrics](/docs/guides/platform/metrics#accessing-the-metrics-endpoint) into your own environment, you can also keep track of replication lag and set alarms with the `physical_replication_lag_physical_replica_lag_seconds` metric. + + + +### Addressing high replication lag + +Some common sources of high replication lag include: + +1. **Exclusive locks on tables on the Primary**: Operations such as `drop table` and `reindex` take an access-exclusive lock on the table. This can result in increasing replication lag for the duration of the lock. +2. **Resource Constraints on the database**: Heavy utilization on the primary or the replica, if run on an under-resourced project, can result in high replication lag. This includes the characteristics of the disk being utilized (IOPS, Throughput). +3. **Long-running transactions on the Primary**: Transactions that run for a long-time on the primary can also result in high replication lag. You can use the `pg_stat_activity` view to identify and terminate such transactions if needed. `pg_stat_activity` is a live view, and does not offer historical data on transactions that might have been active for a long time in the past. + High replication lag can result in stale data returned for queries executed against the affected read replicas. + + + +You can find additional resources on replication lag in [the Google documentation](https://cloud.google.com/sql/docs/postgres/replication/replication-lag), [the AWS documentation](https://repost.aws/knowledge-center/rds-postgresql-replication-lag), and [the several nines blog](https://severalnines.com/blog/what-look-if-your-postgresql-replication-lagging/). + + + +## Troubleshooting + +{/* supa-mdx-lint-disable-next-line Rule001HeadingCase */} + +### An "Init failed" status + +The replica status "Init failed" in the dashboard indicates that the Read Replica has failed to deploy. Some possible scenarios as to why a Read Replica deployment may have failed are the following: + +- An underlying instance failed to come up. +- A network issue leading to inability to connect to the Primary database. 
+- A possible incompatible database settings between the Primary and Read Replica databases. +- Platform issues. +- Very high active workloads combined with large (50+ GB) database sizes + +It is safe to drop this failed Read Replica, and in the event of a transient issue, attempt to spin up another one. If spinning up Read Replicas for your project consistently fails, check the[status page](https://status.supabase.com) for any ongoing incidents, or [open a support ticket](/dashboard/support/new). To aid the investigation, do not bring down the recently failed Read Replica. + +{/* supa-mdx-lint-enable-next-line Rule001HeadingCase */} diff --git a/apps/docs/content/guides/self-hosting/copy-from-platform-s3.mdx b/apps/docs/content/guides/self-hosting/copy-from-platform-s3.mdx new file mode 100644 index 0000000000000..2c98be8144f87 --- /dev/null +++ b/apps/docs/content/guides/self-hosting/copy-from-platform-s3.mdx @@ -0,0 +1,159 @@ +--- +title: 'Copy Storage Objects from Platform' +description: 'Copy storage objects from a managed Supabase project to a self-hosted instance using rclone.' +subtitle: 'Copy storage objects from a managed Supabase project to a self-hosted instance using rclone.' +--- + +{/* supa-mdx-lint-disable-next-line Rule003Spelling */} +This guide walks you through copying storage objects from a managed Supabase platform project to a self-hosted instance using [rclone](https://rclone.org/) with S3-to-S3 copy. + + + +Direct file copy (e.g., downloading files and placing them into `volumes/storage/`) does not work. Self-hosted Storage uses an internal file structure that differs from what you get when downloading files from the platform. Use the S3 protocol to transfer objects so that Storage creates the correct metadata records. + + + +## Before you begin + +You need: + +- A working self-hosted Supabase instance with the S3 protocol endpoint enabled - see [Configure S3 Storage](/docs/guides/self-hosting/self-hosted-s3#enable-the-s3-protocol-endpoint) +- Your platform project's S3 credentials - generated from the [S3 Configuration](/dashboard/project/_/storage/s3) page +- Matching buckets created on your self-hosted instance +{/* supa-mdx-lint-disable-next-line Rule003Spelling */} +- [rclone](https://rclone.org/install/) installed on the machine running the copy + +## Step 1: Get platform S3 credentials + +In your managed Supabase project dashboard, go to **Storage** > **S3 Configuration** > **Access keys**. Generate a new access key pair and copy: + +- **Endpoint**: `https://.supabase.co/storage/v1/s3` +- **Region**: your project's region (e.g., `us-east-1`) +- **Access Key ID** and **Secret access key** + + + +For better performance with large files, use the direct storage hostname: `https://.storage.supabase.co/storage/v1/s3` + + + +## Step 2: Create buckets on self-hosted + +Buckets must exist on the destination before you can copy objects into them. You can create them through dashboard UI, or with **SQL Editor**. + + + +If you already restored your platform database to self-hosted using the [restore guide](/docs/guides/self-hosting/restore-from-platform), your bucket definitions are already present. You can skip this step. + + + +To list your platform buckets, connect to your platform database and run: + +```sql +SELECT id, name, public FROM storage.buckets ORDER BY name; +``` + +Then create matching buckets on your self-hosted instance. 
Connect to your self-hosted database and run: + +```sql +INSERT INTO storage.buckets (id, name, public) +VALUES + ('your-storage-bucket', 'your-storage-bucket', false) +ON CONFLICT (id) DO NOTHING; +``` + +Repeat for each bucket, setting `public` to `true` or `false` as appropriate. + +{/* supa-mdx-lint-disable-next-line Rule003Spelling */} +## Step 3: Configure rclone +{/* supa-mdx-lint-disable-next-line Rule003Spelling */} +Create or edit your rclone configuration file (`~/.config/rclone/rclone.conf`): + +```ini rclone.conf +[platform] +type = s3 +provider = Other +access_key_id = your-platform-access-key-id +secret_access_key = your-platform-secret-access-key +endpoint = https://your-project-ref.supabase.co/storage/v1/s3 +region = your-project-region + +[self-hosted] +type = s3 +provider = Other +access_key_id = your-self-hosted-access-key-id +secret_access_key = your-self-hosted-secret-access-key +endpoint = http://your-domain:8000/storage/v1/s3 +region = your-self-hosted-region +``` + +Replace the credentials with your actual values. For self-hosted, use the `REGION`, `S3_PROTOCOL_ACCESS_KEY_ID` and `S3_PROTOCOL_ACCESS_KEY_SECRET` you configured in [Configure S3 Storage](/docs/guides/self-hosting/self-hosted-s3#enable-the-s3-protocol-endpoint). + +Verify both remotes connect: + +```bash +rclone lsd platform: +rclone lsd self-hosted: +``` + +Both commands should list your buckets. + +## Step 4: Copy objects + +Copy a single bucket: + +```bash +rclone copy platform:your-storage-bucket self-hosted:your-storage-bucket --progress +``` + +To copy all buckets: + +```bash +for bucket in $(rclone lsf platform: | tr -d '/'); do + echo "Copying bucket: $bucket" + rclone copy "platform:$bucket" "self-hosted:$bucket" --progress +done +``` + + + +For large migrations, consider adding `--transfers 4` to increase parallelism, or `--checkers 8` to speed up the comparison phase. See the [flags documentation](https://rclone.org/flags/) for all options. + + + +## Verify + +Compare object counts between source and destination: + +```bash +rclone size platform:your-storage-bucket && \ +rclone size self-hosted:your-storage-bucket +``` + +Open Studio on your self-hosted instance and browse the storage buckets to confirm files are accessible. + +## Troubleshooting + +### Signature errors + +If you see `SignatureDoesNotMatch` when connecting to either remote: + +- **Platform**: Regenerate S3 access keys from your project's Storage Settings. Ensure the endpoint URL includes `/storage/v1/s3`. +{/* supa-mdx-lint-disable-next-line Rule003Spelling */} +- **Self-hosted**: Verify that `REGION`, `S3_PROTOCOL_ACCESS_KEY_ID` and `S3_PROTOCOL_ACCESS_KEY_SECRET` in `.env` file match your rclone config. + +### Bucket not found +{/* supa-mdx-lint-disable-next-line Rule003Spelling */} +If rclone reports that a bucket doesn't exist on the self-hosted side, create it first - see [Step 2](#step-2-create-buckets-on-self-hosted). The S3 protocol does not auto-create buckets on copy. + +### Timeouts on large files +{/* supa-mdx-lint-disable-next-line Rule003Spelling */} +For very large files, increase rclone's timeout: + +```bash +rclone copy platform:your-storage-bucket self-hosted:your-storage-bucket --timeout 30m +``` + +### Empty listing on platform + +If `rclone lsd platform:` returns nothing, verify the endpoint URL ends with `/storage/v1/s3` and that the S3 access keys have not expired. Regenerate them from the dashboard if needed. 
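+
+{/* supa-mdx-lint-disable-next-line Rule003Spelling */}
+If you suspect the copy is incomplete, a stricter verification than comparing sizes is to have rclone compare the two remotes object by object. The bucket name below is a placeholder:
+
+```bash
+# Compare object listings and sizes between source and destination.
+# --size-only skips checksum comparison, which not every S3-compatible
+# endpoint supports; --one-way only checks that source objects exist on
+# the destination.
+rclone check platform:your-storage-bucket self-hosted:your-storage-bucket \
+  --size-only --one-way
+```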
diff --git a/apps/docs/content/guides/self-hosting/docker.mdx b/apps/docs/content/guides/self-hosting/docker.mdx index d1f39a54d5c18..f9c78a6eb4626 100644 --- a/apps/docs/content/guides/self-hosting/docker.mdx +++ b/apps/docs/content/guides/self-hosting/docker.mdx @@ -423,17 +423,10 @@ SMTP_SENDER_NAME= We recommend using [AWS SES](https://aws.amazon.com/ses/). It's extremely cheap and reliable. Restart all services to pick up the new configuration. #### Configuring S3 Storage +{/* supa-mdx-lint-disable-next-line Rule003Spelling */} +By default all files are stored locally on the server. You can connect Storage to an S3-compatible backend (AWS S3, MinIO, Cloudflare R2), enable the S3 protocol endpoint for tools like `rclone`, or both. These are independent features. -By default all files are stored locally on the server. You can configure the Storage service to use S3 by updating the following environment variables: - -```yaml docker-compose.yml -storage: - environment: STORAGE_BACKEND=s3 - GLOBAL_S3_BUCKET=name-of-your-s3-bucket - REGION=region-of-your-s3-bucket -``` - -You can find all the available options in the [storage repository](https://github.com/supabase/storage-api/blob/master/.env.sample). Restart the `storage` service to pick up the changes: `docker compose restart storage --no-deps` +See the [Configure S3 Storage](/docs/guides/self-hosting/self-hosted-s3) guide for detailed setup instructions. #### Configuring Supabase AI Assistant diff --git a/apps/docs/content/guides/self-hosting/restore-from-platform.mdx b/apps/docs/content/guides/self-hosting/restore-from-platform.mdx index 7b300083aec75..71546c9397a5d 100644 --- a/apps/docs/content/guides/self-hosting/restore-from-platform.mdx +++ b/apps/docs/content/guides/self-hosting/restore-from-platform.mdx @@ -100,14 +100,14 @@ SELECT * FROM pg_extension; The database dump includes your schema, data, roles, RLS policies, database functions, triggers, and `auth.users`. However, several things require separate configuration on your self-hosted instance: -| Requires manual setup | How to configure | -| --- | --- | -| JWT secrets and API keys | Generate new ones and update `.env` | +| Requires manual setup | How to configure | +| ------------------------------------------- | ------------------------------------------------- | +| JWT secrets and API keys | Generate new ones and update `.env` | | Auth provider settings (OAuth, Apple, etc.) | Configure `GOTRUE_EXTERNAL_*` variables in `.env` | -| Edge functions | Manually copy to your self-hosted instance | -| Storage objects | Transfer separately (not covered in this guide) | -| SMTP / email settings | Configure `SMTP_*` variables in `.env` | -| Custom domains and DNS | Point your DNS to the self-hosted server | +| Edge functions | Manually copy to your self-hosted instance | +| Storage objects | Transfer separately (not covered in this guide) | +| SMTP / email settings | Configure `SMTP_*` variables in `.env` | +| Custom domains and DNS | Point your DNS to the self-hosted server | ## Auth considerations @@ -155,7 +155,7 @@ Keeping your self-hosted configuration [up to date](https://github.com/supabase/ If the restore fails because an extension isn't available, check whether it's supported on your self-hosted Postgres version. 
You can list available extensions with: ```sql -SELECT * FROM pg_available_extensions; +select * from pg_available_extensions; ``` ### Connection refused diff --git a/apps/docs/content/guides/self-hosting/self-hosted-s3.mdx b/apps/docs/content/guides/self-hosting/self-hosted-s3.mdx new file mode 100644 index 0000000000000..37ada206d7480 --- /dev/null +++ b/apps/docs/content/guides/self-hosting/self-hosted-s3.mdx @@ -0,0 +1,151 @@ +--- +title: 'Configure S3 Storage' +description: 'Enable S3-compatible client endpoint and set up an S3 backend for self-hosted Supabase Storage.' +subtitle: 'Enable S3-compatible client endpoint and set up an S3 backend for self-hosted Supabase Storage.' +--- + +Self-hosted Supabase Storage has two independent S3-related features: +{/* supa-mdx-lint-disable-next-line Rule003Spelling */} +- **S3 protocol endpoint** - an S3-compatible API that Storage exposes at `/storage/v1/s3`. This allows standard S3 tools like `rclone` and the AWS CLI to interact with your Storage instance. +{/* supa-mdx-lint-disable-next-line Rule003Spelling */} +- **S3 backend** - where Storage keeps data. By default, files are stored on the local filesystem. You can switch to an S3-compatible service (AWS S3, MinIO, etc.) for durability, scalability, or to use existing infrastructure. + +You can configure either feature independently. For example, you can enable the S3 protocol endpoint to use `rclone` while keeping the default file-based storage, or switch to an S3 backend without enabling the S3 protocol endpoint. + +## Enable the S3 protocol endpoint + +The S3 protocol endpoint at `/storage/v1/s3` allows standard S3 clients to interact with your self-hosted Storage instance. It works with any storage backend, including the default file-based storage - you do not need to configure an S3 backend first. The Supabase REST API and SDK do not use the S3 protocol. + +Make sure to check that `REGION`, `S3_PROTOCOL_ACCESS_KEY_ID` and `S3_PROTOCOL_ACCESS_KEY_SECRET` are properly configured in you `.env` file. Read more about the secrets and passwords in [Configuring and securing Supabase](/docs/guides/self-hosting/docker#configuring-and-securing-supabase). + +```yaml +storage: + environment: + # ... existing variables ... + REGION: ${REGION} + S3_PROTOCOL_ACCESS_KEY_ID: ${S3_PROTOCOL_ACCESS_KEY_ID} + S3_PROTOCOL_ACCESS_KEY_SECRET: ${S3_PROTOCOL_ACCESS_KEY_SECRET} +``` + +### Test with the AWS CLI + +```bash +( set -a && \ +source .env > /dev/null 2>&1 && \ +echo "" && \ +AWS_ACCESS_KEY_ID=$S3_PROTOCOL_ACCESS_KEY_ID \ +AWS_SECRET_ACCESS_KEY=$S3_PROTOCOL_ACCESS_KEY_SECRET \ +aws s3 ls \ +--endpoint-url http://localhost:8000/storage/v1/s3 \ +--region $REGION \ +s3://your-storage-bucket ) +``` + +{/* supa-mdx-lint-disable-next-line Rule003Spelling */} +### Test with rclone +```bash +( set -a && \ +source .env > /dev/null 2>&1 && \ +echo "" && \ +rclone ls \ +--s3-endpoint http://localhost:8000/storage/v1/s3 \ +--s3-region $REGION \ +--s3-provider Other \ +--s3-access-key-id "$S3_PROTOCOL_ACCESS_KEY_ID" \ +--s3-secret-access-key "$S3_PROTOCOL_ACCESS_KEY_SECRET" \ +:s3:your-storage-bucket ) +``` + +Use `aws login` and `rclone config` for persistent configuration. + +## How to configure an S3 backend + +In general, the following configuration variables define S3 backend configuration for Storage in `docker-compose.yml`: + +```yaml +storage: + environment: + # ... existing variables ... 
+ STORAGE_BACKEND: s3 + GLOBAL_S3_BUCKET: your-s3-bucket-or-dirname + GLOBAL_S3_ENDPOINT: https://your-s3-endpoint + GLOBAL_S3_PROTOCOL: https + GLOBAL_S3_FORCE_PATH_STYLE: "true" + AWS_ACCESS_KEY_ID: your-access-key-id + AWS_SECRET_ACCESS_KEY: your-secret-access-key + REGION: your-region +``` + +{/* supa-mdx-lint-disable-next-line Rule003Spelling */} +Depending on your setup, you may need to adjust these values - for example, to use a local S3-compatible service like MinIO or a cloud provider like AWS. + +{/* supa-mdx-lint-disable-next-line Rule001HeadingCase */} +{/* supa-mdx-lint-disable-next-line Rule003Spelling */} +### Using MinIO +{/* supa-mdx-lint-disable-next-line Rule003Spelling */} +An overlay `docker-compose.s3.yml` configuration can be added to enable MinIO container and provide an S3-compatible API for Storage backend: + +```bash +docker compose -f docker-compose.yml -f docker-compose.s3.yml up -d +``` + +Make sure to review the Storage section in your `.env` file for related configuration options. + +### Using AWS S3 + +Create an S3 bucket and an IAM user with access to it. Then configure the storage service: + +```yaml docker-compose.yml +storage: + environment: + # ... existing variables ... + STORAGE_BACKEND: s3 + GLOBAL_S3_BUCKET: your-aws-bucket-name + AWS_ACCESS_KEY_ID: your-aws-access-key + AWS_SECRET_ACCESS_KEY: your-aws-secret-key + REGION: your-aws-region +``` + +For AWS S3, you do not need `GLOBAL_S3_ENDPOINT` or `GLOBAL_S3_FORCE_PATH_STYLE` - the Storage S3 client automatically resolves the endpoint from the region and uses virtual-hosted-style URLs, which is what AWS S3 expects. These variables are only needed for non-AWS S3-compatible providers. + +### S3-compatible providers +{/* supa-mdx-lint-disable-next-line Rule003Spelling */} +Use the same configuration as MinIO, but point to your provider's endpoint, e.g.: + +```yaml +storage: + environment: + # ... existing variables ... + STORAGE_BACKEND: s3 + GLOBAL_S3_BUCKET: your-bucket-name + GLOBAL_S3_ENDPOINT: https://your-account-id.r2.cloudflarestorage.com +``` + +## Verify + +{/* supa-mdx-lint-disable-next-line Rule003Spelling */} +- Open Studio and upload a file to a bucket. List the file using the AWS CLI or `rclone` to confirm the S3 endpoint works. +- If using an S3 backend: confirm the file appears in your S3 provider's console. + +## Troubleshooting + +### Signature mismatch errors + +S3 clients sign requests using the access key ID and secret. If you see `SignatureDoesNotMatch`, verify that the `REGION`, `S3_PROTOCOL_ACCESS_KEY_ID` and `S3_PROTOCOL_ACCESS_KEY_SECRET` in your `.env` file match what your S3 client is using. + +### TUS upload errors on Cloudflare R2 + +If resumable (TUS) uploads fail with HTTP 500 and a message about `x-amz-tagging`, add `TUS_ALLOW_S3_TAGS: "false"` to the storage service environment. Cloudflare R2 does not implement this S3 feature. + +### Permission denied on uploads + +Setting a bucket to "Public" only allows unauthenticated **downloads**. Uploads are always blocked unless you create an RLS policy on the `storage.objects` table. Go to **Storage** > **Files** > **Policies** in Studio and create a policy that allows `INSERT` for the appropriate roles. + +### Upload URLs point to localhost + +If uploads from a browser fail (CORS or mixed content errors), check that `API_EXTERNAL_URL` and `SUPABASE_PUBLIC_URL` in your `.env` file match your actual domain and protocol - not `http://localhost:8000`. 
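+
+As a concrete sketch of the upload policy mentioned under "Permission denied on uploads", the following creates an `INSERT` policy for authenticated users on a hypothetical bucket. The connection string and bucket name are placeholders; you can also run the quoted SQL from the Studio SQL Editor instead of using `psql`:
+
+```bash
+# Placeholder connection string and bucket name.
+psql "$POSTGRES_CONNECTION_STRING" <<'SQL'
+create policy "authenticated users can upload to your-storage-bucket"
+on storage.objects
+for insert
+to authenticated
+with check (bucket_id = 'your-storage-bucket');
+SQL
+```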
+ +### Additional resources + +- [Storage repository `.env.sample`](https://github.com/supabase/storage/blob/master/.env.sample) +- [S3 Authentication](/docs/guides/storage/s3/authentication) diff --git a/apps/docs/content/guides/telemetry/log-drains.mdx b/apps/docs/content/guides/telemetry/log-drains.mdx index 03d30a507e998..94a025230efa3 100644 --- a/apps/docs/content/guides/telemetry/log-drains.mdx +++ b/apps/docs/content/guides/telemetry/log-drains.mdx @@ -19,6 +19,7 @@ The following table lists the supported destinations and the required setup conf | Loki | HTTP | URL
<br />Headers |
| Sentry | HTTP | DSN |
| Amazon S3 | AWS SDK | S3 Bucket<br />Region<br />Access Key ID<br />Secret Access Key<br />Batch Timeout |
+| OTLP | HTTP | Endpoint<br />Protocol<br />Gzip<br />
Headers | HTTP requests are batched with a max of 250 logs or 1 second intervals, whichever happens first. Logs are compressed via Gzip if the destination supports it. @@ -236,7 +237,7 @@ Required configuration when creating an S3 Log Drain: - Region: the AWS region where the bucket is located. - Access Key ID: used for authentication. - Secret Access Key: used for authentication. -- Batch Timeout (ms): maximum time to wait before flushing a batch. Recommended 2000–5000ms. +- Batch Timeout (ms): maximum time to wait before flushing a batch. Recommended 2000-5000ms. @@ -244,6 +245,114 @@ Ensure the AWS account tied to the Access Key ID has permissions to write to the + +## OpenTelemetry protocol (OTLP) + +Logs are sent to any OTLP-compatible endpoint using the OpenTelemetry Protocol over HTTP with Protocol Buffers encoding. + +OTLP is an open-standard protocol for telemetry data, making it compatible with many observability platforms including: + +
    +
+- OpenTelemetry Collector
+- Grafana Cloud
+- New Relic
+- Honeycomb
+- Datadog (OTLP ingestion)
+- Elastic
+- And many more
+ +Required configuration when creating an OTLP Log Drain: + +
    +
+- Endpoint: The full URL of your OTLP HTTP endpoint (typically ending in `/v1/logs`)
+- Protocol: Currently only `http/protobuf` is supported
+- Gzip: Enable compression to reduce bandwidth (recommended: enabled)
+- Headers: Optional authentication headers (e.g., `Authorization`, `X-API-Key`)
+ +Logs are sent as OTLP log record messages using Protocol Buffers encoding, following the [OpenTelemetry Logs specification](https://opentelemetry.io/docs/specs/otel/logs/). + + + +Ensure your OTLP endpoint is configured to accept logs at the `/v1/logs` path with `application/x-protobuf` content type. + + + + + + +To receive Supabase logs with the OpenTelemetry Collector, configure an OTLP HTTP receiver: + +```yaml +receivers: + otlp: + protocols: + http: + endpoint: 0.0.0.0:4318 + +processors: + batch: + +exporters: + logging: + loglevel: debug + +service: + pipelines: + logs: + receivers: [otlp] + processors: [batch] + exporters: [logging] +``` + +Then create a log drain in [Supabase dashboard](/dashboard/project/_/settings/log-drains) with: + +
    +
+- Endpoint: `https://your-collector:4318/v1/logs`
+- Add authentication headers as needed for your setup
+ +
+ + + +Different OTLP platforms use different authentication methods. Add headers accordingly: + +**API Key Authentication:** + +``` +X-API-Key: your-api-key +``` + +**Bearer Token:** + +``` +Authorization: Bearer your-token +``` + +**Basic Authentication:** + +``` +Authorization: Basic base64(username:password) +``` + +Refer to your observability platform's documentation for specific authentication requirements. + + + +
+ ## Pricing For a detailed breakdown of how charges are calculated, refer to [Manage Log Drain usage](/docs/guides/platform/manage-your-usage/log-drains). diff --git a/apps/docs/content/troubleshooting/enabling-ipv4-addon.mdx b/apps/docs/content/troubleshooting/enabling-ipv4-addon.mdx index 8f7013891791d..6cbd27ee5295d 100644 --- a/apps/docs/content/troubleshooting/enabling-ipv4-addon.mdx +++ b/apps/docs/content/troubleshooting/enabling-ipv4-addon.mdx @@ -10,7 +10,7 @@ Enabling the IPv4 add-on will attach an IPv4 address to your project's compute i Enabling the IPv4 add-on will not cause any downtime since existing connections will not be dropped. New inbound connections (both direct connections on ports 5432 and 6543, as well as PostgREST requests) will use the newly allocated IPv4 address. -## Will the project instance will be restarted? +## Will the project instance be restarted? No, the IPv4 add-on will not require a restart of the instance. diff --git a/apps/docs/content/troubleshooting/realtime-project-suspended-for-exceeding-quotas.mdx b/apps/docs/content/troubleshooting/realtime-project-suspended-for-exceeding-quotas.mdx index a3ff143175482..fa700d13e968e 100644 --- a/apps/docs/content/troubleshooting/realtime-project-suspended-for-exceeding-quotas.mdx +++ b/apps/docs/content/troubleshooting/realtime-project-suspended-for-exceeding-quotas.mdx @@ -36,6 +36,7 @@ In most cases, quota overages are accidental rather than intentional: ## What to do if your project is suspended 1. **Open a support ticket**: [Contact support](/dashboard/support/new) and include: + - Your project reference ID - A description of your Realtime use case (what features use Broadcast, Presence, or Postgres Changes) - An estimate of your expected concurrent connections and message throughput diff --git a/apps/docs/content/troubleshooting/unused-external-import-warning-vite-rollup.mdx b/apps/docs/content/troubleshooting/unused-external-import-warning-vite-rollup.mdx new file mode 100644 index 0000000000000..e773c531d743c --- /dev/null +++ b/apps/docs/content/troubleshooting/unused-external-import-warning-vite-rollup.mdx @@ -0,0 +1,68 @@ +--- +title = "UNUSED_EXTERNAL_IMPORT build warning with Vite, Rollup, or Nuxt" +topics = [ "platform" ] +keywords = [ "UNUSED_EXTERNAL_IMPORT", "vite", "rollup", "nuxt", "build warning", "false positive", "bundler", "supabase-js" ] +--- + +When bundling an application that uses `@supabase/supabase-js`, you may see warnings like: + +``` +"PostgrestError" is imported from external module "@supabase/postgrest-js" but never used in "...supabase-js/dist/index.mjs". +"FunctionRegion", "FunctionsError", "FunctionsFetchError", "FunctionsHttpError" and "FunctionsRelayError" are imported from external module "@supabase/functions-js" but never used in "...". +``` + +**This is a false positive — your bundle is correct and no code is missing.** + +## Why this happens + +`@supabase/supabase-js` re-exports error types like `PostgrestError` and `FunctionsError` so you can import them directly from `@supabase/supabase-js`. The build tool merges all imports from the same package into a single statement in the output: + +```js +// dist/index.mjs (simplified) +import { PostgrestClient, PostgrestError } from '@supabase/postgrest-js' +// ^ used internally ^ re-exported for you +``` + +Vite/Rollup checks which names from that import are referenced _in the code body_ and flags `PostgrestError` as unused, because it only appears in an `export` statement — not called or assigned. 
The export itself is the real usage, but this check doesn't account for re-exports. Tree-shaking and bundle size are unaffected. + +## Suppress the warning + +### Vite / Rollup (`vite.config.js` or `rollup.config.js`) + +```js +export default { + build: { + rollupOptions: { + onwarn(warning, warn) { + if (warning.code === 'UNUSED_EXTERNAL_IMPORT' && warning.exporter?.includes('@supabase/')) + return + warn(warning) + }, + }, + }, +} +``` + +### Nuxt (`nuxt.config.ts`) + + + +This issue has been resolved in `@nuxtjs/supabase` version 2.0.4. If you are on that version or later, you do not need to apply this workaround. + + + +```ts +export default defineNuxtConfig({ + vite: { + build: { + rollupOptions: { + onwarn(warning, warn) { + if (warning.code === 'UNUSED_EXTERNAL_IMPORT' && warning.exporter?.includes('@supabase/')) + return + warn(warning) + }, + }, + }, + }, +}) +``` diff --git a/apps/docs/docs/ref/dart/v0/upgrade-guide.mdx b/apps/docs/docs/ref/dart/v0/upgrade-guide.mdx index b2eec12eab81d..94b1664a7b723 100644 --- a/apps/docs/docs/ref/dart/v0/upgrade-guide.mdx +++ b/apps/docs/docs/ref/dart/v0/upgrade-guide.mdx @@ -353,7 +353,7 @@ await supabase.auth.signInWithOtp( phone: phone, ); -// After receiving a SMS with a OTP. +// After receiving an SMS with an OTP. await supabase.auth.verifyOTP( type: OtpType.sms, token: token, diff --git a/apps/docs/docs/ref/javascript/v1/upgrade-guide.mdx b/apps/docs/docs/ref/javascript/v1/upgrade-guide.mdx index 7eafa3254d01b..fc529451c7257 100644 --- a/apps/docs/docs/ref/javascript/v1/upgrade-guide.mdx +++ b/apps/docs/docs/ref/javascript/v1/upgrade-guide.mdx @@ -250,7 +250,7 @@ const { data, error } = await supabase .auth .signInWithOtp({ phone }) -// After receiving a SMS with a OTP. +// After receiving an SMS with an OTP. const { data, error } = await supabase .auth .verifyOtp({ phone, token }) diff --git a/apps/docs/public/humans.txt b/apps/docs/public/humans.txt index e8f3da37b7e24..9855f6d312cf4 100644 --- a/apps/docs/public/humans.txt +++ b/apps/docs/public/humans.txt @@ -27,6 +27,7 @@ Artur Zakirov Artyom Borissov Barco Fourie Beng Eu +Benjamin Coenen Bo Lu Bobbie Soedirgo Brendan Stephens diff --git a/apps/docs/public/img/guides/platform/read-replicas/read-replicas-flow.svg b/apps/docs/public/img/guides/platform/read-replicas/read-replicas-flow.svg new file mode 100644 index 0000000000000..a49c06eb10382 --- /dev/null +++ b/apps/docs/public/img/guides/platform/read-replicas/read-replicas-flow.svg @@ -0,0 +1 @@ +

[read-replicas-flow.svg: decision flowchart with Yes/No branches. Node labels: "Database slowing down"; "CPU above 70% sustained?"; "Monitor, do not scale yet"; "Queries optimized? Indexes in place?"; "Run EXPLAIN ANALYZE / Add missing indexes / Optimize first"; "Workload 80%+ reads?"; "Upgrade compute / Replicas will not help writes"; "Already at 16XL?"; "Read Replicas / Only horizontal option left"; "Need workload isolation or geo-distribution?"; "Read Replicas"; "Either works / Compute is simpler / Replicas scale further".]
\ No newline at end of file diff --git a/apps/docs/spec/Makefile b/apps/docs/spec/Makefile index bbd601349cbf1..c4d831bc6db11 100644 --- a/apps/docs/spec/Makefile +++ b/apps/docs/spec/Makefile @@ -53,16 +53,16 @@ download.analytics.v0: transform: dereference.api.v1 dereference.auth.v1 dereference.storage.v0 dereference.tsdoc.v2 combine.tsdoc.v2 dereference.api.v1: - npx @redocly/cli bundle --dereferenced -o $(REPO_DIR)/transforms/api_v1_openapi_deparsed.json $(REPO_DIR)/api_v1_openapi.json + cd $(GENERATOR_DIR) && pnpm exec redocly bundle --dereferenced -o $(REPO_DIR)/transforms/api_v1_openapi_deparsed.json $(REPO_DIR)/api_v1_openapi.json dereference.auth.v1: - npx @redocly/cli bundle --dereferenced -o $(REPO_DIR)/transforms/auth_v1_openapi_deparsed.json $(REPO_DIR)/auth_v1_openapi.json + cd $(GENERATOR_DIR) && pnpm exec redocly bundle --dereferenced -o $(REPO_DIR)/transforms/auth_v1_openapi_deparsed.json $(REPO_DIR)/auth_v1_openapi.json dereference.storage.v0: - npx @redocly/cli bundle --dereferenced -o $(REPO_DIR)/transforms/storage_v0_openapi_deparsed.json $(REPO_DIR)/storage_v0_openapi.json + cd $(GENERATOR_DIR) && pnpm exec redocly bundle --dereferenced -o $(REPO_DIR)/transforms/storage_v0_openapi_deparsed.json $(REPO_DIR)/storage_v0_openapi.json dereference.analytics.v0: - npx @redocly/cli bundle --dereferenced -o $(REPO_DIR)/transforms/analytics_v0_openapi_deparsed.json $(REPO_DIR)/analytics_v0_openapi.json + cd $(GENERATOR_DIR) && pnpm exec redocly bundle --dereferenced -o $(REPO_DIR)/transforms/analytics_v0_openapi_deparsed.json $(REPO_DIR)/analytics_v0_openapi.json # No longer updated # dereference.tsdoc.v1: @@ -74,12 +74,12 @@ dereference.analytics.v0: # cd $(GENERATOR_DIR) && npm run tsdoc:dereference:supabase:v1 dereference.tsdoc.v2: - cd $(GENERATOR_DIR) && npm run tsdoc:dereference:functions:v2 - cd $(GENERATOR_DIR) && npm run tsdoc:dereference:gotrue:v2 - cd $(GENERATOR_DIR) && npm run tsdoc:dereference:postgrest:v2 - cd $(GENERATOR_DIR) && npm run tsdoc:dereference:realtime:v2 - cd $(GENERATOR_DIR) && npm run tsdoc:dereference:storage:v2 - cd $(GENERATOR_DIR) && npm run tsdoc:dereference:supabase:v2 + cd $(GENERATOR_DIR) && pnpm run tsdoc:dereference:functions:v2 + cd $(GENERATOR_DIR) && pnpm run tsdoc:dereference:gotrue:v2 + cd $(GENERATOR_DIR) && pnpm run tsdoc:dereference:postgrest:v2 + cd $(GENERATOR_DIR) && pnpm run tsdoc:dereference:realtime:v2 + cd $(GENERATOR_DIR) && pnpm run tsdoc:dereference:storage:v2 + cd $(GENERATOR_DIR) && pnpm run tsdoc:dereference:supabase:v2 # No longer updated # combine.tsdoc.v1: @@ -125,7 +125,7 @@ generate.sections.api.v1: ############################################################################### validate.analytics.v0: - npx @redocly/cli lint --extends=minimal $(REPO_DIR)/analytics_v0_openapi.json + cd $(GENERATOR_DIR) && pnpm exec redocly lint --extends=minimal $(REPO_DIR)/analytics_v0_openapi.json ############################################################################### # Format everything - easier for git to track changes. 
diff --git a/apps/studio/components/interfaces/Auth/Users/Users.utils.tsx b/apps/studio/components/interfaces/Auth/Users/Users.utils.tsx index 22955e5ab15fd..da1ab91990a1b 100644 --- a/apps/studio/components/interfaces/Auth/Users/Users.utils.tsx +++ b/apps/studio/components/interfaces/Auth/Users/Users.utils.tsx @@ -1,9 +1,8 @@ +import { User } from 'data/auth/users-infinite-query' import dayjs from 'dayjs' +import { BASE_PATH } from 'lib/constants' import { Copy, Trash, UserIcon } from 'lucide-react' import { Column, useRowSelection } from 'react-data-grid' - -import { User } from 'data/auth/users-infinite-query' -import { BASE_PATH } from 'lib/constants' import { Checkbox_Shadcn_, cn, @@ -14,6 +13,7 @@ import { ContextMenuTrigger_Shadcn_, copyToClipboard, } from 'ui' + import { PROVIDERS_SCHEMAS } from '../AuthProvidersFormValidation' import { ColumnConfiguration, UsersTableColumn } from './Users.constants' import { HeaderCell } from './UsersGridComponents' @@ -363,6 +363,7 @@ export const formatUserColumns = ({ const provider = row.providers[idx] return (
{ @@ -321,7 +321,7 @@ curl --request POST 'http://localhost:54321/functions/v1/hello-world' \\ Explore our templates {templates.map((template) => ( - + } diff --git a/apps/studio/components/interfaces/Integrations/Queues/QueuesTab.tsx b/apps/studio/components/interfaces/Integrations/Queues/QueuesTab.tsx index d64369c0494e4..ec41650a71843 100644 --- a/apps/studio/components/interfaces/Integrations/Queues/QueuesTab.tsx +++ b/apps/studio/components/interfaces/Integrations/Queues/QueuesTab.tsx @@ -1,16 +1,16 @@ +import { useParams } from 'common' +import AlertError from 'components/ui/AlertError' +import { useQueuesQuery } from 'data/database-queues/database-queues-query' +import { useSelectedProjectQuery } from 'hooks/misc/useSelectedProject' import { RefreshCw, Search, X } from 'lucide-react' import { useRouter } from 'next/router' import { parseAsBoolean, parseAsString, useQueryState } from 'nuqs' import { useMemo, useState } from 'react' import DataGrid, { Row } from 'react-data-grid' - -import { useParams } from 'common' -import AlertError from 'components/ui/AlertError' -import { useQueuesQuery } from 'data/database-queues/database-queues-query' -import { useSelectedProjectQuery } from 'hooks/misc/useSelectedProject' import { Button, cn, LoadingLine } from 'ui' import { Input } from 'ui-patterns/DataInputs/Input' import { GenericSkeletonLoader } from 'ui-patterns/ShimmeringLoader' + import { CreateQueueSheet } from './CreateQueueSheet' import { formatQueueColumns, prepareQueuesForDataGrid } from './Queues.utils' @@ -72,6 +72,7 @@ export const QueuesTab = () => { actions={[ search && (
)} + {type === 'otlp' && ( + <> +
+ + ( + + + + + + + + + Protocol + {OTLP_PROTOCOLS.map((proto) => ( + + {proto.label} + + ))} + + + + + + )} + /> +
+ + ( + +
+ + + + + Gzip Compression + + + Enable gzip compression for log data sent to the OTLP endpoint. + +
+
+ )} + /> + + )} {type === 'last9' && (
{/* This form needs to be outside the */} - {(type === 'webhook' || type === 'loki') && ( + {(type === 'webhook' || type === 'loki' || type === 'otlp') && ( <>

Custom Headers

-

- {getHeadersSectionDescription()} -

+

{getHeadersDescription(type)}

{hasHeaders && diff --git a/apps/studio/components/interfaces/LogDrains/LogDrains.constants.tsx b/apps/studio/components/interfaces/LogDrains/LogDrains.constants.tsx index 10c361b915a83..c75c7721391cd 100644 --- a/apps/studio/components/interfaces/LogDrains/LogDrains.constants.tsx +++ b/apps/studio/components/interfaces/LogDrains/LogDrains.constants.tsx @@ -1,5 +1,5 @@ import { components } from 'api-types' -import { Axiom, Datadog, Grafana, Last9, Sentry } from 'icons' +import { Axiom, Datadog, Grafana, Last9, Otlp, Sentry } from 'icons' import { BracesIcon, Cloud } from 'lucide-react' const iconProps = { @@ -17,6 +17,12 @@ export const LOG_DRAIN_TYPES = [ description: 'Forward logs as a POST request to a custom HTTP endpoint', icon: , }, + { + value: 'otlp', + name: 'OpenTelemetry Protocol (OTLP)', + description: 'Send logs to any OpenTelemetry Protocol (OTLP) compatible endpoint', + icon: , + }, { value: 'datadog', name: 'Datadog', @@ -110,3 +116,12 @@ export type LogDrainDatadogConfig = { export type LogDrainWebhookConfig = { url: string } + +export const OTLP_PROTOCOLS = [{ label: 'HTTP/Protobuf', value: 'http/protobuf' }] as const + +export type LogDrainOtlpConfig = { + endpoint: string + protocol?: string + gzip?: boolean + headers?: Record +} diff --git a/apps/studio/components/interfaces/LogDrains/LogDrains.tsx b/apps/studio/components/interfaces/LogDrains/LogDrains.tsx index f9eb7436d216a..af49b253c023c 100644 --- a/apps/studio/components/interfaces/LogDrains/LogDrains.tsx +++ b/apps/studio/components/interfaces/LogDrains/LogDrains.tsx @@ -58,6 +58,7 @@ export function LogDrains({ const sentryEnabled = useFlag('SentryLogDrain') const s3Enabled = useFlag('S3logdrain') const axiomEnabled = useFlag('axiomLogDrain') + const otlpEnabled = useFlag('otlpLogDrain') const last9Enabled = useFlag('Last9LogDrain') const hasLogDrains = !!logDrains?.length @@ -92,11 +93,12 @@ export function LogDrains({ if (!isLoading && !hasLogDrains) { return ( <> -
+
{LOG_DRAIN_TYPES.filter((t) => { if (t.value === 'sentry') return sentryEnabled if (t.value === 's3') return s3Enabled if (t.value === 'axiom') return axiomEnabled + if (t.value === 'otlp') return otlpEnabled if (t.value === 'last9') return last9Enabled return true }).map((src) => ( diff --git a/apps/studio/components/interfaces/LogDrains/LogDrains.utils.test.ts b/apps/studio/components/interfaces/LogDrains/LogDrains.utils.test.ts new file mode 100644 index 0000000000000..3c37c43ad9bdb --- /dev/null +++ b/apps/studio/components/interfaces/LogDrains/LogDrains.utils.test.ts @@ -0,0 +1,277 @@ +import { describe, expect, it } from 'vitest' + +import { + getHeadersSectionDescription, + HEADER_VALIDATION_ERRORS, + otlpConfigSchema, + validateNewHeader, +} from './LogDrains.utils' + +describe('getHeadersSectionDescription', () => { + it('returns webhook description for webhook type', () => { + const result = getHeadersSectionDescription('webhook') + expect(result).toBe('Set custom headers when draining logs to the Endpoint URL') + }) + + it('returns loki description for loki type', () => { + const result = getHeadersSectionDescription('loki') + expect(result).toBe('Set custom headers when draining logs to the Loki HTTP(S) endpoint') + }) + + it('returns otlp description for otlp type', () => { + const result = getHeadersSectionDescription('otlp') + expect(result).toBe( + 'Set custom headers for OTLP authentication (e.g., Authorization, X-API-Key)' + ) + }) + + it('returns empty string for unsupported types', () => { + expect(getHeadersSectionDescription('datadog')).toBe('') + expect(getHeadersSectionDescription('s3')).toBe('') + expect(getHeadersSectionDescription('sentry')).toBe('') + }) +}) + +describe('validateNewHeader', () => { + describe('valid cases', () => { + it('accepts valid header with empty existing headers', () => { + const result = validateNewHeader({}, { name: 'Authorization', value: 'Bearer token' }) + expect(result.valid).toBe(true) + expect(result.error).toBeUndefined() + }) + + it('accepts valid header with existing headers', () => { + const existingHeaders = { + 'Content-Type': 'application/json', + 'X-Custom': 'value', + } + const result = validateNewHeader(existingHeaders, { + name: 'Authorization', + value: 'Bearer token', + }) + expect(result.valid).toBe(true) + expect(result.error).toBeUndefined() + }) + }) + + describe('validation errors', () => { + it('rejects when 20 headers already exist', () => { + const existingHeaders = Object.fromEntries( + Array.from({ length: 20 }, (_, i) => [`Header-${i}`, `value-${i}`]) + ) + const result = validateNewHeader(existingHeaders, { name: 'New-Header', value: 'value' }) + expect(result.valid).toBe(false) + expect(result.error).toBe(HEADER_VALIDATION_ERRORS.MAX_LIMIT) + }) + + it('rejects duplicate header names', () => { + const existingHeaders = { + 'Content-Type': 'application/json', + Authorization: 'Bearer old-token', + } + const result = validateNewHeader(existingHeaders, { + name: 'Authorization', + value: 'Bearer new-token', + }) + expect(result.valid).toBe(false) + expect(result.error).toBe(HEADER_VALIDATION_ERRORS.DUPLICATE) + }) + + it('rejects header with empty name', () => { + const result = validateNewHeader({}, { name: '', value: 'some-value' }) + expect(result.valid).toBe(false) + expect(result.error).toBe(HEADER_VALIDATION_ERRORS.REQUIRED) + }) + + it('rejects header with empty value', () => { + const result = validateNewHeader({}, { name: 'Some-Header', value: '' }) + expect(result.valid).toBe(false) + 
expect(result.error).toBe(HEADER_VALIDATION_ERRORS.REQUIRED) + }) + + it('rejects header with both empty name and value', () => { + const result = validateNewHeader({}, { name: '', value: '' }) + expect(result.valid).toBe(false) + expect(result.error).toBe(HEADER_VALIDATION_ERRORS.REQUIRED) + }) + }) + + describe('edge cases', () => { + it('allows exactly 19 existing headers', () => { + const existingHeaders = Object.fromEntries( + Array.from({ length: 19 }, (_, i) => [`Header-${i}`, `value-${i}`]) + ) + const result = validateNewHeader(existingHeaders, { name: 'New-Header', value: 'value' }) + expect(result.valid).toBe(true) + }) + + it('is case-sensitive for duplicate checking', () => { + const existingHeaders = { authorization: 'bearer token' } + const result = validateNewHeader(existingHeaders, { + name: 'Authorization', + value: 'Bearer token', + }) + expect(result.valid).toBe(true) + }) + }) +}) + +describe('otlpConfigSchema', () => { + describe('valid OTLP configurations', () => { + it('accepts valid HTTPS endpoint with all fields', () => { + const config = { + type: 'otlp' as const, + endpoint: 'https://otlp.example.com:4318/v1/logs', + protocol: 'http/protobuf', + gzip: true, + headers: { Authorization: 'Bearer token' }, + } + const result = otlpConfigSchema.safeParse(config) + expect(result.success).toBe(true) + if (result.success) { + expect(result.data).toEqual(config) + } + }) + + it('accepts HTTP endpoint (for testing environments)', () => { + const config = { + type: 'otlp' as const, + endpoint: 'http://localhost:4318/v1/logs', + } + const result = otlpConfigSchema.safeParse(config) + expect(result.success).toBe(true) + }) + + it('applies default values for optional fields', () => { + const config = { + type: 'otlp' as const, + endpoint: 'https://otlp.example.com/v1/logs', + } + const result = otlpConfigSchema.safeParse(config) + expect(result.success).toBe(true) + if (result.success) { + expect(result.data.protocol).toBe('http/protobuf') + expect(result.data.gzip).toBe(true) + } + }) + + it('accepts empty headers object', () => { + const config = { + type: 'otlp' as const, + endpoint: 'https://otlp.example.com/v1/logs', + headers: {}, + } + const result = otlpConfigSchema.safeParse(config) + expect(result.success).toBe(true) + }) + + it('accepts multiple custom headers', () => { + const config = { + type: 'otlp' as const, + endpoint: 'https://otlp.example.com/v1/logs', + headers: { + Authorization: 'Bearer token', + 'X-API-Key': 'secret-key', + 'X-Custom-Header': 'custom-value', + }, + } + const result = otlpConfigSchema.safeParse(config) + expect(result.success).toBe(true) + }) + }) + + describe('invalid OTLP configurations', () => { + it('rejects empty endpoint', () => { + const config = { + type: 'otlp' as const, + endpoint: '', + } + const result = otlpConfigSchema.safeParse(config) + expect(result.success).toBe(false) + if (!result.success) { + expect(result.error.issues[0].message).toContain('OTLP endpoint is required') + } + }) + + it('rejects endpoint without protocol', () => { + const config = { + type: 'otlp' as const, + endpoint: 'otlp.example.com/v1/logs', + } + const result = otlpConfigSchema.safeParse(config) + expect(result.success).toBe(false) + if (!result.success) { + expect(result.error.issues[0].message).toContain('must start with http:// or https://') + } + }) + + it('rejects endpoint with invalid protocol', () => { + const config = { + type: 'otlp' as const, + endpoint: 'ftp://otlp.example.com/v1/logs', + } + const result = 
otlpConfigSchema.safeParse(config) + expect(result.success).toBe(false) + }) + + it('rejects endpoint with ws:// protocol', () => { + const config = { + type: 'otlp' as const, + endpoint: 'ws://otlp.example.com/v1/logs', + } + const result = otlpConfigSchema.safeParse(config) + expect(result.success).toBe(false) + }) + + it('rejects wrong type field', () => { + const config = { + type: 'webhook' as const, + endpoint: 'https://otlp.example.com/v1/logs', + } + const result = otlpConfigSchema.safeParse(config) + expect(result.success).toBe(false) + }) + }) + + describe('edge cases', () => { + it('accepts endpoint with port number', () => { + const config = { + type: 'otlp' as const, + endpoint: 'https://otlp.example.com:4318/v1/logs', + } + const result = otlpConfigSchema.safeParse(config) + expect(result.success).toBe(true) + }) + + it('accepts endpoint with query parameters', () => { + const config = { + type: 'otlp' as const, + endpoint: 'https://otlp.example.com/v1/logs?tenant=123', + } + const result = otlpConfigSchema.safeParse(config) + expect(result.success).toBe(true) + }) + + it('accepts endpoint with authentication in URL', () => { + const config = { + type: 'otlp' as const, + endpoint: 'https://user:pass@otlp.example.com/v1/logs', + } + const result = otlpConfigSchema.safeParse(config) + expect(result.success).toBe(true) + }) + + it('allows gzip to be explicitly false', () => { + const config = { + type: 'otlp' as const, + endpoint: 'https://otlp.example.com/v1/logs', + gzip: false, + } + const result = otlpConfigSchema.safeParse(config) + expect(result.success).toBe(true) + if (result.success) { + expect(result.data.gzip).toBe(false) + } + }) + }) +}) diff --git a/apps/studio/components/interfaces/LogDrains/LogDrains.utils.ts b/apps/studio/components/interfaces/LogDrains/LogDrains.utils.ts new file mode 100644 index 0000000000000..5195513ce7564 --- /dev/null +++ b/apps/studio/components/interfaces/LogDrains/LogDrains.utils.ts @@ -0,0 +1,77 @@ +/** + * Utility functions for log drain management + * Extracted for testability + */ + +import { z } from 'zod' + +import { LogDrainType } from './LogDrains.constants' + +/** + * Get the description text for the custom headers section based on log drain type + */ +export function getHeadersSectionDescription(type: LogDrainType): string { + if (type === 'webhook') { + return 'Set custom headers when draining logs to the Endpoint URL' + } + if (type === 'loki') { + return 'Set custom headers when draining logs to the Loki HTTP(S) endpoint' + } + if (type === 'otlp') { + return 'Set custom headers for OTLP authentication (e.g., Authorization, X-API-Key)' + } + return '' +} + +/** + * Validation errors for header management + */ +export const HEADER_VALIDATION_ERRORS = { + MAX_LIMIT: 'You can only have 20 custom headers', + DUPLICATE: 'Header name already exists', + REQUIRED: 'Header name and value are required', +} as const + +/** + * Validates if a new header can be added to the existing headers + */ +export function validateNewHeader( + existingHeaders: Record, + newHeader: { name: string; value: string } +): { valid: boolean; error?: string } { + const headerKeys = Object.keys(existingHeaders) + + if (headerKeys.length >= 20) { + return { valid: false, error: HEADER_VALIDATION_ERRORS.MAX_LIMIT } + } + + if (headerKeys.includes(newHeader.name)) { + return { valid: false, error: HEADER_VALIDATION_ERRORS.DUPLICATE } + } + + if (!newHeader.name || !newHeader.value) { + return { valid: false, error: HEADER_VALIDATION_ERRORS.REQUIRED } + } + + 
return { valid: true } +} + +/** + * Zod schema for OTLP log drain configuration + * Extracted for testing purposes + */ +export const otlpConfigSchema = z.object({ + type: z.literal('otlp'), + endpoint: z + .string() + .min(1, { message: 'OTLP endpoint is required' }) + .refine( + (url) => url.startsWith('http://') || url.startsWith('https://'), + 'OTLP endpoint must start with http:// or https://' + ), + protocol: z.string().optional().default('http/protobuf'), + gzip: z.boolean().optional().default(true), + headers: z.record(z.string(), z.string()).optional(), +}) + +export type OtlpConfig = z.infer diff --git a/apps/studio/components/interfaces/QueryPerformance/components/FilterInput.tsx b/apps/studio/components/interfaces/QueryPerformance/components/FilterInput.tsx index 4ea5ef34c4045..3e3b607987620 100644 --- a/apps/studio/components/interfaces/QueryPerformance/components/FilterInput.tsx +++ b/apps/studio/components/interfaces/QueryPerformance/components/FilterInput.tsx @@ -1,5 +1,4 @@ import { Search, X } from 'lucide-react' - import { Button } from 'ui' import { Input } from 'ui-patterns/DataInputs/Input' @@ -25,6 +24,7 @@ export const FilterInput = ({ value, onChange, placeholder, className }: FilterI actions={[ value && (
diff --git a/apps/studio/components/layouts/ProjectLayout/index.tsx b/apps/studio/components/layouts/ProjectLayout/index.tsx index ebb3bc5e1cbdf..660c784aa4193 100644 --- a/apps/studio/components/layouts/ProjectLayout/index.tsx +++ b/apps/studio/components/layouts/ProjectLayout/index.tsx @@ -143,7 +143,7 @@ export const ProjectLayout = forwardRef {snippets.map((snippet) => ( 0 ? messageParts.map((part: NonNullable, idx) => { const isLastPart = idx === messageParts.length - 1 - return + return }) : content && ( diff --git a/apps/studio/components/ui/DataTable/DataTableFilters/DataTableFilterCommand.tsx b/apps/studio/components/ui/DataTable/DataTableFilters/DataTableFilterCommand.tsx index 7f4edbb1ebab1..b76d1926362c7 100644 --- a/apps/studio/components/ui/DataTable/DataTableFilters/DataTableFilterCommand.tsx +++ b/apps/studio/components/ui/DataTable/DataTableFilters/DataTableFilterCommand.tsx @@ -382,11 +382,13 @@ function CommandItemSuggestions({ field }: { field: DataTableFilterField< case 'checkbox': { return ( - {getFacetedUniqueValues - ? Array.from(getFacetedUniqueValues(table, value)?.keys() || []) - .map((value) => `[${value}]`) - .join(' ') - : field.options?.map(({ value }) => `[${value}]`).join(' ')} + {field.options && field.options.length > 0 + ? field.options.map(({ value }) => `[${value}]`).join(' ') + : getFacetedUniqueValues + ? Array.from(getFacetedUniqueValues(table, value)?.keys() || []) + .map((value) => `[${value}]`) + .join(' ') + : null} ) } diff --git a/apps/studio/components/ui/DataTable/FilterSideBar.tsx b/apps/studio/components/ui/DataTable/FilterSideBar.tsx index cceb6943e85b7..d3adfbc5479fc 100644 --- a/apps/studio/components/ui/DataTable/FilterSideBar.tsx +++ b/apps/studio/components/ui/DataTable/FilterSideBar.tsx @@ -1,16 +1,16 @@ -import { useRouter } from 'next/router' - import { useFlag, useParams } from 'common' import { useUnifiedLogsPreview } from 'components/interfaces/App/FeaturePreview/FeaturePreviewContext' +import { LOG_DRAIN_TYPES } from 'components/interfaces/LogDrains/LogDrains.constants' +import Link from 'next/link' +import { useRouter } from 'next/router' +import React from 'react' import { Button, cn, ResizablePanel } from 'ui' + import { FeaturePreviewSidebarPanel } from '../FeaturePreviewSidebarPanel' import { DateRangeDisabled } from './DataTable.types' import { DataTableFilterControls } from './DataTableFilters/DataTableFilterControls' import { DataTableResetButton } from './DataTableResetButton' import { useDataTable } from './providers/DataTableProvider' -import Link from 'next/link' -import { LOG_DRAIN_TYPES } from 'components/interfaces/LogDrains/LogDrains.constants' -import React from 'react' interface FilterSideBarProps { dateRangeDisabled?: DateRangeDisabled @@ -69,9 +69,9 @@ export function FilterSideBar({ dateRangeDisabled }: FilterSideBarProps) { description="Send logs to your preferred observability or storage platform." illustration={
- {LOG_DRAIN_TYPES.map((type) => - React.cloneElement(type.icon, { height: 20, width: 20 }) - )} + {LOG_DRAIN_TYPES.filter((t) => + ['datadog', 'sentry', 'webhook', 'loki'].includes(t.value) + ).map((type) => React.cloneElement(type.icon, { height: 20, width: 20 }))}
} actions={ diff --git a/apps/studio/csp.js b/apps/studio/csp.js index c286aa552230b..ff2f9f0a1dbf2 100644 --- a/apps/studio/csp.js +++ b/apps/studio/csp.js @@ -5,7 +5,7 @@ const SUPABASE_URL = process.env.SUPABASE_URL ? new URL(process.env.SUPABASE_URL const GOTRUE_URL = process.env.NEXT_PUBLIC_GOTRUE_URL ? new URL(process.env.NEXT_PUBLIC_GOTRUE_URL).origin : '' -const SUPABASE_PROJECTS_URL = 'https://*.supabase.co' +const SUPABASE_PROJECTS_URL = 'https://*.supabase.co https://*.storage.supabase.co' const SUPABASE_PROJECTS_URL_WS = 'wss://*.supabase.co' // construct the URL for the Websocket Local URLs @@ -37,7 +37,7 @@ const NIMBUS_STAGING_PROJECTS_URL_WS = 'wss://*.nmb-proj.com' const NIMBUS_PROD_PROJECTS_URL = process.env.NIMBUS_PROD_PROJECTS_URL || '' const NIMBUS_PROD_PROJECTS_URL_WS = process.env.NIMBUS_PROD_PROJECTS_URL_WS || '' -const SUPABASE_STAGING_PROJECTS_URL = 'https://*.supabase.red' +const SUPABASE_STAGING_PROJECTS_URL = 'https://*.supabase.red https://*.storage.supabase.red' const SUPABASE_STAGING_PROJECTS_URL_WS = 'wss://*.supabase.red' const SUPABASE_COM_URL = 'https://supabase.com' const CLOUDFLARE_CDN_URL = 'https://cdnjs.cloudflare.com' diff --git a/apps/studio/data/config/project-endpoint-query.ts b/apps/studio/data/config/project-endpoint-query.ts index 3407df5995a39..4f4445e52138f 100644 --- a/apps/studio/data/config/project-endpoint-query.ts +++ b/apps/studio/data/config/project-endpoint-query.ts @@ -1,4 +1,5 @@ import { IS_PLATFORM } from 'common' + import { ProjectSettingsVariables, useProjectSettingsV2Query } from './project-settings-v2-query' export const useProjectEndpointQuery = ({ projectRef }: ProjectSettingsVariables) => { @@ -9,8 +10,11 @@ export const useProjectEndpointQuery = ({ projectRef }: ProjectSettingsVariables const protocol = data?.app_config?.protocol ?? 'https' const endpoint = data?.app_config?.endpoint const clientEndpoint = `${IS_PLATFORM ? 'https' : protocol}://${endpoint}` + const storageEndpoint = data?.app_config?.storage_endpoint + ? `${IS_PLATFORM ? 'https' : protocol}://${data?.app_config?.storage_endpoint}` + : undefined - return { endpoint: clientEndpoint } + return { endpoint: clientEndpoint, storageEndpoint } }, } ) diff --git a/apps/studio/data/profile/mfa-challenge-and-verify-mutation.ts b/apps/studio/data/profile/mfa-challenge-and-verify-mutation.ts index ad89d2332e93b..8b2d62fcec663 100644 --- a/apps/studio/data/profile/mfa-challenge-and-verify-mutation.ts +++ b/apps/studio/data/profile/mfa-challenge-and-verify-mutation.ts @@ -40,7 +40,7 @@ export const useMfaChallengeAndVerifyMutation = ({ return mfaChallengeAndVerify(params) }, async onSuccess(data, variables, context) { - // when a MFA is added, the aaLevel is bumped up + // when an MFA is added, the aaLevel is bumped up const refreshFactors = variables.refreshFactors ?? 
true await Promise.all([ diff --git a/apps/studio/eslint.config.cjs b/apps/studio/eslint.config.cjs index 5b3f0df7fd398..5eb0b464ab176 100644 --- a/apps/studio/eslint.config.cjs +++ b/apps/studio/eslint.config.cjs @@ -13,6 +13,7 @@ module.exports = defineConfig([ '@next/next/no-img-element': 'off', 'react/no-unescaped-entities': 'off', 'react/display-name': 'warn', + 'react/jsx-key': 'error', 'barrel-files/avoid-re-export-all': 'error', }, }, diff --git a/apps/studio/instrumentation-client.ts b/apps/studio/instrumentation-client.ts index 6f24298c4d8c6..279c007e7377b 100644 --- a/apps/studio/instrumentation-client.ts +++ b/apps/studio/instrumentation-client.ts @@ -103,7 +103,7 @@ Sentry.init({ debug: false, // Enable performance monitoring - Next.js routes and API calls are automatically instrumented - tracesSampleRate: 0.1, // Capture 10% of transactions for performance monitoring + tracesSampleRate: 0.03, // Capture 3% of transactions for performance monitoring // [Ali] Filter out browser extensions and user scripts (FE-2094) // Using denyUrls to block known third-party script patterns diff --git a/apps/studio/pages/cli/login.tsx b/apps/studio/pages/cli/login.tsx index fa11a76594e17..8fb8b2cfe1467 100644 --- a/apps/studio/pages/cli/login.tsx +++ b/apps/studio/pages/cli/login.tsx @@ -1,13 +1,12 @@ -import Link from 'next/link' -import { useRouter } from 'next/router' -import { useEffect } from 'react' -import { toast } from 'sonner' - import { useIsLoggedIn, useParams } from 'common' import APIAuthorizationLayout from 'components/layouts/APIAuthorizationLayout' import CopyButton from 'components/ui/CopyButton' import { createCliLoginSession } from 'data/cli/login' import { withAuth } from 'hooks/misc/withAuth' +import Link from 'next/link' +import { useRouter } from 'next/router' +import { useEffect } from 'react' +import { toast } from 'sonner' import type { NextPageWithLayout } from 'types' import { InputOTP, InputOTPGroup, InputOTPSlot, LogoLoader } from 'ui' import { Admonition } from 'ui-patterns' @@ -56,7 +55,7 @@ const CliLoginPage: NextPageWithLayout = () => { {Array.from({ length: 8 }, (_, i) => ( - + ))} diff --git a/apps/studio/pages/project/[ref]/functions/index.tsx b/apps/studio/pages/project/[ref]/functions/index.tsx index ff03b3a7f78d6..9737243ae135a 100644 --- a/apps/studio/pages/project/[ref]/functions/index.tsx +++ b/apps/studio/pages/project/[ref]/functions/index.tsx @@ -1,7 +1,3 @@ -import { ExternalLink, Search, X } from 'lucide-react' -import { parseAsString, parseAsStringLiteral, useQueryState } from 'nuqs' -import React, { useMemo } from 'react' - import { useParams } from 'common' import { DeployEdgeFunctionButton } from 'components/interfaces/EdgeFunctions/DeployEdgeFunctionButton' import { @@ -23,6 +19,9 @@ import AlertError from 'components/ui/AlertError' import { DocsButton } from 'components/ui/DocsButton' import { useEdgeFunctionsQuery } from 'data/edge-functions/edge-functions-query' import { DOCS_URL, IS_PLATFORM } from 'lib/constants' +import { ExternalLink, Search, X } from 'lucide-react' +import { parseAsString, parseAsStringLiteral, useQueryState } from 'nuqs' +import React, { useMemo } from 'react' import type { NextPageWithLayout } from 'types' import { Button, Card, Table, TableBody, TableCell, TableHead, TableHeader, TableRow } from 'ui' import { Admonition } from 'ui-patterns' @@ -114,6 +113,7 @@ const EdgeFunctionsPage: NextPageWithLayout = () => { actions={[ search && (