diff --git a/.github/workflows/main.yml b/.github/workflows/main.yml index bf624b4..0e87b90 100644 --- a/.github/workflows/main.yml +++ b/.github/workflows/main.yml @@ -277,7 +277,7 @@ jobs: postgres-build: runs-on: ${{ matrix.os }} - name: postgresql-${{ matrix.name }}-${{ matrix.arch }} build + name: postgresql${{ matrix.postgres_version }}-${{ matrix.name }}-${{ matrix.arch }} build timeout-minutes: 15 strategy: fail-fast: false @@ -286,15 +286,35 @@ jobs: - os: ubuntu-22.04 arch: x86_64 name: linux + postgres_version: '17' + - os: ubuntu-22.04 + arch: x86_64 + name: linux + postgres_version: '15' + - os: ubuntu-22.04-arm + arch: arm64 + name: linux + postgres_version: '17' - os: ubuntu-22.04-arm arch: arm64 name: linux + postgres_version: '15' - os: macos-15 arch: arm64 name: macos + postgres_version: '17' + - os: macos-15 + arch: arm64 + name: macos + postgres_version: '15' - os: macos-15 arch: x86_64 name: macos + postgres_version: '17' + - os: macos-15 + arch: x86_64 + name: macos + postgres_version: '15' steps: - uses: actions/checkout@v4.2.2 @@ -307,23 +327,23 @@ jobs: sudo sh -c 'echo "deb http://apt.postgresql.org/pub/repos/apt $(lsb_release -cs)-pgdg main" > /etc/apt/sources.list.d/pgdg.list' curl -fsSL https://www.postgresql.org/media/keys/ACCC4CF8.asc | sudo gpg --dearmor -o /etc/apt/trusted.gpg.d/postgresql.gpg sudo apt-get update - sudo apt-get install -y postgresql-server-dev-17 + sudo apt-get install -y postgresql-server-dev-${{ matrix.postgres_version }} - name: macos install postgresql if: matrix.name == 'macos' - run: brew install postgresql@17 gettext + run: brew install postgresql@${{ matrix.postgres_version }} gettext - name: build and package postgresql extension (linux) if: matrix.name == 'linux' - run: make postgres-package + run: make postgres-package PG_CONFIG=/usr/lib/postgresql/${{ matrix.postgres_version }}/bin/pg_config - name: build and package postgresql extension (macos) if: matrix.name == 'macos' - run: make postgres-package 
PG_CONFIG=$(brew --prefix postgresql@17)/bin/pg_config PG_EXTRA_CFLAGS="-I$(brew --prefix gettext)/include ${{ matrix.arch == 'x86_64' && '-arch x86_64' || '' }}" + run: make postgres-package PG_CONFIG=$(brew --prefix postgresql@${{ matrix.postgres_version }})/bin/pg_config PG_EXTRA_CFLAGS="-I$(brew --prefix gettext)/include ${{ matrix.arch == 'x86_64' && '-arch x86_64' || '' }}" - uses: actions/upload-artifact@v4.6.2 with: - name: cloudsync-postgresql-${{ matrix.name }}-${{ matrix.arch }} + name: cloudsync-postgresql${{ matrix.postgres_version }}-${{ matrix.name }}-${{ matrix.arch }} path: dist/postgresql/ if-no-files-found: error @@ -546,6 +566,8 @@ jobs: [**Expo**](https://www.npmjs.com/package/@sqliteai/sqlite-sync-expo): `npm install @sqliteai/sqlite-sync-expo` [**Android**](https://central.sonatype.com/artifact/ai.sqlite/sync): `ai.sqlite:sync:${{ steps.tag.outputs.version }}` [**Swift**](https://github.com/sqliteai/sqlite-sync#swift-package): [Installation Guide](https://github.com/sqliteai/sqlite-sync#swift-package) + [**Docker (PostgreSQL)**](https://hub.docker.com/r/sqlitecloud/sqlite-sync-postgres): `docker pull sqlitecloud/sqlite-sync-postgres:17` or `:15` + [**Docker (Supabase)**](https://hub.docker.com/r/sqlitecloud/sqlite-sync-supabase): `docker pull sqlitecloud/sqlite-sync-supabase:17` or `:15` --- @@ -555,3 +577,89 @@ jobs: cloudsync-*-${{ steps.tag.outputs.version }}.* CloudSync-*-${{ steps.tag.outputs.version }}.* make_latest: true + + docker-publish: + runs-on: ubuntu-22.04 + name: docker ${{ matrix.image }} pg${{ matrix.pg_major }} + needs: [release] + if: github.ref == 'refs/heads/main' + + env: + DOCKERHUB_ORG: sqlitecloud + + strategy: + fail-fast: false + matrix: + include: + - image: sqlite-sync-postgres + pg_major: '17' + dockerfile: docker/postgresql/Dockerfile.release + - image: sqlite-sync-postgres + pg_major: '15' + dockerfile: docker/postgresql/Dockerfile.release + - image: sqlite-sync-supabase + pg_major: '17' + dockerfile: 
docker/postgresql/Dockerfile.supabase.release
+          supabase_tag: '17.6.1.071'
+        - image: sqlite-sync-supabase
+          pg_major: '15'
+          dockerfile: docker/postgresql/Dockerfile.supabase.release
+          supabase_tag: '15.8.1.085'
+
+    steps:
+
+      - uses: actions/checkout@v4.2.2
+        with:
+          submodules: true
+
+      - name: get cloudsync version
+        id: version
+        run: echo "version=$(make version)" >> $GITHUB_OUTPUT
+
+      - uses: docker/setup-qemu-action@v3
+
+      - uses: docker/setup-buildx-action@v3
+
+      - uses: docker/login-action@v3
+        with:
+          username: ${{ secrets.DOCKERHUB_USERNAME }}
+          password: ${{ secrets.DOCKERHUB_TOKEN }}
+
+      - name: set docker tags and build args (standalone)
+        if: matrix.image == 'sqlite-sync-postgres'
+        id: standalone
+        run: |
+          VERSION=${{ steps.version.outputs.version }}
+          PG=${{ matrix.pg_major }}
+          IMAGE=${{ env.DOCKERHUB_ORG }}/${{ matrix.image }}
+          {
+            echo "tags=${IMAGE}:${PG},${IMAGE}:${PG}-${VERSION}"
+            echo "build_args<<EOF"
+            echo "POSTGRES_TAG=${PG}"
+            echo "CLOUDSYNC_VERSION=${VERSION}"
+            echo "EOF"
+          } >> $GITHUB_OUTPUT
+
+      - name: set docker tags and build args (supabase)
+        if: matrix.image == 'sqlite-sync-supabase'
+        id: supabase
+        run: |
+          VERSION=${{ steps.version.outputs.version }}
+          IMAGE=${{ env.DOCKERHUB_ORG }}/${{ matrix.image }}
+          SUPABASE_TAG=${{ matrix.supabase_tag }}
+          {
+            echo "tags=${IMAGE}:${{ matrix.pg_major }},${IMAGE}:${{ matrix.pg_major }}-${VERSION},${IMAGE}:${SUPABASE_TAG}"
+            echo "build_args<<EOF"
+            echo "SUPABASE_POSTGRES_TAG=${SUPABASE_TAG}"
+            echo "CLOUDSYNC_VERSION=${VERSION}"
+            echo "EOF"
+          } >> $GITHUB_OUTPUT
+
+      - uses: docker/build-push-action@v6
+        with:
+          context: .
+ file: ${{ matrix.dockerfile }} + platforms: linux/amd64,linux/arm64 + push: true + tags: ${{ matrix.image == 'sqlite-sync-postgres' && steps.standalone.outputs.tags || steps.supabase.outputs.tags }} + build-args: ${{ matrix.image == 'sqlite-sync-postgres' && steps.standalone.outputs.build_args || steps.supabase.outputs.build_args }} diff --git a/API.md b/API.md index 140eb6f..71291d9 100644 --- a/API.md +++ b/API.md @@ -57,7 +57,7 @@ When designing your database schema for SQLite Sync, follow these essential requ - **Foreign Key Compatibility**: Be aware of potential conflicts during CRDT merge operations and RLS policy interactions. - **Trigger Compatibility**: Triggers may cause duplicate operations or be called multiple times due to column-by-column processing. -For comprehensive guidelines, see the [Database Schema Recommendations](docs/SCHEMA.md). +For comprehensive guidelines, see the [Database Schema Recommendations](docs/schema.md). The function supports three overloads: - `cloudsync_init(table_name)`: Uses the default 'cls' CRDT algorithm. diff --git a/README.md b/README.md index 4fb0822..4b21ef4 100644 --- a/README.md +++ b/README.md @@ -53,7 +53,7 @@ Built on **CRDT** (Conflict-free Replicated Data Types), it guarantees: ### 1. 
Install -Download a pre-built binary from the [Releases](https://github.com/sqliteai/sqlite-sync/releases) page, or install a platform package (see [full installation guide](./docs/INSTALLATION.md) for platform-specific code examples): +Download a pre-built binary from the [Releases](https://github.com/sqliteai/sqlite-sync/releases) page, or install a platform package (see [full installation guide](./docs/installation.md) for platform-specific code examples): | Platform | Install | |----------|---------| @@ -165,7 +165,7 @@ SELECT cloudsync_init('notes'); SELECT cloudsync_set_column('notes', 'body', 'algo', 'block'); ``` -Now two agents (or devices) can edit different lines of the same note, and both edits are preserved after sync. See the full guide: **[Block-Level LWW Documentation](./docs/BLOCK-LWW.md)**. +Now two agents (or devices) can edit different lines of the same note, and both edits are preserved after sync. See the full guide: **[Block-Level LWW Documentation](./docs/block-lww.md)**. ## Row-Level Security @@ -174,16 +174,16 @@ With SQLite Cloud's RLS, a single shared cloud database serves all users while e - One database, multiple tenants, no per-user database provisioning. - Each client syncs only authorized rows, minimal bandwidth and storage. -See the full guide: **[Row-Level Security Documentation](./docs/ROW-LEVEL-SECURITY.md)**. +See the full guide: **[Row-Level Security Documentation](./docs/row-level-security.md)**. 
## Documentation - **[API Reference](./API.md)**: all functions, parameters, and examples -- **[Installation Guide](./docs/INSTALLATION.md)**: platform-specific setup (Swift, Android, Expo, React Native, Flutter, WASM) -- **[Block-Level LWW Guide](./docs/BLOCK-LWW.md)**: line-level text merge for markdown and documents -- **[Row-Level Security Guide](./docs/ROW-LEVEL-SECURITY.md)**: multi-tenant access control with server-enforced policies -- **[Database Schema Recommendations](./docs/SCHEMA.md)**: primary keys, constraints, foreign keys, triggers -- **[Custom Network Layer](./docs/Network.md)**: replace the built-in libcurl networking +- **[Installation Guide](./docs/installation.md)**: platform-specific setup (Swift, Android, Expo, React Native, Flutter, WASM) +- **[Block-Level LWW Guide](./docs/block-lww.md)**: line-level text merge for markdown and documents +- **[Row-Level Security Guide](./docs/row-level-security.md)**: multi-tenant access control with server-enforced policies +- **[Database Schema Recommendations](./docs/schema.md)**: primary keys, constraints, foreign keys, triggers +- **[Custom Network Layer](./docs/internal/network.md)**: replace the built-in libcurl networking - **[Examples](./examples/)**: complete walkthroughs (todo app, sport tracker, Swift multiplatform) ## SQLite Cloud Setup diff --git a/docker/postgresql/Dockerfile.release b/docker/postgresql/Dockerfile.release new file mode 100644 index 0000000..d3a9aff --- /dev/null +++ b/docker/postgresql/Dockerfile.release @@ -0,0 +1,50 @@ +# PostgreSQL with pre-compiled CloudSync (sqlite-sync) extension +# +# Usage: +# docker build \ +# --build-arg POSTGRES_TAG=17 \ +# --build-arg CLOUDSYNC_VERSION=1.0.1 \ +# -t sqlite-sync-postgres:17 \ +# -f docker/postgresql/Dockerfile.release . 
+# +# Or pull the pre-built image from Docker Hub: +# docker pull sqlitecloud/sqlite-sync-postgres:17 +# + +ARG POSTGRES_TAG=17 +FROM postgres:${POSTGRES_TAG} + +ARG CLOUDSYNC_VERSION +ARG TARGETARCH + +# Map Docker platform arch to artifact arch +RUN case "${TARGETARCH}" in \ + amd64) ARCH="x86_64" ;; \ + arm64) ARCH="arm64" ;; \ + *) echo "Unsupported architecture: ${TARGETARCH}" && exit 1 ;; \ + esac && \ + apt-get update && apt-get install -y --no-install-recommends curl ca-certificates && \ + ASSET="cloudsync-postgresql${PG_MAJOR}-linux-${ARCH}-${CLOUDSYNC_VERSION}.tar.gz" && \ + URL="https://github.com/sqliteai/sqlite-sync/releases/download/${CLOUDSYNC_VERSION}/${ASSET}" && \ + echo "Downloading ${URL}" && \ + curl -fSL "${URL}" -o /tmp/cloudsync.tar.gz && \ + mkdir -p /tmp/cloudsync && \ + tar -xzf /tmp/cloudsync.tar.gz -C /tmp/cloudsync && \ + install -m 755 /tmp/cloudsync/cloudsync.so "$(pg_config --pkglibdir)/" && \ + install -m 644 /tmp/cloudsync/cloudsync--1.0.sql "$(pg_config --sharedir)/extension/" && \ + install -m 644 /tmp/cloudsync/cloudsync.control "$(pg_config --sharedir)/extension/" && \ + rm -rf /tmp/cloudsync /tmp/cloudsync.tar.gz && \ + apt-get purge -y curl && apt-get autoremove -y && rm -rf /var/lib/apt/lists/* + +# Verify installation +RUN ls -la "$(pg_config --pkglibdir)/cloudsync.so" && \ + ls -la "$(pg_config --sharedir)/extension/cloudsync"* && \ + echo "CloudSync extension installed successfully" + +# Copy initialization script (auto-creates the extension on first start) +COPY docker/postgresql/init.sql /docker-entrypoint-initdb.d/ + +EXPOSE 5432 + +LABEL org.sqliteai.cloudsync.description="PostgreSQL with CloudSync CRDT extension" \ + org.opencontainers.image.source="https://github.com/sqliteai/sqlite-sync" diff --git a/docker/postgresql/Dockerfile.supabase.release b/docker/postgresql/Dockerfile.supabase.release new file mode 100644 index 0000000..4514c31 --- /dev/null +++ b/docker/postgresql/Dockerfile.supabase.release @@ -0,0 +1,72 
@@ +# Supabase PostgreSQL with pre-compiled CloudSync (sqlite-sync) extension +# +# Usage: +# docker build \ +# --build-arg SUPABASE_POSTGRES_TAG=15.8.1.085 \ +# --build-arg CLOUDSYNC_VERSION=1.0.1 \ +# -f docker/postgresql/Dockerfile.supabase.release \ +# -t my-cloudsync-supabase-postgres . +# +# Or pull the pre-built image from Docker Hub: +# docker pull sqlitecloud/sqlite-sync-supabase:15 +# + +ARG SUPABASE_POSTGRES_TAG=17.6.1.071 +FROM public.ecr.aws/supabase/postgres:${SUPABASE_POSTGRES_TAG} + +ARG CLOUDSYNC_VERSION +ARG TARGETARCH + +ENV CLOUDSYNC_PG_CONFIG=/root/.nix-profile/bin/pg_config + +# Download pre-compiled extension and install into Supabase's Nix layout +RUN case "${TARGETARCH}" in \ + amd64) ARCH="x86_64" ;; \ + arm64) ARCH="arm64" ;; \ + *) echo "Unsupported architecture: ${TARGETARCH}" && exit 1 ;; \ + esac && \ + # Derive PG major version from pg_config + PG_MAJOR=$(${CLOUDSYNC_PG_CONFIG} --version | sed 's/[^0-9]*//' | cut -d. -f1) && \ + apt-get update && apt-get install -y --no-install-recommends curl ca-certificates && \ + ASSET="cloudsync-postgresql${PG_MAJOR}-linux-${ARCH}-${CLOUDSYNC_VERSION}.tar.gz" && \ + URL="https://github.com/sqliteai/sqlite-sync/releases/download/${CLOUDSYNC_VERSION}/${ASSET}" && \ + echo "Downloading ${URL}" && \ + curl -fSL "${URL}" -o /tmp/cloudsync.tar.gz && \ + mkdir -p /tmp/cloudsync && \ + tar -xzf /tmp/cloudsync.tar.gz -C /tmp/cloudsync && \ + # Resolve Supabase's Nix library path + PKGLIBDIR="$(${CLOUDSYNC_PG_CONFIG} --pkglibdir)" && \ + NIX_PGLIBDIR="$(grep -E '^export NIX_PGLIBDIR' /usr/bin/postgres | sed -E "s/.*'([^']+)'.*/\1/" || true)" && \ + if [ -n "$NIX_PGLIBDIR" ]; then PKGLIBDIR="$NIX_PGLIBDIR"; fi && \ + SHAREDIR_PGCONFIG="$(${CLOUDSYNC_PG_CONFIG} --sharedir)" && \ + SHAREDIR_STD="/usr/share/postgresql" && \ + install -d "$PKGLIBDIR" "$SHAREDIR_PGCONFIG/extension" && \ + install -m 755 /tmp/cloudsync/cloudsync.so "$PKGLIBDIR/" && \ + install -m 644 /tmp/cloudsync/cloudsync--1.0.sql 
/tmp/cloudsync/cloudsync.control "$SHAREDIR_PGCONFIG/extension/" && \ + if [ "$SHAREDIR_STD" != "$SHAREDIR_PGCONFIG" ]; then \ + install -d "$SHAREDIR_STD/extension" && \ + install -m 644 /tmp/cloudsync/cloudsync--1.0.sql /tmp/cloudsync/cloudsync.control "$SHAREDIR_STD/extension/"; \ + fi && \ + rm -rf /tmp/cloudsync /tmp/cloudsync.tar.gz && \ + apt-get purge -y curl && apt-get autoremove -y && rm -rf /var/lib/apt/lists/* + +# Verify installation +RUN NIX_PGLIBDIR="$(grep -E '^export NIX_PGLIBDIR' /usr/bin/postgres | sed -E "s/.*'([^']+)'.*/\1/" || true)" && \ + echo "Verifying CloudSync extension installation..." && \ + if [ -n "$NIX_PGLIBDIR" ]; then \ + ls -la "$NIX_PGLIBDIR/cloudsync.so"; \ + else \ + ls -la "$(${CLOUDSYNC_PG_CONFIG} --pkglibdir)/cloudsync.so"; \ + fi && \ + ls -la "$(${CLOUDSYNC_PG_CONFIG} --sharedir)/extension/cloudsync"* && \ + if [ -d "/usr/share/postgresql/extension" ]; then \ + ls -la /usr/share/postgresql/extension/cloudsync*; \ + fi && \ + echo "CloudSync extension installed successfully" + +EXPOSE 5432 + +WORKDIR / + +LABEL org.sqliteai.cloudsync.description="Supabase PostgreSQL with CloudSync CRDT extension" \ + org.opencontainers.image.source="https://github.com/sqliteai/sqlite-sync" diff --git a/docs/ROW-LEVEL-SECURITY.md b/docs/ROW-LEVEL-SECURITY.md index 51989d7..0d81de0 100644 --- a/docs/ROW-LEVEL-SECURITY.md +++ b/docs/ROW-LEVEL-SECURITY.md @@ -44,6 +44,6 @@ CREATE TABLE users ( ); ``` -For more schema guidelines, see [Database Schema Recommendations](./SCHEMA.md). +For more schema guidelines, see [Database Schema Recommendations](./schema.md). For full RLS documentation, see the [SQLite Cloud RLS documentation](https://docs.sqlitecloud.io/docs/rls). 
diff --git a/docs/postgresql/grafana-dashboard.json b/docs/internal/grafana-dashboard.json similarity index 100% rename from docs/postgresql/grafana-dashboard.json rename to docs/internal/grafana-dashboard.json diff --git a/docs/Network.md b/docs/internal/network.md similarity index 100% rename from docs/Network.md rename to docs/internal/network.md diff --git a/docs/PriKey.md b/docs/internal/pri-key.md similarity index 100% rename from docs/PriKey.md rename to docs/internal/pri-key.md diff --git a/docs/RowID.md b/docs/internal/row-id.md similarity index 100% rename from docs/RowID.md rename to docs/internal/row-id.md diff --git a/docs/postgresql/README.md b/docs/postgresql/README.md index 829d0eb..e457b3a 100644 --- a/docs/postgresql/README.md +++ b/docs/postgresql/README.md @@ -67,7 +67,7 @@ Additional CRDTs can be implemented if needed, though LWW covers most real-world ### Observability -* Metrics dashboard available in [grafana-dashboard.json](grafana-dashboard.json) +* Metrics dashboard available in [grafana-dashboard.json](../internal/grafana-dashboard.json) # Postgres Sync @@ -97,6 +97,6 @@ The PostgreSQL integration is actively evolving. Current limitations include: * **Beta Status**: While extensively tested, the PostgreSQL sync stack should currently be considered **beta software**. Please report any issues; we are committed to resolving them quickly. 
# Next -* [CLIENT](CLIENT.md) installation and setup -* [SUPABASE](SUPABASE.md) configuration and setup -* [SPORT-TRACKER APP](SPORT_APP_README_SUPABASE.md) demo web app based on SQLite Sync WASM \ No newline at end of file +* [CLIENT](client.md) installation and setup +* [SUPABASE](integrations/supabase.md) configuration and setup +* [SPORT-TRACKER APP](examples/sport-app-supabase.md) demo web app based on SQLite Sync WASM \ No newline at end of file diff --git a/docs/postgresql/SUPABASE_FLYIO.md b/docs/postgresql/SUPABASE_FLYIO.md deleted file mode 100644 index 5fe8eb1..0000000 --- a/docs/postgresql/SUPABASE_FLYIO.md +++ /dev/null @@ -1,89 +0,0 @@ -# Deploying CloudSync to Self-Hosted Supabase on Fly.io - -## Overview - -Build a custom Supabase Postgres image with CloudSync baked in, push it to a container registry, and configure your Fly.io Supabase deployment to use it. - -## Step-by-step - -### 1. Build the custom Supabase Postgres image - -The project includes `docker/postgresql/Dockerfile.supabase` which builds CloudSync into the Supabase Postgres base image. Match the tag to the PG version your Fly.io Supabase uses: - -```bash -# Build with the default Supabase Postgres tag (17.6.1.071) -make postgres-supabase-build - -# Or specify the exact tag your Fly deployment uses: -make postgres-supabase-build SUPABASE_POSTGRES_TAG=17.6.1.071 -``` - -This produces a Docker image tagged as `public.ecr.aws/supabase/postgres:` locally. - -### 2. 
Tag and push to a container registry - -You need a registry accessible from Fly.io (Docker Hub, GitHub Container Registry, or Fly's own registry): - -```bash -# Tag for your registry -docker tag public.ecr.aws/supabase/postgres:17.6.1.071 \ - registry.fly.io//postgres-cloudsync:17.6.1.071 - -# Push -docker push registry.fly.io//postgres-cloudsync:17.6.1.071 -``` - -Or use Docker Hub / GHCR: - -```bash -docker tag public.ecr.aws/supabase/postgres:17.6.1.071 \ - ghcr.io//supabase-postgres-cloudsync:17.6.1.071 -docker push ghcr.io//supabase-postgres-cloudsync:17.6.1.071 -``` - -### 3. Update your Fly.io Supabase deployment - -In your Fly.io Supabase config (`fly.toml` or however you deployed the DB service), point the Postgres image to your custom image: - -```toml -[build] - image = "ghcr.io//supabase-postgres-cloudsync:17.6.1.071" -``` - -Then redeploy: - -```bash -fly deploy --app -``` - -### 4. Enable the extension - -Connect to your Fly Postgres instance and enable CloudSync: - -```bash -fly postgres connect --app -``` - -```sql -CREATE EXTENSION cloudsync; -SELECT cloudsync_version(); - --- Initialize sync on a table -SELECT cloudsync_init('my_table'); -``` - -### 5. If using supabase-docker (docker-compose) - -If your Fly.io Supabase is based on the [supabase/supabase](https://github.com/supabase/supabase) docker-compose setup, update the `db` service image in `docker-compose.yml`: - -```yaml -services: - db: - image: ghcr.io//supabase-postgres-cloudsync:17.6.1.071 -``` - -## Important notes - -- **Match the Postgres major version** — the Dockerfile defaults to PG 17 (`SUPABASE_POSTGRES_TAG=17.6.1.071`). Check what your Fly deployment runs with `SHOW server_version;`. 
-- **ARM vs x86** — if your Fly machines are ARM (`fly.toml` with `vm.size` using arm), build the image for `linux/arm64`: `docker buildx build --platform linux/arm64 ...` -- **RLS considerations** — when using Supabase Auth with Row-Level Security, use a JWT `token` (not `apikey`) when calling sync functions. diff --git a/docs/postgresql/deployment/postgres-flyio.md b/docs/postgresql/deployment/postgres-flyio.md new file mode 100644 index 0000000..e14253c --- /dev/null +++ b/docs/postgresql/deployment/postgres-flyio.md @@ -0,0 +1,809 @@ +# Self-Hosting PostgreSQL on Fly.io with CloudSync Extension + +This guide deploys a standalone PostgreSQL instance with CloudSync on Fly.io, plus a minimal JWT auth server for token generation. No Supabase required. + +By the end you will have: + +- A Fly.io VM running PostgreSQL with the CloudSync CRDT extension +- A JWT auth server (Node.js) — choose between: + - **HS256 (shared secret)** — simplest setup, signs tokens with a base64-encoded secret + - **RS256 (JWKS)** — production-ready, signs with a private key and exposes a public JWKS endpoint +- A custom Postgres image published to Docker Hub + +## Prerequisites + +| Tool | Purpose | Install | +|------|---------|---------| +| [Docker Desktop](https://www.docker.com/products/docker-desktop/) | Build the custom Postgres image | `brew install --cask docker` | +| [Fly CLI (`flyctl`)](https://fly.io/docs/flyctl/install/) | Provision and manage Fly.io machines | `brew install flyctl` | +| [Git](https://git-scm.com/) | Clone repositories | `brew install git` | +| [Docker Hub](https://hub.docker.com/) account | Host your custom Postgres image | Free signup | + +You also need a [Fly.io account](https://fly.io/app/sign-up). 
+ +### Fly.io VM requirements + +Since this is just PostgreSQL + a small auth server (not a full Supabase stack), resource requirements are much lower: + +| Resource | Minimum | Recommended | +|----------|---------|-------------| +| RAM | 1 GB | 2 GB | +| CPU | 1 core | 2 cores | +| Disk | 4 GB SSD | 10 GB+ | + +--- + +## Step 1: Initialize git submodules + +```bash +cd /path/to/sqlite-sync-dev +git submodule update --init --recursive +``` + +Without this, the build fails with `fractional_indexing.h: No such file or directory`. + +--- + +## Step 2: Build the custom Postgres image + +From the sqlite-sync-dev repo root: + +```bash +make postgres-docker-build +``` + +This builds `postgres:17` with CloudSync pre-installed using `docker/postgresql/Dockerfile`. + +Verify: + +```bash +docker images | grep sqlite-sync-pg +``` + +--- + +## Step 3: Build for amd64 and push to Docker Hub + +If you're on Apple Silicon, you must cross-build for Fly.io's x86 VMs: + +```bash +docker build --platform linux/amd64 \ + -f docker/postgresql/Dockerfile \ + -t /postgres-cloudsync:17 \ + . + +docker push /postgres-cloudsync:17 +``` + +> On Intel Mac or Linux x86, the default build is already amd64. + +--- + +## Step 4: Provision a Fly.io VM + +### 4a. Log in and create the app + +```bash +fly auth login +fly apps create +``` + +### 4b. Create a persistent volume + +```bash +fly volumes create pg_data --app --region --size 4 +``` + +### 4c. Create a Fly Machine + +```bash +fly machine run ubuntu:24.04 \ + --app \ + --region \ + --vm-size shared-cpu-2x \ + --vm-memory 2048 \ + --volume pg_data:/data \ + --name postgres-vm \ + -- sleep inf +``` + +### 4d. Allocate a public IP + +```bash +fly ips allocate-v4 --shared --app +fly ips allocate-v6 --app +``` + +--- + +## Step 5: Set up Docker on the VM + +### 5a. SSH into the machine + +```bash +fly ssh console --app +``` + +### 5b. 
Install Docker Engine + +```bash +apt-get update +apt-get install -y ca-certificates curl gnupg + +install -m 0755 -d /etc/apt/keyrings +curl -fsSL https://download.docker.com/linux/ubuntu/gpg | gpg --dearmor -o /etc/apt/keyrings/docker.gpg +chmod a+r /etc/apt/keyrings/docker.gpg + +echo "deb [arch=$(dpkg --print-architecture) signed-by=/etc/apt/keyrings/docker.gpg] \ + https://download.docker.com/linux/ubuntu $(. /etc/os-release && echo $VERSION_CODENAME) stable" \ + > /etc/apt/sources.list.d/docker.list + +apt-get update +apt-get install -y docker-ce docker-ce-cli containerd.io docker-compose-plugin +``` + +### 5c. Configure Docker storage driver + +```bash +apt-get install -y fuse-overlayfs +mkdir -p /etc/docker +echo '{"storage-driver":"fuse-overlayfs","data-root":"/data/docker"}' > /etc/docker/daemon.json +``` + +### 5d. Start Docker + +```bash +dockerd & +# Wait for "API listen on /var/run/docker.sock" +``` + +--- + +## Step 6: Create the Docker Compose stack + +Create the project directory: + +```bash +mkdir -p /data/cloudsync-postgres +cd /data/cloudsync-postgres +``` + +### 6a. Generate a JWT secret + +```bash +JWT_SECRET=$(openssl rand -base64 32) +echo "JWT_SECRET=$JWT_SECRET" > .env +echo "POSTGRES_PASSWORD=$(openssl rand -base64 16)" >> .env +echo "Your JWT secret: $JWT_SECRET" +echo "Save this secret — you'll need it for CloudSync server configuration." +``` + +### 6b. 
Create docker-compose.yml + +```bash +cat > docker-compose.yml << 'EOF' +services: + db: + image: /postgres-cloudsync:17 + container_name: cloudsync-postgres + environment: + POSTGRES_USER: postgres + POSTGRES_PASSWORD: ${POSTGRES_PASSWORD} + POSTGRES_DB: postgres + ports: + - "5432:5432" + volumes: + - pg_data:/var/lib/postgresql/data + - ./init.sql:/docker-entrypoint-initdb.d/init.sql:ro + healthcheck: + test: ["CMD-SHELL", "pg_isready -U postgres"] + interval: 10s + timeout: 5s + retries: 5 + restart: unless-stopped + + auth: + image: node:22-alpine + container_name: cloudsync-auth + environment: + JWT_SECRET: ${JWT_SECRET} + PORT: 3001 + ports: + - "3001:3001" + volumes: + - ./auth-server:/app + working_dir: /app + command: ["node", "server.js"] + restart: unless-stopped + +volumes: + pg_data: +EOF +``` + +### 6c. Create the Postgres init script + +```bash +cat > init.sql << 'EOF' +CREATE EXTENSION IF NOT EXISTS cloudsync; +EOF +``` + +### 6d. Create the JWT auth server + +```bash +mkdir -p auth-server +``` + +Create the package file: + +```bash +cat > auth-server/package.json << 'EOF' +{ + "name": "cloudsync-auth", + "version": "1.0.0", + "private": true, + "dependencies": { + "jsonwebtoken": "^9.0.0" + } +} +EOF +``` + +Create the auth server: + +```bash +cat > auth-server/server.js << 'AUTHEOF' +const http = require("http"); +const jwt = require("jsonwebtoken"); + +const PORT = process.env.PORT || 3001; +const JWT_SECRET = process.env.JWT_SECRET; + +if (!JWT_SECRET) { + console.error("JWT_SECRET environment variable is required"); + process.exit(1); +} + +function parseBody(req) { + return new Promise((resolve, reject) => { + let data = ""; + req.on("data", (chunk) => (data += chunk)); + req.on("end", () => { + try { resolve(JSON.parse(data)); } + catch { reject(new Error("Invalid JSON")); } + }); + req.on("error", reject); + }); +} + +function respond(res, status, body) { + res.writeHead(status, { "Content-Type": "application/json" }); + 
res.end(JSON.stringify(body)); +} + +const server = http.createServer(async (req, res) => { + // Health check + if (req.method === "GET" && req.url === "/healthz") { + return respond(res, 200, { status: "ok" }); + } + + // Generate token + // POST /token { "sub": "user-id", "role": "authenticated", "expiresIn": "24h" } + if (req.method === "POST" && req.url === "/token") { + try { + const body = await parseBody(req); + const sub = body.sub || "anonymous"; + const role = body.role || "authenticated"; + const expiresIn = body.expiresIn || "24h"; + const claims = body.claims || {}; + + const token = jwt.sign( + { sub, role, aud: "authenticated", ...claims }, + JWT_SECRET, + { expiresIn, algorithm: "HS256" } + ); + + return respond(res, 200, { token, expiresIn }); + } catch (err) { + return respond(res, 400, { error: err.message }); + } + } + + respond(res, 404, { error: "Not found" }); +}); + +server.listen(PORT, () => { + console.log("Auth server listening on port " + PORT); +}); +AUTHEOF +``` + +Install dependencies: + +```bash +docker run --rm -v $(pwd)/auth-server:/app -w /app node:22-alpine npm install +``` + +### 6e. (Optional) Create the JWKS auth server + +If you need RS256/JWKS-based authentication instead of (or in addition to) the shared secret approach, create a second auth server that generates an RSA key pair on startup and exposes a JWKS endpoint. 
+ +```bash +mkdir -p auth-server-jwks + +cat > auth-server-jwks/package.json << 'EOF' +{ + "name": "cloudsync-auth-jwks", + "version": "1.0.0", + "private": true, + "dependencies": { + "jsonwebtoken": "^9.0.0", + "jose": "^5.0.0" + } +} +EOF + +cat > auth-server-jwks/server.js << 'EOF' +const http = require("http"); +const jwt = require("jsonwebtoken"); +const crypto = require("crypto"); +const { exportJWK } = require("jose"); + +const PORT = process.env.PORT || 3002; +const ISSUER = process.env.ISSUER || "cloudsync-auth-jwks"; +const KID = "cloudsync-key-1"; + +let privateKey, publicKey, jwksResponse; + +async function init() { + const pair = crypto.generateKeyPairSync("rsa", { + modulusLength: 2048, + publicKeyEncoding: { type: "spki", format: "pem" }, + privateKeyEncoding: { type: "pkcs8", format: "pem" }, + }); + privateKey = pair.privateKey; + publicKey = pair.publicKey; + + const publicKeyObject = crypto.createPublicKey(publicKey); + const jwk = await exportJWK(publicKeyObject); + jwk.kid = KID; + jwk.alg = "RS256"; + jwk.use = "sig"; + jwksResponse = JSON.stringify({ keys: [jwk] }); + + console.log("RSA key pair generated (kid: " + KID + ")"); +} + +function parseBody(req) { + return new Promise((resolve, reject) => { + let data = ""; + req.on("data", (chunk) => (data += chunk)); + req.on("end", () => { + try { resolve(JSON.parse(data)); } + catch { reject(new Error("Invalid JSON")); } + }); + req.on("error", reject); + }); +} + +function respond(res, status, body) { + res.writeHead(status, { "Content-Type": "application/json" }); + res.end(typeof body === "string" ? 
body : JSON.stringify(body)); +} + +const server = http.createServer(async (req, res) => { + if (req.method === "GET" && req.url === "/healthz") { + return respond(res, 200, { status: "ok" }); + } + + // JWKS endpoint — CloudSync server fetches this to verify tokens + if (req.method === "GET" && req.url === "/.well-known/jwks.json") { + res.writeHead(200, { "Content-Type": "application/json" }); + return res.end(jwksResponse); + } + + // POST /token { "sub": "user-id", "role": "authenticated", "expiresIn": "24h" } + if (req.method === "POST" && req.url === "/token") { + try { + const body = await parseBody(req); + const sub = body.sub || "anonymous"; + const role = body.role || "authenticated"; + const expiresIn = body.expiresIn || "24h"; + const claims = body.claims || {}; + + const token = jwt.sign( + { sub, role, aud: "authenticated", iss: ISSUER, ...claims }, + privateKey, + { expiresIn, algorithm: "RS256", keyid: KID } + ); + + return respond(res, 200, { token, expiresIn }); + } catch (err) { + return respond(res, 400, { error: err.message }); + } + } + + respond(res, 404, { error: "Not found" }); +}); + +init().then(() => { + server.listen(PORT, () => { + console.log("JWKS Auth server listening on port " + PORT); + }); +}); +EOF + +docker run --rm -v $(pwd)/auth-server-jwks:/app -w /app node:22-alpine npm install +``` + +Add the JWKS auth service to `docker-compose.yml`: + +```yaml + auth-jwks: + image: node:22-alpine + container_name: cloudsync-auth-jwks + environment: + PORT: 3002 + ISSUER: cloudsync-auth-jwks + ports: + - "3002:3002" + volumes: + - ./auth-server-jwks:/app + working_dir: /app + command: ["node", "server.js"] + restart: unless-stopped +``` + +> **Note:** The JWKS server generates a new RSA key pair each time it starts. For production, persist the key pair to a volume so tokens remain valid across restarts. 
+ +--- + +## Step 7: Start the stack + +```bash +cd /data/cloudsync-postgres +docker compose up -d +``` + +Verify: + +```bash +docker compose ps + +# Test Postgres +docker compose exec db psql -U postgres -c "SELECT cloudsync_version();" + +# Test HS256 auth server +curl http://localhost:3001/healthz + +# Test JWKS auth server (if enabled) +curl http://localhost:3002/healthz +curl http://localhost:3002/.well-known/jwks.json +``` + +--- + +## Step 8: Generate a JWT token + +**HS256 (shared secret):** + +```bash +curl -X POST http://localhost:3001/token \ + -H "Content-Type: application/json" \ + -d '{"sub": "user-1", "role": "authenticated"}' +``` + +**RS256 (JWKS):** + +```bash +curl -X POST http://localhost:3002/token \ + -H "Content-Type: application/json" \ + -d '{"sub": "user-1", "role": "authenticated"}' +``` + +Response (both): + +```json +{"token":"eyJhbG...","expiresIn":"24h"} +``` + +--- + +## Step 9: Register with CloudSync server + +```bash +export CLOUDSYNC_URL="https://your-cloudsync-server.fly.dev" +export ORG_API_KEY="" + +# Get the Postgres password from .env +source /data/cloudsync-postgres/.env + +# Connection string (same Fly org — use .internal network) +export CONNECTION_STRING="postgres://postgres:$POSTGRES_PASSWORD@.internal:5432/postgres" + +# Or via fly proxy from local machine: +# fly proxy 5432:5432 -a +# export CONNECTION_STRING="postgres://postgres:$POSTGRES_PASSWORD@localhost:5432/postgres" +``` + +Register the database: + +```bash +curl --request POST "$CLOUDSYNC_URL/v1/databases" \ + --header "Authorization: Bearer $ORG_API_KEY" \ + --header "Content-Type: application/json" \ + --data '{ + "label": "Fly.io Postgres", + "connectionString": "'"$CONNECTION_STRING"'", + "provider": "postgres", + "projectId": "cloudsync-postgres-flyio", + "databaseName": "postgres" + }' +``` + +Save the returned `managedDatabaseId`: + +```bash +export MANAGED_DATABASE_ID="" +``` + +--- + +## Step 10: Test CloudSync sync + +### 10a. 
Create and enable a test table + +```bash +# On the Fly VM +docker compose exec db psql -U postgres -c " +CREATE TABLE IF NOT EXISTS todos ( + id TEXT PRIMARY KEY DEFAULT cloudsync_uuid(), + title TEXT NOT NULL DEFAULT '', + done BOOLEAN DEFAULT false +); +SELECT cloudsync_init('todos'); +" + +# Enable sync via CloudSync API +curl --request POST "$CLOUDSYNC_URL/v1/databases/$MANAGED_DATABASE_ID/cloudsync/enable" \ + --header "Authorization: Bearer $ORG_API_KEY" \ + --header "Content-Type: application/json" \ + --data '{"tables":["todos"]}' +``` + +### 10b. Generate a token and sync from SQLite + +```bash +# Get a JWT token from the auth server +TOKEN=$(curl -s -X POST http://localhost:3001/token \ + -H "Content-Type: application/json" \ + -d '{"sub": "user-1"}' | python3 -c "import sys,json; print(json.load(sys.stdin)['token'])") +``` + +In a SQLite client: + +```sql +.load path/to/cloudsync + +CREATE TABLE todos ( + id TEXT PRIMARY KEY DEFAULT (cloudsync_uuid()), + title TEXT NOT NULL DEFAULT '', + done BOOLEAN DEFAULT false +); +SELECT cloudsync_init('todos'); + +SELECT cloudsync_network_init(''); +SELECT cloudsync_network_set_token(''); + +INSERT INTO todos (title) VALUES ('Test from SQLite'); +SELECT cloudsync_network_sync(500, 5); +``` + +Verify on Postgres: + +```bash +docker compose exec db psql -U postgres -c "SELECT * FROM todos;" +``` + +--- + +## Step 11: CloudSync server JWT configuration + +The CloudSync server needs to validate tokens from your auth server. Configuration depends on which auth method you chose. + +### Option A: HS256 (shared secret) + +In the CloudSync dashboard, go to your PostgreSQL project → **Configuration** → **Edit connection settings**: +- Under **JWT secret**, enter your `JWT_SECRET` value from `.env` +- Click **Save** + +Both the auth server and CloudSync must use the same raw secret string (not base64-decoded). + +### Option B: RS256 (JWKS) + +Configure the JWKS auth server and CloudSync to use asymmetric key verification. 
+ +**1. Update docker-compose.yml - JWKS auth server ISSUER:** + +```yaml + auth-jwks: + environment: + ISSUER: http://.internal:3002 +``` + +The issuer is the **base URL** (CloudSync automatically appends `/.well-known/jwks.json`). + +**2. Configure CloudSync to accept this issuer:** + +In the CloudSync dashboard for this PostgreSQL project: +- Go to **Configuration** tab → **Edit connection settings** +- Under **JWT allowed issuers**, enter: + ``` + http://.internal:3002 + ``` + +CloudSync will: +1. Receive JWT tokens with `iss: http://.internal:3002` +2. Validate the issuer matches the allowed list +3. Fetch the public key from `http://.internal:3002/.well-known/jwks.json` +4. Verify the token signature + +This is how production auth systems (Auth0, Supabase, Firebase) work — no shared secrets needed. + +--- + +## Access your services + +| Service | URL | +|---------|-----| +| **PostgreSQL** | `postgres://postgres:@.internal:5432/postgres` | +| **Auth Server (HS256)** | `http://.internal:3001` | +| **Auth Server (JWKS)** | `http://.internal:3002` | +| **JWKS Endpoint** | `http://.internal:3002/.well-known/jwks.json` | + +From your local machine, use `fly proxy`: + +```bash +fly proxy 5432:5432 -a # Postgres +fly proxy 3001:3001 -a # Auth server (HS256) +fly proxy 3002:3002 -a # Auth server (JWKS) +``` + +--- + +## Reference: CloudSync Configuration + +After deployment, use these values to configure CloudSync dashboard: + +### Database Connection + +``` +postgresql://postgres:@.internal:5432/postgres +``` + +Replace: +- ``: from `.env` file +- ``: your Fly.io app name + +### JWT Secret (HS256) + +For simple/development setups using shared secrets: + +```env +JWT_SECRET= +``` + +Enter this in CloudSync dashboard → **Configuration** → **JWT secret** + +### JWT Issuer (RS256 with JWKS) + +For production setups using asymmetric keys: + +``` +http://.internal:3002 +``` + +Enter this in CloudSync dashboard → **Configuration** → **JWT allowed issuers** + +CloudSync will 
automatically fetch the public key from: +``` +http://.internal:3002/.well-known/jwks.json +``` + +--- + +## Maintenance + +### Startup script (survives VM restarts) + +Fly VM root filesystem resets on stop/start — only `/data` persists. Create a startup script: + +```bash +cat > /data/startup.sh << 'SCRIPT' +#!/bin/bash +set -e + +echo "=== Installing Docker ===" +apt-get update && apt-get install -y ca-certificates curl gnupg fuse-overlayfs + +install -m 0755 -d /etc/apt/keyrings +curl -fsSL https://download.docker.com/linux/ubuntu/gpg | gpg --dearmor -o /etc/apt/keyrings/docker.gpg +chmod a+r /etc/apt/keyrings/docker.gpg + +echo "deb [arch=$(dpkg --print-architecture) signed-by=/etc/apt/keyrings/docker.gpg] \ + https://download.docker.com/linux/ubuntu $(. /etc/os-release && echo $VERSION_CODENAME) stable" \ + > /etc/apt/sources.list.d/docker.list + +apt-get update && apt-get install -y docker-ce docker-ce-cli containerd.io docker-compose-plugin + +echo "=== Configuring Docker ===" +mkdir -p /etc/docker +echo '{"storage-driver":"fuse-overlayfs","data-root":"/data/docker"}' > /etc/docker/daemon.json + +echo "=== Starting Docker ===" +dockerd > /data/dockerd.log 2>&1 & +until docker info > /dev/null 2>&1; do sleep 1; done +echo "Docker is ready!" + +echo "=== Starting CloudSync Postgres ===" +cd /data/cloudsync-postgres +docker compose up -d + +echo "=== Done! ===" +SCRIPT +chmod +x /data/startup.sh +``` + +After any VM restart: + +```bash +fly ssh console --app +/data/startup.sh +``` + +### Update CloudSync extension + +On your local machine: + +```bash +cd /path/to/sqlite-sync-dev +git pull && git submodule update --init --recursive +docker build --platform linux/amd64 \ + -f docker/postgresql/Dockerfile \ + -t /postgres-cloudsync:17 \ + . 
+docker push /postgres-cloudsync:17 +``` + +On the Fly VM: + +```bash +cd /data/cloudsync-postgres +docker compose pull db +docker compose up -d db +``` + +### View logs + +```bash +docker compose logs -f # All services +docker compose logs -f db # Postgres only +docker compose logs -f auth # Auth server only +``` + +--- + +## Troubleshooting + +| Problem | Solution | +|---------|----------| +| `fractional_indexing.h: No such file or directory` | Run `git submodule update --init --recursive` before building | +| `cloudsync_version()` not found | Init scripts only run on first start. Run `CREATE EXTENSION IF NOT EXISTS cloudsync;` manually | +| Auth server won't start | Check `docker compose logs auth`. Ensure `npm install` was run in `auth-server/` | +| Token verification fails (HS256) | Ensure `JWT_SECRET` matches exactly — CloudSync uses the raw string, not base64-decoded | +| Token verification fails (JWKS) | Ensure CloudSync can reach the JWKS endpoint and `JWT_ISSUER` matches the `ISSUER` env var | +| JWKS keys lost after restart | The JWKS server generates new keys on each start. For production, persist keys to a volume | +| Docker commands not found after VM restart | Run `/data/startup.sh` — Fly VM root filesystem resets on stop/start | +| `fuse-overlayfs` not working | Install it: `apt-get install -y fuse-overlayfs` | +| Can't connect to Postgres from outside Fly | Use `fly proxy 5432:5432 -a ` | diff --git a/docs/postgresql/deployment/supabase-flyio.md b/docs/postgresql/deployment/supabase-flyio.md new file mode 100644 index 0000000..7475160 --- /dev/null +++ b/docs/postgresql/deployment/supabase-flyio.md @@ -0,0 +1,1000 @@ +# Self-Hosting Supabase on Fly.io with CloudSync Extension + +This guide walks you through deploying a full self-hosted Supabase stack on a Fly.io VM, with the CloudSync PostgreSQL extension pre-installed. 
By the end you will have: + +- A Fly.io VM running all 13 Supabase services via Docker Compose +- PostgreSQL with the CloudSync CRDT extension baked in +- Supabase Studio dashboard accessible over HTTPS +- A custom Postgres image published to Docker Hub + +## Prerequisites + +Install these on your **local machine** before starting: + +| Tool | Purpose | Install | +|------|---------|---------| +| [Docker Desktop](https://www.docker.com/products/docker-desktop/) | Build the custom Postgres image | See [Installing Docker Desktop](#installing-docker-desktop) below | +| [Fly CLI (`flyctl`)](https://fly.io/docs/flyctl/install/) | Provision and manage Fly.io machines | `brew install flyctl` (macOS) or `curl -L https://fly.io/install.sh \| sh` | +| [Git](https://git-scm.com/) | Clone repositories | `brew install git` (macOS) | +| [Docker Hub](https://hub.docker.com/) account | Host your custom Postgres image | Free signup at hub.docker.com | + +You also need a [Fly.io account](https://fly.io/app/sign-up). A credit card is required even for free tier. + +### Installing Docker Desktop + +Docker Desktop is the application that lets you build and run container images on your Mac. + +1. **Download** from https://www.docker.com/products/docker-desktop/ — pick **Apple chip** (M1/M2/M3/M4) or **Intel chip** depending on your Mac. + + > Not sure which you have? Click the Apple menu () → **About This Mac**. It will say either "Apple M1/M2/M3/M4" or "Intel". + +2. **Install**: Open the downloaded `.dmg` file and drag Docker into your Applications folder. + +3. **Launch**: Open Docker from Applications (or Spotlight: Cmd+Space → type "Docker"). It will ask for your password to install system components — that's normal. + +4. **Wait**: A whale icon appears in your menu bar. Wait until it says "Docker Desktop is running" (the whale stops animating). + +5. 
**Verify** in Terminal: + ```bash + docker --version + # Should output: Docker version 27.x.x or similar + ``` + +### Setting up Docker Hub + +Docker Hub is a free cloud registry where you'll upload your custom Postgres image so the Fly.io server can download it. + +1. **Sign up** at https://hub.docker.com/signup — pick a username. This becomes your image prefix (e.g., `myusername/supabase-postgres-cloudsync`). + +2. **Log in from Terminal**: + ```bash + docker login + ``` + Enter your Docker Hub username and password. You should see "Login Succeeded". + +### Fly.io VM requirements + +Supabase runs 13 services simultaneously (Postgres, Auth, PostgREST, Realtime, Studio, Kong, Storage, etc.), which is why it needs more resources than a typical single app. + +| Resource | Minimum | Recommended | +|----------|---------|-------------| +| RAM | 4 GB | 8 GB+ | +| CPU | 2 cores | 4 cores | +| Disk | 50 GB SSD | 80 GB+ | + +--- + +## Step 1: Initialize git submodules + +The CloudSync extension depends on the [fractional-indexing](https://github.com/sqliteai/fractional-indexing) library, which is included as a git submodule. If you haven't done this already, initialize it: + +```bash +cd /path/to/sqlite-sync-dev +git submodule update --init --recursive +``` + +Without this, the build will fail with `fractional_indexing.h: No such file or directory`. + +--- + +## Step 2: Build the custom Supabase Postgres image + +> **Important — match the Postgres version!** Check which Postgres version the Supabase docker-compose uses by looking at the `db` service `image` tag in `docker-compose.yml` (e.g., `supabase/postgres:15.8.1.085` means PG 15). You must build your custom image with the **same tag**. Using the wrong version will cause init script failures. + +The `make postgres-supabase-build` command does the following: + +1. 
**Pulls the official Supabase Postgres base image** (e.g., `public.ecr.aws/supabase/postgres:15.8.1.085`) — this is Supabase's standard PostgreSQL image that ships with ~30 extensions pre-installed (PostGIS, pgvector, etc.) +2. **Runs a multi-stage Docker build** using `docker/postgresql/Dockerfile.supabase`: + - **Stage 1 (builder)**: Installs C build tools (`gcc`, `make`), copies the CloudSync source code (`src/`, `modules/`), and compiles `cloudsync.so` against Supabase's `pg_config` + - **Stage 2 (runtime)**: Starts from a clean Supabase Postgres image and copies in just three files: + - `cloudsync.so` — the compiled extension binary + - `cloudsync.control` — tells PostgreSQL the extension's name and version + - `cloudsync--1.0.sql` — the SQL that defines all CloudSync functions +3. **Tags the result** with the same name as the base image, so it's a drop-in replacement + +To find the correct tag, clone the Supabase repo and check: + +```bash +grep 'image: supabase/postgres:' supabase/docker/docker-compose.yml +# Example output: image: supabase/postgres:15.8.1.085 +# Use the version after the colon as your SUPABASE_POSTGRES_TAG +``` + +Run from the sqlite-sync-dev repo root: + +```bash +make postgres-supabase-build SUPABASE_POSTGRES_TAG=15.8.1.085 +``` + +Verify the image was built: + +```bash +docker images | grep supabase-postgres-cloudsync +# Should show: sqlcdamlayildiz/supabase-postgres-cloudsync 15.8.1.085 ... +``` + +Verify CloudSync is installed inside the image: + +```bash +docker run --rm sqlcdamlayildiz/supabase-postgres-cloudsync:15.8.1.085 \ + find / -name "cloudsync*" -type f 2>/dev/null +# Should list cloudsync.so, cloudsync.control, and cloudsync--1.0.sql +# in /nix/store/...-postgresql-and-plugins-15.8/ paths +``` + +--- + +## Step 3: Build for the correct architecture and push to Docker Hub + +The Fly.io VM needs to pull your custom image from a container registry. We use Docker Hub (free, no extra auth needed on the VM). 
+ +> **Important — architecture mismatch**: If you're building on an Apple Silicon Mac (M1/M2/M3/M4), `make postgres-supabase-build` produces an ARM image. Fly.io VMs run x86 (amd64) by default, so the ARM image won't work. You must build for the target architecture explicitly. + +First, pull the base image for amd64 (this ensures Docker has the correct platform variant cached): + +```bash +docker pull --platform linux/amd64 public.ecr.aws/supabase/postgres:15.8.1.085 +``` + +Then build for `linux/amd64` (x86, which is what Fly.io uses): + +```bash +docker build --platform linux/amd64 \ + --build-arg SUPABASE_POSTGRES_TAG=15.8.1.085 \ + -f docker/postgresql/Dockerfile.supabase \ + -t /supabase-postgres-cloudsync:15.8.1.085 \ + . +``` + +Push the image (you must be logged in: `docker login`): + +```bash +docker push /supabase-postgres-cloudsync:15.8.1.085 +``` + +> **Note**: `docker buildx build ... --push` may fail with ECR registry resolution errors. The two-step approach above (build then push) is more reliable. + +> If you're building on an Intel Mac or a Linux x86 machine, `make postgres-supabase-build` already produces an amd64 image, so you can simply tag and push: +> ```bash +> docker tag public.ecr.aws/supabase/postgres:15.8.1.085 \ +> /supabase-postgres-cloudsync:15.8.1.085 +> docker push /supabase-postgres-cloudsync:15.8.1.085 +> ``` + +--- + +## Step 4: Provision a Fly.io VM + +We use a Fly Machine as a plain Linux VM running Docker Compose — not Fly's container orchestration. + +### 4a. Log in to Fly + +```bash +fly auth login +``` + +This opens your browser to authenticate with your Fly.io account. + +### 4b. Create a Fly app + +```bash +fly apps create +``` + +### 4c. Create a persistent volume for data + +```bash +fly volumes create supabase_data --app --region --size 50 +``` + +Pick a [region](https://fly.io/docs/reference/regions/) close to you. You can see all available regions with `fly platform regions`. 
Common choices: + +| Code | Location | +|------|----------| +| `fra` | Frankfurt, Germany | +| `ams` | Amsterdam, Netherlands | +| `lhr` | London, UK | +| `ord` | Chicago, US | +| `iad` | Virginia, US | +| `sin` | Singapore | + +When prompted "Do you still want to use the volumes feature?", type `y` — the warning about multiple volumes is for high-availability production setups; a single volume is fine for testing. + +### 4d. Create a Fly Machine + +```bash +fly machine run ubuntu:24.04 \ + --app \ + --region \ + --vm-size shared-cpu-4x \ + --vm-memory 4096 \ + --volume supabase_data:/data \ + --name supabase-vm \ + -- sleep inf +``` + +The `-- sleep inf` at the end is important — it tells the VM to run an infinite sleep process so it stays alive. Without it, the Ubuntu container exits immediately and the machine stops. + +This creates an Ubuntu 24.04 VM with 4 CPU cores, 4 GB RAM, and your 50 GB volume mounted at `/data`. + +The VM size (`shared-cpu-4x` + 4096 MB) meets Supabase's minimum requirements. For a test/dev deployment this is fine. You can resize later with `fly machine update` if needed. + +### 4e. Allocate a public IP + +```bash +fly ips allocate-v4 --shared --app +fly ips allocate-v6 --app +``` + +Note the IPv4 address — you'll need it for `SUPABASE_PUBLIC_URL`. + +--- + +## Step 5: Set up Docker and Supabase on the VM + +### 5a. SSH into the machine + +```bash +fly ssh console --app +``` + +### 5b. Install Docker Engine + +```bash +apt-get update +apt-get install -y ca-certificates curl gnupg + +install -m 0755 -d /etc/apt/keyrings +curl -fsSL https://download.docker.com/linux/ubuntu/gpg | gpg --dearmor -o /etc/apt/keyrings/docker.gpg +chmod a+r /etc/apt/keyrings/docker.gpg + +echo "deb [arch=$(dpkg --print-architecture) signed-by=/etc/apt/keyrings/docker.gpg] \ + https://download.docker.com/linux/ubuntu $(. 
/etc/os-release && echo $VERSION_CODENAME) stable" \ + > /etc/apt/sources.list.d/docker.list + +apt-get update +apt-get install -y docker-ce docker-ce-cli containerd.io docker-compose-plugin + +# Verify +docker --version +docker compose version +``` + +### 5c. Configure Docker storage driver + +Docker's default `overlayfs` storage driver doesn't work inside a Fly VM (you'll get "failed to convert whiteout file: operation not permitted" errors). Use `fuse-overlayfs` instead — it works in unprivileged environments like Fly VMs and is much faster and more space-efficient than the `vfs` fallback. + +Install fuse-overlayfs: + +```bash +apt-get install -y fuse-overlayfs +``` + +Configure Docker to use it: + +```bash +mkdir -p /etc/docker +echo '{"storage-driver":"fuse-overlayfs","data-root":"/data/docker"}' > /etc/docker/daemon.json +``` + +> **Why not `vfs`?** The `vfs` driver copies every image layer in full instead of sharing them. This means 13 Supabase images can use 35GB+ of disk (vs ~5-8GB with fuse-overlayfs), and image pulls/extraction are extremely slow (30-60 minutes vs a few minutes). Avoid `vfs` unless fuse-overlayfs doesn't work. + +### 5d. Start the Docker daemon + +The Fly VM doesn't auto-start Docker. You need to start it manually: + +```bash +dockerd & +``` + +Wait for the `API listen on /var/run/docker.sock` message before running any Docker commands. This message means Docker is ready. + +> **Note**: If Docker is already running, you'll see a "process is still running" error. That's fine — it means Docker is already available. + +### 5e. Clone and set up Supabase + +```bash +cd /data +git clone --depth 1 https://github.com/supabase/supabase +mkdir -p supabase-docker +cp -rf supabase/docker/* supabase-docker/ +cp supabase/docker/.env.example supabase-docker/.env +cd supabase-docker +``` + +--- + +## Step 6: Configure secrets + +### 6a. Generate keys automatically + +```bash +sh ./utils/generate-keys.sh +``` + +Review the output. 
The script updates `.env` with generated `JWT_SECRET`, `ANON_KEY`, and `SERVICE_ROLE_KEY`. + +### 6a.1. Get the JWT secret later + +If you need the Supabase Auth JWT secret after setup, read the `JWT_SECRET` value from the same `.env` file used by Docker Compose: + +```bash +cd /data/supabase-docker +grep '^JWT_SECRET=' .env +``` + +That value is the secret GoTrue (Supabase Auth) uses to sign and verify access tokens. + +If you want to confirm what the running auth container sees, check the container environment: + +```bash +docker compose exec auth printenv GOTRUE_JWT_SECRET +``` + +Both commands should return the same value. If they do not, restart the stack after updating `.env`: + +```bash +docker compose up -d +``` + +### 6b. Edit `.env` manually for remaining values + +```bash +# Install a text editor if needed +apt-get install -y nano +nano .env +``` + +Set these values: + +```env +############ +# Required # +############ + +# Database — letters and numbers only, no special characters +POSTGRES_PASSWORD= + +# URLs — replace with your Fly app's public IP or domain +SUPABASE_PUBLIC_URL=http://:8000 +API_EXTERNAL_URL=http://:8000 +SITE_URL=http://localhost:3000 + +# Dashboard login credentials +DASHBOARD_USERNAME=supabase +DASHBOARD_PASSWORD= + +############ +# Secrets # +############ +# These should already be set by generate-keys.sh, but verify they exist: +# JWT_SECRET= +# ANON_KEY= +# SERVICE_ROLE_KEY= + +# Generate the rest if not already set: +# openssl rand -base64 48 → SECRET_KEY_BASE +# openssl rand -hex 16 → VAULT_ENC_KEY (must be exactly 32 chars) +# openssl rand -base64 24 → PG_META_CRYPTO_KEY +# openssl rand -base64 24 → LOGFLARE_PUBLIC_ACCESS_TOKEN +# openssl rand -base64 24 → LOGFLARE_PRIVATE_ACCESS_TOKEN +# openssl rand -hex 16 → S3_PROTOCOL_ACCESS_KEY_ID +# openssl rand -hex 32 → S3_PROTOCOL_ACCESS_KEY_SECRET +# openssl rand -hex 16 → MINIO_ROOT_PASSWORD +``` + +--- + +## Step 7: Swap in the CloudSync Postgres image + +Edit `docker-compose.yml` 
and find the `db` service (near the top). Replace the `image` line: + +```yaml +services: + db: + # BEFORE: image: supabase/postgres:${POSTGRES_VERSION} + # AFTER: + image: /supabase-postgres-cloudsync:15.8.1.085 +``` + +Use the exact image path you pushed in Step 3. + +### Add the CloudSync init script + +Create the init SQL: + +```bash +cat > volumes/db/cloudsync.sql << 'EOF' +CREATE EXTENSION IF NOT EXISTS cloudsync; +EOF +``` + +Add a volume mount to the `db` service in `docker-compose.yml`: + +```yaml +services: + db: + volumes: + # ... existing volume mounts ... + - ./volumes/db/cloudsync.sql:/docker-entrypoint-initdb.d/init-scripts/100-cloudsync.sql:Z +``` + +The `100-` prefix ensures CloudSync loads after Supabase's own init scripts (numbered 97-99). + +> **Important**: Init scripts only run when the data directory is empty (first start). If you've already started Postgres once and need to add the extension, connect and run `CREATE EXTENSION cloudsync;` manually. + +--- + +## Step 8: Start Supabase + +```bash +cd /data/supabase-docker +docker compose pull +docker compose up -d +``` + +Wait ~1 minute for all services to start, then verify: + +```bash +docker compose ps +``` + +All services should show `Up (healthy)`. If any service is unhealthy: + +```bash +docker compose logs +``` + +### Fix: services fail with "password authentication failed" + +If you see `FATAL: password authentication failed for user "authenticator"` (or `supabase_auth_admin`, `supabase_storage_admin`, `supabase_admin`) in the logs, the database users were created with a different password than what's in `.env`. This happens when `POSTGRES_PASSWORD` was changed after the first start, or when the DB data persists across reinstalls. 
+ +Fix by updating all user passwords to match your `.env`: + +```bash +# Replace YOUR_PASSWORD with the value of POSTGRES_PASSWORD from your .env file +docker compose exec db psql -U postgres -c "ALTER USER authenticator WITH PASSWORD 'YOUR_PASSWORD';" +docker compose exec db psql -U postgres -c "ALTER USER supabase_auth_admin WITH PASSWORD 'YOUR_PASSWORD';" +docker compose exec db psql -U postgres -c "ALTER USER supabase_storage_admin WITH PASSWORD 'YOUR_PASSWORD';" +docker compose exec db psql -U postgres -c "ALTER USER supabase_admin WITH PASSWORD 'YOUR_PASSWORD';" +``` + +Then restart: + +```bash +docker compose restart +``` + +### Fix: analytics (Logflare) keeps crashing + +The analytics service (Logflare) often fails in self-hosted setups due to migration issues. Since it's **optional** (only used for log analytics, not required for CloudSync or core Supabase features), the simplest fix is to disable it. + +In `docker-compose.yml`: + +1. **Remove the `analytics` dependency** from the `studio` service's `depends_on` block. Delete these lines: + ```yaml + analytics: + condition: service_healthy + ``` + If `depends_on:` becomes empty after removing, delete the `depends_on:` line too. + +2. **Comment out the `LOGFLARE_URL`** environment variable in the `studio` service: + ```yaml + # LOGFLARE_URL: http://analytics:4000 + ``` + +3. **Restart**: + ```bash + docker compose stop analytics + docker compose up -d + ``` + +### Fix: CloudSync extension not found + +If `SELECT cloudsync_version();` returns "function does not exist", the init script didn't run (it only runs on first boot when the data directory is empty). 
Create the extension manually: + +```bash +docker compose exec db psql -U postgres -c "CREATE EXTENSION IF NOT EXISTS cloudsync;" +``` + +### Fix: Auth service crashes with "must be owner of function uid" + +When the auth (GoTrue) service fails to start and logs show errors like: + +``` +error executing migrations: must be owner of function uid (SQLSTATE 42501) +``` + +This happens because the `auth.uid()`, `auth.role()`, and `auth.email()` functions were created by `postgres` (via the init scripts or CloudSync extension), but the auth service runs migrations as `supabase_auth_admin` and expects to own those functions. + +**Symptoms:** +- Auth service keeps restarting +- Supabase Studio shows "Failed to retrieve users" or "column users.banned_until does not exist" (because auth migrations didn't complete) +- `docker compose logs auth` shows the `SQLSTATE 42501` ownership error + +**Fix:** Transfer ownership of the functions to `supabase_auth_admin`: + +```bash +docker compose exec db psql -U postgres -c "ALTER FUNCTION auth.uid() OWNER TO supabase_auth_admin;" +docker compose exec db psql -U postgres -c "ALTER FUNCTION auth.role() OWNER TO supabase_auth_admin;" +docker compose exec db psql -U postgres -c "ALTER FUNCTION auth.email() OWNER TO supabase_auth_admin;" +docker compose restart auth +``` + +After restarting, wait ~30 seconds and check that auth is healthy: + +```bash +docker compose ps auth +docker compose logs --tail=20 auth +``` + +You should see auth in a `healthy` state and the migrations completing successfully. + +### Fix: Supavisor (connection pooler) keeps crashing + +If `docker compose logs supavisor` shows: + +``` +Setting RLIMIT_NOFILE to 100000 +/app/limits.sh: line 6: ulimit: open files: cannot modify limit: Operation not permitted +``` + +This happens because Supavisor's startup script (`/app/limits.sh`) tries to increase the open-file limit to 100,000, but the Fly VM's kernel has a lower cap (typically 10,240) and doesn't allow it. 
The script failure crashes the container. + +**Fix:** Override the entrypoint in `docker-compose.yml` to skip the limits script. Add this line right after `container_name: supabase-pooler`: + +```yaml + entrypoint: ["/usr/bin/tini", "-s", "-g", "--"] +``` + +Or apply with sed: + +```bash +sed -i '/container_name: supabase-pooler/a\ entrypoint: ["/usr/bin/tini", "-s", "-g", "--"]' /data/supabase-docker/docker-compose.yml +``` + +This keeps the same `tini` init process but skips `/app/limits.sh`. The VM's default open-file limit (10,240) is sufficient for testing. + +Then restart: + +```bash +docker compose up -d supavisor +``` + +--- + +## Step 9: Verify CloudSync + +Connect to the database: + +```bash +docker compose exec db psql -U postgres +``` + +```sql +-- Check the extension is installed +SELECT * FROM pg_extension WHERE extname = 'cloudsync'; + +-- Check version +SELECT cloudsync_version(); +``` + +If `cloudsync_version()` returns "function does not exist", create the extension manually: + +```sql +CREATE EXTENSION IF NOT EXISTS cloudsync; +``` + +--- + +## Step 10: Test CloudSync Sync + +This section walks through testing the full sync flow: registering the database with the CloudSync server, creating tables, enabling sync, and running a roundtrip test. + +### Prerequisites + +You need a running **CloudSync server**. This can be the staging server or a local instance. + +```bash +export CLOUDSYNC_URL="https://cloudsync-staging-testing.fly.dev" # CloudSync server URL +export ORG_API_KEY="" # Organization API key +``` + +#### Connection string + +The CloudSync server needs a PostgreSQL connection string to reach your database. There are two options depending on where your CloudSync server runs: + +**Option A: CloudSync on the same Fly org (`.internal` network)** + +If both the CloudSync server and the Supabase VM are in the same Fly org, they can communicate over Fly's **private internal network** — no public port exposure needed. 
Connect directly to the `db` container's mapped port (5432 is exposed on the host by default in docker-compose): + +```bash +# Direct connection (no Supavisor) — recommended for CloudSync server-to-server +export CONNECTION_STRING="postgres://postgres:$POSTGRES_PASSWORD@.internal:5432/postgres" +``` + +**Option B: CloudSync running outside Fly (e.g., local machine, another cloud)** + +Use `fly proxy` to tunnel the Postgres port to your local machine: + +```bash +# In a separate terminal — keep this running +fly proxy 5432:5432 -a +``` + +This makes the remote Postgres available at `localhost:5432`. Then use: + +```bash +export CONNECTION_STRING="postgres://postgres:$POSTGRES_PASSWORD@localhost:5432/postgres" +``` + +> **Note:** The proxy must stay running in a separate terminal for the duration of your session. If the proxy disconnects, just re-run the command. + +To verify the connection works: + +```bash +# Option A: SSH into the VM and test locally +fly ssh console --app +docker compose exec db psql -U postgres -c "SELECT 1;" + +# Option B: With fly proxy running, test from your local machine +psql "postgres://postgres:$POSTGRES_PASSWORD@localhost:5432/postgres" -c "SELECT 1;" +``` + +### 10a. Verify CloudSync server is reachable + +```bash +curl "$CLOUDSYNC_URL/healthz" +# Expected: {"status":"ok"} +``` + +### 10b. Register the Supabase database with CloudSync + +This tells CloudSync where to find your PostgreSQL database: + +```bash +curl --request POST "$CLOUDSYNC_URL/v1/databases" \ + --header "Authorization: Bearer $ORG_API_KEY" \ + --header "Content-Type: application/json" \ + --data '{ + "label": "Supabase Fly.io Test", + "connectionString": "'"$CONNECTION_STRING"'", + "provider": "postgres", + "flavor": "supabase", + "projectId": "cloudsync-supabase-test", + "databaseName": "postgres" + }' +``` + +Save the returned `managedDatabaseId` — you'll need it for all subsequent operations: + +```bash +export MANAGED_DATABASE_ID="" +``` + +### 10c. 
Verify database connectivity + +```bash +curl --request POST "$CLOUDSYNC_URL/v1/databases/$MANAGED_DATABASE_ID/verify" \ + --header "Authorization: Bearer $ORG_API_KEY" +``` + +Expected: status should show the database is reachable. + +### 10d. Create a test table on the Supabase database + +SSH into the Fly VM and create a table: + +```bash +docker compose exec db psql -U postgres -c " +CREATE TABLE IF NOT EXISTS todos ( + id TEXT PRIMARY KEY DEFAULT cloudsync_uuid(), + title TEXT NOT NULL DEFAULT '', + done BOOLEAN DEFAULT false +); +SELECT cloudsync_init('todos'); +" +``` + +### 10e. Enable CloudSync on the table + +From your local machine, enable sync via the management API: + +```bash +curl --request POST "$CLOUDSYNC_URL/v1/databases/$MANAGED_DATABASE_ID/cloudsync/enable" \ + --header "Authorization: Bearer $ORG_API_KEY" \ + --header "Content-Type: application/json" \ + --data '{"tables":["todos"]}' +``` + +Verify: + +```bash +curl --request GET "$CLOUDSYNC_URL/v1/databases/$MANAGED_DATABASE_ID/cloudsync/tables" \ + --header "Authorization: Bearer $ORG_API_KEY" +``` + +The `todos` table should show `"enabled": true`. + +### 10f. Test sync roundtrip from a SQLite client + +On your local machine, create a SQLite database and sync: + +```sql +-- Load the sqlite-sync extension +.load path/to/cloudsync + +-- Create the same table schema +CREATE TABLE todos ( + id TEXT PRIMARY KEY DEFAULT (cloudsync_uuid()), + title TEXT NOT NULL DEFAULT '', + done BOOLEAN DEFAULT false +); +SELECT cloudsync_init('todos'); + +-- Configure network +SELECT cloudsync_network_init(''); +SELECT cloudsync_network_set_token(''); + +-- Insert a row locally +INSERT INTO todos (title) VALUES ('Test from SQLite'); + +-- Sync: send local changes, check for remote changes +SELECT cloudsync_network_sync(500, 5); +``` + +Then verify the row arrived on Supabase: + +```bash +docker compose exec db psql -U postgres -c "SELECT * FROM todos;" +``` + +### 10g. 
Test reverse sync (Supabase → SQLite) + +Insert a row directly on Supabase: + +```bash +docker compose exec db psql -U postgres -c " +INSERT INTO todos (id, title, done) VALUES (cloudsync_uuid(), 'Test from Supabase', false); +" +``` + +Then sync from the SQLite client: + +```sql +SELECT cloudsync_network_check_changes(); +SELECT * FROM todos; +``` + +The row from Supabase should appear in SQLite. + +--- + +## Step 11: Access your services + +| Service | URL | +|---------|-----| +| **Supabase Studio** | `http://<vm-ip>:8000` | +| REST API | `http://<vm-ip>:8000/rest/v1/` | +| Auth API | `http://<vm-ip>:8000/auth/v1/` | +| Storage API | `http://<vm-ip>:8000/storage/v1/` | +| Realtime | `http://<vm-ip>:8000/realtime/v1/` | + +Studio dashboard requires a username and password. To find them, check your `.env` file on the VM: + +```bash +grep DASHBOARD /data/supabase-docker/.env +``` + +The values are `DASHBOARD_USERNAME` (default: `supabase`) and `DASHBOARD_PASSWORD` (default: `this_password_is_insecure_and_should_be_updated`). + +> **Note:** The Fly VM doesn't expose ports publicly by default. Use `fly proxy` to access services from your local machine: +> ```bash +> fly proxy 8000:8000 -a <app-name> +> ``` +> Then open `http://localhost:8000` in your browser. + +### Connect to Postgres directly + +Use `fly proxy` to tunnel the Postgres port to your local machine: + +```bash +# In a separate terminal — keep this running +fly proxy 5432:5432 -a <app-name> +``` + +Then connect from your local machine: + +```bash +psql 'postgres://postgres:<POSTGRES_PASSWORD>@localhost:5432/postgres' +``` + +> **Tip:** You can proxy multiple ports at once by running multiple `fly proxy` commands in separate terminals (e.g., `8000` for Studio and `5432` for Postgres). + +--- + +## Step 12: Set up HTTPS (production) + +For production use, put a reverse proxy in front of Kong. The simplest option is [Caddy](https://caddyserver.com/) which handles TLS automatically. 
+ +On the Fly VM: + +```bash +apt-get install -y debian-keyring debian-archive-keyring apt-transport-https +curl -1sLf 'https://dl.cloudsmith.io/public/caddy/stable/gpg.key' | gpg --dearmor -o /usr/share/keyrings/caddy-stable-archive-keyring.gpg +curl -1sLf 'https://dl.cloudsmith.io/public/caddy/stable/debian.deb.txt' | tee /etc/apt/sources.list.d/caddy-stable.list +apt-get update +apt-get install -y caddy +``` + +Create `/etc/caddy/Caddyfile`: + +``` +your-domain.com { + reverse_proxy localhost:8000 +} +``` + +```bash +systemctl enable caddy +systemctl start caddy +``` + +Then update `.env`: + +```env +SUPABASE_PUBLIC_URL=https://your-domain.com +API_EXTERNAL_URL=https://your-domain.com +``` + +Restart Supabase: + +```bash +cd /data/supabase-docker +docker compose down && docker compose up -d +``` + +--- + +## Maintenance + +### Update Supabase services + +```bash +cd /data/supabase-docker +# Update image tags in docker-compose.yml, then: +docker compose pull +docker compose down && docker compose up -d +``` + +### Update CloudSync extension + +On your local machine, rebuild and push the image: + +```bash +cd /path/to/sqlite-sync-dev +git pull # get latest CloudSync code +git submodule update --init --recursive # ensure submodules are up to date +make postgres-supabase-build SUPABASE_POSTGRES_TAG=15.8.1.085 +docker tag public.ecr.aws/supabase/postgres:15.8.1.085 \ + /supabase-postgres-cloudsync:15.8.1.085 +docker push /supabase-postgres-cloudsync:15.8.1.085 +``` + +On the Fly VM: + +```bash +cd /data/supabase-docker +docker compose pull db +docker compose up -d db +``` + +### View logs + +```bash +# All services +docker compose logs -f + +# Specific service +docker compose logs -f db +``` + +### Change database password + +```bash +cd /data/supabase-docker +sh ./utils/db-passwd.sh +docker compose up -d --force-recreate +``` + +### Stop and restart the Fly machine + +The Fly machine's root filesystem resets on every stop/start — only the `/data` volume persists. 
This means Docker must be reinstalled each time. To automate this, create a startup script (run once): + +```bash +cat > /data/startup.sh << 'SCRIPT' +#!/bin/bash +set -e + +echo "=== Installing Docker ===" +apt-get update && apt-get install -y ca-certificates curl gnupg fuse-overlayfs + +install -m 0755 -d /etc/apt/keyrings +curl -fsSL https://download.docker.com/linux/ubuntu/gpg | gpg --dearmor -o /etc/apt/keyrings/docker.gpg +chmod a+r /etc/apt/keyrings/docker.gpg + +echo "deb [arch=$(dpkg --print-architecture) signed-by=/etc/apt/keyrings/docker.gpg] \ + https://download.docker.com/linux/ubuntu $(. /etc/os-release && echo $VERSION_CODENAME) stable" \ + > /etc/apt/sources.list.d/docker.list + +apt-get update && apt-get install -y docker-ce docker-ce-cli containerd.io docker-compose-plugin + +echo "=== Configuring Docker ===" +mkdir -p /etc/docker +echo '{"storage-driver":"fuse-overlayfs","data-root":"/data/docker"}' > /etc/docker/daemon.json + +echo "=== Starting Docker ===" +dockerd > /data/dockerd.log 2>&1 & + +echo "Waiting for Docker to be ready..." +until docker info > /dev/null 2>&1; do sleep 1; done +echo "Docker is ready!" + +echo "=== Starting Supabase ===" +cd /data/supabase-docker +docker compose up -d + +echo "=== Done! ===" +echo "Run 'docker compose ps' to check service status" +SCRIPT +chmod +x /data/startup.sh +``` + +From then on, every time you restart the machine: + +```bash +# From your local machine: +fly machine stop 287920ea023108 -a # stop +fly machine start 287920ea023108 -a # start +fly ssh console --app + +# On the VM — one command does everything: +/data/startup.sh +``` + +Docker images and Supabase data are on `/data`, so they survive restarts. Only Docker itself needs reinstalling (~1-2 minutes). 
+ +### Stop Supabase (without stopping the machine) + +```bash +docker compose down +``` + +### Destroy everything (irreversible) + +```bash +docker compose down -v +rm -rf volumes/db/data volumes/storage +``` + +--- + +## Troubleshooting + +| Problem | Solution | +|---------|----------| +| `fractional_indexing.h: No such file or directory` | Run `git submodule update --init --recursive` before building. The fractional-indexing library is a git submodule that must be initialized. | +| `cloudsync.so: cannot open shared object file` | The custom image wasn't used. Verify `docker compose ps` shows your image, not the default one. | +| Init script didn't run / `cloudsync_version()` not found | Init scripts only run on first start. Run `CREATE EXTENSION IF NOT EXISTS cloudsync;` manually via `docker compose exec db psql -U postgres`. | +| `password authentication failed for user "authenticator"` | Database users have a different password than `.env`. See "Fix: services fail with password authentication failed" above. | +| Analytics (Logflare) keeps crashing | Disable it — see "Fix: analytics keeps crashing" above. It's optional and not needed for CloudSync. | +| `cannot stop container ... did not receive an exit event` | Zombie container. Kill Docker: `kill -9 $(pidof dockerd) $(pidof containerd)`, remove container files: `rm -rf /data/docker/containers/*`, restart dockerd. | +| Services unhealthy after start | Wait 2 minutes. Check `docker compose logs `. Most common: password mismatch (see fix above). | +| Can't pull custom image from VM | Make sure the image is public on Docker Hub, or run `docker login` on the VM. | +| ARM build errors | Fly defaults to x86. If using ARM machines, rebuild with `--platform linux/arm64`. | +| Version mismatch | Run `SHOW server_version;` in psql and ensure your `SUPABASE_POSTGRES_TAG` matches the major version. 
| +| `no space left on device` during pull | The `fuse-overlayfs` driver is efficient but if disk fills up, extend the volume: `fly volumes extend <volume-id> --size 80 -a <app-name>`. | +| Docker commands not found after machine restart | The Fly VM root filesystem resets on stop/start. Run `/data/startup.sh` to reinstall Docker. See "Stop and restart the Fly machine" section. | +| Auth crashes with `must be owner of function uid` | Transfer function ownership: `ALTER FUNCTION auth.uid() OWNER TO supabase_auth_admin;` and same for `auth.role()` and `auth.email()`. See "Fix: Auth service crashes" above. | +| Studio shows "column users.banned_until does not exist" | Auth migrations didn't complete. Fix the auth ownership issue above and restart auth. | +| Supavisor crashes with `ulimit: cannot modify limit` | Override entrypoint to skip `/app/limits.sh`. See "Fix: Supavisor keeps crashing" above. | diff --git a/docs/postgresql/SPORT_APP_README_SUPABASE.md b/docs/postgresql/examples/sport-app-supabase.md similarity index 100% rename from docs/postgresql/SPORT_APP_README_SUPABASE.md rename to docs/postgresql/examples/sport-app-supabase.md diff --git a/docs/postgresql/EXPO.md b/docs/postgresql/integrations/expo.md similarity index 100% rename from docs/postgresql/EXPO.md rename to docs/postgresql/integrations/expo.md diff --git a/docs/postgresql/SUPABASE.md b/docs/postgresql/integrations/supabase.md similarity index 100% rename from docs/postgresql/SUPABASE.md rename to docs/postgresql/integrations/supabase.md diff --git a/docs/postgresql/quickstarts/postgres.md b/docs/postgresql/quickstarts/postgres.md new file mode 100644 index 0000000..e7b5c3d --- /dev/null +++ b/docs/postgresql/quickstarts/postgres.md @@ -0,0 +1,126 @@ +# CloudSync Quick Start: Self-Hosted PostgreSQL + +This guide helps you enable CloudSync on a **self-hosted PostgreSQL database**. CloudSync adds offline-first synchronization capabilities to your PostgreSQL database. 
+ +## Step 1: Deploy PostgreSQL with CloudSync + +You can enable CloudSync in one of two ways: +- Use the published Docker image if you run PostgreSQL in Docker +- Install the released extension files into an existing native PostgreSQL installation + +### Option A: Docker + +Use the published PostgreSQL image that already includes the CloudSync extension: +- `sqlitecloud/sqlite-sync-postgres:15` +- `sqlitecloud/sqlite-sync-postgres:17` + +Example using Docker Compose: + +```yaml +services: + db: + image: sqlitecloud/sqlite-sync-postgres:17 + container_name: cloudsync-postgres + environment: + POSTGRES_USER: postgres + POSTGRES_PASSWORD: your-secure-password + POSTGRES_DB: postgres + ports: + - "5432:5432" + volumes: + - pg_data:/var/lib/postgresql/data + - ./init.sql:/docker-entrypoint-initdb.d/init.sql:ro + +volumes: + pg_data: +``` + +Create `init.sql`: +```sql +CREATE EXTENSION IF NOT EXISTS cloudsync; +``` + +Run: +```bash +docker compose up -d +``` + +### Option B: Existing PostgreSQL Without Docker + +If you already run PostgreSQL directly on a VM or bare metal, download the release tarball that matches your operating system, CPU architecture, and PostgreSQL major version. + +Extract the archive, then copy the three extension files into PostgreSQL's extension directories: + +```bash +cp cloudsync.so "$(pg_config --pkglibdir)/" +cp cloudsync.control cloudsync--1.0.sql "$(pg_config --sharedir)/extension/" +``` + +Then connect to PostgreSQL and enable the extension: + +```sql +CREATE EXTENSION IF NOT EXISTS cloudsync; +``` + +--- + +## Step 2: Verify the Extension + +If you are using Docker: + +```bash +docker compose exec db psql -U postgres -d postgres -c "SELECT cloudsync_version();" +``` + +If you are using an existing PostgreSQL installation without Docker: + +```bash +psql -U postgres -d postgres -c "SELECT cloudsync_version();" +``` + +If the extension is installed correctly, PostgreSQL returns the CloudSync version string. 
+ +--- + +## Step 3: Register Your Database in the CloudSync Dashboard + +In the [CloudSync dashboard](https://dashboard.sqlitecloud.io/), create a new workspace with the **PostgreSQL** provider, then add a project with your PostgreSQL connection string: + +``` +postgresql://user:password@host:5432/database +``` + +--- + +## Step 4: Enable CloudSync on Tables + +In the dashboard, go to the **Database Setup** tab, select the tables you want to sync, and click **Deploy Changes**. + +--- + +## Step 5: Set Up Authentication + +On the **Client Integration** tab you'll find your **Database ID** and authentication settings. + +### Quick Test with API Key (Recommended for Testing) + +The fastest way to test CloudSync without per-user access control — no JWT setup needed. + +```sql +SELECT cloudsync_network_init(''); +SELECT cloudsync_network_set_apikey(':'); +SELECT cloudsync_network_sync(); +``` + +### Using JWT Tokens (For RLS and Production) + +1. Set **Row Level Security** to **Yes, enforce RLS** +2. Under **Authentication (JWT)**, click **Configure authentication** and choose: + - **HMAC Secret (HS256):** Enter your JWT secret (or generate one: `openssl rand -base64 32`) + - **JWKS Issuer Validation:** Enter the issuer base URL from your token's `iss` claim (e.g. `https://your-auth-domain`). CloudSync automatically fetches the JWKS document from `/.well-known/jwks.json` +3. In your client code: + ```sql + SELECT cloudsync_network_init(''); + SELECT cloudsync_network_set_token(''); + SELECT cloudsync_network_sync(); + ``` diff --git a/docs/postgresql/quickstarts/supabase.md b/docs/postgresql/quickstarts/supabase.md new file mode 100644 index 0000000..a5dd047 --- /dev/null +++ b/docs/postgresql/quickstarts/supabase.md @@ -0,0 +1,127 @@ +# CloudSync Quick Start: Self-Hosted Supabase + +This guide helps you enable CloudSync on a **fresh or existing** self-hosted Supabase instance. CloudSync adds offline-first synchronization capabilities to your PostgreSQL database. 
+ +## Step 1: Use the CloudSync Supabase Image + +When deploying or updating your Supabase instance, use the published CloudSync Supabase image instead of the standard Supabase Postgres image. + +### For New Deployments + +Follow [Supabase's Installing Supabase](https://supabase.com/docs/guides/self-hosting/docker#installing-supabase) guide to set up the initial files and `.env` configuration. Then, before the first `docker compose up -d`, update your `docker-compose.yml` to use the CloudSync-enabled Postgres image: + +```yaml +db: + # Supabase on PostgreSQL 15 + image: sqlitecloud/sqlite-sync-supabase:15.8.1.085 + # instead of: public.ecr.aws/supabase/postgres:15.8.1.085 + + # OR Supabase on PostgreSQL 17 + image: sqlitecloud/sqlite-sync-supabase:17.6.1.071 + # instead of: public.ecr.aws/supabase/postgres:17.6.1.071 +``` + +Use the tag that matches your Supabase Postgres base image exactly. Convenience tags `sqlitecloud/sqlite-sync-supabase:15` and `sqlitecloud/sqlite-sync-supabase:17` are also published, but the exact Supabase tag is the safest choice. + +### Add the CloudSync Init Script + +Create the init SQL: + +```bash +mkdir -p volumes/db +cat > volumes/db/cloudsync.sql << 'EOF' +CREATE EXTENSION IF NOT EXISTS cloudsync; +EOF +``` + +Add a volume mount to the `db` service in `docker-compose.yml`: + +```yaml +services: + db: + volumes: + # ... existing volume mounts ... + - ./volumes/db/cloudsync.sql:/docker-entrypoint-initdb.d/init-scripts/100-cloudsync.sql:Z +``` + +The `100-` prefix ensures CloudSync loads after Supabase's own init scripts, which are typically numbered `98-99` in the self-hosted Docker Compose setup. + +Then start Supabase: + +```bash +docker compose pull +docker compose up -d +``` + +### For Existing Deployments + +Follow [Supabase's Updating](https://supabase.com/docs/guides/self-hosting/docker#updating) guide. 
When updating the Postgres image, replace the default image with the matching CloudSync image: + +```bash +# Update docker-compose.yml to use: +# sqlitecloud/sqlite-sync-supabase:15.8.1.085 +# or sqlitecloud/sqlite-sync-supabase:17.6.1.071 +docker compose pull +docker compose down && docker compose up -d +``` + +If Postgres has already been initialized and you are adding CloudSync afterward, the init script will not run automatically. Connect to the database and run: + +```sql +CREATE EXTENSION IF NOT EXISTS cloudsync; +``` + +--- + +## Step 2: Verify the Extension + +```bash +docker compose exec db psql -U supabase_admin -d postgres -c "SELECT cloudsync_version();" +``` + +If the extension is installed correctly, PostgreSQL returns the CloudSync version string. + +--- + +## Step 3: Register Your Database in the CloudSync Dashboard + +In the [CloudSync dashboard](https://dashboard.sqlitecloud.io/), create a new workspace with the **Supabase (Self-hosted)** provider, then add a project with your PostgreSQL connection string: + +``` +postgresql://user:password@host:5432/database +``` + +--- + +## Step 4: Enable CloudSync on Tables + +In the dashboard, go to the **Database Setup** tab, select the tables you want to sync, and click **Deploy Changes**. + +--- + +## Step 5: Set Up Authentication + +On the **Client Integration** tab you'll find your **Database ID** and authentication settings. + +### Quick Test with API Key (Recommended for Testing) + +The fastest way to test CloudSync without per-user access control — no JWT setup needed. + +```sql +SELECT cloudsync_network_init(''); +SELECT cloudsync_network_set_apikey(':'); +SELECT cloudsync_network_sync(); +``` + +### Using JWT Tokens (For RLS and Production) + +1. Set **Row Level Security** to **Yes, enforce RLS** +2. 
Under **Authentication (JWT)**, click **Configure authentication** and choose: + - **HMAC Secret (HS256):** Enter your `JWT_SECRET` from Supabase's `.env` + - **JWKS Issuer Validation:** Enter the issuer base URL from your token's `iss` claim (e.g. `https://your-auth-domain`). CloudSync automatically fetches the JWKS document from `/.well-known/jwks.json` +3. In your client code: + ```sql + SELECT cloudsync_network_init(''); + SELECT cloudsync_network_set_token(''); + SELECT cloudsync_network_sync(); + ``` diff --git a/docs/postgresql/reference/jwt-claims.md b/docs/postgresql/reference/jwt-claims.md new file mode 100644 index 0000000..e778a59 --- /dev/null +++ b/docs/postgresql/reference/jwt-claims.md @@ -0,0 +1,341 @@ +# JWT Claims Reference + +## How RLS Works with JWT Claims + +**Flow:** +``` +1. Client sends JWT token to CloudSync +2. CloudSync validates JWT and extracts claims +3. CloudSync passes claims to PostgreSQL as session variables +4. RLS policies read session variables via current_setting() +5. Policies filter data based on claims +6. Only authorized rows returned to client +``` + +## How CloudSync Passes JWT Claims + +**CloudSync passes ALL JWT claims as a single JSON object** in `request.jwt.claims`: + +```sql +-- Access any claim from the JSON +user_id = (current_setting('request.jwt.claims')::jsonb->>'sub')::uuid +email = (current_setting('request.jwt.claims')::jsonb->>'email') +role = (current_setting('request.jwt.claims')::jsonb->>'role') +org_id = (current_setting('request.jwt.claims')::jsonb->>'org_id')::uuid +``` + +--- + +## Standard JWT Claims + +| Claim | Needed? 
| Purpose | +|-------|---------|---------| +| `sub` | ✅ Yes | User ID | +| `email` | ✅ Yes | User email | +| `role` | ✅ Yes | Permission level | +| `iss` | ✅ Yes | Issuer (validated in app) | +| `aud` | ✅ Yes | Audience (validated in app) | +| `iat` | ⚠️ Maybe | Issued at timestamp | +| `exp` | ✅ Yes | Expiration (validated in app) | + +## Custom Claims (Application-Specific) + +| Claim | Use Case | +|-------|----------| +| `org_id` | Multi-tenant apps | +| `team_id` | Team-based access | +| `permissions` | Fine-grained access | +| `scope` | OAuth scopes | +| `department_id` | Department-based filtering | +| `is_admin` | Admin flag | + +--- + +## Recommended JWT Structure for CloudSync + +```javascript +const token = jwt.sign({ + // Required + sub: user.id, // UUID: user_id + email: user.email, // String: for contact/audit + role: user.role, // String: admin/user/viewer + + // Multi-tenant (if needed) + org_id: user.org_id, // UUID: organization + team_id: user.team_id, // UUID: team (optional) + + // Permissions (choose one approach) + // Approach 1: Simple role + // role: "admin" (handled above) + + // Approach 2: Detailed permissions + permissions: [ // Array: fine-grained + "todos:read", + "todos:write", + "todos:delete" + ], + + // Standard claims (handled by JWT lib) + // iss: 'cloudsync-auth', + // aud: 'authenticated', + // iat: Math.floor(Date.now() / 1000), + // exp: Math.floor(Date.now() / 1000) + (24 * 60 * 60) +}, privateKey, { + algorithm: 'RS256', + expiresIn: '24h' +}); +``` + +--- + +## How CloudSync Passes JWT Claims to PostgreSQL + +**CloudSync validates the JWT and converts all claims to JSON, then passes as a PostgreSQL session variable:** + +```go +// CloudSync (internal implementation) +userData := token.Claims // map[string]any with all JWT claims +claimJSON, _ := json.Marshal(userData) + +// Pass all claims as JSON to PostgreSQL session +db.Exec( + `SELECT set_config('role', 'authenticated', true), + set_config('request.jwt.claims', $1, 
true)`, + string(claimJSON) +) +``` + +**Result:** All JWT claims available in PostgreSQL as JSON in `request.jwt.claims` + +**Example:** If JWT contains: +```json +{ + "sub": "550e8400-e29b-41d4-a716-446655440000", + "email": "user@example.com", + "role": "authenticated", + "org_id": "aaaaaaaa-bbbb-cccc-dddd-eeeeeeeeeeee" +} +``` + +Then in PostgreSQL: +```sql +current_setting('request.jwt.claims') +-- Returns: {"sub":"550e8400...","email":"user@example.com","role":"authenticated","org_id":"aaaaaaaa..."} +``` + +--- + +## Optional: Helper Functions for JWT Claims + +CloudSync validates JWTs and passes all claims to PostgreSQL via `request.jwt.claims` — no PostgreSQL extension is required for JWT verification. The validation happens entirely in the CloudSync microservice. + +However, writing `(current_setting('request.jwt.claims')::jsonb->>'sub')::uuid` in every RLS policy is verbose. Following the pattern used by Supabase and Neon, you can optionally create a small set of helper functions in a dedicated schema: + +```sql +-- Create a schema for auth helpers (optional, but keeps things clean) +CREATE SCHEMA IF NOT EXISTS auth; + +-- Returns all JWT claims as JSONB +CREATE OR REPLACE FUNCTION auth.session() + RETURNS jsonb AS $$ + SELECT current_setting('request.jwt.claims', true)::jsonb; +$$ LANGUAGE SQL STABLE; + +-- Returns the user ID (sub claim) +CREATE OR REPLACE FUNCTION auth.user_id() + RETURNS text AS $$ + SELECT auth.session()->>'sub'; +$$ LANGUAGE SQL STABLE; + +-- Returns the user's role claim +CREATE OR REPLACE FUNCTION auth.role() + RETURNS text AS $$ + SELECT auth.session()->>'role'; +$$ LANGUAGE SQL STABLE; +``` + +> **Note:** These are just convenience wrappers — they read from the same `request.jwt.claims` session variable that CloudSync sets. 
+ +--- + +## Security Rules + +### Rule 1: Use Immutable Claims for RLS +```javascript +// ✅ Good: System-set, immutable +role: user.role, + +// ❌ Bad: User can modify +user_metadata: { role: "admin" } +``` + +### Rule 2: Don't Duplicate Database Lookups +```sql +-- ✅ Good: Trust the JWT claim +WHERE user_id = current_setting('request.user_id')::uuid + +-- ❌ Bad: Defeats the purpose of RLS +WHERE user_id = current_setting('request.user_id')::uuid + AND user_id IN (SELECT id FROM users WHERE active = true) +-- If RLS is on users table, this becomes circular +``` + +### Rule 3: Validate Claims in App Before Passing to DB +```go +// ✅ Good: Validate first +if !isValidRole(claims.Role) { + return Unauthorized("Invalid role") +} +db.Exec("SET request.user_role = $1", claims.Role) + +// ❌ Bad: Trust user input +db.Exec("SET request.user_role = $1", claims.Role) +// What if claims.Role is "superadmin"? +``` + +### Rule 4: Keep Session Variables Consistent +```go +// ✅ Good: Set all needed claims +db.Exec("SET request.user_id = $1", claims.Sub) +db.Exec("SET request.org_id = $1", claims.OrgID) + +// ❌ Bad: Partial claims, RLS breaks +db.Exec("SET request.user_id = $1", claims.Sub) +// What if RLS policy expects org_id? +``` + +--- + +## Example: Complete Flow + +**1. Auth server creates JWT:** +```javascript +const token = jwt.sign({ + sub: '550e8400-e29b-41d4-a716-446655440000', + email: 'user@example.com', + role: 'user', + org_id: 'aaaaaaaa-bbbb-cccc-dddd-eeeeeeeeeeee', + permissions: ['todos:read', 'todos:write'] +}, privateKey, { algorithm: 'RS256', expiresIn: '24h' }); +``` + +**2. 
CloudSync passes JWT claims to PostgreSQL:** +```go +claims := jwt.Parse(token) +userData := map[string]any{ + "sub": claims.Sub, + "email": claims.Email, + "role": claims.Role, + "org_id": claims.OrgID, +} +claimJSON, _ := json.Marshal(userData) + +db.Exec( + `SELECT set_config('role', 'authenticated', true), + set_config('request.jwt.claims', $1, true)`, + string(claimJSON) +) +``` + +**3. PostgreSQL RLS policies read from JWT claims:** +```sql +CREATE POLICY "org_isolation" + ON todos FOR ALL + USING (org_id = (current_setting('request.jwt.claims')::jsonb->>'org_id')::uuid); + +CREATE POLICY "user_ownership" + ON todos FOR UPDATE + USING (user_id = (current_setting('request.jwt.claims')::jsonb->>'sub')::uuid); +``` + +**4. CloudSync executes queries (RLS filters automatically):** +```go +rows := db.Query(ctx, "SELECT * FROM todos") +// RLS automatically returns only matching rows +``` + +## Common RLS Patterns + +### Pattern 1: Filter by User ID (Most Common) +**Best for:** Todo apps, note-taking, personal data + +```sql +-- Table with user_id column +CREATE TABLE todos ( + id UUID PRIMARY KEY, + user_id UUID NOT NULL, + title TEXT, + created_at TIMESTAMP +); + +-- Enable RLS +ALTER TABLE todos ENABLE ROW LEVEL SECURITY; + +-- Read user_id from JWT claims (sub claim) +CREATE POLICY "users_see_own_todos" + ON todos FOR ALL + USING (user_id = (current_setting('request.jwt.claims')::jsonb->>'sub')::uuid); +``` + +**CloudSync passes:** JWT claims as JSON in `request.jwt.claims` +**RLS reads:** `current_setting('request.jwt.claims')::jsonb->>'sub'` + +### Pattern 2: Filter by Organization ID (Multi-tenant) +**Best for:** SaaS apps, team collaboration + +```sql +CREATE TABLE projects ( + id UUID PRIMARY KEY, + org_id UUID NOT NULL, + name TEXT +); + +ALTER TABLE projects ENABLE ROW LEVEL SECURITY; + +CREATE POLICY "orgs_see_own_projects" + ON projects FOR ALL + USING (org_id = (current_setting('request.jwt.claims')::jsonb->>'org_id')::uuid); +``` + +### Pattern 3: 
Filter by Role (Admin vs User) +**Best for:** Different access levels + +```sql +CREATE TABLE users ( + id UUID PRIMARY KEY, + email TEXT, + role TEXT +); + +ALTER TABLE users ENABLE ROW LEVEL SECURITY; + +CREATE POLICY "role_based_access" + ON users FOR SELECT + USING ( + (current_setting('request.jwt.claims')::jsonb->>'role') = 'admin' + OR id = (current_setting('request.jwt.claims')::jsonb->>'sub')::uuid + ); +``` + +### Pattern 4: Combine User ID + Organization +**Best for:** Team apps with shared data + +```sql +CREATE TABLE team_members ( + id UUID PRIMARY KEY, + org_id UUID NOT NULL, + user_id UUID NOT NULL, + role TEXT +); + +ALTER TABLE team_members ENABLE ROW LEVEL SECURITY; + +CREATE POLICY "see_org_members" + ON team_members FOR ALL + USING ( + org_id = (current_setting('request.jwt.claims')::jsonb->>'org_id')::uuid + AND user_id = (current_setting('request.jwt.claims')::jsonb->>'sub')::uuid + ); +``` + +--- diff --git a/docs/postgresql/RLS.md b/docs/postgresql/reference/rls.md similarity index 100% rename from docs/postgresql/RLS.md rename to docs/postgresql/reference/rls.md diff --git a/src/cloudsync.h b/src/cloudsync.h index ba94977..a0d17ae 100644 --- a/src/cloudsync.h +++ b/src/cloudsync.h @@ -18,7 +18,7 @@ extern "C" { #endif -#define CLOUDSYNC_VERSION "1.0.5" +#define CLOUDSYNC_VERSION "1.0.6" #define CLOUDSYNC_MAX_TABLENAME_LEN 512 #define CLOUDSYNC_VALUE_NOTSET -1