Add local supabase for DB isolation

This commit is contained in:
MartinBraquet
2026-02-17 12:10:17 +01:00
parent b7d1fd9903
commit f7c0d77e9c
29 changed files with 2590 additions and 287 deletions

85
scripts/combine-migrations.sh Executable file
View File

@@ -0,0 +1,85 @@
#!/bin/bash
# Copy the SQL files referenced by backend/supabase/migration.sql into
# supabase/migrations/ as timestamp-prefixed files that `supabase db reset`
# applies in the original \i order.
set -euo pipefail

# Change to project root (this script lives in scripts/)
cd "$(dirname "$0")"/..

echo "📦 Copying migrations from backend/supabase/ to supabase/migrations/"
echo ""

# Create migrations directory if it doesn't exist
mkdir -p supabase/migrations

# migration.sql is the single source of truth: it lists each SQL file via \i
if [ ! -f "backend/supabase/migration.sql" ]; then
  echo "❌ Error: backend/supabase/migration.sql not found"
  exit 1
fi

# Extract file paths from \i commands; strip the trailing ';' and any CRs.
# `|| true` guards set -e when grep finds nothing; the empty case is
# reported explicitly below instead of dying silently.
FILES=$(grep '\\i ' backend/supabase/migration.sql | sed 's/\\i //' | sed 's/;//' | tr -d '\r' || true)
if [ -z "$FILES" ]; then
  echo "⚠️ No \\i entries found in backend/supabase/migration.sql — nothing to copy"
  exit 0
fi

# Starting timestamp, YYYYMMDDHHMMSS (you can adjust this)
TIMESTAMP=20260101000000
COUNTER=0

echo "Files to copy:"
echo "----------------------------------------"

# Copy each file with a strictly increasing timestamp prefix
while IFS= read -r file; do
  # Trim leading/trailing whitespace with pure bash (the previous
  # `echo | xargs` approach mangles paths containing quotes/backslashes)
  file="${file#"${file%%[![:space:]]*}"}"
  file="${file%"${file##*[![:space:]]}"}"
  if [ -z "$file" ]; then
    continue
  fi
  if [ ! -f "$file" ]; then
    echo "⚠️ Warning: $file not found, skipping..."
    continue
  fi

  # Calculate timestamp (+100 on the numeric form == +1 minute per file)
  CURRENT_TIMESTAMP=$((TIMESTAMP + COUNTER))

  # Get filename without path
  BASENAME=$(basename "$file")

  # Create descriptive name from path:
  #   backend/supabase/users.sql -> users
  #   backend/supabase/migrations/20251106_add_message_actions.sql -> add_message_actions
  if [[ "$file" == *"/migrations/"* ]]; then
    # Already has a migration name: drop the leading timestamp digits
    NAME=$(echo "$BASENAME" | sed 's/^[0-9_]*//;s/\.sql$//')
  else
    NAME=$(echo "$BASENAME" | sed 's/\.sql$//')
  fi

  # Output filename
  OUTPUT="supabase/migrations/${CURRENT_TIMESTAMP}_${NAME}.sql"

  # Add header comment to track source
  {
    echo "-- Migration: $NAME"
    echo "-- Source: $file"
    echo "-- Timestamp: $(date)"
    echo ""
    cat "$file"
  } > "$OUTPUT"

  echo "$file -> $OUTPUT"
  COUNTER=$((COUNTER + 100))
done <<< "$FILES"

echo ""
echo "----------------------------------------"
echo "✅ Migration files copied to supabase/migrations/"
echo ""
echo "To apply migrations:"
echo " supabase db reset"
echo ""
echo "To view in Studio:"
echo " open http://127.0.0.1:54323"

View File

@@ -0,0 +1,19 @@
# Throwaway Postgres instance for isolated test runs
# (used by scripts/test_db_migration.sh).
services:
  postgres-test:
    image: postgres:15-alpine
    environment:
      POSTGRES_DB: test_db
      POSTGRES_USER: test_user
      POSTGRES_PASSWORD: test_password
    ports:
      # Host 5433 -> container 5432, so it never clashes with a local Postgres
      - "5433:5432"
    volumes:
      # Named volume keeps data across restarts; `down -v` wipes it
      - postgres-test-data:/var/lib/postgresql/data
    healthcheck:
      test: [ "CMD-SHELL", "pg_isready -U test_user -d test_db" ]
      interval: 5s
      timeout: 5s
      retries: 5
volumes:
  postgres-test-data:

View File

@@ -2,41 +2,132 @@
set -euo pipefail

# ANSI escape sequences for colored log output (interpreted by `echo -e`
# in the print_* helpers below)
# Colors for output
RED='\033[0;31m'
GREEN='\033[0;32m'
YELLOW='\033[1;33m'
NC='\033[0m' # No Color
# Log an informational message to stdout with a green [E2E] prefix.
print_status() {
  printf '%b[E2E]%b %b\n' "$GREEN" "$NC" "$1"
}
# Log an error message with a red [ERROR] prefix.
print_error() {
  printf '%b[ERROR]%b %b\n' "$RED" "$NC" "$1"
}
# Log a warning message with a yellow [WARN] prefix.
print_warning() {
  printf '%b[WARN]%b %b\n' "$YELLOW" "$NC" "$1"
}
# Array to track background process PIDs
PIDS=()

# Function to clean up background processes
# NOTE(review): this body appears to contain old and new lines interleaved
# from a diff rendering — there are two status messages and two kill
# invocations below. The duplicate kill is harmless as written (the second
# call on an already-dead PID is suppressed), but confirm which version of
# the function was intended.
cleanup() {
  echo "Stopping background processes..."
  print_status "Cleaning up..."
  # Kill all background processes
  for pid in "${PIDS[@]:-}"; do
    if kill -0 "$pid" 2>/dev/null; then
      kill "$pid" || true
      kill "$pid" 2>/dev/null || true
      wait "$pid" 2>/dev/null || true
      echo "Killed PID $pid"
    fi
  done
  # Stop Docker containers (set SKIP_DB_CLEANUP=true to keep the test DB up)
  if [ "${SKIP_DB_CLEANUP:-}" != "true" ]; then
    print_status "Stopping test database..."
    docker compose -f scripts/docker-compose.test.yml down -v
  fi
  print_status "Cleanup complete"
}
# Trap EXIT, INT, TERM to run cleanup automatically
trap cleanup EXIT INT TERM

# Change to project root
cd "$(dirname "$0")"/..

# NOTE(review): the rest of this script looks like an old and a new version
# interleaved by a diff rendering — playwright install, server startup and
# the playwright test invocation each appear twice below, and a mid-script
# `exit` makes the second half unreachable. Confirm the intended sequence.
npx playwright install chromium

# Load test environment variables
if [ -f .env.test ]; then
  # NOTE(review): `export $(cat ... | xargs)` breaks on values containing
  # spaces or quotes — presumably .env.test only holds simple KEY=VALUE pairs.
  export $(cat .env.test | grep -v '^#' | xargs)
fi

# Point the app at the local emulators
export NEXT_PUBLIC_API_URL=localhost:8088
export NEXT_PUBLIC_FIREBASE_ENV=DEV
export NEXT_PUBLIC_FIREBASE_EMULATOR=true
export FIREBASE_AUTH_EMULATOR_HOST=127.0.0.1:9099
export FIREBASE_STORAGE_EMULATOR_HOST=127.0.0.1:9199

# Start Supabase (includes Postgres, Auth, Storage, etc.)
supabase start

# Start servers in background and store their PIDs
# NOTE(review): servers are started again further down with wait-on checks —
# likely one of the two startup paths should be removed.
PIDS=()
npx nyc --reporter=lcov yarn --cwd=web serve & PIDS+=($!)
npx nyc --reporter=lcov yarn --cwd=backend/api dev & PIDS+=($!)

# Apply migrations (if using Supabase migrations)
./scripts/combine-migrations.sh
supabase db reset

# Get connection details from the running Supabase stack
export NEXT_PUBLIC_SUPABASE_URL=$(supabase status --output json | jq -r '.API_URL')
export NEXT_PUBLIC_SUPABASE_ANON_KEY=$(supabase status --output json | jq -r '.ANON_KEY')
export DATABASE_URL=$(supabase status --output json | jq -r '.DB_URL')
echo $NEXT_PUBLIC_SUPABASE_URL
echo $NEXT_PUBLIC_SUPABASE_ANON_KEY
echo $DATABASE_URL
print_status "Supabase started at: $DATABASE_URL"

# Install Playwright browsers
print_status "Installing Playwright browsers..."
npx playwright install chromium # --with-deps

# Start Firebase emulators
print_status "Starting Firebase emulators..."
yarn emulate & PIDS+=($!)
npx wait-on http://localhost:3000

# Wait for emulators to be ready
print_status "Waiting for Firebase emulators..."
npx wait-on \
  http-get://127.0.0.1:9099 \
  --timeout 30000
npx tsx scripts/setup-auth.ts

# Build backend (required?)
./scripts/build_api.sh
# NOTE(review): tests also run below with proper status capture; this early
# invocation ignores its exit status.
npx playwright test tests/e2e

# Seed test data if script exists
if [ -f "scripts/seed-test-data.ts" ]; then
  print_status "Seeding test data..."
  npx tsx scripts/seed-test-data.ts
fi

# NOTE(review): TEST_FAILED is never set before this point, so this always
# exits 0 and everything below is dead code as written.
exit ${TEST_FAILED:-0}

# Start backend API
print_status "Starting backend API..."
yarn --cwd=backend/api dev & PIDS+=($!)

# Wait for API to be ready
print_status "Waiting for API..."
npx wait-on http://localhost:8088/health --timeout 30000 || {
  print_error "API failed to start"
  exit 1
}

# Start Next.js app
print_status "Starting Next.js app..."
yarn --cwd=web dev & PIDS+=($!)

# Wait for Next.js to be ready
print_status "Waiting for Next.js..."
npx wait-on http://localhost:3000 --timeout 60000 || {
  print_error "Next.js failed to start"
  exit 1
}

# Run Playwright tests, capturing the exit status instead of aborting (set -e)
print_status "Running Playwright tests..."
TEST_FAILED=0
npx playwright test tests/e2e "$@" || TEST_FAILED=$?
if [ $TEST_FAILED -eq 0 ]; then
  print_status "${GREEN}All tests passed!${NC}"
else
  print_error "Some tests failed (exit code: $TEST_FAILED)"
fi
exit $TEST_FAILED

View File

@@ -1,62 +0,0 @@
#!/bin/bash
# What runs on each port?
# - 4000: Firebase emulator
# - 3000: Front end
# - 8088: Back end
# How to view users? Each user is stored in two locations for two different purposes:
# In the auth system (firebase emulator) to see the auth info (email, provider, etc.): http://127.0.0.1:4000/auth
# In the database (dev supabase project, users and private_users table) to see the user info specific to compass (username, notif preferences, etc.): use DBeaver to connect to the dev supabase db
# Clean ghost processes
# Free the well-known dev ports by killing whatever is still listening there.
kill_ghosts() {
  local port stale
  for port in 3000 4000 4400 4500 8088; do
    stale=$(lsof -ti :"$port" 2>/dev/null)
    if [ -n "$stale" ]; then
      # intentionally unquoted: lsof may return several PIDs
      kill $stale || true
    fi
  done
}
kill_ghosts
set -euo pipefail

# Function to clean up background processes
# Kills every recorded PID, waits for it, then sweeps the dev ports again.
cleanup() {
  echo "Stopping background processes..."
  for pid in "${PIDS[@]:-}"; do
    # Only signal processes that are still running
    if kill -0 "$pid" 2>/dev/null; then
      kill "$pid" || true
      wait "$pid" 2>/dev/null || true
      echo "Killed PID $pid"
    fi
  done
  kill_ghosts
}

# Trap EXIT, INT, TERM to run cleanup automatically
trap cleanup EXIT INT TERM

# Change to project root
cd "$(dirname "$0")"/..

# Point the web app at the local Firebase emulators and the local API
export NEXT_PUBLIC_API_URL=localhost:8088
export NEXT_PUBLIC_FIREBASE_ENV=DEV
export NEXT_PUBLIC_FIREBASE_EMULATOR=true
export FIREBASE_AUTH_EMULATOR_HOST=127.0.0.1:9099
export FIREBASE_STORAGE_EMULATOR_HOST=127.0.0.1:9199

# Start servers in background and store their PIDs
PIDS=()
npx nyc --reporter=lcov yarn --cwd=web serve & PIDS+=($!)
npx nyc --reporter=lcov yarn --cwd=backend/api dev & PIDS+=($!)
yarn emulate & PIDS+=($!)
npx wait-on http://localhost:3000

# This creates a new user in firebase auth only (not in the db, hence it won't show in the list of profiles)
npx tsx scripts/setup-auth.ts

# Block until the developer exits; the EXIT trap then runs cleanup
read -p "Press enter to exit..." < /dev/tty
exit ${TEST_FAILED:-0}

86
scripts/run_local_isolated.sh Executable file
View File

@@ -0,0 +1,86 @@
#!/bin/bash
# Run the web app locally in full isolation (database, storage and authentication all stored locally)
# What runs on each port?
# - 4000: Firebase emulator UI
# - 9099: Firebase emulator authentication
# - 9199: Firebase emulator storage
# - 54323: Supabase emulator UI
# - 54322: Supabase emulator Database (direct client)
# - 54321: Supabase emulator Database (front-end client)
# - 3000: Front end
# - 8088: Back end
# How to view users? Each user is stored in two locations for two different purposes:
# In the auth system (firebase emulator) to see the auth info (email, provider, etc.): http://127.0.0.1:4000/auth
# In the database (users and private_users table) to see the user info specific to compass (username, notif preferences, etc.): http://127.0.0.1:54323
# Clean ghost processes
# Terminate any stale listeners still bound to the dev ports.
kill_ghosts() {
  local p leftover
  for p in 3000 4000 4400 4500 8088; do
    leftover=$(lsof -ti :"$p" 2>/dev/null)
    if [ -n "$leftover" ]; then
      # unquoted on purpose: may expand to multiple PIDs
      kill $leftover || true
    fi
  done
}
kill_ghosts
set -euo pipefail
# Stop every background server we spawned, then sweep the dev ports again.
cleanup() {
  local spawned
  echo "Stopping background processes..."
  for spawned in "${PIDS[@]:-}"; do
    # Only signal processes that are still alive
    if kill -0 "$spawned" 2>/dev/null; then
      kill "$spawned" || true
      wait "$spawned" 2>/dev/null || true
      echo "Killed PID $spawned"
    fi
  done
  kill_ghosts
}
# Trap EXIT, INT, TERM to run cleanup automatically
trap cleanup EXIT INT TERM

# Change to project root
cd "$(dirname "$0")"/..

# Load test environment variables.
# Guarded with -f: under `set -e` an unguarded `cat .env.test` would abort
# the script when the file is missing (run_e2e.sh performs the same check).
# NOTE(review): `export $(cat ... | xargs)` breaks on values with spaces or
# quotes — presumably .env.test only holds simple KEY=VALUE pairs.
if [ -f .env.test ]; then
  export $(cat .env.test | grep -v '^#' | xargs)
fi

# Ensure Supabase local stack is running; if not, reset/start it
STATUS_JSON=$(supabase status --output json 2>/dev/null || echo '')
API_URL=$(echo "$STATUS_JSON" | jq -r '.API_URL // empty')
if [ -z "$API_URL" ]; then
  echo "Supabase is not running. Bootstrapping local stack with: yarn test:db:reset"
  yarn test:db:reset
  STATUS_JSON=$(supabase status --output json)
fi

# Point the app at the local Supabase stack
export NEXT_PUBLIC_SUPABASE_URL=$(echo "$STATUS_JSON" | jq -r '.API_URL')
export NEXT_PUBLIC_SUPABASE_ANON_KEY=$(echo "$STATUS_JSON" | jq -r '.ANON_KEY')
export DATABASE_URL=$(echo "$STATUS_JSON" | jq -r '.DB_URL')
# printf with a quoted expansion: safe even if a value starts with '-'
printf '%s\n' "$NEXT_PUBLIC_SUPABASE_URL"
printf '%s\n' "$NEXT_PUBLIC_SUPABASE_ANON_KEY"
printf '%s\n' "$DATABASE_URL"

# Start servers in background and store their PIDs
PIDS=()
npx nyc --reporter=lcov yarn --cwd=web serve & PIDS+=($!)
npx nyc --reporter=lcov yarn --cwd=backend/api dev & PIDS+=($!)
yarn emulate & PIDS+=($!)
npx wait-on http://localhost:3000

echo ""
echo "✅ Isolated web app fully running and ready!"
echo " Useful links:"
echo " - Front end: http://127.0.0.1:3000"
echo " - Supabase UI: http://127.0.0.1:54323"
echo " - Firebase UI: http://127.0.0.1:4000"
echo ""

# Block until the developer is done; the EXIT trap then runs cleanup.
# TEST_FAILED is never set in this script, so this always exits 0.
read -p "Press enter to exit..." < /dev/tty
exit ${TEST_FAILED:-0}

56
scripts/seed-test-data.ts Normal file
View File

@@ -0,0 +1,56 @@
// TODO: add test data to firebase emulator as well (see example below, but user IDs from supabase and firebase need to be the same)
import {createSupabaseDirectClient} from "shared/lib/supabase/init";
import UserAccountInformation from "../tests/e2e/backend/utils/userInformation";
import {seedDatabase} from "../tests/e2e/utils/seedDatabase";
import axios from 'axios';
import {config} from '../tests/e2e/web/SPEC_CONFIG.js';
/**
 * Create a user in the local Firebase Auth emulator via its REST API.
 *
 * @param email - email address for the new account
 * @param password - password for the new account
 * @returns a placeholder user ID (see TODO below)
 */
async function createAuth(email: string, password: string) {
    const base = 'http://localhost:9099/identitytoolkit.googleapis.com/v1';
    await axios.post(`${base}/accounts:signUp?key=fake-api-key`, {
        email: email,
        password: password,
        returnSecureToken: true
    });
    // Bug fix: log the email that was actually created; previously this
    // always printed config.USERS.DEV_1.EMAIL regardless of the argument.
    console.log('Auth created', email)
    // TODO: retrieve the real user ID (`localId`) from the signUp response
    // instead of fabricating one from the current time.
    const userId = Date.now().toString()
    return userId
}
// Can remove this later once we update tests/e2e/web/fixtures/signInFixture.ts
// NOTE(review): this call is not awaited — it races with the IIFE below,
// which may start seeding before DEV_1's auth account exists; confirm intended.
createAuth(config.USERS.DEV_1.EMAIL, config.USERS.DEV_1.PASSWORD)
// Profile richness used when seeding: how much data each fake user gets.
type ProfileType = 'basic' | 'medium' | 'full'
// NOTE(review): no semicolon before this IIFE — parsing relies on ASI after
// the preceding statements; an explicit ';' would be safer.
(async () => {
const pg = createSupabaseDirectClient()
//Edit the count seedConfig to specify the amount of each profiles to create
const seedConfig = [
{count: 1, profileType: 'basic' as ProfileType},
{count: 1, profileType: 'medium' as ProfileType},
{count: 1, profileType: 'full' as ProfileType},
]
for (const {count, profileType} of seedConfig) {
for (let i = 0; i < count; i++) {
// Each seeded user gets a fresh auth account plus a database profile.
const userInfo = new UserAccountInformation()
userInfo.user_id = await createAuth(userInfo.email, userInfo.password)
if (i == 0) {
// Seed the first profile with deterministic data for the e2e tests
userInfo.name = 'Franklin Buckridge'
}
console.log('Seeded user:', userInfo)
await seedDatabase(pg, userInfo, profileType)
}
}
process.exit(0)
})()

41
scripts/test_db_migration.sh Executable file
View File

@@ -0,0 +1,41 @@
#!/bin/bash
# Reset and migrate the local test database started by
# scripts/docker-compose.test.yml (drops the public schema, re-applies
# backend/supabase/migration.sql).
set -euo pipefail

cd "$(dirname "$0")/.."

# Test database config (hardcoded - no .env needed; must match docker-compose.test.yml)
export DB_HOST=localhost
export DB_PORT=5433
export DB_USER=test_user
export DB_NAME=test_db
export PGPASSWORD=test_password

# Build connection URL
export DATABASE_URL="postgresql://$DB_USER:$PGPASSWORD@$DB_HOST:$DB_PORT/$DB_NAME"

echo "Migrating test database: $DATABASE_URL"

# Prefer a local psql; fall back to running psql inside the container.
if ! command -v psql &> /dev/null; then
  echo "Error: psql not found. Use docker exec instead."
  echo "Running: docker exec scripts-postgres-test-1 psql ..."
  # Wipe the schema inside the container (variables quoted so an unusual
  # user/db name cannot word-split the command line).
  # NOTE(review): the container name assumes the compose project is named
  # "scripts" — confirm with `docker compose -f scripts/docker-compose.test.yml ps`.
  docker exec -i scripts-postgres-test-1 psql -U "$DB_USER" -d "$DB_NAME" <<EOF
DROP SCHEMA public CASCADE;
CREATE SCHEMA public;
EOF
  # Apply migration via docker
  docker exec -i scripts-postgres-test-1 psql -U "$DB_USER" -d "$DB_NAME" < backend/supabase/migration.sql
else
  # Using local psql: clear existing schema, then apply the migration
  psql "$DATABASE_URL" -c "DROP SCHEMA public CASCADE; CREATE SCHEMA public;"
  psql "$DATABASE_URL" -f backend/supabase/migration.sql
fi

echo "✅ Test database migration complete"