Isolate frontend and delivery runtimes with separate volumes

This commit is contained in:
Ruslan Bakiev
2026-02-20 00:48:48 +07:00
parent 938c06240e
commit 8895810aea
3 changed files with 46 additions and 56 deletions

View File

@@ -3,10 +3,6 @@ set -euo pipefail
# Run from the script's parent directory so relative paths resolve the same
# way regardless of the caller's cwd.
cd "$(dirname "$0")/.."
# Serialize dependency install when multiple containers share the same workspace.
LOCK_FILE=/app/Frontend/.npm-install.lock
# Open fd 9 on the lock file; later `flock 9` calls lock through this fd.
exec 9>"$LOCK_FILE"
# Prevent path leakage between host Nuxt build cache and Docker runtime.
# If any cache contains absolute /Users/... imports, Nitro dev runtime can break in /app.
mkdir -p .nuxt .output
@@ -14,29 +10,24 @@ find .nuxt -mindepth 1 -maxdepth 1 -exec rm -rf {} + || true
# Empty .nuxt/.output without removing the directories themselves (they may
# be mount points); `|| true` keeps a failed find from aborting under `set -e`.
find .output -mindepth 1 -maxdepth 1 -exec rm -rf {} + || true
rm -rf node_modules/.cache node_modules/.vite
# Only installation steps are serialized; runtime must not hold the lock.
# NOTE(review): diff-rendered region — removed and re-added lines are
# interleaved here (the install comments/command appear twice), and the
# original script indentation is not preserved by the renderer.
(
# Block until an exclusive lock is held on fd 9 ($LOCK_FILE opened earlier).
flock 9
# Install deps (container starts from a clean image).
# This workspace has mixed Apollo/Nuxt peer graphs; keep install deterministic in Docker.
npm install --legacy-peer-deps
# Install deps (container starts from a clean image).
# This workspace has mixed Apollo/Nuxt peer graphs; keep install deterministic in Docker.
npm install --legacy-peer-deps
# sharp is a native module and can break when cached node_modules were installed
# for a different CPU variant (for example arm64v8). Force a local rebuild.
ARCH="$(uname -m)"
# arm64: try the vendor-specific arm64v8 target first, then generic arm64;
# if rebuilding in place fails, fall back to reinstalling sharp outright.
if [ "$ARCH" = "aarch64" ] || [ "$ARCH" = "arm64" ]; then
npm rebuild sharp --platform=linux --arch=arm64v8 \
|| npm rebuild sharp --platform=linux --arch=arm64 \
|| npm install sharp --platform=linux --arch=arm64v8 --save-exact=false \
|| npm install sharp --platform=linux --arch=arm64 --save-exact=false
elif [ "$ARCH" = "x86_64" ] || [ "$ARCH" = "amd64" ]; then
npm rebuild sharp --platform=linux --arch=x64 \
|| npm install sharp --platform=linux --arch=x64 --save-exact=false
else
# Unknown CPU: best-effort rebuild; never abort startup for this.
npm rebuild sharp || true
fi
# Subshell exit closes fd 9's duplicate and releases the flock.
)
# sharp is a native module and can break when cached node_modules were installed
# for a different CPU variant (for example arm64v8). Force a local rebuild.
ARCH="$(uname -m)"
# arm64: try the vendor-specific arm64v8 target first, then generic arm64;
# if both rebuilds fail, reinstall sharp for the matching platform/arch.
if [ "$ARCH" = "aarch64" ] || [ "$ARCH" = "arm64" ]; then
npm rebuild sharp --platform=linux --arch=arm64v8 \
|| npm rebuild sharp --platform=linux --arch=arm64 \
|| npm install sharp --platform=linux --arch=arm64v8 --save-exact=false \
|| npm install sharp --platform=linux --arch=arm64 --save-exact=false
elif [ "$ARCH" = "x86_64" ] || [ "$ARCH" = "amd64" ]; then
npm rebuild sharp --platform=linux --arch=x64 \
|| npm install sharp --platform=linux --arch=x64 --save-exact=false
else
# Unknown CPU: best-effort rebuild; `|| true` keeps `set -e` from aborting.
npm rebuild sharp || true
fi
# Wait until PostgreSQL is reachable before applying schema.
until node -e "const u=new URL(process.env.DATABASE_URL||''); const net=require('net'); const s=net.createConnection({host:u.hostname,port:Number(u.port||5432)}); s.on('connect',()=>{s.end(); process.exit(0);}); s.on('error',()=>process.exit(1)); setTimeout(()=>process.exit(1), 1000);" ; do

View File

@@ -3,31 +3,22 @@ set -euo pipefail
# Run from the script's parent directory so relative paths resolve consistently.
cd "$(dirname "$0")/.."
# Serialize dependency install when multiple containers share the same workspace.
LOCK_FILE=/app/Frontend/.npm-install.lock
# Open fd 9 on the lock file; `flock 9` below locks through this fd.
exec 9>"$LOCK_FILE"
# Only dependency bootstrap is serialized; worker runtime must not hold the lock.
# NOTE(review): diff-rendered region — this is the pre-change variant that
# wrapped the bootstrap in a flock-holding subshell; original indentation is
# not preserved by the renderer.
(
# Block until an exclusive lock is held on fd 9 ($LOCK_FILE opened above).
flock 9
# Worker container starts from clean image.
# Install deps without frontend postinstall hooks (nuxt prepare) to keep worker lean/stable.
npm install --ignore-scripts --legacy-peer-deps
ARCH="$(uname -m)"
# arm64: try the vendor-specific arm64v8 target first, then generic arm64;
# fall back to reinstalling sharp for the matching platform/arch.
if [ "$ARCH" = "aarch64" ] || [ "$ARCH" = "arm64" ]; then
npm rebuild sharp --platform=linux --arch=arm64v8 \
|| npm rebuild sharp --platform=linux --arch=arm64 \
|| npm install sharp --platform=linux --arch=arm64v8 --save-exact=false \
|| npm install sharp --platform=linux --arch=arm64 --save-exact=false
elif [ "$ARCH" = "x86_64" ] || [ "$ARCH" = "amd64" ]; then
npm rebuild sharp --platform=linux --arch=x64 \
|| npm install sharp --platform=linux --arch=x64 --save-exact=false
else
# Unknown CPU: best-effort rebuild; never abort startup for this.
npm rebuild sharp || true
fi
# Generate the Prisma client for the freshly installed dependencies
# (needed because --ignore-scripts skipped any postinstall generation).
npx prisma generate
# Subshell exit closes fd 9's duplicate and releases the flock.
)
# Worker container starts from clean image.
# Install deps without frontend postinstall hooks (nuxt prepare) to keep worker lean/stable.
npm install --ignore-scripts --legacy-peer-deps
ARCH="$(uname -m)"
# arm64: try the vendor-specific arm64v8 target first, then generic arm64;
# if both rebuilds fail, reinstall sharp for the matching platform/arch.
if [ "$ARCH" = "aarch64" ] || [ "$ARCH" = "arm64" ]; then
npm rebuild sharp --platform=linux --arch=arm64v8 \
|| npm rebuild sharp --platform=linux --arch=arm64 \
|| npm install sharp --platform=linux --arch=arm64v8 --save-exact=false \
|| npm install sharp --platform=linux --arch=arm64 --save-exact=false
elif [ "$ARCH" = "x86_64" ] || [ "$ARCH" = "amd64" ]; then
npm rebuild sharp --platform=linux --arch=x64 \
|| npm install sharp --platform=linux --arch=x64 --save-exact=false
else
# Unknown CPU: best-effort rebuild; `|| true` keeps `set -e` from aborting.
npm rebuild sharp || true
fi
# Generate the Prisma client for the freshly installed dependencies
# (needed because --ignore-scripts skipped any postinstall generation).
npx prisma generate
# Ensure DB is reachable before the worker starts consuming jobs.
until node -e "const u=new URL(process.env.DATABASE_URL||''); const net=require('net'); const s=net.createConnection({host:u.hostname,port:Number(u.port||5432)}); s.on('connect',()=>{s.end(); process.exit(0);}); s.on('error',()=>process.exit(1)); setTimeout(()=>process.exit(1), 1000);" ; do

View File

@@ -1,9 +1,12 @@
# NOTE(review): diff-rendered compose fragment — old and new lines appear
# back to back (both `working_dir` values, both `./Frontend` mounts), so
# keys look duplicated here; indentation was stripped by the renderer.
services:
frontend:
image: node:22-bookworm-slim
working_dir: /app/Frontend
working_dir: /app/frontend
volumes:
- ./Frontend:/app/Frontend
- ./Frontend:/app/frontend
# Named volumes mask node_modules and the Nuxt build dirs inside the bind
# mount, keeping container-installed deps/build output off the host checkout.
- frontend_node_modules:/app/frontend/node_modules
- frontend_nuxt:/app/frontend/.nuxt
- frontend_output:/app/frontend/.output
expose:
- "3000"
environment:
@@ -48,11 +51,12 @@ services:
- default
- dokploy-network
# NOTE(review): diff-rendered fragment — the service is renamed
# delivery-worker -> delivery; old/new lines for `working_dir` and the bind
# mount appear back to back, so keys look duplicated here.
delivery-worker:
delivery:
image: node:22-bookworm-slim
working_dir: /app/Frontend
working_dir: /app/delivery
volumes:
# Same host source tree as the frontend, mounted at a distinct container
# path with its own node_modules volume to isolate the two runtimes.
- ./Frontend:/app/Frontend
- ./Frontend:/app/delivery
- delivery_node_modules:/app/delivery/node_modules
environment:
# NOTE(review): these fallback URLs embed credentials in the compose file;
# acceptable only if they are local/dev defaults — confirm they are not
# production secrets before this lands in a public repo.
DATABASE_URL: "${DATABASE_URL:-postgresql://postgres:dpb6gmj1umjhohso@crm-sql-q57r8m:5432/postgres?schema=public}"
REDIS_URL: "${REDIS_URL:-redis://default:nw0mv1pemhnbh7gw@crm-redis-vkpxku:6379}"
@@ -198,6 +202,10 @@ services:
start_period: 5s
# Top-level named volumes. Bare `name:` entries are the idiomatic compose
# form for volumes with default (local-driver) settings.
volumes:
# Added by this commit: per-service volumes isolating frontend and delivery
# dependency/build state from each other and from the host bind mount.
frontend_node_modules:
frontend_nuxt:
frontend_output:
delivery_node_modules:
langfuse_postgres_data:
langfuse_clickhouse_data:
langfuse_clickhouse_logs: