first commit
Copilot Setup Steps / copilot-setup-steps (push) Has been cancelled

This commit is contained in:
2026-04-22 19:51:20 +07:00
commit 93d1b7c3d3
579 changed files with 99797 additions and 0 deletions
+46
View File
@@ -0,0 +1,46 @@
#!/bin/sh
# Entrypoint script for the Analytics service in compose.yml
## This script configures the environment for the Analytics service to use Tinybird local.
## It depends on the `tb-cli` service, which creates the `.env` file, which is mounted
## into the Analytics service container at `/app/.env`.
# Note: the analytics service's container is based on alpine, hence `sh` instead of `bash`.
set -eu

# Initialize child process variable
# Holds the PID of the backgrounded service so the trap can forward signals to it.
child=""

# Handle shutdown signals gracefully.
# Forwards TERM to the child, waits for it to exit, then exits 0 so the
# container stops cleanly instead of being killed.
_term() {
    echo "Caught SIGTERM/SIGINT signal, shutting down gracefully..."
    if [ -n "$child" ]; then
        # `|| true`: the child may already be gone; that is not an error here.
        kill -TERM "$child" 2>/dev/null || true
        wait "$child" 2>/dev/null || true
    fi
    exit 0
}

# Set up signal handlers (POSIX-compliant signal names)
trap _term TERM INT

# Set the TINYBIRD_TRACKER_TOKEN environment variable from the .env file
# This file is created by the `tb-cli` service and mounted into the Analytics service container
if [ -f /mnt/shared-config/.env.tinybird ]; then
    . /mnt/shared-config/.env.tinybird
    if [ -n "${TINYBIRD_TRACKER_TOKEN:-}" ]; then
        # Export so the token is visible to the launched service, not just this shell.
        export TINYBIRD_TRACKER_TOKEN="$TINYBIRD_TRACKER_TOKEN"
        echo "Tinybird tracker token configured successfully"
    else
        echo "WARNING: TINYBIRD_TRACKER_TOKEN not found in /mnt/shared-config/.env.tinybird" >&2
    fi
else
    echo "WARNING: /mnt/shared-config/.env.tinybird file not found - Tinybird tracking may not work" >&2
fi

# Start the process in the background
# Backgrounding (instead of exec) keeps this shell alive so the trap above can run.
"$@" &
child=$!

# Wait for the child process
# `wait` returns the child's exit status, which becomes the script's exit status.
wait "$child"
+59
View File
@@ -0,0 +1,59 @@
# Global options: use Caddy's internal CA to issue certificates for local HTTPS.
{
    local_certs
}
# Run `sudo ./docker/caddy/trust_caddy_ca.sh` while the caddy container is running to trust the Caddy CA

# Reusable snippet containing the routing shared by the localhost sites below.
(common_ghost_config) {
    log {
        output stdout
        format json
    }
    # Proxy analytics requests with any prefix (e.g. /.ghost/analytics/ or /blog/.ghost/analytics/)
    @analytics_paths path_regexp analytics_match ^(.*)/\.ghost/analytics(.*)$
    handle @analytics_paths {
        # Forward only the path suffix captured after /.ghost/analytics
        rewrite * {re.analytics_match.2}
        reverse_proxy {$ANALYTICS_PROXY_TARGET}
    }
    # The Ember live-reload script is served by the admin dev server, not Ghost.
    handle /ember-cli-live-reload.js {
        reverse_proxy admin:4200
    }
    # Everything else goes to the Ghost backend.
    reverse_proxy server:2368
}

# Allow http to be used
## Disables automatic redirect to https in development
http://localhost {
    import common_ghost_config
}

# Allow https to be used by explicitly requesting https://localhost
## Note: Caddy uses self-signed certificates. Your browser will warn you about this.
## Run `sudo ./docker/caddy/trust_caddy_ca.sh` while the caddy container is running to trust the Caddy CA
https://localhost {
    import common_ghost_config
}

# Access Ghost at https://site.ghost
## Add the following to your /etc/hosts file:
## 127.0.0.1 site.ghost
site.ghost {
    reverse_proxy server:2368
}

# Access Ghost Admin at https://admin.ghost/ghost
## Add the following to your /etc/hosts file:
## 127.0.0.1 admin.ghost
admin.ghost {
    handle /ember-cli-live-reload.js {
        reverse_proxy admin:4200
    }
    handle {
        reverse_proxy server:2368
    }
}
+18
View File
@@ -0,0 +1,18 @@
# E2E Test Caddyfile - Routes analytics requests to the analytics service
:80 {
    log {
        output stdout
        format json
    }
    # Proxy analytics requests with any prefix (e.g. /.ghost/analytics/ or /blog/.ghost/analytics/)
    @analytics_paths path_regexp analytics_match ^(.*)/\.ghost/analytics(.*)$
    handle @analytics_paths {
        # Forward only the path suffix captured after /.ghost/analytics
        rewrite * {re.analytics_match.2}
        reverse_proxy {$ANALYTICS_PROXY_TARGET}
    }
    # Default response for healthcheck and other requests
    # E2E tests create Ghost instances dynamically, so we don't proxy to a fixed server
    respond "OK" 200
}
+57
View File
@@ -0,0 +1,57 @@
#!/bin/bash
# Copies Caddy's locally-generated root CA certificate out of the running Caddy
# container and installs it as a trusted root in the macOS System Keychain, so
# browsers stop warning about Caddy's self-signed development certificates.
#
# Usage: sudo ./docker/caddy/trust_caddy_ca.sh  (the caddy container must be running)

# --- Configuration ---
# !! IMPORTANT: Set this to your Caddy Docker container name or ID !!
CADDY_CONTAINER_NAME="ghost-caddy" # PLEASE UPDATE IF YOUR CONTAINER NAME IS DIFFERENT
# Path where Caddy stores its local root CA inside the container
CADDY_INTERNAL_CERT_PATH="/data/caddy/pki/authorities/local/root.crt"
# Temporary path on your host to save the certificate
HOST_TEMP_CERT_PATH="./caddy_local_root_for_keychain.crt"
# --- End Configuration ---

# Check if running as root (needed for 'security add-trusted-cert' on the System Keychain)
if [ "$(id -u)" -ne 0 ]; then
    echo "This script must be run as root (e.g., using sudo) to modify the System Keychain." >&2
    exit 1
fi

echo "--- Managing Caddy Local CA Trust ---"
echo "Attempting to copy Caddy's local root CA certificate from container '$CADDY_CONTAINER_NAME'..."

# Step 1: Copy the certificate from the Docker container
# `if ! cmd` is used instead of checking $? afterwards; failures are reported on stderr.
if ! docker cp "${CADDY_CONTAINER_NAME}:${CADDY_INTERNAL_CERT_PATH}" "${HOST_TEMP_CERT_PATH}"; then
    echo "Error: Failed to copy certificate from Docker container." >&2
    echo "Please ensure the container name '$CADDY_CONTAINER_NAME' is correct and the container is running." >&2
    echo "Also, Caddy needs to have served an HTTPS site at least once to generate its local CA." >&2
    exit 1
fi
echo "Certificate copied successfully to ${HOST_TEMP_CERT_PATH}"

echo "Adding certificate to System Keychain and trusting it..."
# Step 2: Add the certificate to the System Keychain and set trust settings
if ! security add-trusted-cert -d -r trustRoot -k "/Library/Keychains/System.keychain" "${HOST_TEMP_CERT_PATH}"; then
    echo "Error: Failed to add or trust the certificate in Keychain." >&2
    echo "You might see a duplicate if a previous version of this CA with the same name was already added but not fully trusted." >&2
    # Clean up the temp cert
    rm -f "${HOST_TEMP_CERT_PATH}"
    exit 1
fi
echo "Certificate successfully added to System Keychain and trusted."

# Step 3: Clean up the temporary certificate file
rm -f "${HOST_TEMP_CERT_PATH}"
echo "Temporary certificate file cleaned up."

echo "--- Caddy Local CA Trust complete ---"
echo ""
echo "Script finished."
echo "IMPORTANT: You may need to restart your web browser(s) and/or clear your browser cache for the changes to take full effect."
exit 0
+219
View File
@@ -0,0 +1,219 @@
# Development gateway Caddyfile: routes between the Ghost backend container
# and the frontend dev servers running on the host (see docker/caddy README).
{
    # Disable the Caddy admin API; configuration comes only from this file.
    admin off
}
:80 {
    # Compact log format for development
    # NOTE: `format transform` requires the transform-encoder plugin installed
    # by the accompanying Dockerfile (`caddy add-package`).
    log {
        output stdout
        format transform "{common_log}"
    }
    # Ember live reload (runs on separate port 4201)
    # This handles both the script injection and WebSocket connections
    handle /ember-cli-live-reload.js {
        reverse_proxy {env.ADMIN_LIVE_RELOAD_SERVER} {
            header_up Host {http.reverse_proxy.upstream.hostport}
            header_up X-Forwarded-Host {host}
            # Enable WebSocket support for live reload
            header_up Connection {>Connection}
            header_up Upgrade {>Upgrade}
        }
    }
    # Ghost API - must go to Ghost backend, not admin dev server
    handle /ghost/api/* {
        reverse_proxy {env.GHOST_BACKEND} {
            header_up Host {host}
            header_up X-Real-IP {remote_host}
            header_up X-Forwarded-For {remote_host}
            # Always tell Ghost requests are HTTPS to prevent redirects
            header_up X-Forwarded-Proto https
        }
    }
    # Analytics API - proxy analytics requests to analytics service
    # Handles paths like /.ghost/analytics/* or /blog/.ghost/analytics/*
    @analytics_paths path_regexp analytics_match ^(.*)/\.ghost/analytics(.*)$
    handle @analytics_paths {
        # Forward only the path suffix captured after /.ghost/analytics
        rewrite * {re.analytics_match.2}
        reverse_proxy {env.ANALYTICS_PROXY_TARGET} {
            header_up Host {host}
            header_up X-Forwarded-Host {host}
            header_up X-Real-IP {remote_host}
            header_up X-Forwarded-For {remote_host}
        }
    }
    # ActivityPub API - proxy activityPub requests to activityPub service (running in separate project)
    # Requires activitypub containers to be running via the ActivityPub project's docker-compose
    handle /.ghost/activitypub/* {
        reverse_proxy {env.ACTIVITYPUB_PROXY_TARGET} {
            header_up Host {host}
            header_up X-Real-IP {remote_host}
            header_up X-Forwarded-For {remote_host}
            header_up X-Forwarded-Proto https
        }
    }
    # WebFinger - required for ActivityPub federation
    handle /.well-known/webfinger {
        reverse_proxy {env.ACTIVITYPUB_PROXY_TARGET} {
            header_up Host {host}
            header_up X-Real-IP {remote_host}
            header_up X-Forwarded-For {remote_host}
            header_up X-Forwarded-Proto https
        }
    }
    # NodeInfo - required for ActivityPub federation
    handle /.well-known/nodeinfo {
        reverse_proxy {env.ACTIVITYPUB_PROXY_TARGET} {
            header_up Host {host}
            header_up X-Real-IP {remote_host}
            header_up X-Forwarded-For {remote_host}
            header_up X-Forwarded-Proto https
        }
    }
    # Public app dev server assets - must come BEFORE general /ghost/* handler
    # Ghost is configured to load these from /ghost/assets/* via compose.dev.yaml
    handle /ghost/assets/* {
        # Strip /ghost/assets/ prefix
        uri strip_prefix /ghost/assets
        # Koenig Lexical Editor (optional - for developing Lexical in separate Koenig repo)
        # Requires EDITOR_URL=/ghost/assets/koenig-lexical/ when starting admin dev server
        # Falls back to Ghost backend (built package) via handle_errors if dev server isn't running
        @lexical path /koenig-lexical/*
        handle @lexical {
            uri strip_prefix /koenig-lexical
            reverse_proxy {env.LEXICAL_DEV_SERVER} {
                header_up Host {http.reverse_proxy.upstream.hostport}
                header_up X-Forwarded-Host {host}
                # Fail quickly if dev server is down
                fail_duration 1s
                unhealthy_request_count 1
            }
        }
        # Portal
        @portal path /portal/*
        handle @portal {
            uri strip_prefix /portal
            reverse_proxy {env.PORTAL_DEV_SERVER} {
                header_up Host {http.reverse_proxy.upstream.hostport}
                header_up X-Forwarded-Host {host}
            }
        }
        # Comments UI
        @comments path /comments-ui/*
        handle @comments {
            uri strip_prefix /comments-ui
            reverse_proxy {env.COMMENTS_DEV_SERVER} {
                header_up Host {http.reverse_proxy.upstream.hostport}
                header_up X-Forwarded-Host {host}
            }
        }
        # Signup Form
        @signup path /signup-form/*
        handle @signup {
            uri strip_prefix /signup-form
            reverse_proxy {env.SIGNUP_DEV_SERVER} {
                header_up Host {http.reverse_proxy.upstream.hostport}
                header_up X-Forwarded-Host {host}
            }
        }
        # Sodo Search
        @search path /sodo-search/*
        handle @search {
            uri strip_prefix /sodo-search
            reverse_proxy {env.SEARCH_DEV_SERVER} {
                header_up Host {http.reverse_proxy.upstream.hostport}
                header_up X-Forwarded-Host {host}
            }
        }
        # Announcement Bar
        @announcement path /announcement-bar/*
        handle @announcement {
            uri strip_prefix /announcement-bar
            reverse_proxy {env.ANNOUNCEMENT_DEV_SERVER} {
                header_up Host {http.reverse_proxy.upstream.hostport}
                header_up X-Forwarded-Host {host}
            }
        }
        # Everything else under /ghost/assets/* goes to admin dev server
        handle {
            # Re-add the prefix we stripped for admin dev server
            rewrite * /ghost/assets{path}
            reverse_proxy {env.ADMIN_DEV_SERVER} {
                header_up Host {http.reverse_proxy.upstream.hostport}
                header_up X-Forwarded-Host {host}
            }
        }
    }
    # Auth frame - must go to Ghost backend for comment admin authentication
    handle /ghost/auth-frame/* {
        reverse_proxy {env.GHOST_BACKEND} {
            header_up Host {host}
            header_up X-Real-IP {remote_host}
            header_up X-Forwarded-For {remote_host}
            header_up X-Forwarded-Proto https
        }
    }
    # JWKS endpoint - must go to Ghost backend for JWT verification
    handle /ghost/.well-known/* {
        reverse_proxy {env.GHOST_BACKEND} {
            header_up Host {host}
            header_up X-Real-IP {remote_host}
            header_up X-Forwarded-For {remote_host}
            header_up X-Forwarded-Proto https
        }
    }
    # Admin interface - served from admin dev server
    # This includes /ghost/, etc. (but /ghost/assets/* is handled above)
    # Also handles WebSocket upgrade requests for HMR
    # NOTE: the /ghost* matcher also covers the bare /ghost path (no trailing slash).
    handle /ghost* {
        reverse_proxy {env.ADMIN_DEV_SERVER} {
            header_up X-Forwarded-Host {host}
        }
    }
    # Everything else goes to Ghost backend
    handle {
        reverse_proxy {env.GHOST_BACKEND} {
            header_up Host {host}
            header_up X-Real-IP {remote_host}
            header_up X-Forwarded-For {remote_host}
            # Always tell Ghost requests are HTTPS to prevent redirects
            header_up X-Forwarded-Proto https
        }
    }
    # Handle errors
    handle_errors {
        # Fallback for Lexical when dev server is unavailable (502/503/504)
        # Forwards to Ghost backend which serves the built koenig-lexical package
        @lexical_fallback `{http.request.orig_uri.path}.startsWith("/ghost/assets/koenig-lexical/")`
        handle @lexical_fallback {
            rewrite * {http.request.orig_uri.path}
            reverse_proxy {env.GHOST_BACKEND} {
                header_up Host {host}
                header_up X-Forwarded-Proto https
            }
        }
        # Default error response
        respond "{err.status_code} {err.status_text}"
    }
}
+36
View File
@@ -0,0 +1,36 @@
# Build mode Caddyfile
# Used for testing pre-built images (local or registry)
{
    # Disable the Caddy admin API; configuration is static.
    admin off
}
:80 {
    log {
        output stdout
        format console
    }
    # Analytics API - proxy to analytics service
    # Handles paths like /.ghost/analytics/* or /blog/.ghost/analytics/*
    @analytics_paths path_regexp analytics_match ^(.*)/\.ghost/analytics(.*)$
    handle @analytics_paths {
        # Forward only the path suffix captured after /.ghost/analytics
        rewrite * {re.analytics_match.2}
        reverse_proxy {env.ANALYTICS_PROXY_TARGET} {
            header_up Host {host}
            header_up X-Forwarded-Host {host}
            header_up X-Real-IP {remote_host}
            header_up X-Forwarded-For {remote_host}
        }
    }
    # Everything else to Ghost
    handle {
        reverse_proxy {env.GHOST_BACKEND} {
            header_up Host {host}
            header_up X-Real-IP {remote_host}
            header_up X-Forwarded-For {remote_host}
            # Always tell Ghost requests are HTTPS to prevent redirects
            header_up X-Forwarded-Proto https
        }
    }
}
+19
View File
@@ -0,0 +1,19 @@
# Dev gateway image: Caddy plus the transform-encoder log plugin.
# Base image is pinned by digest for reproducible builds.
FROM caddy:2-alpine@sha256:fce4f15aad23222c0ac78a1220adf63bae7b94355d5ea28eee53910624acedfa
# transform-encoder provides the `format transform` log encoder used in the Caddyfile.
RUN caddy add-package github.com/caddyserver/transform-encoder
# Default proxy targets (can be overridden via environment variables)
ENV GHOST_BACKEND=ghost-dev:2368 \
    ADMIN_DEV_SERVER=host.docker.internal:5174 \
    ADMIN_LIVE_RELOAD_SERVER=host.docker.internal:4200 \
    PORTAL_DEV_SERVER=host.docker.internal:4175 \
    COMMENTS_DEV_SERVER=host.docker.internal:7173 \
    SIGNUP_DEV_SERVER=host.docker.internal:6174 \
    SEARCH_DEV_SERVER=host.docker.internal:4178 \
    ANNOUNCEMENT_DEV_SERVER=host.docker.internal:4177 \
    LEXICAL_DEV_SERVER=host.docker.internal:4173 \
    ANALYTICS_PROXY_TARGET=analytics:3000 \
    ACTIVITYPUB_PROXY_TARGET=host.docker.internal:8080
COPY Caddyfile /etc/caddy/Caddyfile
EXPOSE 80 2368
+55
View File
@@ -0,0 +1,55 @@
# Dev Gateway (Caddy)
This directory contains the Caddy reverse proxy configuration for the Ghost development environment.
## Purpose
The Caddy reverse proxy container:
1. **Routes Ghost requests** to the Ghost container backend
2. **Proxies asset requests** to local dev servers running on the host
3. **Enables hot-reload** for frontend development without rebuilding Ghost
## Configuration
### Environment Variables
Caddy uses environment variables (set in `compose.dev.yaml`) to configure proxy targets:
- `GHOST_BACKEND` - Ghost container hostname (e.g., `ghost-dev:2368`)
- `ADMIN_DEV_SERVER` - React admin dev server (e.g., `host.docker.internal:5174`)
- `ADMIN_LIVE_RELOAD_SERVER` - Ember live reload WebSocket (e.g., `host.docker.internal:4200`)
- `PORTAL_DEV_SERVER` - Portal dev server (e.g., `host.docker.internal:4175`)
- `COMMENTS_DEV_SERVER` - Comments UI (e.g., `host.docker.internal:7173`)
- `SIGNUP_DEV_SERVER` - Signup form (e.g., `host.docker.internal:6174`)
- `SEARCH_DEV_SERVER` - Sodo search (e.g., `host.docker.internal:4178`)
- `ANNOUNCEMENT_DEV_SERVER` - Announcement bar (e.g., `host.docker.internal:4177`)
- `LEXICAL_DEV_SERVER` - *Optional:* Local Koenig Lexical editor dev server (e.g., `host.docker.internal:4173`)
- For developing Lexical in the separate [Koenig repository](https://github.com/TryGhost/Koenig)
- Requires `EDITOR_URL=/ghost/assets/koenig-lexical/` when starting admin dev server
- Automatically falls back to Ghost backend (built package) if dev server is not running
- `ACTIVITYPUB_PROXY_TARGET` - *Optional:* ActivityPub service (e.g., `host.docker.internal:8080`)
- For developing with the [ActivityPub project](https://github.com/TryGhost/ActivityPub) running locally
- Requires the ActivityPub docker-compose services to be running
**Note:** AdminX React apps (admin-x-settings, activitypub, posts, stats) are served through the admin dev server so they don't need separate proxy entries.
### Ghost Configuration
Ghost is configured via environment variables in `compose.dev.yaml` to load public app assets from `/ghost/assets/*` (e.g., `portal__url: /ghost/assets/portal/portal.min.js`). This uses the same path structure as built admin assets.
### Routing Rules
The Caddyfile defines these routing rules:
| Path Pattern | Target | Purpose |
|--------------------------------------|-------------------------------------|------------------------------------------------------------------------|
| `/ember-cli-live-reload.js` | Admin live reload (port 4200) | Ember hot-reload script and WebSocket |
| `/ghost/api/*` | Ghost backend | Ghost API (bypasses admin dev server) |
| `/.ghost/activitypub/*` | ActivityPub server (port 8080) | *Optional:* ActivityPub API (requires AP project running) |
| `/.well-known/webfinger` | ActivityPub server (port 8080) | *Optional:* WebFinger for federation |
| `/.well-known/nodeinfo` | ActivityPub server (port 8080) | *Optional:* NodeInfo for federation |
| `/ghost/assets/koenig-lexical/*` | Lexical dev server (port 4173) | *Optional:* Koenig Lexical editor (falls back to Ghost if not running) |
| `/ghost/assets/portal/*` | Portal dev server (port 4175) | Membership UI |
| `/ghost/assets/comments-ui/*` | Comments dev server (port 7173) | Comments widget |
| `/ghost/assets/signup-form/*` | Signup dev server (port 6174) | Signup form widget |
| `/ghost/assets/sodo-search/*` | Search dev server (port 4178) | Search widget (JS + CSS) |
| `/ghost/assets/announcement-bar/*` | Announcement dev server (port 4177) | Announcement widget |
| `/ghost/assets/*` | Admin dev server (port 5174) | Other admin assets (fallback) |
| `/ghost/*` | Admin dev server (port 5174) | Admin interface |
| Everything else | Ghost backend | Main Ghost application |
**Note:** All port numbers listed are the host ports where dev servers run by default.
+56
View File
@@ -0,0 +1,56 @@
#!/bin/bash
set -euo pipefail
# Entrypoint for the Ghost dev container: refreshes dependencies when the
# lockfile changes, loads shared Tinybird/Stripe config, then runs the CMD.

# Runs `pnpm install` if `pnpm-lock.yaml` has changed to avoid a full `docker build` when changing branches/dependencies
## Dockerfile calculates a hash and stores it in `.pnpmhash/pnpm-lock.yaml.md5`
## compose.yml mounts a named volume to persist the `.pnpmhash` directory
(
    cd /home/ghost
    pnpm_lock_hash_file_path=".pnpmhash/pnpm-lock.yaml.md5"
    calculated_hash=$(md5sum pnpm-lock.yaml | awk '{print $1}')
    # Decide whether dependencies need to be (re)installed.
    install_needed=false
    if [ ! -f "$pnpm_lock_hash_file_path" ]; then
        echo "WARNING: pnpm-lock.yaml hash file ($pnpm_lock_hash_file_path) not found. Running pnpm install as a precaution." >&2
        install_needed=true
    elif [ "$(cat "$pnpm_lock_hash_file_path")" != "$calculated_hash" ]; then
        echo "INFO: pnpm-lock.yaml has changed. Running pnpm install..."
        install_needed=true
    fi
    # Single install-and-record path (previously duplicated verbatim in both branches).
    if [ "$install_needed" = true ]; then
        pnpm install
        mkdir -p .pnpmhash
        echo "$calculated_hash" > "$pnpm_lock_hash_file_path"
    fi
)

# Configure Ghost to use Tinybird Local
# Tokens are written by the tb-cli service; Ghost reads nested config keys
# from double-underscore environment variables (tinybird__*).
if [ -f /mnt/shared-config/.env.tinybird ]; then
    source /mnt/shared-config/.env.tinybird
    if [ -n "${TINYBIRD_WORKSPACE_ID:-}" ] && [ -n "${TINYBIRD_ADMIN_TOKEN:-}" ]; then
        export tinybird__workspaceId="$TINYBIRD_WORKSPACE_ID"
        export tinybird__adminToken="$TINYBIRD_ADMIN_TOKEN"
    else
        echo "WARNING: Tinybird not enabled: Missing required environment variables" >&2
    fi
else
    echo "WARNING: Tinybird not enabled: .env file not found" >&2
fi

# Configure Stripe webhook secret
if [ -f /mnt/shared-config/.env.stripe ]; then
    source /mnt/shared-config/.env.stripe
    if [ -n "${STRIPE_WEBHOOK_SECRET:-}" ]; then
        export WEBHOOK_SECRET="$STRIPE_WEBHOOK_SECRET"
        echo "Stripe webhook secret configured successfully"
    else
        echo "WARNING: Stripe webhook secret not found in shared config" >&2
    fi
fi

# Clear stale nx cache/daemon state before starting
pnpm nx reset

# Execute the CMD
exec "$@"
+58
View File
@@ -0,0 +1,58 @@
# Minimal Development Dockerfile for Ghost Core
# Source code is mounted at runtime for hot-reload support
ARG NODE_VERSION=22.18.0
FROM node:$NODE_VERSION-bullseye-slim

# Install system dependencies needed for building native modules
RUN apt-get update && \
    apt-get install -y \
    build-essential \
    curl \
    python3 \
    git && \
    rm -rf /var/lib/apt/lists/* && \
    apt clean

WORKDIR /home/ghost

# Copy package files for dependency installation
# (only manifests are copied so this layer caches until dependencies change)
COPY package.json pnpm-lock.yaml pnpm-workspace.yaml .npmrc ./
COPY ghost/core/package.json ghost/core/package.json
COPY ghost/i18n/package.json ghost/i18n/package.json
COPY ghost/parse-email-address/package.json ghost/parse-email-address/package.json

# Install dependencies
# Note: Dependencies are installed at build time, but source code is mounted at runtime.
# Copy root lifecycle scripts/hooks needed by `pnpm install`
COPY .github/scripts .github/scripts
COPY .github/hooks .github/hooks
# Enable corepack so it can read packageManager from package.json and provide pnpm
RUN corepack enable
# Install deps with a persistent pnpm store cache to speed up rebuilds
RUN --mount=type=cache,target=/root/.local/share/pnpm/store,id=pnpm-store \
    pnpm install --frozen-lockfile --prefer-offline

# Copy entrypoint script that optionally loads Tinybird config
COPY docker/ghost-dev/entrypoint.sh entrypoint.sh
RUN chmod +x entrypoint.sh

# Public app assets are served via /ghost/assets/* in dev mode.
# Caddy forwards these paths to host frontend dev servers.
ENV portal__url=/ghost/assets/portal/portal.min.js \
    comments__url=/ghost/assets/comments-ui/comments-ui.min.js \
    sodoSearch__url=/ghost/assets/sodo-search/sodo-search.min.js \
    sodoSearch__styles=/ghost/assets/sodo-search/main.css \
    signupForm__url=/ghost/assets/signup-form/signup-form.min.js \
    announcementBar__url=/ghost/assets/announcement-bar/announcement-bar.min.js

# Source code will be mounted from host at /home/ghost/ghost/core
# This allows the Ghost dev script to pick up file changes for hot-reload
WORKDIR /home/ghost/ghost/core

ENTRYPOINT ["/home/ghost/entrypoint.sh"]
CMD ["pnpm", "dev"]
+35
View File
@@ -0,0 +1,35 @@
# Ghost Core Dev Docker Image
Minimal Docker image for running Ghost Core in development with hot-reload support.
## Purpose
This lightweight image:
- Installs only Ghost Core dependencies
- Mounts source code from the host at runtime
- Enables `nodemon` for automatic restarts on file changes
- Works with the Caddy gateway to proxy frontend assets from host dev servers
## Key Differences from Main Dockerfile
**Main `Dockerfile`** (for E2E tests, full builds):
- Builds all frontend apps (Admin, Portal, AdminX apps, etc.)
- Bundles everything into the image
- ~15 build stages, 5-10 minute build time
**This `Dockerfile`** (for local development):
- Only installs dependencies
- No frontend builds or bundling
- Source code mounted at runtime
- Used for: Local development with `pnpm dev`
## Usage
This image is used automatically when running:
```bash
pnpm dev # Starts Docker backend + frontend dev servers on host
pnpm dev:analytics # Include Tinybird analytics
pnpm dev:storage # Include MinIO S3-compatible object storage
pnpm dev:all # Include all optional services
```
+34
View File
@@ -0,0 +1,34 @@
#!/bin/bash
set -euo pipefail
# Entrypoint helper: loads shared dev config (Tinybird, Stripe) from the
# shared-config mount, then hands control to the container CMD.

# Configure Ghost to use Tinybird Local
# Sources tokens from /mnt/shared-config/.env.tinybird created by tb-cli
if [ -f /mnt/shared-config/.env.tinybird ]; then
    source /mnt/shared-config/.env.tinybird
    if [ -n "${TINYBIRD_WORKSPACE_ID:-}" ] && [ -n "${TINYBIRD_ADMIN_TOKEN:-}" ]; then
        # Ghost reads nested config keys from double-underscore env vars.
        export tinybird__workspaceId="$TINYBIRD_WORKSPACE_ID"
        export tinybird__adminToken="$TINYBIRD_ADMIN_TOKEN"
        echo "Tinybird configuration loaded successfully"
    else
        echo "WARNING: Tinybird not enabled: Missing required environment variables in .env.tinybird" >&2
    fi
else
    echo "WARNING: Tinybird not enabled: .env.tinybird file not found at /mnt/shared-config/.env.tinybird" >&2
fi

# Configure Stripe webhook secret
if [ -f /mnt/shared-config/.env.stripe ]; then
    source /mnt/shared-config/.env.stripe
    if [ -n "${STRIPE_WEBHOOK_SECRET:-}" ]; then
        export WEBHOOK_SECRET="$STRIPE_WEBHOOK_SECRET"
        echo "Stripe webhook secret configured successfully"
    else
        # Diagnostic goes to stderr, matching the Tinybird warnings above.
        echo "WARNING: Stripe webhook secret not found in shared config" >&2
    fi
fi

# Execute the CMD
exec "$@"
+15
View File
@@ -0,0 +1,15 @@
## This file is used to point to the folder where the dashboards are stored
## To edit or create a dashboard, add a .json file to the ./dashboards folder
apiVersion: 1
providers:
  - name: "Dashboard provider"
    orgId: 1
    type: file
    # Allow dashboards to be deleted from the Grafana UI
    disableDeletion: false
    # How often Grafana re-scans the dashboards folder for changes
    updateIntervalSeconds: 10
    allowUiUpdates: true
    options:
      path: /var/lib/grafana/dashboards
      # Mirror the on-disk folder structure as Grafana folders
      foldersFromFilesStructure: true
File diff suppressed because it is too large Load Diff
@@ -0,0 +1,9 @@
# Grafana datasource provisioning: registers the Prometheus container.
apiVersion: 1
datasources:
  - name: Prometheus
    type: prometheus
    # Prometheus service hostname on the compose network
    url: http://prometheus:9090
    isDefault: true
    # 'proxy' = the Grafana backend makes the requests (not the browser)
    access: proxy
    editable: true
+15
View File
@@ -0,0 +1,15 @@
#!/bin/sh
# Initializes the MinIO bucket used for Ghost dev object storage:
# creates the bucket if needed and allows anonymous (public) downloads.
# Note: `pipefail` is not portable under `#!/bin/sh` (e.g. dash lacks it) and
# this script contains no pipelines, so plain `set -eu` is used instead.
set -eu

# Bucket name can be overridden via MINIO_BUCKET; defaults to ghost-dev.
BUCKET=${MINIO_BUCKET:-ghost-dev}

echo "Configuring MinIO alias..."
# MINIO_ROOT_USER / MINIO_ROOT_PASSWORD must be provided by the environment
# (set -u aborts with a clear error if either is missing).
mc alias set local http://minio:9000 "${MINIO_ROOT_USER}" "${MINIO_ROOT_PASSWORD}"

echo "Ensuring bucket '${BUCKET}' exists..."
mc mb --ignore-existing "local/${BUCKET}"

echo "Setting anonymous download policy on '${BUCKET}'..."
mc anonymous set download "local/${BUCKET}"

echo "MinIO bucket '${BUCKET}' ready."
View File
+26
View File
@@ -0,0 +1,26 @@
global:
  scrape_interval: 15s # By default, scrape targets every 15 seconds.

# A scrape configuration containing exactly one endpoint to scrape:
# Here it's Prometheus itself.
scrape_configs:
  # The job name is added as a label `job=<job_name>` to any timeseries scraped from this config.
  - job_name: 'prometheus'
    # Override the global default and scrape targets from this job every 5 seconds.
    scrape_interval: 5s
    static_configs:
      - targets: ['localhost:9090']
  - job_name: 'pushgateway'
    # Scrape aggressively so pushed metrics appear almost immediately.
    scrape_interval: 1s
    static_configs:
      - targets: ['pushgateway:9091']
    # Keep labels pushed by clients instead of overwriting them with scrape-target labels.
    honor_labels: true

# Forward scraped samples to Grafana.
# NOTE(review): targets Grafana's /api/prom/push endpoint — confirm the Grafana
# instance is configured to accept remote write before relying on this.
remote_write:
  - url: http://grafana:3000/api/prom/push
+95
View File
@@ -0,0 +1,95 @@
#!/bin/sh
# Entrypoint script for the Stripe CLI service in compose.yml
## This script fetches the webhook secret from Stripe CLI and writes it to a shared config file
## that the Ghost server can read to verify webhook signatures.
# Note: the stripe CLI container is based on alpine, hence `sh` instead of `bash`.
set -eu

# Initialize child process variable
child=""

# Handle shutdown signals gracefully.
_term() {
    echo "Caught SIGTERM/SIGINT signal, shutting down gracefully..."
    if [ -n "$child" ]; then
        kill -TERM "$child" 2>/dev/null || true
        wait "$child" 2>/dev/null || true
    fi
    exit 0
}

# Set up signal handlers (POSIX-compliant signal names)
trap _term TERM INT

# Remove any stale config file from previous runs
rm -f /mnt/shared-config/.env.stripe

# Check if STRIPE_SECRET_KEY is set
if [ -z "${STRIPE_SECRET_KEY:-}" ]; then
    echo "================================================================================"
    echo "ERROR: STRIPE_SECRET_KEY is not set"
    echo ""
    echo "To use the Stripe service, you must set STRIPE_SECRET_KEY in your .env file:"
    echo " STRIPE_SECRET_KEY=sk_test_..."
    echo ""
    echo "You can find your secret key at: https://dashboard.stripe.com/test/apikeys"
    echo "================================================================================"
    exit 1
fi
echo "Using STRIPE_SECRET_KEY for authentication"

# Fetch the webhook secret with timeout
# stderr is captured too (2>&1) so any CLI error output can be shown below.
echo "Fetching Stripe webhook secret..."
WEBHOOK_SECRET=$(timeout 10s stripe listen --print-secret --api-key "${STRIPE_SECRET_KEY}" 2>&1 || echo "TIMEOUT")

# Check if we got a timeout
# NOTE: any non-zero exit from the command above (not only a timeout) lands here.
if [ "$WEBHOOK_SECRET" = "TIMEOUT" ]; then
    echo "ERROR: Timed out waiting for Stripe CLI (10s)" >&2
    echo "Please check that your STRIPE_SECRET_KEY is valid" >&2
    exit 1
fi

# Check if we got a valid secret (should start with "whsec_")
if echo "$WEBHOOK_SECRET" | grep -q "^whsec_"; then
    echo "Successfully fetched webhook secret"
else
    echo "ERROR: Failed to fetch Stripe webhook secret" >&2
    echo "Output: $WEBHOOK_SECRET" >&2
    echo "Please ensure STRIPE_SECRET_KEY is set in your environment" >&2
    exit 1
fi

# Write the webhook secret to the shared config file
# Written to a temp path first and then moved into place so readers never
# observe a partially-written file.
ENV_FILE="/mnt/shared-config/.env.stripe"
TMP_ENV_FILE="/mnt/shared-config/.env.stripe.tmp"
echo "Writing Stripe configuration to $ENV_FILE..."
# `if ! cmd` is used instead of checking $? afterwards: under `set -e` a failing
# command would exit the script before the old $?-checks could ever run.
if ! cat > "$TMP_ENV_FILE" << EOF
STRIPE_WEBHOOK_SECRET=$WEBHOOK_SECRET
EOF
then
    echo "ERROR: Failed to create temporary configuration file" >&2
    rm -f "$TMP_ENV_FILE"
    exit 1
fi
if ! mv "$TMP_ENV_FILE" "$ENV_FILE"; then
    echo "ERROR: Failed to move temporary file to $ENV_FILE" >&2
    exit 1
fi
echo "Successfully wrote Stripe configuration to $ENV_FILE"

# Start stripe listen in the background
echo "Starting Stripe webhook listener forwarding to ${GHOST_URL}/members/webhooks/stripe/"
stripe listen --forward-to "${GHOST_URL}/members/webhooks/stripe/" --api-key "${STRIPE_SECRET_KEY}" &
child=$!

# Wait for the child process
wait "$child"
+41
View File
@@ -0,0 +1,41 @@
#!/bin/bash
# Wrapper script to run commands with the Stripe profile enabled
# Checks for STRIPE_SECRET_KEY before starting, failing early with helpful error
#
# Usage: ./docker/stripe/with-stripe.sh <command>
# Example: ./docker/stripe/with-stripe.sh nx run ghost-monorepo:docker:dev

# Abort on the first failing command.
set -e
# Returns 0 when a Stripe secret key is available, either exported in the
# environment or set to a non-empty value in the local .env file; 1 otherwise.
check_stripe_key() {
    # The exported environment variable takes precedence
    if [ -n "${STRIPE_SECRET_KEY:-}" ]; then
        return 0
    fi
    # Otherwise accept a non-empty STRIPE_SECRET_KEY= assignment in ./.env
    if [ -f .env ]; then
        if grep -qE '^STRIPE_SECRET_KEY=.+' .env; then
            return 0
        fi
    fi
    return 1
}
# Fail fast with setup instructions when no Stripe key is configured anywhere.
if ! check_stripe_key; then
    echo ""
    echo "================================================================================"
    echo "ERROR: STRIPE_SECRET_KEY is not set"
    echo ""
    echo "To use the Stripe service, set STRIPE_SECRET_KEY in your .env file or ENV vars:"
    echo " STRIPE_SECRET_KEY=sk_test_..."
    echo ""
    echo "You can find your secret key at: https://dashboard.stripe.com/test/apikeys"
    echo "================================================================================"
    echo ""
    exit 1
fi

# Run the command with the stripe profile enabled
# Appends "stripe" to any existing COMPOSE_PROFILES value (comma-separated),
# then replaces this process with the requested command.
export COMPOSE_PROFILES="${COMPOSE_PROFILES:+$COMPOSE_PROFILES,}stripe"
exec "$@"
+21
View File
@@ -0,0 +1,21 @@
# Tinybird CLI (tb) container used to build/deploy analytics pipes to Tinybird Local.
# Base image pinned by digest for reproducible builds.
FROM python:3.13-slim@sha256:eefe082c4b73082d83b8e7705ed999bc8a1dae57fe1ea723f907a0fc4b90f088
# Install uv from Astral.sh
COPY --from=ghcr.io/astral-sh/uv:0.11.6@sha256:b1e699368d24c57cda93c338a57a8c5a119009ba809305cc8e86986d4a006754 /uv /uvx /bin/
# Install dependencies
RUN apt-get update && apt-get install -y --no-install-recommends \
    curl \
    jq \
    ca-certificates \
    && rm -rf /var/lib/apt/lists/*
WORKDIR /home/tinybird
# Pin the Tinybird CLI to an exact build for reproducible behavior.
RUN uv tool install tinybird@0.0.1.dev285 --python 3.13 --force
# uv installs tools into /root/.local/bin
ENV PATH="/root/.local/bin:$PATH"
COPY docker/tb-cli/entrypoint.sh /usr/local/bin
RUN chmod +x /usr/local/bin/entrypoint.sh
ENTRYPOINT ["/usr/local/bin/entrypoint.sh"]
+105
View File
@@ -0,0 +1,105 @@
#!/bin/bash
# Entrypoint script for the Tinybird CLI service in compose.yml
## Deploys the Tinybird files to Tinybird local, then retrieves configuration
## values (workspace ID, tokens) and writes them to a shared .env file that
## Ghost and the Analytics service use to connect to Tinybird Local.
set -euo pipefail

# Build/deploy the Tinybird project against the local Tinybird instance.
tb --local build

# Ask the Tinybird Local container for its workspace metadata as JSON.
TB_INFO=$(tb --output json info)

# Extract and validate the workspace ID.
WORKSPACE_ID=$(jq -r '.local.workspace_id' <<<"$TB_INFO")
if [ -z "$WORKSPACE_ID" ] || [ "$WORKSPACE_ID" = "null" ]; then
    echo "Error: Failed to get workspace ID from Tinybird. Please ensure Tinybird is running and initialized." >&2
    exit 1
fi

# Extract and validate the workspace admin token.
WORKSPACE_TOKEN=$(jq -r '.local.token' <<<"$TB_INFO")
if [ -z "$WORKSPACE_TOKEN" ] || [ "$WORKSPACE_TOKEN" = "null" ]; then
    echo "Error: Failed to get workspace token from Tinybird. Please ensure Tinybird is running and initialized." >&2
    exit 1
fi
#
# Get the admin token from the Tinybird API
## This is different from the workspace admin token. Retry because the API
## may not be ready immediately after the container starts.
echo "Fetching tokens from Tinybird API..."
MAX_RETRIES=10
RETRY_DELAY=1
# Pre-initialize so the post-loop validation can't trip `set -u` when every
# attempt fails (previously ADMIN_TOKEN was only assigned on a successful curl,
# and an all-failures run died with "unbound variable" instead of the message below).
ADMIN_TOKEN=""
TOKENS_RESPONSE=""
for i in $(seq 1 "$MAX_RETRIES"); do
    # Temporarily disable -e so a failed curl doesn't abort the retry loop.
    set +e
    TOKENS_RESPONSE=$(curl --fail --show-error -s -H "Authorization: Bearer $WORKSPACE_TOKEN" http://tinybird-local:7181/v0/tokens 2>&1)
    CURL_EXIT=$?
    set -e
    if [ "$CURL_EXIT" -eq 0 ]; then
        # Find admin token by looking for ADMIN scope (more robust than name matching).
        # `|| ADMIN_TOKEN=""` keeps a jq parse failure (e.g. non-JSON response)
        # from aborting the script under `set -e -o pipefail`; we retry instead.
        ADMIN_TOKEN=$(echo "$TOKENS_RESPONSE" | jq -r '.tokens[] | select(.scopes[]? | .type == "ADMIN") | .token' | head -n1) || ADMIN_TOKEN=""
        if [ -n "$ADMIN_TOKEN" ] && [ "$ADMIN_TOKEN" != "null" ]; then
            break
        fi
    fi
    if [ "$i" -lt "$MAX_RETRIES" ]; then
        echo "Attempt $i failed, retrying in ${RETRY_DELAY}s..." >&2
        sleep "$RETRY_DELAY"
    fi
done
# Check if admin token is valid
if [ -z "$ADMIN_TOKEN" ] || [ "$ADMIN_TOKEN" = "null" ]; then
    echo "Error: Failed to get admin token from Tinybird API after $MAX_RETRIES attempts. Please ensure Tinybird is properly configured." >&2
    echo "Tokens response: $TOKENS_RESPONSE" >&2
    exit 1
fi
echo "Successfully found admin token with ADMIN scope"
# Get the tracker token from the same response, guarded the same way so a jq
# failure produces the friendly error below rather than a bare `set -e` abort.
TRACKER_TOKEN=$(echo "$TOKENS_RESPONSE" | jq -r '.tokens[] | select(.name == "tracker") | .token') || TRACKER_TOKEN=""
# Check if tracker token is valid
if [ -z "$TRACKER_TOKEN" ] || [ "$TRACKER_TOKEN" = "null" ]; then
    echo "Error: Failed to get tracker token from Tinybird API. Please ensure Tinybird is properly configured." >&2
    exit 1
fi
# Write environment variables to .env file
ENV_FILE="/mnt/shared-config/.env.tinybird"
TMP_ENV_FILE="/mnt/shared-config/.env.tinybird.tmp"
echo "Writing Tinybird configuration to $ENV_FILE..."
# Write to a temp file then rename so readers never observe a partial file.
# NOTE: the previous `if [ $? -eq 0 ]` checks were dead code -- under
# `set -euo pipefail` a failing `cat`/`mv` aborts before $? is tested, so
# the error branches could never run. `if ! cmd` makes them reachable.
if ! cat > "$TMP_ENV_FILE" <<EOF
TINYBIRD_WORKSPACE_ID=$WORKSPACE_ID
TINYBIRD_ADMIN_TOKEN=$ADMIN_TOKEN
TINYBIRD_TRACKER_TOKEN=$TRACKER_TOKEN
EOF
then
    echo "Error: Failed to create temporary configuration file" >&2
    rm -f "$TMP_ENV_FILE"
    exit 1
fi

if ! mv "$TMP_ENV_FILE" "$ENV_FILE"; then
    echo "Error: Failed to move temporary file to $ENV_FILE" >&2
    exit 1
fi
echo "Successfully wrote Tinybird configuration to $ENV_FILE"

# Hand off to the container's main command, if one was given.
exec "$@"
+245
View File
@@ -0,0 +1,245 @@
#!/usr/bin/env node
// Build-and-watch orchestrator for the Ghost admin apps: builds every project
// through nx, then watches each project's src/ folder and rebuilds on change.
const { spawn } = require('child_process');
const chokidar = require('chokidar');
const path = require('path');
// Colors for output
// ANSI terminal escape sequences used to colorize per-app log prefixes.
const colors = {
reset: '\x1b[0m',
red: '\x1b[31m',
green: '\x1b[32m',
yellow: '\x1b[33m',
blue: '\x1b[34m',
magenta: '\x1b[35m',
cyan: '\x1b[36m',
white: '\x1b[37m'
};
// App configurations - now only need paths, colors, and nx project names
// `path` is relative to /home/ghost; `nxName` is the nx project identifier
// passed to `nx run-many --projects=...`.
const apps = {
shade: { path: 'apps/shade', color: colors.white, nxName: '@tryghost/shade' },
design: { path: 'apps/admin-x-design-system', color: colors.cyan, nxName: '@tryghost/admin-x-design-system' },
framework: { path: 'apps/admin-x-framework', color: colors.magenta, nxName: '@tryghost/admin-x-framework' },
activitypub: { path: 'apps/admin-x-activitypub', color: colors.red, nxName: '@tryghost/admin-x-activitypub' },
settings: { path: 'apps/admin-x-settings', color: colors.green, nxName: '@tryghost/admin-x-settings' },
posts: { path: 'apps/posts', color: colors.yellow, nxName: '@tryghost/posts' },
stats: { path: 'apps/stats', color: colors.blue, nxName: '@tryghost/stats' }
};
// Track all child processes and watchers for cleanup
const activeProcesses = new Set();
const activeWatchers = new Set();
// Log a message prefixed with the app's name in its assigned color.
// Falls back to an uncolored prefix when appName isn't a configured app
// (the watcher handler passes the literal 'unknown' when no project matches,
// which previously made `app.color` throw on `undefined`).
function log(appName, message) {
    const app = apps[appName] || { color: '' };
    console.log(`${app.color}[${appName}]${colors.reset} ${message}`);
}
// Rebuild every configured project with a single `nx run-many` invocation,
// letting nx handle dependency ordering, caching and parallelism.
// `triggerApp` is only used to attribute log output to the app whose change
// triggered the rebuild. The returned promise always resolves (never rejects)
// so a failed build cannot tear down the watch loop.
function buildAllProjects(triggerApp) {
return new Promise((resolve) => {
log(triggerApp, 'Running nx run-many to rebuild all projects...');
const allProjects = Object.values(apps).map(app => app.nxName).join(',');
// NX_DAEMON=false: run nx without its background daemon in the container.
const child = spawn('pnpm', ['nx', 'run-many', '-t', 'build', `--projects=${allProjects}`], {
cwd: '/home/ghost',
stdio: 'pipe',
env: {
...process.env,
NX_DAEMON: 'false'
}
});
// Track the process so cleanup() can terminate an in-flight build on shutdown.
activeProcesses.add(child);
// Forward nx stdout/stderr line by line through the colored logger,
// skipping blank lines.
child.stdout.on('data', (data) => {
data.toString().split('\n').forEach(line => {
if (line.trim()) log(triggerApp, `nx: ${line}`);
});
});
child.stderr.on('data', (data) => {
data.toString().split('\n').forEach(line => {
if (line.trim()) log(triggerApp, `nx: ${line}`);
});
});
child.on('close', (code) => {
activeProcesses.delete(child);
if (code === 0) {
log(triggerApp, 'All builds complete');
} else {
log(triggerApp, `Some builds failed with code ${code}`);
}
resolve();
});
child.on('error', (error) => {
activeProcesses.delete(child);
log(triggerApp, `Build error: ${error.message}`);
// Resolve (not reject) so the watcher keeps running after a spawn failure.
resolve();
});
});
}
// Start a single chokidar watcher over every app's src/ directory.
// Any change (debounced) triggers a full `nx run-many` rebuild; nx caching
// keeps unchanged projects cheap. Returns the watcher instance.
function startWatching() {
    const roots = Object.values(apps).map(app => path.join('/home/ghost', app.path, 'src'));
    console.log('Watching all project src folders for changes...');

    const watcher = chokidar.watch(roots, {
        persistent: true,
        ignoreInitial: true,
        usePolling: true,
        interval: 1000
    });

    // Track the watcher for cleanup
    activeWatchers.add(watcher);

    // Debounce: collapse bursts of filesystem events into one rebuild.
    let pendingRebuild;
    watcher.on('all', (event, filePath) => {
        const relativePath = path.relative('/home/ghost', filePath);
        // Attribute the change to a project for nicer log output.
        const changedProject = Object.keys(apps).find(name => filePath.includes(apps[name].path)) || 'unknown';
        log(changedProject, `Change detected: ${event} ${relativePath}`);
        clearTimeout(pendingRebuild);
        pendingRebuild = setTimeout(() => {
            buildAllProjects(changedProject);
        }, 500);
    });

    // Watcher failures are unrecoverable here; exit and let the supervisor restart.
    watcher.on('error', (error) => {
        console.log(`Watcher error: ${error.message}`);
        console.log('Exiting process - Docker will restart the service');
        process.exit(1);
    });

    watcher.on('close', () => {
        console.log('Watcher closed unexpectedly');
        console.log('Exiting process - Docker will restart the service');
        process.exit(1);
    });

    return watcher;
}
// Entry point: one full build of every project, then watch-and-rebuild forever.
async function main() {
console.log('Starting admin apps build and watch system...');
try {
// Phase 1: Build everything with nx handling dependency order and parallelization
const allProjects = Object.values(apps).map(app => app.nxName).join(',');
const child = spawn('pnpm', ['nx', 'run-many', '-t', 'build', `--projects=${allProjects}`], {
cwd: '/home/ghost',
stdio: 'pipe',
env: {
...process.env,
NX_DAEMON: 'false'
}
});
// Track so cleanup() can terminate the initial build on shutdown.
activeProcesses.add(child);
// Stream nx output to the console, skipping blank lines.
child.stdout.on('data', (data) => {
data.toString().split('\n').forEach(line => {
if (line.trim()) console.log(`[nx] ${line}`);
});
});
child.stderr.on('data', (data) => {
data.toString().split('\n').forEach(line => {
if (line.trim()) console.log(`[nx] ${line}`);
});
});
// Wait for the initial build to finish before starting the watcher.
await new Promise((resolve, reject) => {
child.on('close', (code) => {
activeProcesses.delete(child);
if (code === 0) {
console.log('\nAll projects built successfully!');
resolve();
} else {
// Deliberately resolve on failure: the watcher should still start
// so a subsequent source change can retry the build.
console.log(`\nSome builds failed, but continuing with watch processes...`);
resolve(); // Don't crash the watch system if some builds fail
}
});
child.on('error', (error) => {
activeProcesses.delete(child);
reject(error);
});
});
// Phase 2: Start single watcher for all projects
// Single watcher for all projects - any change triggers nx run-many (with caching)
const watcher = startWatching();
console.log('\nAll watch processes started. Press Ctrl+C to stop.');
// Keep the process alive (never-resolving promise; shutdown is signal-driven)
await new Promise(() => { });
} catch (error) {
// Only the spawn 'error' path rejects above; treat it as fatal.
console.error('Failed to start:', error.message);
process.exit(1);
}
}
// Graceful shutdown: terminate children, close watchers, then exit.
// `code` is the process exit status; error paths pass 1 so supervisors
// (e.g. Docker restart policies) can distinguish a crash from Ctrl+C.
function cleanup(code = 0) {
    console.log('\nShutting down...');

    // Ask all active child processes to terminate, escalating to SIGKILL.
    for (const child of activeProcesses) {
        try {
            child.kill('SIGTERM');
            // Force kill after 1 second if still running
            setTimeout(() => {
                if (!child.killed) {
                    child.kill('SIGKILL');
                }
            }, 1000);
        } catch (error) {
            // Process might already be dead
        }
    }

    // Close all watchers
    for (const watcher of activeWatchers) {
        try {
            watcher.close();
        } catch (error) {
            // Watcher might already be closed
        }
    }

    console.log('Cleanup complete.');
    if (activeProcesses.size === 0) {
        // Nothing to wait for -- exit immediately.
        process.exit(code);
    } else {
        // Exiting synchronously here would cancel the SIGKILL timers above
        // (the previous behavior), so wait out the 1s escalation window first.
        setTimeout(() => process.exit(code), 1100);
    }
}

// Handle various termination signals. Wrap in arrow functions so the signal
// name Node passes to the handler isn't mistaken for an exit code.
process.on('SIGINT', () => cleanup(0));
process.on('SIGTERM', () => cleanup(0));
process.on('SIGQUIT', () => cleanup(0));
// Unexpected failures exit non-zero so the supervisor restarts the service.
process.on('uncaughtException', (error) => {
    console.error('Uncaught exception:', error);
    cleanup(1);
});
process.on('unhandledRejection', (reason) => {
    console.error('Unhandled rejection:', reason);
    cleanup(1);
});
main();