690418:1638 Refactor Infra gitea
CI / CD Pipeline / build (push) Has been cancelled
CI / CD Pipeline / deploy (push) Has been cancelled

This commit is contained in:
2026-04-18 16:38:04 +07:00
parent 8b658e8530
commit 29a6509c58
36 changed files with 1824 additions and 157 deletions
@@ -0,0 +1,18 @@
# Per-stack .env.example (S3) — app stack
# Copied from the master template; keep only the vars this stack uses.
# Source: specs/04-Infrastructure-OPS/04-00-docker-compose/.env.template
#
# Usage (on QNAP):
#   cp /share/np-dms/.env.master /share/np-dms/app/.env
#   chmod 600 /share/np-dms/app/.env
# --- Consumed by docker-compose-app.yml ---
# Leave values blank here; fill them only in the gitignored .env (ADR-016).
DB_PASSWORD=
REDIS_PASSWORD=
ELASTICSEARCH_USERNAME=elastic
ELASTICSEARCH_PASSWORD=
# JWT_SECRET (backend) must differ from AUTH_SECRET (frontend NextAuth).
JWT_SECRET=
JWT_REFRESH_SECRET=
AUTH_SECRET=
BACKEND_IMAGE_TAG=latest
FRONTEND_IMAGE_TAG=latest
@@ -0,0 +1,210 @@
# File: /share/np-dms/app/docker-compose-app.yml
# DMS Container v1.8.6: Application Stack (Backend + Frontend)
# Application name: lcbp3-app
# ============================================================
# ⚠️ ใช้งานร่วมกับ services อื่นที่รันอยู่แล้วบน QNAP:
# - mariadb (lcbp3-db)
# - redis (lcbp3-redis)
# - cache (services)
# - search (services)
# - npm (lcbp3-npm)
# ============================================================
# 🔒 SECURITY (ADR-016, Tier-1):
# - ห้าม commit ค่า secret จริงในไฟล์นี้
# - ใช้ .env (gitignored) คู่กับ compose:
# docker compose --env-file .env -f docker-compose-app.yml up -d
# - QNAP Container Station 3.x รองรับ env_file แล้ว
# - JWT_SECRET (backend) ต้องคนละค่ากับ AUTH_SECRET (frontend NextAuth)
# ============================================================
name: lcbp3
x-restart: &restart_policy
restart: unless-stopped
x-logging: &default_logging
logging:
driver: 'json-file'
options:
max-size: '10m'
max-file: '5'
networks:
lcbp3:
external: true
services:
# ----------------------------------------------------------------
# 1. Backend API (NestJS)
# Service Name: backend (ตามที่ NPM อ้างอิง → backend:3000)
# ----------------------------------------------------------------
backend:
<<: [*restart_policy, *default_logging]
image: lcbp3-backend:${BACKEND_IMAGE_TAG:-latest}
container_name: backend
# M4: container hardening
user: 'node'
# L1: stdin_open/tty removed — production services ไม่ต้องใช้ interactive TTY
read_only: true
tmpfs:
- /tmp:rw,noexec,nosuid,size=256m
security_opt:
- no-new-privileges:true
cap_drop:
- ALL
deploy:
resources:
limits:
cpus: '2.0'
memory: 1536M
reservations:
cpus: '0.5'
memory: 512M
env_file:
- .env
environment:
TZ: 'Asia/Bangkok'
NODE_ENV: 'production'
# --- Database ---
DB_HOST: 'mariadb'
DB_PORT: '3306'
DB_DATABASE: 'lcbp3'
DB_USERNAME: 'center'
DB_PASSWORD: ${DB_PASSWORD:?DB_PASSWORD required}
# --- Redis ---
REDIS_HOST: 'cache'
REDIS_PORT: '6379'
REDIS_PASSWORD: ${REDIS_PASSWORD:?REDIS_PASSWORD required}
# --- Elasticsearch ---
ELASTICSEARCH_HOST: 'search'
ELASTICSEARCH_PORT: '9200'
ELASTICSEARCH_USERNAME: ${ELASTICSEARCH_USERNAME:-elastic}
ELASTICSEARCH_PASSWORD: ${ELASTICSEARCH_PASSWORD:?ELASTICSEARCH_PASSWORD required}
# --- JWT (backend only) ---
JWT_SECRET: ${JWT_SECRET:?JWT_SECRET required}
JWT_EXPIRATION: '8h'
JWT_REFRESH_SECRET: ${JWT_REFRESH_SECRET:?JWT_REFRESH_SECRET required}
# --- ClamAV (ADR-016 file upload scan) ---
CLAMAV_HOST: 'clamav'
CLAMAV_PORT: '3310'
# --- Numbering ---
NUMBERING_LOCK_TIMEOUT: '5000'
NUMBERING_RESERVATION_TTL: '300'
# --- File Upload ---
UPLOAD_TEMP_DIR: '/app/uploads/temp'
UPLOAD_PERMANENT_DIR: '/app/uploads/permanent'
PORT: '3000'
MAX_FILE_SIZE: '52428800'
networks:
- lcbp3
volumes:
# Two-Phase Storage: จัดเก็บไฟล์นอก container
- '/share/np-dms-as/data/uploads/temp:/app/uploads/temp'
- '/share/np-dms-as/data/uploads/permanent:/app/uploads/permanent'
- '/share/np-dms/data/logs/backend:/app/logs'
# Mount legacy staging folder to match n8n's output path
- '/share/np-dms-as/Legacy:/home/node/.n8n-files/staging_ai:ro'
healthcheck:
test: ['CMD', 'curl', '-f', 'http://localhost:3000/health']
interval: 30s
timeout: 10s
retries: 3
start_period: 30s
depends_on:
clamav:
condition: service_healthy
# ----------------------------------------------------------------
# 2. Frontend Web App (Next.js)
# Service Name: frontend (ตามที่ NPM อ้างอิง → frontend:3000)
# ----------------------------------------------------------------
frontend:
<<: [*restart_policy, *default_logging]
image: lcbp3-frontend:${FRONTEND_IMAGE_TAG:-latest}
container_name: frontend
# M4: container hardening (Next.js standalone runs as 'nextjs' user by default)
user: 'nextjs'
read_only: true
tmpfs:
- /tmp:rw,noexec,nosuid,size=128m
- /app/.next/cache:rw,size=256m
security_opt:
- no-new-privileges:true
cap_drop:
- ALL
# L1: stdin_open/tty removed
deploy:
resources:
limits:
cpus: '2.0'
memory: 2G
reservations:
cpus: '0.25'
memory: 512M
env_file:
- .env
environment:
TZ: 'Asia/Bangkok'
NODE_ENV: 'production'
HOSTNAME: '0.0.0.0'
PORT: '3000'
# --- API Backend URL ---
NEXT_PUBLIC_API_URL: 'https://backend.np-dms.work/api'
# --- NextAuth (ห้ามใช้ค่าเดียวกับ JWT_SECRET) ---
AUTH_SECRET: ${AUTH_SECRET:?AUTH_SECRET required}
AUTH_URL: 'https://lcbp3.np-dms.work'
AUTH_TRUST_HOST: 'true'
INTERNAL_API_URL: 'http://backend:3000/api'
networks:
- lcbp3
healthcheck:
test: ['CMD', 'curl', '-f', 'http://localhost:3000/']
interval: 30s
timeout: 10s
retries: 3
start_period: 60s
depends_on:
backend:
condition: service_healthy
# ----------------------------------------------------------------
# 3. ClamAV (Antivirus scanning for file uploads — ADR-016)
# Service Name: clamav (Backend อ้างอิง CLAMAV_HOST=clamav, port 3310)
# ----------------------------------------------------------------
clamav:
<<: [*restart_policy, *default_logging]
image: clamav/clamav:1.3
container_name: clamav
security_opt:
- no-new-privileges:true
cap_drop:
- ALL
cap_add:
- CHOWN
- SETUID
- SETGID
deploy:
resources:
limits:
cpus: '1.0'
memory: 2G
reservations:
cpus: '0.25'
memory: 1G
environment:
TZ: 'Asia/Bangkok'
CLAMAV_NO_FRESHCLAMD: 'false'
CLAMAV_NO_CLAMD: 'false'
CLAMD_STARTUP_TIMEOUT: '1800'
networks:
- lcbp3
volumes:
# cache definitions เพื่อไม่ต้อง download ทุกครั้งที่ restart
- '/share/np-dms/clamav/data:/var/lib/clamav'
- '/share/np-dms/data/logs/clamav:/var/log/clamav'
healthcheck:
test: ['CMD', 'clamdcheck.sh']
interval: 60s
timeout: 30s
retries: 3
start_period: 300s
@@ -0,0 +1,124 @@
# File: /share/np-dms/app/docker-compose.yml
# DMS Container v1.8.0: Application Stack (Backend + Frontend)
# Application name: lcbp3-app
# ============================================================
# ⚠️ ใช้งานร่วมกับ services อื่นที่รันอยู่แล้วบน QNAP:
# - mariadb (lcbp3-db)
# - cache (services)
# - search (services)
# - npm (lcbp3-npm)
# ============================================================
x-restart: &restart_policy
restart: unless-stopped
x-logging: &default_logging
logging:
driver: 'json-file'
options:
max-size: '10m'
max-file: '5'
networks:
lcbp3:
external: true
services:
# ----------------------------------------------------------------
# 1. Backend API (NestJS)
# Service Name: backend (ตามที่ NPM อ้างอิง → backend:3000)
# ----------------------------------------------------------------
backend:
<<: [*restart_policy, *default_logging]
image: lcbp3-backend:latest
container_name: backend
stdin_open: true
tty: true
deploy:
resources:
limits:
cpus: '2.0'
memory: 1536M
reservations:
cpus: '0.5'
memory: 512M
environment:
TZ: 'Asia/Bangkok'
NODE_ENV: 'production'
# --- Database ---
DB_HOST: 'mariadb'
DB_PORT: '3306'
DB_DATABASE: 'lcbp3'
DB_USERNAME: 'center'
DB_PASSWORD: 'Center#2025'
# --- Redis ---
REDIS_HOST: 'cache'
REDIS_PORT: '6379'
REDIS_PASSWORD: 'Center2025'
# --- Elasticsearch ---
ELASTICSEARCH_HOST: 'search'
ELASTICSEARCH_PORT: '9200'
# --- JWT ---
JWT_SECRET: 'eebc122aa65adde8c76c6a0847d9649b2b67a06db1504693e6c912e51499b76e'
JWT_EXPIRATION: '8h'
JWT_REFRESH_SECRET: 'a1b2c3d4e5f6a7b8c9d0e1f2a3b4c5d6e7f8a9b0c1d2e3f4a5b6c7d8e9f0a1b2'
# --- Numbering ---
NUMBERING_LOCK_TIMEOUT: '5000'
NUMBERING_RESERVATION_TTL: '300'
# --- File Upload ---
UPLOAD_TEMP_DIR: '/app/uploads/temp'
UPLOAD_PERMANENT_DIR: '/app/uploads/permanent'
MAX_FILE_SIZE: '52428800'
networks:
- lcbp3
volumes:
# Two-Phase Storage: จัดเก็บไฟล์นอก container
- '/share/np-dms/data/uploads/temp:/app/uploads/temp'
- '/share/np-dms/data/uploads/permanent:/app/uploads/permanent'
- '/share/np-dms/data/logs/backend:/app/logs'
# Mount legacy staging folder to match n8n's output path
- '/share/np-dms-as/Legacy:/home/node/.n8n-files/staging_ai:ro'
healthcheck:
test: ['CMD', 'curl', '-f', 'http://localhost:3000/health']
interval: 30s
timeout: 10s
retries: 3
start_period: 30s
# ----------------------------------------------------------------
# 2. Frontend Web App (Next.js)
# Service Name: frontend (ตามที่ NPM อ้างอิง → frontend:3000)
# ----------------------------------------------------------------
frontend:
<<: [*restart_policy, *default_logging]
image: lcbp3-frontend:latest
container_name: frontend
stdin_open: true
tty: true
deploy:
resources:
limits:
cpus: '2.0'
memory: 2G
reservations:
cpus: '0.25'
memory: 512M
environment:
TZ: 'Asia/Bangkok'
NODE_ENV: 'production'
HOSTNAME: '0.0.0.0'
PORT: '3000'
# --- NextAuth ---
AUTH_SECRET: 'eebc122aa65adde8c76c6a0847d9649b2b67a06db1504693e6c912e51499b76e'
AUTH_URL: 'https://lcbp3.np-dms.work'
networks:
- lcbp3
healthcheck:
test: ['CMD', 'curl', '-f', 'http://localhost:3000/']
interval: 30s
timeout: 10s
retries: 3
start_period: 60s
depends_on:
backend:
condition: service_healthy
@@ -0,0 +1 @@
# SECURITY FIX (ADR-016): a real password was committed here. Example files
# must ship blank; set the actual value only in the gitignored .env.
GITEA_DB_PASSWORD=
@@ -0,0 +1,93 @@
# File: /share/np-dms/git/docker-compose.yml
# DMS Container v1.8.6 — Application: git, Service: gitea
x-restart: &restart_policy
restart: unless-stopped
x-logging: &default_logging
logging:
driver: 'json-file'
options:
max-size: '10m'
max-file: '5'
name: lcbp3-gitea
networks:
lcbp3:
external: true
giteanet:
external: true
name: gitnet
services:
gitea:
<<: [*restart_policy, *default_logging]
image: gitea/gitea:1.22.3-rootless
container_name: gitea
deploy:
resources:
limits:
cpus: '2.0'
memory: 2G
reservations:
cpus: '0.25'
memory: 512M
security_opt:
- no-new-privileges:true
env_file:
- .env
environment:
# ---- File ownership in QNAP ----
USER_UID: '1000'
USER_GID: '1000'
TZ: Asia/Bangkok
# ---- Server / Reverse proxy (NPM) ----
GITEA__server__ROOT_URL: https://git.np-dms.work/
GITEA__server__DOMAIN: git.np-dms.work
GITEA__server__SSH_DOMAIN: git.np-dms.work
GITEA__server__START_SSH_SERVER: 'true'
GITEA__server__SSH_PORT: '22'
GITEA__server__SSH_LISTEN_PORT: '22'
GITEA__server__LFS_START_SERVER: 'true'
GITEA__server__HTTP_ADDR: '0.0.0.0'
GITEA__server__HTTP_PORT: '3000'
GITEA__server__TRUSTED_PROXIES: '127.0.0.1/8,10.0.0.0/8,172.16.0.0/12,192.168.0.0/16'
# --- การตั้งค่าฐานข้อมูล
GITEA__database__DB_TYPE: mysql
GITEA__database__HOST: mariadb:3306
GITEA__database__NAME: 'gitea'
GITEA__database__USER: 'gitea'
GITEA__database__PASSWD: ${GITEA_DB_PASSWORD:?GITEA_DB_PASSWORD required}
# --- repos
GITEA__repository__ROOT: /var/lib/gitea/git/repositories
DISABLE_HTTP_GIT: 'false'
ENABLE_BASIC_AUTHENTICATION: 'true'
# --- Enable Package Registry ---
GITEA__packages__ENABLED: 'true'
GITEA__packages__REGISTRY__ENABLED: 'true'
GITEA__packages__REGISTRY__STORAGE_TYPE: local
GITEA__packages__REGISTRY__STORAGE_PATH: /data/registry
# Optional: lock install after setup (เปลี่ยนเป็น true เมื่อจบ onboarding)
GITEA__security__INSTALL_LOCK: 'true'
volumes:
- /share/np-dms/gitea/backup:/backup
- /share/np-dms/gitea/etc:/etc/gitea
- /share/np-dms/gitea/lib:/var/lib/gitea
      # repo root is served from /share/np-dms/gitea/gitea_repos (mounted below)
- /share/np-dms/gitea/gitea_repos:/var/lib/gitea/git/repositories
- /share/np-dms/gitea/gitea_registry:/data/registry
- /etc/timezone:/etc/timezone:ro
- /etc/localtime:/etc/localtime:ro
ports:
- '3003:3000' # HTTP (ไปหลัง NPM)
- '2222:22' # SSH สำหรับ git clone/push
networks:
- lcbp3
- giteanet
healthcheck:
test: ['CMD', 'wget', '--spider', '-q', 'http://localhost:3000/api/healthz']
interval: 30s
timeout: 10s
retries: 3
start_period: 60s
# L4: ขั้นตอน ops (folder permissions, DB bootstrap) ย้ายไปที่:
# specs/04-Infrastructure-OPS/04-08-release-management-policy.md
@@ -0,0 +1,3 @@
# Per-stack .env.example — MariaDB + pma
DB_ROOT_PASSWORD=
DB_PASSWORD=
@@ -0,0 +1,97 @@
# File: /share/np-dms/mariadb/docker-compose-lcbp3-db.yml
# DMS Container v1.8.6 : Application name: lcbp3-db, Service: mariadb, pma
# ============================================================
# SECURITY (ADR-016, Tier-1):
# - root user / app user must use different passwords (least privilege)
# - host port 3306 bind only to 127.0.0.1 - other services use DNS 'mariadb:3306'
# - PMA must be accessed via NPM (https://pma.np-dms.work) only
# - set .env in same folder:
# DB_ROOT_PASSWORD, DB_PASSWORD, NPM_DB_PASSWORD, GITEA_DB_PASSWORD, N8N_DB_PASSWORD
# ============================================================
x-restart: &restart_policy
restart: unless-stopped
x-logging: &default_logging
logging:
driver: 'json-file'
options:
max-size: '10m'
max-file: '5'
name: lcbp3-db
services:
mariadb:
<<: [*restart_policy, *default_logging]
image: mariadb:11.8
container_name: mariadb
deploy:
resources:
limits:
cpus: '2.0'
memory: 4G
reservations:
cpus: '0.5'
memory: 1G
command: >-
--character-set-server=utf8mb4
--collation-server=utf8mb4_general_ci
env_file:
- .env
environment:
# root password must differ from app user (least privilege)
MARIADB_ROOT_PASSWORD: ${DB_ROOT_PASSWORD:?DB_ROOT_PASSWORD required}
MARIADB_DATABASE: 'lcbp3'
MARIADB_USER: 'center'
MARIADB_PASSWORD: ${DB_PASSWORD:?DB_PASSWORD required}
TZ: 'Asia/Bangkok'
# bind only to loopback for backup/migration on host - not exposed to LAN
ports:
- '127.0.0.1:3306:3306'
networks:
- lcbp3
volumes:
- '/share/np-dms/mariadb/data:/var/lib/mysql'
- '/share/np-dms/mariadb/my.cnf:/etc/mysql/conf.d/my.cnf:ro'
- '/share/np-dms/mariadb/init:/docker-entrypoint-initdb.d:ro'
- '/share/dms-data/mariadb/backup:/backup'
healthcheck:
test: ['CMD', 'healthcheck.sh', '--connect', '--innodb_initialized']
interval: 30s
timeout: 10s
retries: 3
start_period: 30s
pma:
<<: [*restart_policy, *default_logging]
image: phpmyadmin:5-apache
container_name: pma
deploy:
resources:
limits:
cpus: '0.25'
memory: 256M
environment:
TZ: 'Asia/Bangkok'
PMA_HOST: 'mariadb'
PMA_PORT: '3306'
PMA_ABSOLUTE_URI: 'https://pma.np-dms.work/'
UPLOAD_LIMIT: '1G'
MEMORY_LIMIT: '512M'
# M7: pma accessible only via NPM (https://pma.np-dms.work) - do not publish port 89 to LAN
expose:
- '80'
networks:
- lcbp3
volumes:
- '/share/np-dms/pma/config.user.inc.php:/etc/phpmyadmin/config.user.inc.php:ro'
- '/share/np-dms/pma/zzz-custom.ini:/usr/local/etc/php/conf.d/zzz-custom.ini:ro'
- '/share/np-dms/pma/tmp:/var/lib/phpmyadmin/tmp:rw'
- '/share/dms-data/logs/pma:/var/log/apache2'
depends_on:
mariadb:
condition: service_healthy
networks:
lcbp3:
external: true
@@ -0,0 +1,95 @@
# File: /share/np-dms/mariadb/docker-compose-lcbp3-db.yml
# DMS Container v1.8.6 : Application name: lcbp3-db, Service: mariadb, pma
# ============================================================
# 🔒 SECURITY (ADR-016, Tier-1):
# - root user / app user must use different passwords (least privilege)
# - host port 3306 bind only to 127.0.0.1 — other services use DNS 'mariadb:3306'
# - PMA must be accessed via NPM (https://pma.np-dms.work) only
# - set .env in same folder:
# DB_ROOT_PASSWORD, DB_PASSWORD, NPM_DB_PASSWORD, GITEA_DB_PASSWORD, N8N_DB_PASSWORD
# ============================================================
x-restart: &restart_policy
restart: unless-stopped
x-logging: &default_logging
logging:
driver: 'json-file'
options:
max-size: '10m'
max-file: '5'
name: lcbp3-db
services:
mariadb:
<<: [*restart_policy, *default_logging]
image: mariadb:11.8
container_name: mariadb
deploy:
resources:
limits:
cpus: '2.0'
memory: 4G
reservations:
cpus: '0.5'
memory: 1G
command: >-
--character-set-server=utf8mb4
--collation-server=utf8mb4_general_ci
env_file:
- .env
environment:
# root password must differ from app user (least privilege)
MARIADB_ROOT_PASSWORD: ${DB_ROOT_PASSWORD:?DB_ROOT_PASSWORD required}
MARIADB_DATABASE: 'lcbp3'
MARIADB_USER: 'center'
MARIADB_PASSWORD: ${DB_PASSWORD:?DB_PASSWORD required}
TZ: 'Asia/Bangkok'
# bind only to loopback for backup/migration on host — not exposed to LAN
ports:
- '127.0.0.1:3306:3306'
networks:
- lcbp3
volumes:
- '/share/np-dms/mariadb/data:/var/lib/mysql'
- '/share/np-dms/mariadb/my.cnf:/etc/mysql/conf.d/my.cnf:ro'
- '/share/np-dms/mariadb/init:/docker-entrypoint-initdb.d:ro'
- '/share/dms-data/mariadb/backup:/backup'
healthcheck:
test: ['CMD', 'healthcheck.sh', '--connect', '--innodb_initialized']
interval: 30s
timeout: 10s
retries: 3
start_period: 30s
pma:
<<: [*restart_policy, *default_logging]
image: phpmyadmin:5-apache
container_name: pma
deploy:
resources:
limits:
cpus: '0.25'
memory: 256M
environment:
TZ: 'Asia/Bangkok'
PMA_HOST: 'mariadb'
PMA_PORT: '3306'
PMA_ABSOLUTE_URI: 'https://pma.np-dms.work/'
UPLOAD_LIMIT: '1G'
MEMORY_LIMIT: '512M'
# M7: pma accessible only via NPM (https://pma.np-dms.work) — do not publish port 89 to LAN
expose:
- '80'
networks:
- lcbp3
volumes:
- '/share/np-dms/pma/config.user.inc.php:/etc/phpmyadmin/config.user.inc.php:ro'
- '/share/np-dms/pma/zzz-custom.ini:/usr/local/etc/php/conf.d/zzz-custom.ini:ro'
- '/share/np-dms/pma/tmp:/var/lib/phpmyadmin/tmp:rw'
- '/share/dms-data/logs/pma:/var/log/apache2'
depends_on:
mariadb:
condition: service_healthy
networks:
lcbp3:
external: true
@@ -0,0 +1,78 @@
# File: /share/np-dms/monitoring/docker-compose.yml (QNAP)
# DMS Container v1.8.6 — เฉพาะ exporters
# ============================================================
# Prometheus รันบน ASUSTOR — scrape ผ่าน lcbp3 network DNS
# - node-exporter:9100
# - cadvisor:8080
# H5: ไม่ publish ports ออก LAN, ตัด obsolete `version:` field, pin tags
# ============================================================
x-restart: &restart_policy
restart: unless-stopped
x-logging: &default_logging
logging:
driver: 'json-file'
options:
max-size: '10m'
max-file: '5'
networks:
lcbp3:
external: true
services:
node-exporter:
<<: [*restart_policy, *default_logging]
image: prom/node-exporter:v1.8.2
container_name: node-exporter
deploy:
resources:
limits:
cpus: '0.5'
memory: 128M
environment:
TZ: 'Asia/Bangkok'
command:
- '--path.procfs=/host/proc'
- '--path.sysfs=/host/sys'
- '--collector.filesystem.mount-points-exclude=^/(sys|proc|dev|host|etc)($$|/)'
expose:
- '9100'
networks:
- lcbp3
volumes:
- /proc:/host/proc:ro
- /sys:/host/sys:ro
- /:/rootfs:ro
healthcheck:
test: ['CMD', 'wget', '--spider', '-q', 'http://localhost:9100/metrics']
interval: 30s
timeout: 10s
retries: 3
cadvisor:
<<: [*restart_policy, *default_logging]
image: gcr.io/cadvisor/cadvisor:v0.49.1
container_name: cadvisor
deploy:
resources:
limits:
cpus: '0.5'
memory: 256M
environment:
TZ: 'Asia/Bangkok'
expose:
- '8080'
networks:
- lcbp3
volumes:
- /:/rootfs:ro
- /var/run:/var/run:ro
- /sys:/sys:ro
- /var/lib/docker/:/var/lib/docker:ro
healthcheck:
test: ['CMD', 'wget', '--spider', '-q', 'http://localhost:8080/healthz']
interval: 30s
timeout: 10s
retries: 3
@@ -0,0 +1,3 @@
# Per-stack .env.example — n8n + postgres + tika + docker-socket-proxy
N8N_DB_PASSWORD=
N8N_ENCRYPTION_KEY=
@@ -0,0 +1,198 @@
# File: /share/np-dms/n8n/docker-compose.yml
# DMS Container v1.8.6 — Application: n8n
# ============================================================
# 🔒 SECURITY:
# - secrets อยู่ใน .env (gitignored) — หลีกปัญหาการตีความหมาย `$` ใน YAML
# - n8n ไม่ได้ mount /var/run/docker.sock โดยตรง (H3)
# ใช้ docker-socket-proxy จำกัด capability — read-only Containers/Images API
# ============================================================
x-restart: &restart_policy
restart: unless-stopped
x-logging: &default_logging
logging:
driver: 'json-file'
options:
max-size: '10m'
max-file: '5'
services:
n8n-db:
<<: [*restart_policy, *default_logging]
image: postgres:16.4-alpine
container_name: n8n-db
env_file:
- .env
environment:
- POSTGRES_USER=n8n
- POSTGRES_PASSWORD=${N8N_DB_PASSWORD:?N8N_DB_PASSWORD required}
- POSTGRES_DB=n8n
volumes:
- '/share/np-dms/n8n/postgres-data:/var/lib/postgresql/data'
networks:
lcbp3: {}
healthcheck:
test: ['CMD-SHELL', 'pg_isready -h localhost -U n8n -d n8n']
interval: 10s
timeout: 5s
retries: 5
# ----------------------------------------------------------------
# Docker Socket Proxy (H3) — ให้เฉพาะ read-only Containers/Images API
# n8n ต้องตั้ง DOCKER_HOST=tcp://docker-socket-proxy:2375 (ถ้าใช้ docker node)
# ----------------------------------------------------------------
docker-socket-proxy:
<<: [*restart_policy, *default_logging]
image: tecnativa/docker-socket-proxy:0.2
container_name: docker-socket-proxy
environment:
TZ: 'Asia/Bangkok'
# เปิดเฉพาะ endpoint ที่ n8n จำเป็นต้องใช้
CONTAINERS: '1'
IMAGES: '1'
INFO: '1'
VERSION: '1'
# ปิดหมดที่อันตราย ซึ่งเป็นค่า default ของ image
POST: '0'
DELETE: '0'
EXEC: '0'
VOLUMES: '0'
NETWORKS: '0'
SERVICES: '0'
TASKS: '0'
SWARM: '0'
SYSTEM: '0'
AUTH: '0'
SECRETS: '0'
NODES: '0'
CONFIGS: '0'
DISTRIBUTION: '0'
PLUGINS: '0'
volumes:
- /var/run/docker.sock:/var/run/docker.sock:ro
networks:
lcbp3: {}
expose:
- '2375'
healthcheck:
test: ['CMD-SHELL', 'wget -qO- http://localhost:2375/version || exit 1']
interval: 30s
timeout: 5s
retries: 3
tika:
<<: [*restart_policy, *default_logging]
image: apache/tika:2.9.2.1-full
container_name: tika
user: 'root'
deploy:
resources:
limits:
cpus: '1.0'
memory: 1G
reservations:
cpus: '0.25'
memory: 256M
security_opt:
- no-new-privileges:true
environment:
TZ: 'Asia/Bangkok'
TESSDATA_PREFIX: '/tessdata'
volumes:
- /share/np-dms/n8n/tessdata:/tessdata
networks:
lcbp3: {}
expose:
- '9998'
healthcheck:
test: ['CMD-SHELL', 'wget -qO- http://localhost:9998/tika || exit 1']
interval: 30s
timeout: 10s
retries: 3
start_period: 30s
n8n:
<<: [*restart_policy, *default_logging]
image: n8nio/n8n:1.66.0
container_name: n8n
depends_on:
n8n-db:
condition: service_healthy
docker-socket-proxy:
condition: service_healthy
deploy:
resources:
limits:
cpus: '1.5'
memory: 3G
reservations:
cpus: '0.25'
memory: 512M
env_file:
- .env
environment:
TZ: 'Asia/Bangkok'
NODE_ENV: 'production'
# N8N_PATH: "/n8n/"
N8N_PUBLIC_URL: 'https://n8n.np-dms.work/'
WEBHOOK_URL: 'https://n8n.np-dms.work/'
N8N_EDITOR_BASE_URL: 'https://n8n.np-dms.work/'
N8N_PROTOCOL: 'https'
N8N_HOST: 'n8n.np-dms.work'
N8N_PORT: 5678
N8N_PROXY_HOPS: '1'
N8N_DIAGNOSTICS_ENABLED: 'false'
N8N_SECURE_COOKIE: 'true'
N8N_ENCRYPTION_KEY: ${N8N_ENCRYPTION_KEY:?N8N_ENCRYPTION_KEY required}
# File access control for "Read/Write Files from Disk" nodes
# Ref: https://github.com/n8n-io/n8n/blob/master/packages/@n8n/config/src/configs/security.config.ts
N8N_RESTRICT_FILE_ACCESS_TO: '/home/node/.n8n-files'
N8N_BLOCK_FILE_ACCESS_TO_N8N_FILES: 'false'
GENERIC_TIMEZONE: 'Asia/Bangkok'
NODE_FUNCTION_ALLOW_BUILTIN: '*'
NODES_EXCLUDE: '[]'
# H3: ใช้ socket proxy แทนการผูก docker.sock โดยตรง
DOCKER_HOST: 'tcp://docker-socket-proxy:2375'
# DB Setup
DB_TYPE: postgresdb
DB_POSTGRESDB_DATABASE: n8n
DB_POSTGRESDB_HOST: n8n-db
DB_POSTGRESDB_PORT: 5432
DB_POSTGRESDB_USER: n8n
DB_POSTGRESDB_PASSWORD: ${N8N_DB_PASSWORD:?N8N_DB_PASSWORD required}
# Data Prune
EXECUTIONS_DATA_PRUNE: 'true'
EXECUTIONS_DATA_MAX_AGE: 168
# EXECUTIONS_DATA_PRUNE_TIMEOUT: 60
ports:
- '5678:5678'
networks:
lcbp3: {}
volumes:
- '/share/np-dms/n8n:/home/node/.n8n'
- '/share/np-dms/n8n/cache:/home/node/.cache'
- '/share/np-dms/n8n/scripts:/scripts'
- '/share/np-dms/n8n/data:/data'
# H3: ลบ docker.sock direct mount — ใช้ docker-socket-proxy แทน
# read-only: อ่านไฟล์ PDF ต้นฉบับเท่านั้น
- '/share/np-dms-as/Legacy:/home/node/.n8n-files/staging_ai:ro' # Add alias for np-dms-as to match the node setting
# read-write: เขียน Log และ CSV ทั้งหมด
- '/share/np-dms/n8n/migration_logs:/home/node/.n8n-files/migration_logs:rw'
healthcheck:
test: ['CMD-SHELL', 'wget -qO- http://127.0.0.1:5678/healthz || exit 1']
interval: 30s
timeout: 10s
start_period: 60s
retries: 5
networks:
lcbp3:
external: true
# สำหรับ n8n volumes
# chown -R 1000:1000 /share/np-dms/n8n
# chmod -R 755 /share/np-dms/n8n
# chown -R 999:999 /share/np-dms/n8n/postgres-data
# chmod -R 700 /share/np-dms/n8n/postgres-data
#
# docker compose -f docker-compose-lcbp3-n8n.yml build n8n
@@ -0,0 +1,11 @@
# Custom n8n image adding poppler-utils (pdftotext, pdftoppm, …) for PDF work.
FROM n8nio/n8n:latest-debian
USER root
# Debian 10 (buster) is EOL — packages moved to archive.debian.org.
# The archive's Release files are past their Valid-Until date, so apt must be
# told to skip the validity check or `apt-get update` fails.
RUN echo "deb http://archive.debian.org/debian buster main" > /etc/apt/sources.list && \
    echo "deb http://archive.debian.org/debian-security buster/updates main" >> /etc/apt/sources.list && \
    apt-get -o Acquire::Check-Valid-Until=false update -y && \
    apt-get install -y --no-install-recommends poppler-utils && \
    rm -rf /var/lib/apt/lists/*
USER node
@@ -0,0 +1,2 @@
# Per-stack .env.example — Nginx Proxy Manager + landing
# SECURITY FIX (ADR-016): a real password was committed here. Example files
# must ship blank; set the actual value only in the gitignored .env.
NPM_DB_PASSWORD=
@@ -0,0 +1,106 @@
# File: /share/np-dms/npm/docker-compose.yml
# DMS Container v1.8.6 — Application: lcbp3-npm, Service: npm + landing
x-restart: &restart_policy
restart: unless-stopped
x-logging: &default_logging
logging:
driver: 'json-file'
options:
max-size: '10m'
max-file: '5'
name: lcbp3-npm
services:
npm:
<<: [*restart_policy, *default_logging]
image: jc21/nginx-proxy-manager:2.11.3
container_name: npm
deploy:
resources:
limits:
cpus: '1.0'
memory: 512M
reservations:
cpus: '0.25'
memory: 128M
security_opt:
- no-new-privileges:true
ports:
- '80:80' # HTTP
- '443:443' # HTTPS
- '81:81' # NPM Admin UI
env_file:
- .env
environment:
TZ: 'Asia/Bangkok'
DB_MYSQL_HOST: 'mariadb'
DB_MYSQL_PORT: 3306
DB_MYSQL_USER: 'npm'
# ⚠️ ADR-016: ห้ามใช้รหัสง่าย ๆ เช่น 'npm' — ตั้งใน .env (NPM_DB_PASSWORD)
DB_MYSQL_PASSWORD: ${NPM_DB_PASSWORD:?NPM_DB_PASSWORD required}
DB_MYSQL_NAME: 'npm'
# Uncomment this if IPv6 is not enabled on your host
DISABLE_IPV6: 'true'
networks:
- lcbp3
- giteanet
volumes:
- '/share/np-dms/npm/data:/data'
- '/share/dms-data/logs/npm:/data/logs'
- '/share/np-dms/npm/letsencrypt:/etc/letsencrypt'
- '/share/np-dms/npm/custom:/data/nginx/custom'
healthcheck:
test: ['CMD', 'curl', '-f', 'http://localhost:81/api/']
interval: 30s
timeout: 10s
retries: 3
start_period: 30s
landing:
<<: [*restart_policy, *default_logging]
image: nginx:1.27-alpine
container_name: landing
user: '0:0'
deploy:
resources:
limits:
cpus: '0.25'
memory: 128M
security_opt:
- no-new-privileges:true
volumes:
- '/share/np-dms/npm/landing:/usr/share/nginx/html:ro'
networks:
- lcbp3
healthcheck:
test: ['CMD', 'curl', '-f', 'http://localhost/']
interval: 30s
timeout: 5s
retries: 3
networks:
lcbp3:
external: true
giteanet:
external: true
name: gitnet
# docker exec -it npm id
# chown -R 0:0 /share/Container/npm
# setfacl -R -m u:0:rwx /share/Container/npm
# Default credentials — Email: admin@example.com, Password: changeme
# Note: Configurations
# Domain Names | Forward Hostname | IP Forward Port | Cache Assets | Block Common Exploits | Websockets | Force SSL | HTTP/2 | SupportHSTS Enabled |
# backend.np-dms.work | backend | 3000 | [ ] | [x] | [ ] | [x] | [x] | [ ] |
# lcbp3.np-dms.work | frontend | 3000 | [x] | [x] | [x] | [x] | [x] | [ ] |
# db.np-dms.work | mariadb | 3306 | [x] | [x] | [x] | [x] | [x] | [ ] |
# git.np-dms.work | gitea | 3000 | [x] | [x] | [x] | [x] | [x] | [ ] |
# n8n.np-dms.work | n8n | 5678 | [x] | [x] | [x] | [x] | [x] | [ ] |
# npm.np-dms.work | npm | 81 | [ ] | [x] | [x] | [x] | [x] | [ ] |
# pma.np-dms.work | pma | 80 | [x] | [x] | [ ] | [x] | [x] | [ ] |
# np-dms.work, | landing | 80 | [x] | [x] | [ ] | [x] | [x] | [ ] |
# www.np-dms.work | landing | 80 | [x] | [x] | [ ] | [x] | [x] | [ ] |
# L4: runbook details for certain ops (folder permissions, DB bootstrap) moved to:
# specs/04-Infrastructure-OPS/04-08-release-management-policy.md
# Initial admin: admin@example.com / changeme (change immediately after onboarding)
@@ -0,0 +1,93 @@
# File: npm/docker-compose-npm.yml
# DMS Container v1_4_1 แยก service และ folder /lcbp3-npm
x-restart: &restart_policy
restart: unless-stopped
x-logging: &default_logging
logging:
driver: "json-file"
options:
max-size: "10m"
max-file: "5"
services:
npm:
<<: [*restart_policy, *default_logging]
image: jc21/nginx-proxy-manager:latest
container_name: npm
stdin_open: true
tty: true
deploy:
resources:
limits:
cpus: "1.0" # 50% CPU
memory: 512M
ports:
- "80:80" # HTTP
- "443:443" # HTTPS
- "81:81" # NPM Admin UI
environment:
TZ: "Asia/Bangkok"
DB_MYSQL_HOST: "mariadb"
DB_MYSQL_PORT: 3306
DB_MYSQL_USER: "npm"
DB_MYSQL_PASSWORD: "npm"
DB_MYSQL_NAME: "npm"
# Uncomment this if IPv6 is not enabled on your host
DISABLE_IPV6: "true"
networks:
- lcbp3
- giteanet
volumes:
- "/share/Container/npm/data:/data"
- "/share/Container/dms-data/logs/npm:/data/logs" # <-- เพิ่ม logging volume
- "/share/Container/npm/letsencrypt:/etc/letsencrypt"
- "/share/Container/npm/custom:/data/nginx/custom" # <-- สำคัญสำหรับ http_top.conf
# - "/share/Container/lcbp3/npm/landing:/data/landing:ro"
landing:
image: nginx:1.27-alpine
container_name: landing
restart: unless-stopped
volumes:
- "/share/Container/npm/landing:/usr/share/nginx/html:ro"
networks:
- lcbp3
networks:
lcbp3:
external: true
giteanet:
external: true
name: gitnet
# docker exec -it npm id
# chown -R 0:0 /share/Container/npm
# setfacl -R -m u:0:rwx /share/Container/npm
# ค่าเริ่มต้นคือ:Email: admin@example.com Password: changeme
# Note: Configurations
# Domain Names | Forward Hostname | IP Forward Port | Cache Assets | Block Common Exploits | Websockets | Force SSL | HTTP/2 | SupportHSTS Enabled |
# backend.np-dms.work | backend | 3000 | [ ] | [x] | [ ] | [x] | [x] | [ ] |
# lcbp3.np-dms.work | frontend | 3000 | [x] | [x] | [x] | [x] | [x] | [ ] |
# db.np-dms.work | mariadb | 3306 | [x] | [x] | [x] | [x] | [x] | [ ] |
# git.np-dms.work | gitea | 3000 | [x] | [x] | [x] | [x] | [x] | [ ] |
# n8n.np-dms.work | n8n | 5678 | [x] | [x] | [x] | [x] | [x] | [ ] |
# npm.np-dms.work | npm | 81 | [ ] | [x] | [x] | [x] | [x] | [ ] |
# pma.np-dms.work | pma | 80 | [x] | [x] | [ ] | [x] | [x] | [ ] |
# np-dms.work, | localhost | 80 | [x] | [x] | [ ] | [x] | [x] | [ ] |
# www.np-dms.work | | | | | | | | |
# Note: an older 'landing' service variant is kept commented out below for
# reference; the active 'landing' service is defined above.
# landing:
# image: nginx:1.27-alpine
# container_name: lcbp3_landing
# restart: unless-stopped
# volumes:
# - "/share/Container/lcbp3/npm/landing:/usr/share/nginx/html:ro"
# networks:
# - lcbp3
# Note: Add mariadb service is expected to be defined in a separate docker-compose file.
# Create database and user for NPM in the mariadb container:
# CREATE DATABASE npm;
# CREATE USER 'npm'@'%' IDENTIFIED BY 'npm';
# GRANT ALL PRIVILEGES ON npm.* TO 'npm'@'%';
# FLUSH PRIVILEGES;
@@ -0,0 +1,4 @@
MONGO_ROOT_USERNAME=root
MONGO_ROOT_PASSWORD=
MONGO_RC_USERNAME=rocketchat
MONGO_RC_PASSWORD=
@@ -0,0 +1,180 @@
# File: /share/np-dms/rocketchat/docker-compose.yml
# DMS Container v1.8.6 — RocketChat + MongoDB
# ============================================================
# 🔒 SECURITY (M8):
# MongoDB รันแบบ replica set + auth
# Prerequisite (ทำครั้งเดียวก่อน deploy):
# openssl rand -base64 756 > /share/np-dms/rocketchat/mongo-keyfile
# chmod 400 /share/np-dms/rocketchat/mongo-keyfile
# chown 999:999 /share/np-dms/rocketchat/mongo-keyfile
# Env (.env):
# MONGO_ROOT_USERNAME, MONGO_ROOT_PASSWORD,
# MONGO_RC_USERNAME, MONGO_RC_PASSWORD
# ============================================================
# Reusable fragments (YAML anchors) merged into each service below.
x-restart: &restart_policy
  restart: unless-stopped
x-logging: &default_logging
  logging:
    driver: 'json-file'
    options:
      # Cap log growth: 5 rotated files of 10 MB each per container.
      max-size: '10m'
      max-file: '5'
services:
  # MongoDB 7 as a single-member replica set (Rocket.Chat requires oplog).
  mongodb:
    <<: [*restart_policy, *default_logging]
    image: docker.io/library/mongo:7.0.14
    container_name: mongodb
    # M8: enable --auth plus a shared keyFile for replica-set internal auth.
    command:
      - 'mongod'
      - '--oplogSize=128'
      - '--replSet=rs0'
      - '--bind_ip_all'
      - '--auth'
      - '--keyFile=/etc/mongo/keyfile'
    env_file:
      - .env
    environment:
      TZ: 'Asia/Bangkok'
      # :? makes `docker compose` fail fast if the secret is missing from .env.
      MONGO_INITDB_ROOT_USERNAME: ${MONGO_ROOT_USERNAME:?MONGO_ROOT_USERNAME required}
      MONGO_INITDB_ROOT_PASSWORD: ${MONGO_ROOT_PASSWORD:?MONGO_ROOT_PASSWORD required}
    volumes:
      - /share/np-dms/rocketchat/data/db:/data/db
      - /share/np-dms/rocketchat/data/dump:/dump
      # Keyfile must be pre-created per the header notes (chmod 400, owner 999).
      - /share/np-dms/rocketchat/mongo-keyfile:/etc/mongo/keyfile:ro
    deploy:
      resources:
        limits:
          cpus: '1.0'
          memory: 1G
        reservations:
          cpus: '0.25'
          memory: 256M
    security_opt:
      - no-new-privileges:true
    networks:
      - lcbp3
    # Internal only: no host port; peers reach mongodb:27017 via the lcbp3 network.
    expose:
      - '27017'
    # M2: authenticated healthcheck via mongosh.
    # $$VAR is compose-escaped so the container shell — not compose — expands it.
    healthcheck:
      test:
        [
          'CMD-SHELL',
          'mongosh --quiet -u "$$MONGO_INITDB_ROOT_USERNAME" -p "$$MONGO_INITDB_ROOT_PASSWORD" --authenticationDatabase admin --eval "db.adminCommand(\"ping\").ok" | grep -q 1',
        ]
      interval: 30s
      timeout: 10s
      retries: 5
      start_period: 40s
  # One-shot job: initiates the rs0 replica set and creates the RocketChat
  # application user, then exits (the rocketchat service waits on its completion).
  mongo-init-replica:
    image: docker.io/library/mongo:7.0.14
    container_name: mongo-init-replica
    # 'no' is quoted deliberately — a bare no parses as boolean false in YAML 1.1.
    restart: 'no'
    <<: *default_logging
    env_file:
      - .env
    environment:
      TZ: 'Asia/Bangkok'
    depends_on:
      mongodb:
        condition: service_healthy
    # Inline bash: wait for mongod, rs.initiate() if not yet initiated, then
    # create the rocketchat user idempotently. $$ defers expansion to the
    # container shell; the '"$$VAR"' sequences splice shell variables into the
    # single-quoted mongosh --eval scripts.
    entrypoint:
      - bash
      - -c
      - |
        set -e
        echo "Waiting for mongodb..."
        until mongosh --host mongodb \
          -u "$$MONGO_ROOT_USERNAME" -p "$$MONGO_ROOT_PASSWORD" \
          --authenticationDatabase admin --quiet \
          --eval "db.adminCommand('ping')"; do
          sleep 2
        done
        mongosh --host mongodb \
          -u "$$MONGO_ROOT_USERNAME" -p "$$MONGO_ROOT_PASSWORD" \
          --authenticationDatabase admin --quiet --eval '
            try { rs.status() } catch (e) {
              rs.initiate({ _id: "rs0", members: [{ _id: 0, host: "mongodb:27017" }] });
            }'
        # สร้าง user rocketchat ถ้ายังไม่มี
        mongosh --host mongodb \
          -u "$$MONGO_ROOT_USERNAME" -p "$$MONGO_ROOT_PASSWORD" \
          --authenticationDatabase admin --quiet --eval '
            const u = db.getSiblingDB("rocketchat").getUser("'"$$MONGO_RC_USERNAME"'");
            if (!u) {
              db.getSiblingDB("rocketchat").createUser({
                user: "'"$$MONGO_RC_USERNAME"'",
                pwd: "'"$$MONGO_RC_PASSWORD"'",
                roles: [
                  { role: "readWrite", db: "rocketchat" },
                  { role: "read", db: "local" }
                ]
              });
            }'
    deploy:
      resources:
        limits:
          cpus: '0.25'
          memory: 128M
    networks:
      - lcbp3
  # Rocket.Chat application server, reachable via NPM at chat.np-dms.work.
  rocketchat:
    <<: [*restart_policy, *default_logging]
    image: registry.rocket.chat/rocketchat/rocket.chat:6.10.5
    container_name: rocketchat
    security_opt:
      - no-new-privileges:true
    # Hardening: drop all Linux capabilities.
    cap_drop:
      - ALL
    env_file:
      - .env
    environment:
      - TZ=Asia/Bangkok
      - PORT=3000
      - ROOT_URL=https://chat.np-dms.work
      # M8: authenticated connection URLs, interpolated by compose from .env.
      # NOTE(review): special characters in passwords must be URL-encoded here.
      - MONGO_URL=mongodb://${MONGO_RC_USERNAME}:${MONGO_RC_PASSWORD}@mongodb:27017/rocketchat?replicaSet=rs0&authSource=rocketchat
      # Oplog tailing reads the 'local' db, hence root credentials with authSource=admin.
      - MONGO_OPLOG_URL=mongodb://${MONGO_ROOT_USERNAME}:${MONGO_ROOT_PASSWORD}@mongodb:27017/local?replicaSet=rs0&authSource=admin
      - DEPLOY_METHOD=docker
      - ACCOUNTS_AVATAR_STORE_PATH=/app/uploads
    volumes:
      - /share/np-dms/rocketchat/uploads:/app/uploads
    deploy:
      resources:
        limits:
          cpus: '1.0'
          memory: 1G
        reservations:
          cpus: '0.25'
          memory: 256M
    depends_on:
      # Start only after the replica set and app user are in place.
      mongo-init-replica:
        condition: service_completed_successfully
    networks:
      - lcbp3
    expose:
      - '3000'
    # M2: healthcheck against the public info endpoint.
    # NOTE(review): assumes curl is present in the rocket.chat image — verify;
    # if absent, the container will always report unhealthy.
    healthcheck:
      test:
        [
          'CMD-SHELL',
          'curl -sf http://localhost:3000/api/info | grep -q ''"success":true'' || exit 1',
        ]
      interval: 30s
      timeout: 10s
      retries: 5
      start_period: 120s

# Shared external network (created out-of-band) joining the LCBP3 stacks.
networks:
  lcbp3:
    external: true
@@ -0,0 +1,4 @@
# Per-stack .env.example — services (cache, search)
# Source: ../../.env.template
REDIS_PASSWORD=
ELASTICSEARCH_PASSWORD=
@@ -0,0 +1,125 @@
# File: /share/np-dms/services/docker-compose.yml
# DMS Container v1.8.6: Application name: services
# Services: cache (Redis), search (Elasticsearch)
# ============================================================
# 🔒 SECURITY (ADR-016, Tier-1):
# - Redis: ใช้ --requirepass บังคับ auth ฝั่ง server
# - Elasticsearch: ปิด host port mapping (ใช้ DNS ภายใน lcbp3 network เท่านั้น)
# - ใช้ .env (gitignored) ในโฟลเดอร์เดียวกัน:
# docker compose --env-file .env up -d
# ============================================================
# Reusable fragments merged into each service via YAML merge keys.
x-restart: &restart_policy
  restart: unless-stopped
x-logging: &default_logging
  logging:
    driver: 'json-file'
    options:
      # Cap log growth: 5 rotated files of 10 MB each per container.
      max-size: '10m'
      max-file: '5'
# Pre-existing shared network (docker network create lcbp3).
networks:
  lcbp3:
    external: true
services:
# ----------------------------------------------------------------
# 1. Redis (Caching + Distributed Lock + BullMQ queues)
# Service Name: cache (Backend อ้างอิง REDIS_HOST=cache)
# ----------------------------------------------------------------
cache:
<<: [*restart_policy, *default_logging]
image: redis:7-alpine
container_name: cache
deploy:
resources:
limits:
cpus: '1.0'
memory: 2G
reservations:
cpus: '0.25'
memory: 512M
environment:
TZ: 'Asia/Bangkok'
env_file:
- .env
# บังคับ auth ฝั่ง server, เปิด AOF persistence
command:
- 'redis-server'
- '--requirepass'
- '${REDIS_PASSWORD:?REDIS_PASSWORD required}'
- '--appendonly'
- 'yes'
- '--maxmemory-policy'
- 'allkeys-lru'
# bind เฉพาะ loopback host เพื่อ debug — service อื่นใช้ DNS 'cache:6379' ผ่าน lcbp3 network
ports:
- '127.0.0.1:6379:6379'
networks:
- lcbp3
volumes:
- '/share/np-dms/services/cache/data:/data'
healthcheck:
test:
[
'CMD',
'redis-cli',
'-a',
'${REDIS_PASSWORD}',
'--no-auth-warning',
'ping',
]
interval: 10s
timeout: 5s
retries: 5
# ----------------------------------------------------------------
# 2. Elasticsearch (Advanced Search)
# Service Name: search (Backend อ้างอิง ELASTICSEARCH_HOST=search)
# ----------------------------------------------------------------
  # Elasticsearch 8 single node for advanced search.
  # Backend connects via ELASTICSEARCH_HOST=search on the lcbp3 network.
  search:
    <<: [*restart_policy, *default_logging]
    image: elasticsearch:8.11.1
    container_name: search
    deploy:
      resources:
        limits:
          cpus: '2.0'
          memory: 4G
        reservations:
          cpus: '0.5'
          memory: 2G
    # NOTE(review): the shared .env also contains REDIS_PASSWORD; env_file
    # injects every key into this container too — consider a per-service file.
    env_file:
      - .env
    environment:
      TZ: 'Asia/Bangkok'
      # --- Single-node ---
      discovery.type: 'single-node'
      # --- Security (ADR-016) ---
      # NOTE: if xpack.security is enabled, ELASTIC_PASSWORD must be set and the
      # backend client config updated. Kept disabled because the service is
      # reachable only inside the lcbp3 network (no host port published).
      xpack.security.enabled: 'false'
      # --- Performance ---
      ES_JAVA_OPTS: '-Xms1g -Xmx1g'
    # NOTE(review): memlock ulimits alone do not lock memory — Elasticsearch
    # also needs bootstrap.memory_lock=true; confirm intent before enabling,
    # since a failed lock can trip bootstrap checks.
    ulimits:
      memlock:
        soft: -1
        hard: -1
    # ❌ Never publish 9200 to the LAN (ADR-016);
    # internal services use DNS 'search:9200' via the lcbp3 network.
    expose:
      - '9200'
    networks:
      - lcbp3
    volumes:
      - '/share/np-dms/services/search/data:/usr/share/elasticsearch/data'
    healthcheck:
      # Healthy once cluster status is green or yellow (yellow is expected on a
      # single node where replica shards stay unassigned).
      test:
        [
          'CMD-SHELL',
          'curl -s http://localhost:9200/_cluster/health | grep -q ''"status":"green"\|"status":"yellow"''',
        ]
      interval: 30s
      timeout: 10s
      retries: 5
@@ -0,0 +1,109 @@
# ============================================================
# ⚠️ DEPRECATED — ชื่อไฟล์มี typo (docker-compse.yml)
# ไฟล์นี้ถูกแทนที่ด้วย ./docker-compose.yml (v1.8.6)
# ไฟล์ใหม่มีการแก้ไข Tier-1 security:
# - Redis: --requirepass + bind 127.0.0.1
# - Elasticsearch: ปิด host port (internal only)
# โปรดลบไฟล์นี้หลัง verify ว่า deploy ใหม่สำเร็จ:
# docker compose -f docker-compose.yml --env-file .env up -d
# git rm specs/04-Infrastructure-OPS/04-00-docker-compose/QNAP/service/docker-compse.yml
# ============================================================
# (เนื้อหาเดิมเก็บไว้เพื่อ reference ระหว่าง migration เท่านั้น)
# File: /share/np-dms/services/docker-compose.yml (หรือไฟล์ที่คุณใช้รวม)
# DMS Container v1_7_0: เพิ่ม Application name: services
# Services: 'cache' (Redis) and 'search' (Elasticsearch)
# Shared fragments + external network (same pattern as the replacement file).
x-restart: &restart_policy
  restart: unless-stopped
x-logging: &default_logging
  logging:
    driver: "json-file"
    options:
      max-size: "10m"
      max-file: "5"
networks:
  lcbp3:
    external: true
services:
# ----------------------------------------------------------------
# 1. Redis (สำหรับ Caching และ Distributed Lock)
# Service Name: cache (ตามที่ NPM และ Backend Plan อ้างอิง)
# ----------------------------------------------------------------
  cache:
    <<: [ *restart_policy, *default_logging ]
    image: redis:7-alpine # Alpine image keeps the footprint small
    container_name: cache
    stdin_open: true
    tty: true
    deploy:
      resources:
        limits:
          cpus: "1.0"
          memory: 2G # Redis is in-memory; leave adequate headroom
        reservations:
          cpus: "0.25"
          memory: 512M
    environment:
      TZ: "Asia/Bangkok"
    # ⚠️ DEPRECATED behavior: no auth and the port is published on every host
    # interface — fixed in the replacement file (requirepass + 127.0.0.1 bind).
    ports:
      - "6379:6379"
    networks:
      - lcbp3 # internal network only
    volumes:
      - "/share/np-dms/services/cache/data:/data" # persistence volume (optional)
    healthcheck:
      test: [ "CMD", "redis-cli", "ping" ] # readiness probe (unauthenticated)
      interval: 10s
      timeout: 5s
      retries: 5
# ----------------------------------------------------------------
# 2. Elasticsearch (สำหรับ Advanced Search)
# Service Name: search (ตามที่ NPM และ Backend Plan อ้างอิง)
# ----------------------------------------------------------------
search:
<<: [ *restart_policy, *default_logging ]
image: elasticsearch:8.11.1 # แนะนำให้ระบุเวอร์ชันชัดเจน (V.8)
container_name: search
stdin_open: true
tty: true
deploy:
resources:
limits:
cpus: "2.0" # Elasticsearch ใช้ CPU และ Memory ค่อนข้างหนัก
memory: 4G
reservations:
cpus: "0.5"
memory: 2G
environment:
TZ: "Asia/Bangkok"
# --- Critical Settings for Single-Node ---
discovery.type: "single-node" # สำคัญมาก: กำหนดให้รันแบบ 1 node
# --- Security (Disable for Development) ---
# ปิด xpack security เพื่อให้ NestJS เชื่อมต่อง่าย (backend -> search:9200)
# หากเป็น Production จริง ควรเปิดใช้งานและตั้งค่า token/cert ครับ
xpack.security.enabled: "false"
# --- Performance Tuning ---
# กำหนด Heap size (1GB) ให้เหมาะสมกับ memory limit (4GB)
ES_JAVA_OPTS: "-Xms1g -Xmx1g"
ports:
- "9200:9200"
networks:
- lcbp3 # เชื่อมต่อ network ภายใน (NPM จะ proxy port 9200 จากภายนอก)
volumes:
- "/share/np-dms/services/search/data:/usr/share/elasticsearch/data" # Map volume สำหรับเก็บ data/indices
healthcheck:
# รอจนกว่า cluster health จะเป็น yellow หรือ green
test:
[
"CMD-SHELL",
"curl -s http://localhost:9200/_cluster/health | grep -q
'\"status\":\"green\"\\|\\\"status\":\"yellow\"'",
]
interval: 30s
timeout: 10s
retries: 5