260322:1648 Correct Coresspondence / Doing RFA / Correct CI
CI Pipeline / build (push) Failing after 12m41s
Build and Deploy / deploy (push) Failing after 2m44s

This commit is contained in:
admin
2026-03-22 16:48:12 +07:00
parent e5deedb42e
commit 11984bfa29
683 changed files with 105251 additions and 29068 deletions
@@ -6,10 +6,10 @@ x-restart: &restart_policy
x-logging: &default_logging
logging:
driver: "json-file"
driver: 'json-file'
options:
max-size: "10m"
max-file: "5"
max-size: '10m'
max-file: '5'
services:
mariadb:
@@ -21,31 +21,31 @@ services:
deploy:
resources:
limits:
cpus: "2.0"
cpus: '2.0'
memory: 4G
reservations:
cpus: "0.5"
cpus: '0.5'
memory: 1G
command: >-
--character-set-server=utf8mb4
--collation-server=utf8mb4_general_ci
environment:
MYSQL_ROOT_PASSWORD: "Center#2025"
MYSQL_DATABASE: "lcbp3"
MYSQL_USER: "center"
MYSQL_PASSWORD: "Center#2025"
TZ: "Asia/Bangkok"
MYSQL_ROOT_PASSWORD: 'Center#2025'
MYSQL_DATABASE: 'lcbp3'
MYSQL_USER: 'center'
MYSQL_PASSWORD: 'Center#2025'
TZ: 'Asia/Bangkok'
ports:
- "3306:3306"
- '3306:3306'
networks:
- lcbp3
volumes:
- "/share/np-dms/mariadb/data:/var/lib/mysql"
- "/share/np-dms/mariadb/my.cnf:/etc/mysql/conf.d/my.cnf:ro"
- "/share/np-dms/mariadb/init:/docker-entrypoint-initdb.d:ro"
- "/share/dms-data/mariadb/backup:/backup"
- '/share/np-dms/mariadb/data:/var/lib/mysql'
- '/share/np-dms/mariadb/my.cnf:/etc/mysql/conf.d/my.cnf:ro'
- '/share/np-dms/mariadb/init:/docker-entrypoint-initdb.d:ro'
- '/share/dms-data/mariadb/backup:/backup'
healthcheck:
test: ["CMD", "healthcheck.sh", "--connect", "--innodb_initialized"]
test: ['CMD', 'healthcheck.sh', '--connect', '--innodb_initialized']
interval: 10s
timeout: 5s
retries: 3
@@ -60,26 +60,26 @@ services:
deploy:
resources:
limits:
cpus: "0.25"
cpus: '0.25'
memory: 256M
environment:
TZ: "Asia/Bangkok"
PMA_HOST: "mariadb"
PMA_PORT: "3306"
PMA_ABSOLUTE_URI: "https://pma.np-dms.work/"
UPLOAD_LIMIT: "1G"
MEMORY_LIMIT: "512M"
TZ: 'Asia/Bangkok'
PMA_HOST: 'mariadb'
PMA_PORT: '3306'
PMA_ABSOLUTE_URI: 'https://pma.np-dms.work/'
UPLOAD_LIMIT: '1G'
MEMORY_LIMIT: '512M'
ports:
- "89:80"
- '89:80'
networks:
- lcbp3
# expose:
# - "80"
volumes:
- "/share/np-dms/pma/config.user.inc.php:/etc/phpmyadmin/config.user.inc.php:ro"
- "/share/np-dms/pma/zzz-custom.ini:/usr/local/etc/php/conf.d/zzz-custom.ini:ro"
- "/share/np-dms/pma/tmp:/var/lib/phpmyadmin/tmp:rw"
- "/share/dms-data/logs/pma:/var/log/apache2"
- '/share/np-dms/pma/config.user.inc.php:/etc/phpmyadmin/config.user.inc.php:ro'
- '/share/np-dms/pma/zzz-custom.ini:/usr/local/etc/php/conf.d/zzz-custom.ini:ro'
- '/share/np-dms/pma/tmp:/var/lib/phpmyadmin/tmp:rw'
- '/share/dms-data/logs/pma:/var/log/apache2'
depends_on:
mariadb:
condition: service_healthy
@@ -17,37 +17,37 @@ services:
tty: true
environment:
# ---- File ownership in QNAP ----
USER_UID: "1000"
USER_GID: "1000"
USER_UID: '1000'
USER_GID: '1000'
TZ: Asia/Bangkok
# ---- Server / Reverse proxy (NPM) ----
GITEA__server__ROOT_URL: https://git.np-dms.work/
GITEA__server__DOMAIN: git.np-dms.work
GITEA__server__SSH_DOMAIN: git.np-dms.work
GITEA__server__START_SSH_SERVER: "true"
GITEA__server__SSH_PORT: "22"
GITEA__server__SSH_LISTEN_PORT: "22"
GITEA__server__LFS_START_SERVER: "true"
GITEA__server__HTTP_ADDR: "0.0.0.0"
GITEA__server__HTTP_PORT: "3000"
GITEA__server__TRUSTED_PROXIES: "127.0.0.1/8,10.0.0.0/8,172.16.0.0/12,192.168.0.0/16"
GITEA__server__START_SSH_SERVER: 'true'
GITEA__server__SSH_PORT: '22'
GITEA__server__SSH_LISTEN_PORT: '22'
GITEA__server__LFS_START_SERVER: 'true'
GITEA__server__HTTP_ADDR: '0.0.0.0'
GITEA__server__HTTP_PORT: '3000'
GITEA__server__TRUSTED_PROXIES: '127.0.0.1/8,10.0.0.0/8,172.16.0.0/12,192.168.0.0/16'
# --- การตั้งค่าฐานข้อมูล
GITEA__database__DB_TYPE: mysql
GITEA__database__HOST: mariadb:3306
GITEA__database__NAME: "gitea"
GITEA__database__USER: "gitea"
GITEA__database__PASSWD: "Center#2025"
GITEA__database__NAME: 'gitea'
GITEA__database__USER: 'gitea'
GITEA__database__PASSWD: 'Center#2025'
# --- repos
GITEA__repository__ROOT: /var/lib/gitea/git/repositories
DISABLE_HTTP_GIT: "false"
ENABLE_BASIC_AUTHENTICATION: "true"
DISABLE_HTTP_GIT: 'false'
ENABLE_BASIC_AUTHENTICATION: 'true'
# --- Enable Package Registry ---
GITEA__packages__ENABLED: "true"
GITEA__packages__REGISTRY__ENABLED: "true"
GITEA__packages__ENABLED: 'true'
GITEA__packages__REGISTRY__ENABLED: 'true'
GITEA__packages__REGISTRY__STORAGE_TYPE: local
GITEA__packages__REGISTRY__STORAGE_PATH: /data/registry
# Optional: lock install after setup (เปลี่ยนเป็น true เมื่อจบ onboarding)
GITEA__security__INSTALL_LOCK: "true"
GITEA__security__INSTALL_LOCK: 'true'
volumes:
- /share/np-dms/gitea/backup:/backup
- /share/np-dms/gitea/etc:/etc/gitea
@@ -58,12 +58,11 @@ services:
- /etc/timezone:/etc/timezone:ro
- /etc/localtime:/etc/localtime:ro
ports:
- "3003:3000" # HTTP (ไปหลัง NPM)
- "2222:22" # SSH สำหรับ git clone/push
- '3003:3000' # HTTP (ไปหลัง NPM)
- '2222:22' # SSH สำหรับ git clone/push
networks:
- lcbp3
- giteanet
# networks:
# gitea_net:
# driver: bridge
@@ -5,10 +5,10 @@ x-restart: &restart_policy
x-logging: &default_logging
logging:
driver: "json-file"
driver: 'json-file'
options:
max-size: "10m"
max-file: "5"
max-size: '10m'
max-file: '5'
services:
n8n-db:
<<: [*restart_policy, *default_logging]
@@ -19,7 +19,7 @@ services:
- POSTGRES_PASSWORD=Np721220$
- POSTGRES_DB=n8n
volumes:
- "/share/np-dms/n8n/postgres-data:/var/lib/postgresql/data"
- '/share/np-dms/n8n/postgres-data:/var/lib/postgresql/data'
networks:
lcbp3: {}
healthcheck:
@@ -32,7 +32,7 @@ services:
<<: [*restart_policy, *default_logging]
image: apache/tika:latest-full
container_name: tika
user: "root"
user: 'root'
environment:
- TESSDATA_PREFIX=/tessdata
volumes:
@@ -40,7 +40,7 @@ services:
networks:
lcbp3: {}
expose:
- "9998"
- '9998'
n8n:
<<: [*restart_policy, *default_logging]
@@ -56,33 +56,33 @@ services:
deploy:
resources:
limits:
cpus: "1.5"
cpus: '1.5'
memory: 3G
reservations:
cpus: "0.25"
cpus: '0.25'
memory: 512M
environment:
TZ: "Asia/Bangkok"
NODE_ENV: "production"
TZ: 'Asia/Bangkok'
NODE_ENV: 'production'
# N8N_PATH: "/n8n/"
N8N_PUBLIC_URL: "https://n8n.np-dms.work/"
WEBHOOK_URL: "https://n8n.np-dms.work/"
N8N_EDITOR_BASE_URL: "https://n8n.np-dms.work/"
N8N_PROTOCOL: "https"
N8N_HOST: "n8n.np-dms.work"
N8N_PUBLIC_URL: 'https://n8n.np-dms.work/'
WEBHOOK_URL: 'https://n8n.np-dms.work/'
N8N_EDITOR_BASE_URL: 'https://n8n.np-dms.work/'
N8N_PROTOCOL: 'https'
N8N_HOST: 'n8n.np-dms.work'
N8N_PORT: 5678
N8N_PROXY_HOPS: "1"
N8N_PROXY_HOPS: '1'
N8N_DIAGNOSTICS_ENABLED: 'false'
N8N_SECURE_COOKIE: 'true'
N8N_ENCRYPTION_KEY: "9AAIB7Da9DW1qAhJE5/Bz4SnbQjeAngI"
N8N_ENCRYPTION_KEY: '9AAIB7Da9DW1qAhJE5/Bz4SnbQjeAngI'
# File access control for "Read/Write Files from Disk" nodes
# Ref: https://github.com/n8n-io/n8n/blob/master/packages/@n8n/config/src/configs/security.config.ts
# Default is "~/.n8n-files". Separate multiple dirs with semicolon (;)
N8N_RESTRICT_FILE_ACCESS_TO: "/home/node/.n8n-files"
N8N_BLOCK_FILE_ACCESS_TO_N8N_FILES: "false"
GENERIC_TIMEZONE: "Asia/Bangkok"
NODE_FUNCTION_ALLOW_BUILTIN: "*"
NODES_EXCLUDE: "[]"
N8N_RESTRICT_FILE_ACCESS_TO: '/home/node/.n8n-files'
N8N_BLOCK_FILE_ACCESS_TO_N8N_FILES: 'false'
GENERIC_TIMEZONE: 'Asia/Bangkok'
NODE_FUNCTION_ALLOW_BUILTIN: '*'
NODES_EXCLUDE: '[]'
# DB Setup
DB_TYPE: postgresdb
DB_POSTGRESDB_DATABASE: n8n
@@ -96,22 +96,22 @@ services:
# EXECUTIONS_DATA_PRUNE_TIMEOUT: 60
ports:
- "5678:5678"
- '5678:5678'
networks:
lcbp3: {}
volumes:
- "/share/np-dms/n8n:/home/node/.n8n"
- "/share/np-dms/n8n/cache:/home/node/.cache"
- "/share/np-dms/n8n/scripts:/scripts"
- "/share/np-dms/n8n/data:/data"
- "/var/run/docker.sock:/var/run/docker.sock"
- '/share/np-dms/n8n:/home/node/.n8n'
- '/share/np-dms/n8n/cache:/home/node/.cache'
- '/share/np-dms/n8n/scripts:/scripts'
- '/share/np-dms/n8n/data:/data'
- '/var/run/docker.sock:/var/run/docker.sock'
# read-only: อ่านไฟล์ PDF ต้นฉบับเท่านั้น
- "/share/np-dms-as/Legacy:/home/node/.n8n-files/staging_ai:ro" # Add alias for np-dms-as to match the node setting
- '/share/np-dms-as/Legacy:/home/node/.n8n-files/staging_ai:ro' # Add alias for np-dms-as to match the node setting
# read-write: เขียน Log และ CSV ทั้งหมด
- "/share/np-dms/n8n/migration_logs:/home/node/.n8n-files/migration_logs:rw"
- '/share/np-dms/n8n/migration_logs:/home/node/.n8n-files/migration_logs:rw'
healthcheck:
test: ["CMD-SHELL", "wget -qO- http://127.0.0.1:5678/healthz || exit 1"]
test: ['CMD-SHELL', 'wget -qO- http://127.0.0.1:5678/healthz || exit 1']
interval: 30s
timeout: 10s
start_period: 60s
@@ -120,7 +120,6 @@ services:
networks:
lcbp3:
external: true
# สำหรับ n8n volumes
# chown -R 1000:1000 /share/np-dms/n8n
# chmod -R 755 /share/np-dms/n8n3
@@ -6,10 +6,10 @@ x-restart: &restart_policy
x-logging: &default_logging
logging:
driver: "json-file"
driver: 'json-file'
options:
max-size: "10m"
max-file: "5"
max-size: '10m'
max-file: '5'
services:
npm:
<<: [*restart_policy, *default_logging]
@@ -20,45 +20,44 @@ services:
deploy:
resources:
limits:
cpus: "1.0" # 50% CPU
cpus: '1.0' # 50% CPU
memory: 512M
ports:
- "80:80" # HTTP
- "443:443" # HTTPS
- "81:81" # NPM Admin UI
- '80:80' # HTTP
- '443:443' # HTTPS
- '81:81' # NPM Admin UI
environment:
TZ: "Asia/Bangkok"
DB_MYSQL_HOST: "mariadb"
TZ: 'Asia/Bangkok'
DB_MYSQL_HOST: 'mariadb'
DB_MYSQL_PORT: 3306
DB_MYSQL_USER: "npm"
DB_MYSQL_PASSWORD: "npm"
DB_MYSQL_NAME: "npm"
DB_MYSQL_USER: 'npm'
DB_MYSQL_PASSWORD: 'npm'
DB_MYSQL_NAME: 'npm'
# Uncomment this if IPv6 is not enabled on your host
DISABLE_IPV6: "true"
DISABLE_IPV6: 'true'
networks:
- lcbp3
- giteanet
volumes:
- "/share/np-dms/npm/data:/data"
- "/share/dms-data/logs/npm:/data/logs" # <-- เพิ่ม logging volume
- "/share/np-dms/npm/letsencrypt:/etc/letsencrypt"
- "/share/np-dms/npm/custom:/data/nginx/custom" # <-- สำคัญสำหรับ http_top.conf
- '/share/np-dms/npm/data:/data'
- '/share/dms-data/logs/npm:/data/logs' # <-- เพิ่ม logging volume
- '/share/np-dms/npm/letsencrypt:/etc/letsencrypt'
- '/share/np-dms/npm/custom:/data/nginx/custom' # <-- สำคัญสำหรับ http_top.conf
# - "/share/Container/lcbp3/npm/landing:/data/landing:ro"
landing:
image: nginx:1.27-alpine
container_name: landing
restart: unless-stopped
volumes:
- "/share/np-dms/npm/landing:/usr/share/nginx/html:ro"
networks:
- lcbp3
image: nginx:1.27-alpine
container_name: landing
restart: unless-stopped
volumes:
- '/share/np-dms/npm/landing:/usr/share/nginx/html:ro'
networks:
- lcbp3
networks:
lcbp3:
external: true
giteanet:
external: true
name: gitnet
# docker exec -it npm id
# chown -R 0:0 /share/Container/npm
# setfacl -R -m u:0:rwx /share/Container/npm
@@ -8,10 +8,10 @@ x-restart: &restart_policy
x-logging: &default_logging
logging:
driver: "json-file"
driver: 'json-file'
options:
max-size: "10m"
max-file: "5"
max-size: '10m'
max-file: '5'
networks:
lcbp3:
@@ -30,27 +30,27 @@ services:
deploy:
resources:
limits:
cpus: "1.0"
cpus: '1.0'
memory: 1G
reservations:
cpus: "0.25"
cpus: '0.25'
memory: 256M
environment:
TZ: "Asia/Bangkok"
TZ: 'Asia/Bangkok'
command:
- '--config.file=/etc/prometheus/prometheus.yml'
- '--storage.tsdb.path=/prometheus'
- '--storage.tsdb.retention.time=30d'
- '--web.enable-lifecycle'
ports:
- "9090:9090"
- '9090:9090'
networks:
- lcbp3
volumes:
- "/volume1/np-dms/monitoring/prometheus/config:/etc/prometheus:ro"
- "/volume1/np-dms/monitoring/prometheus/data:/prometheus"
- '/volume1/np-dms/monitoring/prometheus/config:/etc/prometheus:ro'
- '/volume1/np-dms/monitoring/prometheus/data:/prometheus'
healthcheck:
test: ["CMD", "wget", "--spider", "-q", "http://localhost:9090/-/healthy"]
test: ['CMD', 'wget', '--spider', '-q', 'http://localhost:9090/-/healthy']
interval: 30s
timeout: 10s
retries: 3
@@ -67,27 +67,27 @@ services:
deploy:
resources:
limits:
cpus: "1.0"
cpus: '1.0'
memory: 512M
reservations:
cpus: "0.25"
cpus: '0.25'
memory: 128M
environment:
TZ: "Asia/Bangkok"
TZ: 'Asia/Bangkok'
GF_SECURITY_ADMIN_USER: admin
GF_SECURITY_ADMIN_PASSWORD: "Center#2025"
GF_SERVER_ROOT_URL: "https://grafana.np-dms.work"
GF_SECURITY_ADMIN_PASSWORD: 'Center#2025'
GF_SERVER_ROOT_URL: 'https://grafana.np-dms.work'
GF_INSTALL_PLUGINS: grafana-clock-panel,grafana-piechart-panel
ports:
- "3000:3000"
- '3000:3000'
networks:
- lcbp3
volumes:
- "/volume1/np-dms/monitoring/grafana/data:/var/lib/grafana"
- '/volume1/np-dms/monitoring/grafana/data:/var/lib/grafana'
depends_on:
- prometheus
healthcheck:
test: ["CMD-SHELL", "wget --spider -q http://localhost:3000/api/health || exit 1"]
test: ['CMD-SHELL', 'wget --spider -q http://localhost:3000/api/health || exit 1']
interval: 30s
timeout: 10s
retries: 3
@@ -102,18 +102,18 @@ services:
deploy:
resources:
limits:
cpus: "0.5"
cpus: '0.5'
memory: 256M
environment:
TZ: "Asia/Bangkok"
TZ: 'Asia/Bangkok'
ports:
- "3001:3001"
- '3001:3001'
networks:
- lcbp3
volumes:
- "/volume1/np-dms/monitoring/uptime-kuma/data:/app/data"
- '/volume1/np-dms/monitoring/uptime-kuma/data:/app/data'
healthcheck:
test: ["CMD-SHELL", "curl -f http://localhost:3001/api/entry-page || exit 1"]
test: ['CMD-SHELL', 'curl -f http://localhost:3001/api/entry-page || exit 1']
interval: 30s
timeout: 10s
retries: 3
@@ -128,16 +128,16 @@ services:
deploy:
resources:
limits:
cpus: "0.5"
cpus: '0.5'
memory: 128M
environment:
TZ: "Asia/Bangkok"
TZ: 'Asia/Bangkok'
command:
- '--path.procfs=/host/proc'
- '--path.sysfs=/host/sys'
- '--collector.filesystem.mount-points-exclude=^/(sys|proc|dev|host|etc)($$|/)'
ports:
- "9100:9100"
- '9100:9100'
networks:
- lcbp3
volumes:
@@ -145,7 +145,7 @@ services:
- /sys:/host/sys:ro
- /:/rootfs:ro
healthcheck:
test: ["CMD", "wget", "--spider", "-q", "http://localhost:9100/metrics"]
test: ['CMD', 'wget', '--spider', '-q', 'http://localhost:9100/metrics']
interval: 30s
timeout: 10s
retries: 3
@@ -160,12 +160,12 @@ services:
deploy:
resources:
limits:
cpus: "0.5"
cpus: '0.5'
memory: 256M
environment:
TZ: "Asia/Bangkok"
TZ: 'Asia/Bangkok'
ports:
- "8088:8088"
- '8088:8088'
networks:
- lcbp3
volumes:
@@ -174,7 +174,7 @@ services:
- /sys:/sys:ro
- /var/lib/docker/:/var/lib/docker:ro
healthcheck:
test: ["CMD", "wget", "--spider", "-q", "http://localhost:8080/healthz"]
test: ['CMD', 'wget', '--spider', '-q', 'http://localhost:8080/healthz']
interval: 30s
timeout: 10s
retries: 3
@@ -189,19 +189,19 @@ services:
deploy:
resources:
limits:
cpus: "0.5"
cpus: '0.5'
memory: 512M
environment:
TZ: "Asia/Bangkok"
TZ: 'Asia/Bangkok'
command: -config.file=/etc/loki/local-config.yaml
ports:
- "3100:3100"
- '3100:3100'
networks:
- lcbp3
volumes:
- "/volume1/np-dms/monitoring/loki/data:/loki"
- '/volume1/np-dms/monitoring/loki/data:/loki'
healthcheck:
test: ["CMD", "wget", "--spider", "-q", "http://localhost:3100/ready"]
test: ['CMD', 'wget', '--spider', '-q', 'http://localhost:3100/ready']
interval: 30s
timeout: 10s
retries: 3
@@ -213,20 +213,20 @@ services:
<<: [*restart_policy, *default_logging]
image: grafana/promtail:2.9.0
container_name: promtail
user: "0:0"
user: '0:0'
deploy:
resources:
limits:
cpus: "0.5"
cpus: '0.5'
memory: 256M
environment:
TZ: "Asia/Bangkok"
TZ: 'Asia/Bangkok'
command: -config.file=/etc/promtail/promtail-config.yml
networks:
- lcbp3
volumes:
- "/volume1/np-dms/monitoring/promtail/config:/etc/promtail:ro"
- "/var/run/docker.sock:/var/run/docker.sock:ro"
- "/var/lib/docker/containers:/var/lib/docker/containers:ro"
- '/volume1/np-dms/monitoring/promtail/config:/etc/promtail:ro'
- '/var/run/docker.sock:/var/run/docker.sock:ro'
- '/var/lib/docker/containers:/var/lib/docker/containers:ro'
depends_on:
- loki
- loki
@@ -1,4 +1,5 @@
# 04.1 Infrastructure Setup & Docker Compose
**Project:** LCBP3-DMS
**Version:** 1.8.0
**Status:** Active
@@ -396,9 +397,7 @@ Backend validates environment variables at startup:
import * as Joi from 'joi';
export const envValidationSchema = Joi.object({
NODE_ENV: Joi.string()
.valid('development', 'staging', 'production')
.required(),
NODE_ENV: Joi.string().valid('development', 'staging', 'production').required(),
DB_HOST: Joi.string().required(),
DB_PORT: Joi.number().default(3306),
DB_USER: Joi.string().required(),
@@ -480,7 +479,6 @@ docker exec lcbp3-backend env | grep NODE_ENV
**Last Review:** 2025-12-01
**Next Review:** 2026-03-01
---
# Infrastructure Setup
@@ -502,6 +500,7 @@ docker exec lcbp3-backend env | grep NODE_ENV
## 1. Redis Configuration (Standalone + Persistence)
### 1.1 Docker Compose Setup
```yaml
# docker-compose-redis.yml
version: '3.8'
@@ -530,10 +529,10 @@ networks:
external: true
```
## 2. Database Configuration
### 2.1 MariaDB Optimization for Numbering
```sql
-- /etc/mysql/mariadb.conf.d/50-numbering.cnf
@@ -568,6 +567,7 @@ long_query_time = 1
```
### 2.2 Monitoring Locks
```sql
-- Check for lock contention
SELECT
@@ -595,6 +595,7 @@ KILL <thread_id>;
### 3.1 Backend Service Deployment
#### Docker Compose
```yaml
# docker-compose-backend.yml
version: '3.8'
@@ -611,7 +612,7 @@ services:
- NUMBERING_LOCK_TIMEOUT=5000
- NUMBERING_RESERVATION_TTL=300
ports:
- "3001:3000"
- '3001:3000'
depends_on:
- mariadb
- cache
@@ -619,7 +620,7 @@ services:
- lcbp3
restart: unless-stopped
healthcheck:
test: ["CMD", "curl", "-f", "http://localhost:3000/health"]
test: ['CMD', 'curl', '-f', 'http://localhost:3000/health']
interval: 30s
timeout: 10s
retries: 3
@@ -633,7 +634,7 @@ services:
- REDIS_HOST=cache
- REDIS_PORT=6379
ports:
- "3002:3000"
- '3002:3000'
depends_on:
- mariadb
- cache
@@ -647,6 +648,7 @@ networks:
```
#### Health Check Endpoint
```typescript
// health/numbering.health.ts
import { Injectable } from '@nestjs/common';
@@ -658,17 +660,13 @@ import { DataSource } from 'typeorm';
export class NumberingHealthIndicator extends HealthIndicator {
constructor(
private redis: Redis,
private dataSource: DataSource,
private dataSource: DataSource
) {
super();
}
async isHealthy(key: string): Promise<HealthIndicatorResult> {
const checks = await Promise.all([
this.checkRedis(),
this.checkDatabase(),
this.checkSequenceIntegrity(),
]);
const checks = await Promise.all([this.checkRedis(), this.checkDatabase(), this.checkSequenceIntegrity()]);
const isHealthy = checks.every((check) => check.status === 'up');
@@ -737,7 +735,7 @@ alerting:
- alertmanager:9093
rule_files:
- "/etc/prometheus/alerts/numbering.yml"
- '/etc/prometheus/alerts/numbering.yml'
scrape_configs:
- job_name: 'backend'
@@ -815,6 +813,7 @@ receivers:
### 4.3 Grafana Dashboards
#### Import Dashboard JSON
```bash
# Download dashboard template
curl -o numbering-dashboard.json \
@@ -827,6 +826,7 @@ curl -X POST http://admin:admin@localhost:3000/api/dashboards/db \
```
#### Key Panels to Monitor
1. **Numbers Generated per Minute** - Rate of number creation
2. **Sequence Utilization** - Current usage vs max (alert >90%)
3. **Lock Wait Time (p95)** - Performance indicator
@@ -841,6 +841,7 @@ curl -X POST http://admin:admin@localhost:3000/api/dashboards/db \
### 5.1 Database Backup Strategy
#### Automated Backup Script
```bash
#!/bin/bash
# scripts/backup-numbering-db.sh
@@ -875,6 +876,7 @@ echo "✅ Backup complete: numbering_$DATE.sql.gz"
```
#### Cron Schedule
```cron
# Run backup daily at 2 AM
0 2 * * * /opt/lcbp3/scripts/backup-numbering-db.sh >> /var/log/numbering-backup.log 2>&1
@@ -886,6 +888,7 @@ echo "✅ Backup complete: numbering_$DATE.sql.gz"
### 5.2 Redis Backup
#### Enable RDB Persistence
```conf
# redis.conf
save 900 1 # Save if 1 key changed after 900 seconds
@@ -902,6 +905,7 @@ appendfsync everysec
```
#### Backup Script
```bash
#!/bin/bash
# scripts/backup-redis.sh
@@ -941,6 +945,7 @@ echo "✅ Redis backup complete: redis_${DATE}.tar.gz"
### 5.3 Recovery Procedures
#### Scenario 1: Restore from Database Backup
```bash
#!/bin/bash
# scripts/restore-numbering-db.sh
@@ -976,6 +981,7 @@ echo "🔄 Please verify sequence integrity"
```
#### Scenario 2: Redis Failure
```bash
# Check Redis status
docker exec cache redis-cli ping
@@ -997,6 +1003,7 @@ docker exec cache redis-cli ping
### 6.1 Sequence Adjustment
#### Increase Max Value
```sql
-- Check current utilization
SELECT
@@ -1026,6 +1033,7 @@ INSERT INTO document_numbering_audit_logs (
```
#### Reset Yearly Sequence
```sql
-- For document types with yearly reset
-- Run on January 1st
@@ -1091,6 +1099,7 @@ LINES TERMINATED BY '\n';
### 6.3 Redis Maintenance
#### Flush Expired Reservations
```bash
#!/bin/bash
# scripts/cleanup-expired-reservations.sh
@@ -1122,6 +1131,7 @@ echo "✅ Cleaned up $COUNT expired reservations"
### 7.1 Total System Failure
#### Recovery Steps
```bash
#!/bin/bash
# scripts/disaster-recovery.sh
@@ -1184,7 +1194,9 @@ echo "⚠️ Please verify system functionality manually"
**Alert**: `SequenceWarning` or `SequenceCritical`
**Steps**:
1. Check current utilization
```sql
SELECT document_type, current_value, max_value,
ROUND((current_value * 100.0 / max_value), 2) as pct
@@ -1199,6 +1211,7 @@ echo "⚠️ Please verify system functionality manually"
- Days until exhaustion?
3. Take action
```sql
-- Option A: Increase max_value
UPDATE document_numbering_configs
@@ -1219,13 +1232,16 @@ echo "⚠️ Please verify system functionality manually"
**Alert**: `HighLockWaitTime`
**Steps**:
1. Check Redis cluster health
```bash
docker exec lcbp3-redis-1 redis-cli cluster info
docker exec lcbp3-redis-1 redis-cli cluster nodes
```
2. Check database locks
```sql
SELECT * FROM information_schema.innodb_lock_waits;
SELECT * FROM information_schema.innodb_trx
@@ -1251,18 +1267,22 @@ echo "⚠️ Please verify system functionality manually"
**Alert**: `RedisUnavailable`
**Steps**:
1. Verify Redis is down
```bash
docker exec cache redis-cli ping || echo "Redis DOWN"
```
2. Check system falls back to DB-only mode
```bash
curl http://localhost:3001/health/numbering
# Should show: fallback_mode: true
```
3. Restart Redis container
```bash
docker restart cache
sleep 10
@@ -1270,11 +1290,13 @@ echo "⚠️ Please verify system functionality manually"
```
4. If restart fails, restore from backup
```bash
./scripts/restore-redis.sh /backups/redis/latest.tar.gz
```
5. Verify numbering system back to normal
```bash
curl http://localhost:3001/health/numbering
# Should show: fallback_mode: false
@@ -1292,6 +1314,7 @@ echo "⚠️ Please verify system functionality manually"
### 9.1 Slow Number Generation
**Diagnosis**:
```sql
-- Check slow queries
SELECT * FROM mysql.slow_log
@@ -1306,6 +1329,7 @@ FOR UPDATE;
```
**Optimizations**:
```sql
-- Add missing indexes
CREATE INDEX idx_sequence_lookup
@@ -1373,8 +1397,8 @@ networks:
- subnet: 172.20.0.0/16
driver_opts:
com.docker.network.bridge.name: lcbp3-br
com.docker.network.bridge.enable_icc: "true"
com.docker.network.bridge.enable_ip_masquerade: "true"
com.docker.network.bridge.enable_icc: 'true'
com.docker.network.bridge.enable_ip_masquerade: 'true'
```
---
@@ -1383,7 +1407,7 @@ networks:
### 11.1 Audit Log Retention
```sql
````sql
-- Export audit logs for compliance
SELECT *
FROM document_numbering
@@ -1419,7 +1443,7 @@ chmod 755 /share/np-dms/pma/tmp
# CREATE USER 'exporter'@'%' IDENTIFIED BY '<PASSWORD>' WITH MAX_USER_CONNECTIONS 3;
# GRANT PROCESS, REPLICATION CLIENT, SELECT ON *.* TO 'exporter'@'%';
# FLUSH PRIVILEGES;
```
````
### Add Databases for NPM & Gitea
@@ -1444,10 +1468,10 @@ x-restart: &restart_policy
x-logging: &default_logging
logging:
driver: "json-file"
driver: 'json-file'
options:
max-size: "10m"
max-file: "5"
max-size: '10m'
max-file: '5'
networks:
lcbp3:
@@ -1463,27 +1487,27 @@ services:
deploy:
resources:
limits:
cpus: "2.0"
cpus: '2.0'
memory: 4G
reservations:
cpus: "0.5"
cpus: '0.5'
memory: 1G
environment:
MYSQL_ROOT_PASSWORD: "<ROOT_PASSWORD>"
MYSQL_DATABASE: "lcbp3"
MYSQL_USER: "center"
MYSQL_PASSWORD: "<PASSWORD>"
TZ: "Asia/Bangkok"
MYSQL_ROOT_PASSWORD: '<ROOT_PASSWORD>'
MYSQL_DATABASE: 'lcbp3'
MYSQL_USER: 'center'
MYSQL_PASSWORD: '<PASSWORD>'
TZ: 'Asia/Bangkok'
ports:
- "3306:3306"
- '3306:3306'
networks:
- lcbp3
volumes:
- "/share/np-dms/mariadb/data:/var/lib/mysql"
- "/share/np-dms/mariadb/my.cnf:/etc/mysql/conf.d/my.cnf:ro"
- "/share/np-dms/mariadb/init:/docker-entrypoint-initdb.d:ro"
- '/share/np-dms/mariadb/data:/var/lib/mysql'
- '/share/np-dms/mariadb/my.cnf:/etc/mysql/conf.d/my.cnf:ro'
- '/share/np-dms/mariadb/init:/docker-entrypoint-initdb.d:ro'
healthcheck:
test: ["CMD", "healthcheck.sh", "--connect", "--innodb_initialized"]
test: ['CMD', 'healthcheck.sh', '--connect', '--innodb_initialized']
interval: 10s
timeout: 5s
retries: 3
@@ -1496,22 +1520,22 @@ services:
deploy:
resources:
limits:
cpus: "0.25"
cpus: '0.25'
memory: 256M
environment:
TZ: "Asia/Bangkok"
PMA_HOST: "mariadb"
PMA_PORT: "3306"
PMA_ABSOLUTE_URI: "https://pma.np-dms.work/"
UPLOAD_LIMIT: "1G"
MEMORY_LIMIT: "512M"
TZ: 'Asia/Bangkok'
PMA_HOST: 'mariadb'
PMA_PORT: '3306'
PMA_ABSOLUTE_URI: 'https://pma.np-dms.work/'
UPLOAD_LIMIT: '1G'
MEMORY_LIMIT: '512M'
ports:
- "89:80"
- '89:80'
networks:
- lcbp3
volumes:
- "/share/np-dms/pma/config.user.inc.php:/etc/phpmyadmin/config.user.inc.php:ro"
- "/share/np-dms/pma/tmp:/var/lib/phpmyadmin/tmp:rw"
- '/share/np-dms/pma/config.user.inc.php:/etc/phpmyadmin/config.user.inc.php:ro'
- '/share/np-dms/pma/tmp:/var/lib/phpmyadmin/tmp:rw'
depends_on:
mariadb:
condition: service_healthy
@@ -1546,10 +1570,10 @@ x-restart: &restart_policy
x-logging: &default_logging
logging:
driver: "json-file"
driver: 'json-file'
options:
max-size: "10m"
max-file: "5"
max-size: '10m'
max-file: '5'
networks:
lcbp3:
@@ -1563,21 +1587,21 @@ services:
deploy:
resources:
limits:
cpus: "1.0"
cpus: '1.0'
memory: 2G
reservations:
cpus: "0.25"
cpus: '0.25'
memory: 512M
environment:
TZ: "Asia/Bangkok"
TZ: 'Asia/Bangkok'
ports:
- "6379:6379"
- '6379:6379'
networks:
- lcbp3
volumes:
- "/share/np-dms/services/cache/data:/data"
- '/share/np-dms/services/cache/data:/data'
healthcheck:
test: ["CMD", "redis-cli", "ping"]
test: ['CMD', 'redis-cli', 'ping']
interval: 10s
timeout: 5s
retries: 5
@@ -1589,25 +1613,29 @@ services:
deploy:
resources:
limits:
cpus: "2.0"
cpus: '2.0'
memory: 4G
reservations:
cpus: "0.5"
cpus: '0.5'
memory: 2G
environment:
TZ: "Asia/Bangkok"
discovery.type: "single-node"
xpack.security.enabled: "false"
TZ: 'Asia/Bangkok'
discovery.type: 'single-node'
xpack.security.enabled: 'false'
# Heap locked at 1GB per ADR-005
ES_JAVA_OPTS: "-Xms1g -Xmx1g"
ES_JAVA_OPTS: '-Xms1g -Xmx1g'
ports:
- "9200:9200"
- '9200:9200'
networks:
- lcbp3
volumes:
- "/share/np-dms/services/search/data:/usr/share/elasticsearch/data"
- '/share/np-dms/services/search/data:/usr/share/elasticsearch/data'
healthcheck:
test: ["CMD-SHELL", "curl -s http://localhost:9200/_cluster/health | grep -q '\"status\":\"green\"\\|\"status\":\"yellow\"'"]
test:
[
'CMD-SHELL',
"curl -s http://localhost:9200/_cluster/health | grep -q '\"status\":\"green\"\\|\"status\":\"yellow\"'",
]
interval: 30s
timeout: 10s
retries: 5
@@ -1622,15 +1650,15 @@ services:
### NPM Proxy Host Config Reference
| Domain | Forward Host | Port | Cache | Block Exploits | WebSocket | SSL |
| :-------------------- | :----------- | :--- | :---- | :------------- | :-------- | :--- |
| `backend.np-dms.work` | `backend` | 3000 | ❌ | ✅ | ❌ | ✅ |
| `lcbp3.np-dms.work` | `frontend` | 3000 | ✅ | ✅ | ✅ | ✅ |
| `git.np-dms.work` | `gitea` | 3000 | ✅ | ✅ | ✅ | ✅ |
| `n8n.np-dms.work` | `n8n` | 5678 | ✅ | ✅ | ✅ | ✅ |
| `chat.np-dms.work` | `rocketchat` | 3000 | ✅ | ✅ | ✅ | ✅ |
| `npm.np-dms.work` | `npm` | 81 | ❌ | ✅ | ✅ | ✅ |
| `pma.np-dms.work` | `pma` | 80 | ✅ | ✅ | ❌ | ✅ |
| Domain | Forward Host | Port | Cache | Block Exploits | WebSocket | SSL |
| :-------------------- | :----------- | :--- | :---- | :------------- | :-------- | :-- |
| `backend.np-dms.work` | `backend` | 3000 | ❌ | ✅ | ❌ | ✅ |
| `lcbp3.np-dms.work` | `frontend` | 3000 | ✅ | ✅ | ✅ | ✅ |
| `git.np-dms.work` | `gitea` | 3000 | ✅ | ✅ | ✅ | ✅ |
| `n8n.np-dms.work` | `n8n` | 5678 | ✅ | ✅ | ✅ | ✅ |
| `chat.np-dms.work` | `rocketchat` | 3000 | ✅ | ✅ | ✅ | ✅ |
| `npm.np-dms.work` | `npm` | 81 | ❌ | ✅ | ✅ | ✅ |
| `pma.np-dms.work` | `pma` | 80 | ✅ | ✅ | ❌ | ✅ |
```yaml
# /share/np-dms/npm/docker-compose.yml
@@ -1639,10 +1667,10 @@ x-restart: &restart_policy
x-logging: &default_logging
logging:
driver: "json-file"
driver: 'json-file'
options:
max-size: "10m"
max-file: "5"
max-size: '10m'
max-file: '5'
networks:
lcbp3:
@@ -1659,34 +1687,34 @@ services:
deploy:
resources:
limits:
cpus: "1.0"
cpus: '1.0'
memory: 512M
ports:
- "80:80"
- "443:443"
- "81:81"
- '80:80'
- '443:443'
- '81:81'
environment:
TZ: "Asia/Bangkok"
DB_MYSQL_HOST: "mariadb"
TZ: 'Asia/Bangkok'
DB_MYSQL_HOST: 'mariadb'
DB_MYSQL_PORT: 3306
DB_MYSQL_USER: "npm"
DB_MYSQL_PASSWORD: "npm"
DB_MYSQL_NAME: "npm"
DISABLE_IPV6: "true"
DB_MYSQL_USER: 'npm'
DB_MYSQL_PASSWORD: 'npm'
DB_MYSQL_NAME: 'npm'
DISABLE_IPV6: 'true'
networks:
- lcbp3
- giteanet
volumes:
- "/share/np-dms/npm/data:/data"
- "/share/np-dms/npm/letsencrypt:/etc/letsencrypt"
- "/share/np-dms/npm/custom:/data/nginx/custom"
- '/share/np-dms/npm/data:/data'
- '/share/np-dms/npm/letsencrypt:/etc/letsencrypt'
- '/share/np-dms/npm/custom:/data/nginx/custom'
landing:
image: nginx:1.27-alpine
container_name: landing
restart: unless-stopped
volumes:
- "/share/np-dms/npm/landing:/usr/share/nginx/html:ro"
- '/share/np-dms/npm/landing:/usr/share/nginx/html:ro'
networks:
- lcbp3
```
@@ -1722,25 +1750,25 @@ services:
container_name: gitea
restart: always
environment:
USER_UID: "1000"
USER_GID: "1000"
USER_UID: '1000'
USER_GID: '1000'
TZ: Asia/Bangkok
GITEA__server__ROOT_URL: https://git.np-dms.work/
GITEA__server__DOMAIN: git.np-dms.work
GITEA__server__SSH_DOMAIN: git.np-dms.work
GITEA__server__START_SSH_SERVER: "true"
GITEA__server__SSH_PORT: "22"
GITEA__server__SSH_LISTEN_PORT: "22"
GITEA__server__LFS_START_SERVER: "true"
GITEA__server__HTTP_PORT: "3000"
GITEA__server__TRUSTED_PROXIES: "127.0.0.1/8,10.0.0.0/8,172.16.0.0/12,192.168.0.0/16"
GITEA__server__START_SSH_SERVER: 'true'
GITEA__server__SSH_PORT: '22'
GITEA__server__SSH_LISTEN_PORT: '22'
GITEA__server__LFS_START_SERVER: 'true'
GITEA__server__HTTP_PORT: '3000'
GITEA__server__TRUSTED_PROXIES: '127.0.0.1/8,10.0.0.0/8,172.16.0.0/12,192.168.0.0/16'
GITEA__database__DB_TYPE: mysql
GITEA__database__HOST: mariadb:3306
GITEA__database__NAME: "gitea"
GITEA__database__USER: "gitea"
GITEA__database__PASSWD: "<PASSWORD>"
GITEA__packages__ENABLED: "true"
GITEA__security__INSTALL_LOCK: "true"
GITEA__database__NAME: 'gitea'
GITEA__database__USER: 'gitea'
GITEA__database__PASSWD: '<PASSWORD>'
GITEA__packages__ENABLED: 'true'
GITEA__security__INSTALL_LOCK: 'true'
volumes:
- /share/np-dms/gitea/backup:/backup
- /share/np-dms/gitea/etc:/etc/gitea
@@ -1750,8 +1778,8 @@ services:
- /etc/timezone:/etc/timezone:ro
- /etc/localtime:/etc/localtime:ro
ports:
- "3003:3000"
- "2222:22"
- '3003:3000'
- '2222:22'
networks:
- lcbp3
- giteanet
@@ -1775,10 +1803,10 @@ x-restart: &restart_policy
x-logging: &default_logging
logging:
driver: "json-file"
driver: 'json-file'
options:
max-size: "10m"
max-file: "5"
max-size: '10m'
max-file: '5'
networks:
lcbp3:
@@ -1792,39 +1820,39 @@ services:
deploy:
resources:
limits:
cpus: "1.5"
cpus: '1.5'
memory: 2G
reservations:
cpus: "0.25"
cpus: '0.25'
memory: 512M
environment:
TZ: "Asia/Bangkok"
NODE_ENV: "production"
N8N_PUBLIC_URL: "https://n8n.np-dms.work/"
WEBHOOK_URL: "https://n8n.np-dms.work/"
N8N_HOST: "n8n.np-dms.work"
TZ: 'Asia/Bangkok'
NODE_ENV: 'production'
N8N_PUBLIC_URL: 'https://n8n.np-dms.work/'
WEBHOOK_URL: 'https://n8n.np-dms.work/'
N8N_HOST: 'n8n.np-dms.work'
N8N_PORT: 5678
N8N_PROTOCOL: "https"
N8N_PROXY_HOPS: "1"
N8N_PROTOCOL: 'https'
N8N_PROXY_HOPS: '1'
N8N_DIAGNOSTICS_ENABLED: 'false'
N8N_SECURE_COOKIE: 'true'
# N8N_ENCRYPTION_KEY should be kept in .env (gitignored)
N8N_ENFORCE_SETTINGS_FILE_PERMISSIONS: 'true'
DB_TYPE: mysqldb
DB_MYSQLDB_DATABASE: "n8n"
DB_MYSQLDB_USER: "center"
DB_MYSQLDB_HOST: "mariadb"
DB_MYSQLDB_DATABASE: 'n8n'
DB_MYSQLDB_USER: 'center'
DB_MYSQLDB_HOST: 'mariadb'
DB_MYSQLDB_PORT: 3306
ports:
- "5678:5678"
- '5678:5678'
networks:
lcbp3: {}
volumes:
- "/share/np-dms/n8n:/home/node/.n8n"
- "/share/np-dms/n8n/cache:/home/node/.cache"
- "/share/np-dms/n8n/scripts:/scripts"
- '/share/np-dms/n8n:/home/node/.n8n'
- '/share/np-dms/n8n/cache:/home/node/.cache'
- '/share/np-dms/n8n/scripts:/scripts'
healthcheck:
test: ["CMD", "wget", "-qO-", "http://127.0.0.1:5678/"]
test: ['CMD', 'wget', '-qO-', 'http://127.0.0.1:5678/']
interval: 15s
timeout: 5s
retries: 30
@@ -1839,4 +1867,3 @@ services:
This stack contains `backend` (NestJS) and `frontend` (Next.js).
Refer to [04-04-deployment-guide.md](./04-04-deployment-guide.md) for full deployment steps and CI/CD pipeline details.
@@ -1,4 +1,5 @@
# 04.2 Backup & Disaster Recovery
**Project:** LCBP3-DMS
**Version:** 1.8.0
**Status:** Active
@@ -391,7 +392,6 @@ WHERE created_at < DATE_SUB(NOW(), INTERVAL 1 YEAR);
**Last Review:** 2025-12-01
**Next Review:** 2026-03-01
---
# Backup Strategy สำหรับ LCBP3-DMS
@@ -642,7 +642,6 @@ restic -r /volume1/backup/restic-repo snapshots --latest 5
> 📝 **หมายเหตุ**: เอกสารนี้อ้างอิงจาก Architecture Document **v1.8.0**
---
# Disaster Recovery Plan สำหรับ LCBP3-DMS
+114 -122
View File
@@ -1,4 +1,5 @@
# 04.3 Monitoring & Alerting
**Project:** LCBP3-DMS
**Version:** 1.8.0
**Status:** Active
@@ -78,12 +79,7 @@ This document describes monitoring setup, health checks, and alerting rules for
```typescript
// File: backend/src/health/health.controller.ts
import { Controller, Get } from '@nestjs/common';
import {
HealthCheck,
HealthCheckService,
TypeOrmHealthIndicator,
DiskHealthIndicator,
} from '@nestjs/terminus';
import { HealthCheck, HealthCheckService, TypeOrmHealthIndicator, DiskHealthIndicator } from '@nestjs/terminus';
@Controller('health')
export class HealthController {
@@ -208,12 +204,7 @@ done
```typescript
// File: backend/src/common/interceptors/performance.interceptor.ts
import {
Injectable,
NestInterceptor,
ExecutionContext,
CallHandler,
} from '@nestjs/common';
import { Injectable, NestInterceptor, ExecutionContext, CallHandler } from '@nestjs/common';
import { Observable } from 'rxjs';
import { tap } from 'rxjs/operators';
import { logger } from 'src/config/logger.config';
@@ -460,7 +451,6 @@ ab -n 1000 -c 10 \
**Last Review:** 2025-12-01
**Next Review:** 2026-03-01
---
# การติดตั้ง Monitoring Stack บน ASUSTOR
@@ -472,15 +462,15 @@ ab -n 1000 -c 10 \
Stack สำหรับ Monitoring ประกอบด้วย:
| Service | Port | Purpose | Host |
| :---------------- | :--------------------------- | :-------------------------------- | :------ |
| Service | Port | Purpose | Host |
| :---------------- | :--------------------------- | :--------------------------------- | :------ |
| **Prometheus** | 9090 | เก็บ Metrics และ Time-series data | ASUSTOR |
| **Grafana** | 3000 | Dashboard สำหรับแสดงผล Metrics | ASUSTOR |
| **Grafana** | 3000 | Dashboard สำหรับแสดงผล Metrics | ASUSTOR |
| **Node Exporter** | 9100 | เก็บ Metrics ของ Host system | Both |
| **cAdvisor** | 8080 (ASUSTOR) / 8088 (QNAP) | เก็บ Metrics ของ Docker containers | Both |
| **Uptime Kuma** | 3001 | Service Availability Monitoring | ASUSTOR |
| **Loki** | 3100 | Log aggregation | ASUSTOR |
| **Promtail** | - | Log shipper (Sender) | ASUSTOR |
| **Uptime Kuma** | 3001 | Service Availability Monitoring | ASUSTOR |
| **Loki** | 3100 | Log aggregation | ASUSTOR |
| **Promtail** | - | Log shipper (Sender) | ASUSTOR |
---
@@ -613,10 +603,10 @@ x-restart: &restart_policy
x-logging: &default_logging
logging:
driver: "json-file"
driver: 'json-file'
options:
max-size: "10m"
max-file: "5"
max-size: '10m'
max-file: '5'
networks:
lcbp3:
@@ -635,27 +625,27 @@ services:
deploy:
resources:
limits:
cpus: "1.0"
cpus: '1.0'
memory: 1G
reservations:
cpus: "0.25"
cpus: '0.25'
memory: 256M
environment:
TZ: "Asia/Bangkok"
TZ: 'Asia/Bangkok'
command:
- '--config.file=/etc/prometheus/prometheus.yml'
- '--storage.tsdb.path=/prometheus'
- '--storage.tsdb.retention.time=30d'
- '--web.enable-lifecycle'
ports:
- "9090:9090"
- '9090:9090'
networks:
- lcbp3
volumes:
- "/volume1/np-dms/monitoring/prometheus/config:/etc/prometheus:ro"
- "/volume1/np-dms/monitoring/prometheus/data:/prometheus"
- '/volume1/np-dms/monitoring/prometheus/config:/etc/prometheus:ro'
- '/volume1/np-dms/monitoring/prometheus/data:/prometheus'
healthcheck:
test: ["CMD", "wget", "--spider", "-q", "http://localhost:9090/-/healthy"]
test: ['CMD', 'wget', '--spider', '-q', 'http://localhost:9090/-/healthy']
interval: 30s
timeout: 10s
retries: 3
@@ -672,27 +662,27 @@ services:
deploy:
resources:
limits:
cpus: "1.0"
cpus: '1.0'
memory: 512M
reservations:
cpus: "0.25"
cpus: '0.25'
memory: 128M
environment:
TZ: "Asia/Bangkok"
TZ: 'Asia/Bangkok'
GF_SECURITY_ADMIN_USER: admin
GF_SECURITY_ADMIN_PASSWORD: "Center#2025"
GF_SERVER_ROOT_URL: "https://grafana.np-dms.work"
GF_SECURITY_ADMIN_PASSWORD: 'Center#2025'
GF_SERVER_ROOT_URL: 'https://grafana.np-dms.work'
GF_INSTALL_PLUGINS: grafana-clock-panel,grafana-piechart-panel
ports:
- "3000:3000"
- '3000:3000'
networks:
- lcbp3
volumes:
- "/volume1/np-dms/monitoring/grafana/data:/var/lib/grafana"
- '/volume1/np-dms/monitoring/grafana/data:/var/lib/grafana'
depends_on:
- prometheus
healthcheck:
test: ["CMD-SHELL", "wget --spider -q http://localhost:3000/api/health || exit 1"]
test: ['CMD-SHELL', 'wget --spider -q http://localhost:3000/api/health || exit 1']
interval: 30s
timeout: 10s
retries: 3
@@ -707,18 +697,18 @@ services:
deploy:
resources:
limits:
cpus: "0.5"
cpus: '0.5'
memory: 256M
environment:
TZ: "Asia/Bangkok"
TZ: 'Asia/Bangkok'
ports:
- "3001:3001"
- '3001:3001'
networks:
- lcbp3
volumes:
- "/volume1/np-dms/monitoring/uptime-kuma/data:/app/data"
- '/volume1/np-dms/monitoring/uptime-kuma/data:/app/data'
healthcheck:
test: ["CMD-SHELL", "curl -f http://localhost:3001/api/entry-page || exit 1"]
test: ['CMD-SHELL', 'curl -f http://localhost:3001/api/entry-page || exit 1']
interval: 30s
timeout: 10s
retries: 3
@@ -733,16 +723,16 @@ services:
deploy:
resources:
limits:
cpus: "0.5"
cpus: '0.5'
memory: 128M
environment:
TZ: "Asia/Bangkok"
TZ: 'Asia/Bangkok'
command:
- '--path.procfs=/host/proc'
- '--path.sysfs=/host/sys'
- '--collector.filesystem.mount-points-exclude=^/(sys|proc|dev|host|etc)($$|/)'
ports:
- "9100:9100"
- '9100:9100'
networks:
- lcbp3
volumes:
@@ -750,7 +740,7 @@ services:
- /sys:/host/sys:ro
- /:/rootfs:ro
healthcheck:
test: ["CMD", "wget", "--spider", "-q", "http://localhost:9100/metrics"]
test: ['CMD', 'wget', '--spider', '-q', 'http://localhost:9100/metrics']
interval: 30s
timeout: 10s
retries: 3
@@ -768,12 +758,12 @@ services:
deploy:
resources:
limits:
cpus: "0.5"
cpus: '0.5'
memory: 256M
environment:
TZ: "Asia/Bangkok"
TZ: 'Asia/Bangkok'
ports:
- "8088:8088"
- '8088:8088'
networks:
- lcbp3
volumes:
@@ -783,7 +773,7 @@ services:
- /var/lib/docker/:/var/lib/docker:ro
- /dev/disk/:/dev/disk:ro
healthcheck:
test: ["CMD", "wget", "--spider", "-q", "http://localhost:8080/healthz"]
test: ['CMD', 'wget', '--spider', '-q', 'http://localhost:8080/healthz']
interval: 30s
timeout: 10s
retries: 3
@@ -798,19 +788,19 @@ services:
deploy:
resources:
limits:
cpus: "0.5"
cpus: '0.5'
memory: 512M
environment:
TZ: "Asia/Bangkok"
TZ: 'Asia/Bangkok'
command: -config.file=/etc/loki/local-config.yaml
ports:
- "3100:3100"
- '3100:3100'
networks:
- lcbp3
volumes:
- "/volume1/np-dms/monitoring/loki/data:/loki"
- '/volume1/np-dms/monitoring/loki/data:/loki'
healthcheck:
test: ["CMD", "wget", "--spider", "-q", "http://localhost:3100/ready"]
test: ['CMD', 'wget', '--spider', '-q', 'http://localhost:3100/ready']
interval: 30s
timeout: 10s
retries: 3
@@ -822,21 +812,21 @@ services:
<<: [*restart_policy, *default_logging]
image: grafana/promtail:2.9.0
container_name: promtail
user: "0:0"
user: '0:0'
deploy:
resources:
limits:
cpus: "0.5"
cpus: '0.5'
memory: 256M
environment:
TZ: "Asia/Bangkok"
TZ: 'Asia/Bangkok'
command: -config.file=/etc/promtail/promtail-config.yml
networks:
- lcbp3
volumes:
- "/volume1/np-dms/monitoring/promtail/config:/etc/promtail:ro"
- "/var/run/docker.sock:/var/run/docker.sock:ro"
- "/var/lib/docker/containers:/var/lib/docker/containers:ro"
- '/volume1/np-dms/monitoring/promtail/config:/etc/promtail:ro'
- '/var/run/docker.sock:/var/run/docker.sock:ro'
- '/var/lib/docker/containers:/var/lib/docker/containers:ro'
depends_on:
- loki
```
@@ -867,7 +857,7 @@ services:
- '--path.sysfs=/host/sys'
- '--collector.filesystem.mount-points-exclude=^/(sys|proc|dev|host|etc)($$|/)'
ports:
- "9100:9100"
- '9100:9100'
networks:
- lcbp3
volumes:
@@ -881,7 +871,7 @@ services:
restart: unless-stopped
privileged: true
ports:
- "8088:8080"
- '8088:8080'
networks:
- lcbp3
volumes:
@@ -899,11 +889,11 @@ services:
command:
- '--config.my-cnf=/etc/mysql/my.cnf'
ports:
- "9104:9104"
- '9104:9104'
networks:
- lcbp3
volumes:
- "/share/np-dms/monitoring/mysqld-exporter/.my.cnf:/etc/mysql/my.cnf:ro"
- '/share/np-dms/monitoring/mysqld-exporter/.my.cnf:/etc/mysql/my.cnf:ro'
```
---
@@ -1012,7 +1002,6 @@ scrape_configs:
| 14204 | Elasticsearch | Elasticsearch view |
| 13106 | MySQL/MariaDB Overview | Detailed MySQL/MariaDB metrics |
### Import Dashboard via Grafana UI
1. Go to **Dashboards → Import**
@@ -1026,13 +1015,13 @@ scrape_configs:
### 📋 Prerequisites Checklist
| # | ขั้นตอน | Status |
| :--- | :------------------------------------------------------------------------------------------------- | :----- |
| 1 | SSH เข้า ASUSTOR ได้ (`ssh admin@192.168.10.9`) | ✅ |
| 2 | Docker Network `lcbp3` สร้างแล้ว (ดูหัวข้อ [สร้าง Docker Network](#-สร้าง-docker-network-ทำครั้งแรกครั้งเดียว)) | ✅ |
| 3 | สร้าง Directories และกำหนดสิทธิ์แล้ว (ดูหัวข้อ [กำหนดสิทธิ](#กำหนดสิทธิ-บน-asustor)) | ✅ |
| 4 | สร้าง `prometheus.yml` แล้ว (ดูหัวข้อ [Prometheus Configuration](#prometheus-configuration)) | ✅ |
| 5 | สร้าง `promtail-config.yml` แล้ว (ดูหัวข้อ [Step 1.2](#step-12-สร้าง-promtail-configyml)) | ✅ |
| # | ขั้นตอน | Status |
| :-- | :-------------------------------------------------------------------------------------------------------------- | :----- |
| 1 | SSH เข้า ASUSTOR ได้ (`ssh admin@192.168.10.9`) | ✅ |
| 2 | Docker Network `lcbp3` สร้างแล้ว (ดูหัวข้อ [สร้าง Docker Network](#-สร้าง-docker-network-ทำครั้งแรกครั้งเดียว)) | ✅ |
| 3 | สร้าง Directories และกำหนดสิทธิ์แล้ว (ดูหัวข้อ [กำหนดสิทธิ](#กำหนดสิทธิ-บน-asustor)) | ✅ |
| 4 | สร้าง `prometheus.yml` แล้ว (ดูหัวข้อ [Prometheus Configuration](#prometheus-configuration)) | ✅ |
| 5 | สร้าง `promtail-config.yml` แล้ว (ดูหัวข้อ [Step 1.2](#step-12-สร้าง-promtail-configyml)) | ✅ |
---
@@ -1093,7 +1082,7 @@ cat /volume1/np-dms/monitoring/prometheus/config/prometheus.yml
ต้องสร้าง Config ให้ Promtail อ่าน logs จาก Docker containers และส่งไป Loki:
```bash
````bash
# สร้างไฟล์ promtail-config.yml
cat > /volume1/np-dms/monitoring/promtail/config/promtail-config.yml << 'EOF'
server:
@@ -1127,9 +1116,10 @@ EOF
CREATE USER 'exporter'@'%' IDENTIFIED BY 'Center2025' WITH MAX_USER_CONNECTIONS 3;
GRANT PROCESS, REPLICATION CLIENT, SELECT, SLAVE MONITOR ON *.* TO 'exporter'@'%';
FLUSH PRIVILEGES;
```
````
### 2. สร้างไฟล์คอนฟิก .my.cnf บน QNAP
เพื่อให้ `mysqld-exporter` อ่านรหัสผ่านที่มีตัวอักษรพิเศษได้ถูกต้อง:
1. **SSH เข้า QNAP** (หรือใช้ File Station สร้าง Folder):
@@ -1143,11 +1133,11 @@ FLUSH PRIVILEGES;
3. **สร้างไฟล์ .my.cnf**:
```bash
cat > /share/np-dms/monitoring/mysqld-exporter/.my.cnf << 'EOF'
[client]
user=exporter
password=Center2025
host=mariadb
EOF
[client]
user=exporter
password=Center2025
host=mariadb
EOF
```
4. **กำหนดสิทธิ์ไฟล์** (เพื่อให้ Container อ่านไฟล์ได้):
```bash
@@ -1155,8 +1145,10 @@ EOF
```
# ตรวจสอบ
cat /volume1/np-dms/monitoring/promtail/config/promtail-config.yml
```
````
---
@@ -1187,7 +1179,7 @@ docker compose up -d
# ตรวจสอบ container status
docker compose ps
```
````
---
@@ -1200,15 +1192,15 @@ docker ps --filter "name=prometheus" --filter "name=grafana" \
--filter "name=cadvisor" --filter "name=loki" --filter "name=promtail"
```
| Service | วิธีตรวจสอบ | Expected Result |
| :---------------- | :----------------------------------------------------------------- | :------------------------------------ |
| ✅ **Prometheus** | `curl http://192.168.10.9:9090/-/healthy` | `Prometheus Server is Healthy` |
| ✅ **Grafana** | เปิด `https://grafana.np-dms.work` (หรือ `http://192.168.10.9:3000`) | หน้า Login |
| ✅ **Uptime Kuma** | เปิด `https://uptime.np-dms.work` (หรือ `http://192.168.10.9:3001`) | หน้า Setup |
| ✅ **Node Exp.** | `curl http://192.168.10.9:9100/metrics \| head` | Metrics output |
| ✅ **cAdvisor** | `curl http://192.168.10.9:8080/healthz` | `ok` |
| ✅ **Loki** | `curl http://192.168.10.9:3100/ready` | `ready` |
| ✅ **Promtail** | เช็ค Logs: `docker logs promtail` | ไม่ควรมี Error + เห็น connection success |
| Service | วิธีตรวจสอบ | Expected Result |
| :----------------- | :------------------------------------------------------------------- | :--------------------------------------- |
| ✅ **Prometheus** | `curl http://192.168.10.9:9090/-/healthy` | `Prometheus Server is Healthy` |
| ✅ **Grafana** | เปิด `https://grafana.np-dms.work` (หรือ `http://192.168.10.9:3000`) | หน้า Login |
| ✅ **Uptime Kuma** | เปิด `https://uptime.np-dms.work` (หรือ `http://192.168.10.9:3001`) | หน้า Setup |
| ✅ **Node Exp.** | `curl http://192.168.10.9:9100/metrics \| head` | Metrics output |
| ✅ **cAdvisor** | `curl http://192.168.10.9:8080/healthz` | `ok` |
| ✅ **Loki** | `curl http://192.168.10.9:3100/ready` | `ready` |
| ✅ **Promtail** | เช็ค Logs: `docker logs promtail` | ไม่ควรมี Error + เห็น connection success |
---
@@ -1262,30 +1254,33 @@ curl -s http://localhost:9090/api/v1/targets | grep -E '"qnap-(node|cadvisor)"'
เพื่อการ Monitor ที่สมบูรณ์ แนะนำให้ Import Dashboards ต่อไปนี้:
#### 6.1 Host Monitoring (Node Exporter)
* **Concept:** ดู resource ของเครื่อง Host (CPU, RAM, Disk, Network)
* **Dashboard ID:** `1860` (Node Exporter Full)
* **วิธี Import:**
1. ไปที่ **Dashboards****New** → **Import**
2. ช่อง **Import via grafana.com** ใส่เลข `1860` กด **Load**
3. เลือก Data source: **Prometheus**
4. กด **Import**
- **Concept:** ดู resource ของเครื่อง Host (CPU, RAM, Disk, Network)
- **Dashboard ID:** `1860` (Node Exporter Full)
- **วิธี Import:**
1. ไปที่ **Dashboards****New** → **Import**
2. ช่อง **Import via grafana.com** ใส่เลข `1860` กด **Load**
3. เลือก Data source: **Prometheus**
4. กด **Import**
#### 6.2 Container Monitoring (cAdvisor)
* **Concept:** ดู resource ของแต่ละ Container (เชื่อม Logs ด้วย)
* **Dashboard ID:** `14282` (Cadvisor exporter)
* **วิธี Import:**
1. ใส่เลข `14282` กด **Load**
2. เลือก Data source: **Prometheus**
3. กด **Import**
- **Concept:** ดู resource ของแต่ละ Container (เชื่อม Logs ด้วย)
- **Dashboard ID:** `14282` (Cadvisor exporter)
- **วิธี Import:**
1. ใส่เลข `14282` กด **Load**
2. เลือก Data source: **Prometheus**
3. กด **Import**
#### 6.3 Logs Monitoring (Loki Integration)
เพื่อให้ Dashboard ของ Container แสดง Logs จาก Loki ได้ด้วย:
1. เปิด Dashboard **Cadvisor exporter** ที่เพิ่ง Import มา
2. กดปุ่ม **Add visualization** (หรือ Edit dashboard)
3. เลือก Data source: **Loki**
4. ในช่อง Query ใส่: `{container="$name"}`
* *(Note: `$name` มาจาก Variable ของ Dashboard 14282)*
- _(Note: `$name` มาจาก Variable ของ Dashboard 14282)_
5. ปรับ Visualization type เป็น **Logs**
6. ตั้งชื่อ Panel ว่า **"Container Logs"**
7. กด **Apply** และ **Save Dashboard**
@@ -1316,8 +1311,6 @@ curl -s http://localhost:9090/api/v1/targets | grep -E '"qnap-(node|cadvisor)"'
> 📝 **หมายเหตุ**: เอกสารนี้อ้างอิงจาก Architecture Document **v1.8.0** - Monitoring Stack deploy บน ASUSTOR AS5403T
---
## 📈 Document Numbering Specific Monitoring
@@ -1389,9 +1382,9 @@ groups:
severity: critical
component: document-numbering
annotations:
summary: "Redis is unavailable for document numbering"
description: "System is falling back to DB-only locking. Performance degraded by 30-50%."
runbook_url: "https://wiki.lcbp3/runbooks/redis-unavailable"
summary: 'Redis is unavailable for document numbering'
description: 'System is falling back to DB-only locking. Performance degraded by 30-50%.'
runbook_url: 'https://wiki.lcbp3/runbooks/redis-unavailable'
# CRITICAL: High lock failure rate
- alert: HighLockFailureRate
@@ -1402,9 +1395,9 @@ groups:
severity: critical
component: document-numbering
annotations:
summary: "Lock acquisition failure rate > 10%"
description: "Check Redis and database performance immediately"
runbook_url: "https://wiki.lcbp3/runbooks/high-lock-failure"
summary: 'Lock acquisition failure rate > 10%'
description: 'Check Redis and database performance immediately'
runbook_url: 'https://wiki.lcbp3/runbooks/high-lock-failure'
# WARNING: Elevated lock failure rate
- alert: ElevatedLockFailureRate
@@ -1415,8 +1408,8 @@ groups:
severity: warning
component: document-numbering
annotations:
summary: "Lock acquisition failure rate > 5%"
description: "Monitor closely. May escalate to critical soon."
summary: 'Lock acquisition failure rate > 5%'
description: 'Monitor closely. May escalate to critical soon.'
# WARNING: Slow lock acquisition
- alert: SlowLockAcquisition
@@ -1429,8 +1422,8 @@ groups:
severity: warning
component: document-numbering
annotations:
summary: "P95 lock acquisition time > 1 second"
description: "Lock acquisition is slower than expected. Check Redis latency."
summary: 'P95 lock acquisition time > 1 second'
description: 'Lock acquisition is slower than expected. Check Redis latency.'
# WARNING: High retry count
- alert: HighRetryCount
@@ -1443,8 +1436,8 @@ groups:
severity: warning
component: document-numbering
annotations:
summary: "Retry count > 100 per hour in project {{ $labels.project }}"
description: "High contention detected. Consider scaling."
summary: 'Retry count > 100 per hour in project {{ $labels.project }}'
description: 'High contention detected. Consider scaling.'
# WARNING: Slow generation
- alert: SlowDocumentNumberGeneration
@@ -1457,8 +1450,8 @@ groups:
severity: warning
component: document-numbering
annotations:
summary: "P95 generation time > 2 seconds"
description: "Document number generation is slower than SLA target"
summary: 'P95 generation time > 2 seconds'
description: 'Document number generation is slower than SLA target'
```
### 3.3. AlertManager Configuration
@@ -1553,4 +1546,3 @@ Dashboard panels ที่สำคัญ:
6. **DB Connection Pool Usage** (Gauge)
- Query: `docnum_db_connection_pool_usage`
- Alert threshold: > 80%
@@ -319,7 +319,7 @@ services:
- discovery.type=single-node
- xpack.security.enabled=true
- ELASTIC_PASSWORD=${ELASTICSEARCH_PASSWORD}
- "ES_JAVA_OPTS=-Xms2g -Xmx2g"
- 'ES_JAVA_OPTS=-Xms2g -Xmx2g'
volumes:
- /volume1/lcbp3/volumes/elastic-data:/usr/share/elasticsearch/data
networks:
@@ -348,8 +348,8 @@ services:
container_name: lcbp3-nginx
restart: unless-stopped
ports:
- "80:80"
- "443:443"
- '80:80'
- '443:443'
volumes:
- ./nginx.conf:/etc/nginx/nginx.conf:ro
- ./ssl:/etc/nginx/ssl:ro
@@ -977,6 +977,7 @@ chmod -R 750 /share/np-dms/data/uploads
5. กด **Create**
ตรวจสอบ Container Status: Applications → `lcbp3-app`
- ✅ `backend` → Running (healthy)
- ✅ `frontend` → Running (healthy)
@@ -382,10 +382,7 @@ docker exec lcbp3-redis redis-cli FLUSHDB
async function bootstrap() {
const app = await NestFactory.create(AppModule, {
logger:
process.env.NODE_ENV === 'production'
? ['error', 'warn']
: ['log', 'error', 'warn', 'debug'],
logger: process.env.NODE_ENV === 'production' ? ['error', 'warn'] : ['log', 'error', 'warn', 'debug'],
});
// Enable compression
@@ -465,18 +462,15 @@ echo "Security maintenance completed: $(date)"
### Unplanned Maintenance Procedures
1. **Assess Urgency**
- Can it wait for scheduled maintenance?
- Is it causing active issues?
2. **Communicate Impact**
- Notify stakeholders immediately
- Estimate downtime
- Provide updates every 30 minutes
3. **Execute Carefully**
- Always backup first
- Have rollback plan ready
- Test in staging if possible
@@ -11,6 +11,7 @@
This document outlines security monitoring, access control management, vulnerability management, and security incident response for LCBP3-DMS.
**Security Status as of 2026-03-19:**
- ✅ **0 known vulnerabilities** (Backend dependencies fully patched)
- ✅ **52 vulnerabilities resolved** (27 high + 20 moderate + 5 low severity)
- ✅ **Major security updates applied**: Elasticsearch 9.3.4, Nodemailer 8.0.3, UUID 13.0.0
@@ -319,7 +320,6 @@ FLUSH PRIVILEGES;
```
3. **Notify stakeholders**
- Security officer
- Management
- Affected users (if applicable)
@@ -624,4 +624,3 @@ Configure at: Gitea → Repository → Settings → Actions → Secrets
| `PORT` | SSH Port (`22`) |
| `USERNAME` | SSH user with Docker access |
| `PASSWORD` | SSH password (prefer SSH Key) |
@@ -401,22 +401,18 @@ Database connection pool was exhausted due to slow queries not releasing connect
### PIR Meeting Agenda
1. **Timeline Review** (10 min)
- What happened and when?
- What was the impact?
2. **Root Cause Analysis** (15 min)
- Why did it happen?
- What were the contributing factors?
3. **What Went Well** (10 min)
- What did we do right?
- What helped us resolve quickly?
4. **What Went Wrong** (15 min)
- What could we have done better?
- What slowed us down?
@@ -1,16 +1,19 @@
# 🚀 Release Management Policy — LCBP3-DMS v1.8.0
---
title: 'Release Management Policy, Versioning Strategy, and Deployment Gates'
version: 1.0.0
status: DRAFT
owner: Nattanin Peancharoen (System Architect / Release Manager)
last_updated: 2026-03-11
related:
- specs/04-Infrastructure-OPS/04-04-deployment-guide.md ← Blue-Green Deployment Detail
- specs/04-Infrastructure-OPS/04-07-incident-response.md
- specs/06-Decision-Records/ADR-015-deployment.md
- specs/00-Overview/00-04-stakeholder-signoff-and-risk.md
- specs/04-Infrastructure-OPS/04-04-deployment-guide.md ← Blue-Green Deployment Detail
- specs/04-Infrastructure-OPS/04-07-incident-response.md
- specs/06-Decision-Records/ADR-015-deployment.md
- specs/00-Overview/00-04-stakeholder-signoff-and-risk.md
---
> [!IMPORTANT]
@@ -30,11 +33,11 @@ v1.8.1
└────── MAJOR: Breaking Changes, Architectural Shift (กำหนดโดย PO)
```
| Type | ตัวอย่าง | เมื่อไหร่ |
|------|---------|---------|
| Type | ตัวอย่าง | เมื่อไหร่ |
| --------- | --------------- | ------------------------------------------ |
| **MAJOR** | v1.0.0 → v2.0.0 | Breaking Change, Major Architecture Change |
| **MINOR** | v1.8.0 → v1.9.0 | New Feature หลังจาก Sprint สำเร็จ |
| **PATCH** | v1.8.0 → v1.8.1 | Bug Fix, Security Patch |
| **MINOR** | v1.8.0 → v1.9.0 | New Feature หลังจาก Sprint สำเร็จ |
| **PATCH** | v1.8.0 → v1.8.1 | Bug Fix, Security Patch |
### Branch Strategy (Git Flow)
@@ -69,12 +72,12 @@ lcbp3-backend:v1.8.0-rc.1 ← Release Candidate
## 2. 📋 Release Types & Cadence
| Release Type | Cadence | Who Approves | Notes |
|-------------|---------|-------------|-------|
| **Sprint Release** (Minor) | ทุก 2 สัปดาห์ | PO + Lead Dev | ตามแผน Sprint |
| **Hotfix** (Patch) | ตามเหตุการณ์ | Lead Dev (P0/P1) → PO Notify | ไม่รอ Sprint |
| **Emergency Hotfix** | ทันที (P0) | Lead Dev → แจ้ง PO พร้อมกัน | Security, System Down |
| **Major Release** | กำหนดโดย PO | PO + กทท. Sign-off | Phase Change |
| Release Type | Cadence | Who Approves | Notes |
| -------------------------- | ------------- | ---------------------------- | --------------------- |
| **Sprint Release** (Minor) | ทุก 2 สัปดาห์ | PO + Lead Dev | ตามแผน Sprint |
| **Hotfix** (Patch) | ตามเหตุการณ์ | Lead Dev (P0/P1) → PO Notify | ไม่รอ Sprint |
| **Emergency Hotfix** | ทันที (P0) | Lead Dev → แจ้ง PO พร้อมกัน | Security, System Down |
| **Major Release** | กำหนดโดย PO | PO + กทท. Sign-off | Phase Change |
### Sprint Release Calendar (ตัวอย่าง)
@@ -89,6 +92,7 @@ Sprint 2: 1528 มี.ค. 2569 → Release v1.10.0 (11 เม.ย.)
## 3. 🚦 Release Gate Process
### Gate 1: Code Complete (วันสุดท้ายของ Sprint)
```
✅ Feature Freeze — ไม่รับ Feature ใหม่เข้า Release Branch
✅ All PRs Merged to release/vX.Y.Z
@@ -97,15 +101,15 @@ Sprint 2: 1528 มี.ค. 2569 → Release v1.10.0 (11 เม.ย.)
### Gate 2: Quality Gate (T-3 วันก่อน Release)
| Checkpoint | Tool | Threshold |
|-----------|------|----------|
| **TypeScript Compile** | `tsc --noEmit` | 0 Errors |
| **Unit Tests Pass** | Jest | ≥ 80% Pass Rate |
| **E2E Tests (Core Flows)** | Playwright/Cypress | 100% Core Flows ผ่าน |
| **Security Scan** | `npm audit` | 0 Critical/High Vulnerabilities |
| **Lint** | ESLint | 0 Errors (Warnings ยอมรับได้) |
| **Build Success** | Docker Build | Exit 0 |
| **Image Size** | Docker inspect | < 2GB (Backend), < 1.5GB (Frontend) |
| Checkpoint | Tool | Threshold |
| -------------------------- | ------------------ | ----------------------------------- |
| **TypeScript Compile** | `tsc --noEmit` | 0 Errors |
| **Unit Tests Pass** | Jest | ≥ 80% Pass Rate |
| **E2E Tests (Core Flows)** | Playwright/Cypress | 100% Core Flows ผ่าน |
| **Security Scan** | `npm audit` | 0 Critical/High Vulnerabilities |
| **Lint** | ESLint | 0 Errors (Warnings ยอมรับได้) |
| **Build Success** | Docker Build | Exit 0 |
| **Image Size** | Docker inspect | < 2GB (Backend), < 1.5GB (Frontend) |
**Owner:** Lead Dev
**Tool:** Gitea CI/CD Pipeline (ADR-015)
@@ -114,13 +118,13 @@ Sprint 2: 1528 มี.ค. 2569 → Release v1.10.0 (11 เม.ย.)
### Gate 3: Staging Validation (T-2 วันก่อน Release)
| Checkpoint | ผ่านเมื่อ | Owner |
|-----------|---------|-------|
| Deploy to Staging Environment | สำเร็จ, ไม่มี Error | DevOps |
| Health Check `/health` → 200 | ✅ | Automated |
| Smoke Test (Manual): Login → Create Correspondence → Submit | ผ่าน | Dev หรือ QA |
| Migration Script (ถ้ามี Schema Change) | รันสำเร็จบน Staging Schema | DBA / Dev |
| Rollback Test: Deploy → Rollback → Verify | ระบบ Rollback ได้ใน < 5 นาที | DevOps |
| Checkpoint | ผ่านเมื่อ | Owner |
| ----------------------------------------------------------- | ---------------------------- | ----------- |
| Deploy to Staging Environment | สำเร็จ, ไม่มี Error | DevOps |
| Health Check `/health` → 200 | ✅ | Automated |
| Smoke Test (Manual): Login → Create Correspondence → Submit | ผ่าน | Dev หรือ QA |
| Migration Script (ถ้ามี Schema Change) | รันสำเร็จบน Staging Schema | DBA / Dev |
| Rollback Test: Deploy → Rollback → Verify | ระบบ Rollback ได้ใน < 5 นาที | DevOps |
**Owner:** Nattanin P.
@@ -164,12 +168,12 @@ PO Sign-off: ✅ อนุมัติ Release
### เมื่อไหร่ต้อง Hotfix
| Priority | ตัวอย่าง | SLA Start Hotfix | Deploy Target |
|---------|---------|-----------------|--------------|
| **P0 — Critical** | ระบบล่ม, Data Corruption, Security Breach | ทันที (< 30 นาที) | < 4 ชั่วโมง |
| **P1 — High** | Feature หลักทำงานผิด, Login Fail | < 2 ชั่วโมง | < 24 ชั่วโมง |
| **P2 — Medium** | Feature รองทำงานผิด | ใน Sprint ถัดไป | Sprint Release |
| **P3 — Low** | UI Cosmetic, Minor UX | Backlog | Sprint Release |
| Priority | ตัวอย่าง | SLA Start Hotfix | Deploy Target |
| ----------------- | ----------------------------------------- | ----------------- | -------------- |
| **P0 — Critical** | ระบบล่ม, Data Corruption, Security Breach | ทันที (< 30 นาที) | < 4 ชั่วโมง |
| **P1 — High** | Feature หลักทำงานผิด, Login Fail | < 2 ชั่วโมง | < 24 ชั่วโมง |
| **P2 — Medium** | Feature รองทำงานผิด | ใน Sprint ถัดไป | Sprint Release |
| **P3 — Low** | UI Cosmetic, Minor UX | Backlog | Sprint Release |
### Hotfix Workflow
@@ -211,22 +215,22 @@ cd /volume1/lcbp3/scripts
### เมื่อไหร่ต้อง Rollback
| Trigger | Threshold | Action |
|---------|----------|--------|
| Health Check Fail หลัง Deploy | 3 consecutive failures | Auto-rollback |
| Error Rate สูง | > 5% ใน 15 นาทีแรก | Manual Rollback (DevOps trigger) |
| P90 Response Time สูงมาก | > 2000ms ต่อเนื่อง 5 นาที | Manual Rollback |
| Critical Bug พบใน Production | P0 Bug | Manual Rollback ทันที |
| Migration Fail | Error Rate > 20% | Manual Rollback + Notify |
| Trigger | Threshold | Action |
| ----------------------------- | ------------------------- | -------------------------------- |
| Health Check Fail หลัง Deploy | 3 consecutive failures | Auto-rollback |
| Error Rate สูง | > 5% ใน 15 นาทีแรก | Manual Rollback (DevOps trigger) |
| P90 Response Time สูงมาก | > 2000ms ต่อเนื่อง 5 นาที | Manual Rollback |
| Critical Bug พบใน Production | P0 Bug | Manual Rollback ทันที |
| Migration Fail | Error Rate > 20% | Manual Rollback + Notify |
### Rollback SLA
| Scenario | Target Rollback Time |
|----------|---------------------|
| Blue-Green Switch (nginx reload) | < 30 วินาที |
| Full Container Restart | < 5 นาที |
| Database Rollback (SQL Revert) | < 30 นาที |
| Full System Restore (Backup) | < 4 ชั่วโมง (RTO) |
| Scenario | Target Rollback Time |
| -------------------------------- | -------------------- |
| Blue-Green Switch (nginx reload) | < 30 วินาที |
| Full Container Restart | < 5 นาที |
| Database Rollback (SQL Revert) | < 30 นาที |
| Full System Restore (Backup) | < 4 ชั่วโมง (RTO) |
### Rollback Decision Tree
@@ -286,40 +290,48 @@ Security Check: npm audit (ถ้าเป็น Security Bug)
```markdown
# Release Notes — LCBP3-DMS v[X.Y.Z]
**Date:** YYYY-MM-DD | **Type:** Sprint Release / Hotfix
## 🆕 New Features
- [Feature Name]: [Brief description]
## 🐛 Bug Fixes
- **[BUG-ID]** [Screen/Module]: [What was wrong → What's fixed]
## 🔒 Security Updates
- [CVE/Issue]: [Description]
## ⚠️ Breaking Changes
- [If any — ระบุชัดเจน]
## 📋 Schema Changes
- [Table]: [Column added/modified/removed]
- **Action Required:** Admin ต้อง Apply SQL ใน `deltas/XX-description.sql`
## 🔧 Configuration Changes
- [Env Var]: [Change description]
## 📊 Performance Impact
- [Module]: [Expected improvement/change]
```
### Communication Channels
| Release Type | Channel | ผู้รับ | Timing |
|-------------|---------|-------|--------|
| **Sprint Release** | LINE Group (Support) | Org Admin ทุกองค์กร | T-1 วัน (แจ้งล่วงหน้า) |
| **Sprint Release** | Email | ผู้บริหาร + PO | หลัง Deploy เสร็จ |
| **Hotfix (P1)** | LINE Group | Org Admin | พร้อมกับ Deploy |
| **Hotfix (P0)** | LINE Direct | กทท. IT + NAP On-call | ก่อน Deploy (แจ้งว่ากำลังแก้) |
| **Maintenance Window** | Email + LINE | ทุก User | T-24 ชั่วโมง |
| Release Type | Channel | ผู้รับ | Timing |
| ---------------------- | -------------------- | --------------------- | ----------------------------- |
| **Sprint Release** | LINE Group (Support) | Org Admin ทุกองค์กร | T-1 วัน (แจ้งล่วงหน้า) |
| **Sprint Release** | Email | ผู้บริหาร + PO | หลัง Deploy เสร็จ |
| **Hotfix (P1)** | LINE Group | Org Admin | พร้อมกับ Deploy |
| **Hotfix (P0)** | LINE Direct | กทท. IT + NAP On-call | ก่อน Deploy (แจ้งว่ากำลังแก้) |
| **Maintenance Window** | Email + LINE | ทุก User | T-24 ชั่วโมง |
### Maintenance Window Policy
@@ -337,13 +349,13 @@ Security Check: npm audit (ถ้าเป็น Security Bug)
## 8. 📊 Release Metrics & Tracking
| Metric | Target | วิธีวัด |
|--------|--------|--------|
| **Deployment Frequency** | 1 ครั้ง/สองสัปดาห์ | Gitea Release History |
| **Lead Time for Change** | < 3 วัน (code → production) | Commit Date → Deploy Date |
| **Change Failure Rate** | < 5% (% Release ที่ต้อง Rollback) | Rollback Log |
| **Mean Time to Restore (MTTR)** | < 4 ชั่วโมง (P0) / < 8 ชั่วโมง (P1) | Incident Log |
| **Time to Rollback** | < 5 นาที (Blue-Green Switch) | Deploy Log |
| Metric | Target | วิธีวัด |
| ------------------------------- | ----------------------------------- | ------------------------- |
| **Deployment Frequency** | 1 ครั้ง/สองสัปดาห์ | Gitea Release History |
| **Lead Time for Change** | < 3 วัน (code → production) | Commit Date → Deploy Date |
| **Change Failure Rate** | < 5% (% Release ที่ต้อง Rollback) | Rollback Log |
| **Mean Time to Restore (MTTR)** | < 4 ชั่วโมง (P0) / < 8 ชั่วโมง (P1) | Incident Log |
| **Time to Rollback** | < 5 นาที (Blue-Green Switch) | Deploy Log |
> **หมายเหตุ:** Metrics เหล่านี้คือ **DORA Metrics** (DevOps Research and Assessment)
> ติดตามใน Monthly Engineering Review
@@ -434,14 +446,14 @@ STAGING_URL=https://staging.lcbp3-dms.internal
### สิ่งที่ต้องสร้างทุก Release
| Artifact | Location | Owner | Retention |
|----------|---------|-------|----------|
| Release Notes | `specs/99-archives/releases/v{X.Y.Z}.md` | PO | ตลอดไป |
| Docker Images | Internal Registry (Gitea) | DevOps | ล่าสุด 5 Versions |
| DB Backup (Pre-deploy) | QNAP `/volume1/lcbp3/shared/backups/` | DevOps | 30 วัน |
| Delta SQL File | `specs/03-Data-and-Storage/deltas/` | Dev | ตลอดไป (Git) |
| CHANGELOG.md Update | Root of Repo | Dev | ตลอดไป |
| Deploy Log | `/volume1/lcbp3/shared/logs/deploy.log` | DevOps (Auto) | 90 วัน |
| Artifact | Location | Owner | Retention |
| ---------------------- | ---------------------------------------- | ------------- | ----------------- |
| Release Notes | `specs/99-archives/releases/v{X.Y.Z}.md` | PO | ตลอดไป |
| Docker Images | Internal Registry (Gitea) | DevOps | ล่าสุด 5 Versions |
| DB Backup (Pre-deploy) | QNAP `/volume1/lcbp3/shared/backups/` | DevOps | 30 วัน |
| Delta SQL File | `specs/03-Data-and-Storage/deltas/` | Dev | ตลอดไป (Git) |
| CHANGELOG.md Update | Root of Repo | Dev | ตลอดไป |
| Deploy Log | `/volume1/lcbp3/shared/logs/deploy.log` | DevOps (Auto) | 90 วัน |
---
@@ -450,6 +462,7 @@ STAGING_URL=https://staging.lcbp3-dms.internal
### Sprint Release Checklist
**T-3 วัน (Quality Gate)**
- [ ] All Unit Tests pass ≥ 80% coverage
- [ ] TypeScript 0 Errors
- [ ] ESLint 0 Errors
@@ -459,14 +472,16 @@ STAGING_URL=https://staging.lcbp3-dms.internal
- [ ] Delta SQL file ready (ถ้ามี Schema Change)
**T-1 วัน (Staging + Approval)**
- [ ] Deploy to Staging สำเร็จ
- [ ] Smoke Test on Staging ผ่าน
- [ ] Schema Migration Test on Staging ผ่าน (ถ้ามี)
- [ ] PO Review Complete
- [ ] PO Sign-off: "___ วันที่ ___"
- [ ] PO Sign-off: "\_\_\_ วันที่ \_\_\_"
- [ ] Org Admin Notification ส่งแล้ว (LINE)
**Release Day**
- [ ] DB Backup created + verified
- [ ] Schema Delta Applied (ถ้ามี) — แจ้ง Admin ทำ Manual
- [ ] `./deploy.sh` รัน (Blue-Green)
+10 -10
View File
@@ -16,16 +16,16 @@ It consolidates what was previously split across multiple operations and specifi
## 📂 Document Index
| File | Purpose | Key Contents |
| ------------------------------------------------------------------------ | ---------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------- |
| **[04-01-docker-compose.md](./04-01-docker-compose.md)** | Core Environment Setup | `.env` configs, Blue/Green Docker Compose, MariaDB & Redis optimization, **Appendix A: Live QNAP configs** (MariaDB, Redis/ES, NPM, Gitea, n8n) |
| **[04-02-backup-recovery.md](./04-02-backup-recovery.md)** | Disaster Recovery | RTO/RPO strategies, QNAP to ASUSTOR backup scripts, Restic/Mysqldump config |
| **[04-03-monitoring.md](./04-03-monitoring.md)** | Observability | Prometheus metrics, AlertManager rules, Grafana alerts |
| **[04-04-deployment-guide.md](./04-04-deployment-guide.md)** | Production Rollout | Blue-Green deployment scripts, **Appendix A: QNAP Container Station**, **Appendix B: Gitea Actions CI/CD**, **Appendix C: act_runner setup** |
| **[04-05-maintenance-procedures.md](./04-05-maintenance-procedures.md)** | Routine Care | Log rotation, dependency updates, scheduled DB optimizations |
| **[04-06-security-operations.md](./04-06-security-operations.md)** | Hardening & Audit | User access review, SSL renewals, vulnerability scanning, **Appendix A: SSH Setup**, **Appendix B: Secrets Management** |
| **[04-07-incident-response.md](./04-07-incident-response.md)** | Escalation | P0-P3 classifications, incident commander roles, Post-Incident Review |
| **[🚀 04-08-release-management-policy.md](./04-08-release-management-policy.md)** | Release Policy | SemVer, Git Flow, 5 Release Gates, Hotfix Process, Rollback Policy, CI/CD Pipeline |
| File | Purpose | Key Contents |
| --------------------------------------------------------------------------------- | ---------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------- |
| **[04-01-docker-compose.md](./04-01-docker-compose.md)** | Core Environment Setup | `.env` configs, Blue/Green Docker Compose, MariaDB & Redis optimization, **Appendix A: Live QNAP configs** (MariaDB, Redis/ES, NPM, Gitea, n8n) |
| **[04-02-backup-recovery.md](./04-02-backup-recovery.md)** | Disaster Recovery | RTO/RPO strategies, QNAP to ASUSTOR backup scripts, Restic/Mysqldump config |
| **[04-03-monitoring.md](./04-03-monitoring.md)** | Observability | Prometheus metrics, AlertManager rules, Grafana alerts |
| **[04-04-deployment-guide.md](./04-04-deployment-guide.md)** | Production Rollout | Blue-Green deployment scripts, **Appendix A: QNAP Container Station**, **Appendix B: Gitea Actions CI/CD**, **Appendix C: act_runner setup** |
| **[04-05-maintenance-procedures.md](./04-05-maintenance-procedures.md)** | Routine Care | Log rotation, dependency updates, scheduled DB optimizations |
| **[04-06-security-operations.md](./04-06-security-operations.md)** | Hardening & Audit | User access review, SSL renewals, vulnerability scanning, **Appendix A: SSH Setup**, **Appendix B: Secrets Management** |
| **[04-07-incident-response.md](./04-07-incident-response.md)** | Escalation | P0-P3 classifications, incident commander roles, Post-Incident Review |
| **[🚀 04-08-release-management-policy.md](./04-08-release-management-policy.md)** | Release Policy | SemVer, Git Flow, 5 Release Gates, Hotfix Process, Rollback Policy, CI/CD Pipeline |
### 🐳 Live Docker Compose Files (QNAP)
@@ -666,9 +666,7 @@
"footer": {
"countRows": false,
"fields": "",
"reducer": [
"sum"
],
"reducer": ["sum"],
"show": false
},
"showHeader": true,
@@ -729,11 +727,7 @@
],
"refresh": "10s",
"schemaVersion": 38,
"tags": [
"docker",
"monitoring",
"lcbp3"
],
"tags": ["docker", "monitoring", "lcbp3"],
"templating": {
"list": [
{
@@ -767,12 +761,8 @@
"allValue": ".+",
"current": {
"selected": true,
"text": [
"All"
],
"value": [
"$__all"
]
"text": ["All"],
"value": ["$__all"]
},
"datasource": {
"type": "prometheus",
@@ -807,4 +797,4 @@
"uid": "lcbp3-docker-metrics-logs",
"version": 5,
"weekStart": ""
}
}