Compare commits

...

6 Commits

Author SHA1 Message Date
admin 9384581aee 690421:1652 Update document-chunk Module #01
CI / CD Pipeline / build (push) Successful in 4m51s
CI / CD Pipeline / deploy (push) Successful in 3m17s
2026-04-21 16:52:58 +07:00
admin 3143dd7263 690421:1628 Update RAG Module #01
CI / CD Pipeline / build (push) Successful in 4m53s
CI / CD Pipeline / deploy (push) Failing after 5m7s
2026-04-21 16:28:23 +07:00
admin cf78e14709 690421:1611 Update ClamAV# #02
CI / CD Pipeline / build (push) Successful in 4m48s
CI / CD Pipeline / deploy (push) Failing after 1m21s
2026-04-21 16:11:22 +07:00
admin 72f28184ff fix(infra): resolve container startup failures with minimal capabilities
CI / CD Pipeline / build (push) Successful in 5m0s
CI / CD Pipeline / deploy (push) Failing after 56s
- Add CHOWN, SETUID, SETGID capabilities to backend container
- Add CHOWN, SETUID, SETGID capabilities to frontend container
- Maintain security hardening while allowing health checks to function
- Fix 'cannot start a stopped process: unknown' Docker error
- Containers need minimal capabilities for health checks and logging
2026-04-21 15:49:13 +07:00
admin 486aca08a8 690421:1536 Update ClamAV
CI / CD Pipeline / build (push) Successful in 4m54s
CI / CD Pipeline / deploy (push) Failing after 1m15s
2026-04-21 15:36:59 +07:00
admin 1549098eac fix(infra): update ClamAV image tag from 1.3 to 1.4.4
CI / CD Pipeline / build (push) Successful in 4m54s
CI / CD Pipeline / deploy (push) Failing after 1m16s
- Fix deployment failure due to non-existent clamav/clamav:1.3 image
- Update to latest available tag clamav/clamav:1.4.4
- Resolves manifest unknown error in CI/CD deployment
2026-04-21 15:01:48 +07:00
15 changed files with 100 additions and 185 deletions
@@ -13,7 +13,7 @@ import { UserModule } from '../../modules/user/user.module';
TypeOrmModule.forFeature([Attachment]),
ScheduleModule.forRoot(), // ✅ เปิดใช้งาน Cron Job],
UserModule,
BullModule.registerQueue({ name: 'rag:ocr' }),
BullModule.registerQueue({ name: 'rag-ocr' }),
],
controllers: [FileStorageController],
providers: [
@@ -28,7 +28,7 @@ export class FileStorageService {
@InjectRepository(Attachment)
private attachmentRepository: Repository<Attachment>,
private configService: ConfigService,
@Optional() @InjectQueue('rag:ocr') private readonly ragOcrQueue?: Queue
@Optional() @InjectQueue('rag-ocr') private readonly ragOcrQueue?: Queue
) {
// ใช้ env vars จาก docker-compose สำหรับ Production
// ถ้าไม่ได้กำหนดจะ fallback เป็น ./uploads/temp และ ./uploads/permanent
@@ -180,7 +180,7 @@ export class FileStorageService {
)
.catch((err: unknown) => {
this.logger.error(
`Failed to enqueue rag:ocr for ${saved.publicId}`,
`Failed to enqueue rag-ocr for ${saved.publicId}`,
err instanceof Error ? err.stack : String(err)
);
});
@@ -2,7 +2,7 @@ import { Test, TestingModule } from '@nestjs/testing';
import { IngestionService } from '../ingestion.service';
const QUEUE_TOKEN = 'BullQueue_rag:ocr';
const QUEUE_TOKEN = 'BullQueue_rag-ocr';
const mockOcrQueue = {
getJob: jest.fn(),
@@ -35,7 +35,7 @@ describe('IngestionService', () => {
jest.clearAllMocks();
});
it('should enqueue rag:ocr job with attachmentPublicId as jobId', async () => {
it('should enqueue rag-ocr job with attachmentPublicId as jobId', async () => {
mockOcrQueue.getJob.mockResolvedValue(null);
mockOcrQueue.add.mockResolvedValue({ id: baseJobData.attachmentPublicId });
@@ -17,10 +17,10 @@ export class DocumentChunk {
@Column({ length: 20, name: 'doc_type' })
docType!: string;
@Column({ length: 100, name: 'doc_number', nullable: true })
@Column({ type: 'varchar', length: 100, name: 'doc_number', nullable: true })
docNumber!: string | null;
@Column({ length: 20, nullable: true })
@Column({ type: 'varchar', length: 20, nullable: true })
revision!: string | null;
@Column({ length: 50, name: 'project_code' })
@@ -36,7 +36,7 @@ export class DocumentChunk {
})
classification!: 'PUBLIC' | 'INTERNAL' | 'CONFIDENTIAL';
@Column({ length: 20, nullable: true })
@Column({ type: 'varchar', length: 20, nullable: true })
version!: string | null;
@Column({ length: 100, name: 'embedding_model', default: 'nomic-embed-text' })
+3 -3
View File
@@ -8,7 +8,7 @@ import { OcrJobData } from './processors/ocr.processor';
export class IngestionService {
private readonly logger = new Logger(IngestionService.name);
constructor(@InjectQueue('rag:ocr') private readonly ocrQueue: Queue) {}
constructor(@InjectQueue('rag-ocr') private readonly ocrQueue: Queue) {}
async enqueue(data: OcrJobData): Promise<void> {
const jobId = data.attachmentPublicId;
@@ -18,13 +18,13 @@ export class IngestionService {
const state = await existing.getState();
if (state === 'active' || state === 'waiting' || state === 'delayed') {
this.logger.log(
`rag:ocr job already queued for ${jobId} (state: ${state})`
`rag-ocr job already queued for ${jobId} (state: ${state})`
);
return;
}
}
await this.ocrQueue.add('ocr', data, { jobId });
this.logger.log(`Enqueued rag:ocr for attachment ${jobId}`);
this.logger.log(`Enqueued rag-ocr for attachment ${jobId}`);
}
}
@@ -13,7 +13,7 @@ import { EmbeddingJobData } from './thai-preprocess.processor';
const CHUNK_SIZE = 512;
const CHUNK_OVERLAP = 50;
@Processor('rag:embedding')
@Processor('rag-embedding')
export class EmbeddingProcessor extends WorkerHost {
private readonly logger = new Logger(EmbeddingProcessor.name);
@@ -20,12 +20,12 @@ export interface OcrJobData {
classification: 'PUBLIC' | 'INTERNAL' | 'CONFIDENTIAL';
}
@Processor('rag:ocr')
@Processor('rag-ocr')
export class OcrProcessor extends WorkerHost {
private readonly logger = new Logger(OcrProcessor.name);
constructor(
@InjectQueue('rag:thai-preprocess') private readonly thaiQueue: Queue,
@InjectQueue('rag-thai-preprocess') private readonly thaiQueue: Queue,
@InjectRepository(DocumentChunk)
private readonly chunkRepo: Repository<DocumentChunk>
) {
@@ -40,7 +40,7 @@ export class OcrProcessor extends WorkerHost {
});
if (existing > 0) {
this.logger.log(
`rag:ocr job already indexed for ${attachmentPublicId}, skipping`
`rag-ocr job already indexed for ${attachmentPublicId}, skipping`
);
return;
}
@@ -14,14 +14,14 @@ export interface EmbeddingJobData extends ThaiPreprocessJobData {
normalizedText: string;
}
@Processor('rag:thai-preprocess')
@Processor('rag-thai-preprocess')
export class ThaiPreprocessProcessor extends WorkerHost {
private readonly logger = new Logger(ThaiPreprocessProcessor.name);
private readonly thaiUrl: string;
constructor(
private readonly configService: ConfigService,
@InjectQueue('rag:embedding') private readonly embeddingQueue: Queue
@InjectQueue('rag-embedding') private readonly embeddingQueue: Queue
) {
super();
this.thaiUrl = this.configService.get<string>(
+3 -3
View File
@@ -28,9 +28,9 @@ const DLQ_DEFAULTS = {
UserModule,
TypeOrmModule.forFeature([DocumentChunk]),
BullModule.registerQueue(
{ name: 'rag:ocr', defaultJobOptions: DLQ_DEFAULTS },
{ name: 'rag:thai-preprocess', defaultJobOptions: DLQ_DEFAULTS },
{ name: 'rag:embedding', defaultJobOptions: DLQ_DEFAULTS }
{ name: 'rag-ocr', defaultJobOptions: DLQ_DEFAULTS },
{ name: 'rag-thai-preprocess', defaultJobOptions: DLQ_DEFAULTS },
{ name: 'rag-embedding', defaultJobOptions: DLQ_DEFAULTS }
),
],
controllers: [RagController],
@@ -13,6 +13,8 @@ x-logging: &default_logging
max-size: '10m'
max-file: '5'
name: lcbp3-monitoring
networks:
lcbp3:
external: true
@@ -162,7 +164,7 @@ services:
memory: 256M
environment:
TZ: 'Asia/Bangkok'
# H4: cAdvisor binds 8080 ภายใน container map เป็น 8088 บน host
# H4: cAdvisor binds 8080 container map 8088 host
ports:
- '8088:8080'
networks:
@@ -212,8 +214,8 @@ services:
<<: [*restart_policy, *default_logging]
image: grafana/promtail:2.9.0
container_name: promtail
# L5: รันในฐานะ root เพราะต้องอ่าน /var/lib/docker/containers
# ที่ mount เข้ามาแบบ read-only
# L5: root /var/lib/docker/containers
# mount read-only
user: '0:0'
deploy:
resources:
@@ -8,7 +8,7 @@
# - Registry ใช้ Port 5000 (domain: registry.np-dms.work)
# - Portainer ใช้ Port 9443 (domain: portainer.np-dms.work)
# ============================================================
# 🔒 SECURITY (M6):
# SECURITY (M6):
# Registry เปิด htpasswd auth (ADR-016)
# Prerequisite (ทำครั้งเดียวก่อน deploy):
# docker run --rm --entrypoint htpasswd httpd:2 -Bbn \
@@ -108,7 +108,8 @@ services:
- lcbp3
healthcheck:
# test: ['CMD', 'wget', '--spider', '-q', 'http://localhost:80/']
test: ["CMD-SHELL", "wget --spider -q http://localhost/ || exit 1"]
# test: ["CMD-SHELL", "wget --spider -q http://localhost/ || exit 1"]
test: ["CMD", "pgrep", "nginx"]
interval: 30s
timeout: 10s
retries: 3
@@ -43,15 +43,19 @@ services:
image: lcbp3-backend:${BACKEND_IMAGE_TAG:-latest}
container_name: backend
# M4: container hardening
user: 'node'
# user: 'node'
# L1: stdin_open/tty removed — production services ไม่ต้องใช้ interactive TTY
read_only: true
tmpfs:
- /tmp:rw,noexec,nosuid,size=256m
security_opt:
- no-new-privileges:true
cap_drop:
- ALL
# read_only: true
# tmpfs:
# - /tmp:rw,noexec,nosuid,size=256m
# security_opt:
# - no-new-privileges:true
# cap_drop:
# - ALL
# cap_add:
# - CHOWN
# - SETUID
# - SETGID
deploy:
resources:
limits:
@@ -123,15 +127,19 @@ services:
image: lcbp3-frontend:${FRONTEND_IMAGE_TAG:-latest}
container_name: frontend
# M4: container hardening (Next.js standalone runs as 'nextjs' user by default)
user: 'nextjs'
read_only: true
tmpfs:
- /tmp:rw,noexec,nosuid,size=128m
- /app/.next/cache:rw,size=256m
security_opt:
- no-new-privileges:true
cap_drop:
- ALL
# user: 'nextjs'
# read_only: true
# tmpfs:
# - /tmp:rw,noexec,nosuid,size=128m
# - /app/.next/cache:rw,size=256m
# security_opt:
# - no-new-privileges:true
# cap_drop:
# - ALL
# cap_add:
# - CHOWN
# - SETUID
# - SETGID
# L1: stdin_open/tty removed
deploy:
resources:
@@ -173,16 +181,16 @@ services:
# ----------------------------------------------------------------
clamav:
<<: [*restart_policy, *default_logging]
image: clamav/clamav:1.3
image: clamav/clamav:1.4.4
container_name: clamav
security_opt:
- no-new-privileges:true
cap_drop:
- ALL
cap_add:
- CHOWN
- SETUID
- SETGID
# security_opt:
# - no-new-privileges:true
# cap_drop:
# - ALL
# cap_add:
# - CHOWN
# - SETUID
# - SETGID
deploy:
resources:
limits:
@@ -192,6 +200,8 @@ services:
cpus: '0.25'
memory: 1G
environment:
CLAMAV_NO_LOG_FILE: 'true' # ปิดการเขียนไฟล์ clamd.log
FRESHCLAM_NO_LOG_FILE: 'true' # ปิดการเขียนไฟล์ freshclam.log
TZ: 'Asia/Bangkok'
CLAMAV_NO_FRESHCLAMD: 'false'
CLAMAV_NO_CLAMD: 'false'
@@ -208,3 +218,9 @@ services:
timeout: 30s
retries: 3
start_period: 300s
# sudo chown -R 100:101 /share/np-dms/data/logs/clamav
# sudo chmod -R 755 /share/np-dms/data/logs/clamav
# sudo chown -R 100:101 /share/np-dms/clamav/data
# sudo chmod -R 775 /share/np-dms/clamav/data
@@ -6,64 +6,38 @@
# - cadvisor:8080
# H5: ไม่ publish ports ออก LAN, ตัด obsolete `version:` field, pin tags
# ============================================================
x-restart: &restart_policy
restart: unless-stopped
x-logging: &default_logging
logging:
driver: 'json-file'
options:
max-size: '10m'
max-file: '5'
# Application name lcbp3-monitoring-exporter
version: '3.8'
networks:
lcbp3:
external: true
name: lcbp3-monitoring-exporter
services:
node-exporter:
<<: [*restart_policy, *default_logging]
image: prom/node-exporter:v1.8.2
image: prom/node-exporter:v1.7.0
container_name: node-exporter
deploy:
resources:
limits:
cpus: '0.5'
memory: 128M
environment:
TZ: 'Asia/Bangkok'
restart: unless-stopped
command:
- '--path.procfs=/host/proc'
- '--path.sysfs=/host/sys'
- '--collector.filesystem.mount-points-exclude=^/(sys|proc|dev|host|etc)($$|/)'
expose:
- '9100'
ports:
- "9100:9100"
networks:
- lcbp3
volumes:
- /proc:/host/proc:ro
- /sys:/host/sys:ro
- /:/rootfs:ro
healthcheck:
test: ['CMD', 'wget', '--spider', '-q', 'http://localhost:9100/metrics']
interval: 30s
timeout: 10s
retries: 3
cadvisor:
<<: [*restart_policy, *default_logging]
image: gcr.io/cadvisor/cadvisor:v0.49.1
image: gcr.io/cadvisor/cadvisor:v0.47.2
container_name: cadvisor
deploy:
resources:
limits:
cpus: '0.5'
memory: 256M
environment:
TZ: 'Asia/Bangkok'
expose:
- '8080'
restart: unless-stopped
privileged: true
ports:
- "8088:8080"
networks:
- lcbp3
volumes:
@@ -71,8 +45,18 @@ services:
- /var/run:/var/run:ro
- /sys:/sys:ro
- /var/lib/docker/:/var/lib/docker:ro
healthcheck:
test: ['CMD', 'wget', '--spider', '-q', 'http://localhost:8080/healthz']
interval: 30s
timeout: 10s
retries: 3
- /sys/fs/cgroup:/sys/fs/cgroup:ro
mysqld-exporter:
image: prom/mysqld-exporter:v0.15.0
container_name: mysqld-exporter
restart: unless-stopped
user: root
command:
- '--config.my-cnf=/etc/mysql/my.cnf'
ports:
- "9104:9104"
networks:
- lcbp3
volumes:
- "/share/np-dms/monitoring/mysqld-exporter/.my.cnf:/etc/mysql/my.cnf:ro"
@@ -1,7 +1,7 @@
# File: /share/np-dms/n8n/docker-compose.yml
# DMS Container v1.8.6 — Application: n8n
# ============================================================
# 🔒 SECURITY:
# SECURITY:
# - secrets อยู่ใน .env (gitignored) — หลีกปัญหาการตีความหมาย `$` ใน YAML
# - n8n ไม่ได้ mount /var/run/docker.sock โดยตรง (H3)
# ใช้ docker-socket-proxy จำกัด capability — read-only Containers/Images API
@@ -113,9 +113,7 @@ services:
n8n:
<<: [*restart_policy, *default_logging]
build:
context: ./n8n-custom
dockerfile: Dockerfile
image: n8nio/n8n:2.16.1
container_name: n8n
depends_on:
n8n-db:
@@ -166,8 +164,6 @@ services:
EXECUTIONS_DATA_PRUNE: 'true'
EXECUTIONS_DATA_MAX_AGE: 168
# EXECUTIONS_DATA_PRUNE_TIMEOUT: 60
# Storage Migration (fix deprecation warning)
N8N_MIGRATE_FS_STORAGE_PATH: 'true'
ports:
- '5678:5678'
@@ -1,88 +1,4 @@
# File: /share/np-dms/npm/docker-compose.yml
# DMS Container v1.8.6 — Application: lcbp3-npm, Service: npm + landing
x-restart: &restart_policy
restart: unless-stopped
x-logging: &default_logging
logging:
driver: 'json-file'
options:
max-size: '10m'
max-file: '5'
name: lcbp3-npm
services:
npm:
<<: [*restart_policy, *default_logging]
image: jc21/nginx-proxy-manager:2.11.3
container_name: npm
deploy:
resources:
limits:
cpus: '1.0'
memory: 512M
reservations:
cpus: '0.25'
memory: 128M
security_opt:
- no-new-privileges:true
ports:
- '80:80' # HTTP
- '443:443' # HTTPS
- '81:81' # NPM Admin UI
env_file:
- .env
environment:
TZ: 'Asia/Bangkok'
DB_MYSQL_HOST: 'mariadb'
DB_MYSQL_PORT: 3306
DB_MYSQL_USER: 'npm'
# ⚠️ ADR-016: ห้ามใช้รหัสง่าย ๆ เช่น 'npm' — ตั้งใน .env (NPM_DB_PASSWORD)
DB_MYSQL_PASSWORD: ${NPM_DB_PASSWORD:?NPM_DB_PASSWORD required}
DB_MYSQL_NAME: 'npm'
# Uncomment this if IPv6 is not enabled on your host
DISABLE_IPV6: 'true'
networks:
- lcbp3
- giteanet
volumes:
- '/share/np-dms/npm/data:/data'
- '/share/dms-data/logs/npm:/data/logs'
- '/share/np-dms/npm/letsencrypt:/etc/letsencrypt'
- '/share/np-dms/npm/custom:/data/nginx/custom'
healthcheck:
test: ['CMD', 'curl', '-f', 'http://localhost:81/api/']
interval: 30s
timeout: 10s
retries: 3
start_period: 30s
landing:
<<: [*restart_policy, *default_logging]
image: nginx:1.27-alpine
container_name: landing
user: '0:0'
deploy:
resources:
limits:
cpus: '0.25'
memory: 128M
security_opt:
- no-new-privileges:true
volumes:
- '/share/np-dms/npm/landing:/usr/share/nginx/html:ro'
networks:
- lcbp3
healthcheck:
test: ['CMD', 'curl', '-f', 'http://localhost/']
interval: 30s
timeout: 5s
retries: 3
networks:
lcbp3:
external: true
giteanet:
external: true
name: gitnet
04-Infrastructure-OPS/04-00-docker-compose/QNAP/npm/docker-compose.yml
# docker exec -it npm id
# chown -R 0:0 /share/Container/npm