Compare commits
24 Commits
d7e48448e0
..
main
| Author | SHA1 | Date | |
|---|---|---|---|
| 83b6620093 | |||
| a57fef4d44 | |||
| 9384581aee | |||
| 3143dd7263 | |||
| cf78e14709 | |||
| 72f28184ff | |||
| 486aca08a8 | |||
| 1549098eac | |||
| 486bf3b9a4 | |||
| e2753e4eac | |||
| 2e89761b0f | |||
| 13745e5874 | |||
| 733f3c3987 | |||
| c894c08fb8 | |||
| 657698558b | |||
| 844caf477d | |||
| feb1319fb3 | |||
| d422b040d9 | |||
| 29a6509c58 | |||
| 8b658e8530 | |||
| 0b7dd466ec | |||
| e5db7511c6 | |||
| b7d637642a | |||
| 5e4e0444ed |
@@ -3,7 +3,8 @@
|
||||
# audit-skills.sh - Verify skill completeness and health
|
||||
# Part of LCBP3-DMS Phase 2 improvements
|
||||
|
||||
set -euo pipefail
|
||||
set -uo pipefail
|
||||
# Note: no -e — we let per-skill checks accumulate issues without terminating
|
||||
|
||||
# Colors for output
|
||||
RED='\033[0;31m'
|
||||
@@ -13,7 +14,7 @@ BLUE='\033[0;34m'
|
||||
NC='\033[0m' # No Color
|
||||
|
||||
# Base directory
|
||||
BASE_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")/../.." && pwd)"
|
||||
BASE_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")/../../.." && pwd)"
|
||||
AGENTS_DIR="$BASE_DIR/.agents"
|
||||
SKILLS_DIR="$AGENTS_DIR/skills"
|
||||
|
||||
@@ -25,9 +26,9 @@ echo
|
||||
check_skill_health() {
|
||||
local skill_dir="$1"
|
||||
local skill_name="$(basename "$skill_dir")"
|
||||
|
||||
|
||||
local issues=0
|
||||
|
||||
|
||||
# Check for SKILL.md
|
||||
if [[ -f "$skill_dir/SKILL.md" ]]; then
|
||||
echo -e "${GREEN} OK${NC}: $skill_name/SKILL.md"
|
||||
@@ -35,7 +36,7 @@ check_skill_health() {
|
||||
echo -e "${RED} MISSING${NC}: $skill_name/SKILL.md"
|
||||
((issues++))
|
||||
fi
|
||||
|
||||
|
||||
# Check for templates directory (optional)
|
||||
if [[ -d "$skill_dir/templates" ]]; then
|
||||
template_count=$(find "$skill_dir/templates" -name "*.md" -type f | wc -l)
|
||||
@@ -45,7 +46,7 @@ check_skill_health() {
|
||||
echo -e "${YELLOW} EMPTY${NC}: $skill_name/templates (no files)"
|
||||
fi
|
||||
fi
|
||||
|
||||
|
||||
# Check SKILL.md content if exists
|
||||
local skill_file="$skill_dir/SKILL.md"
|
||||
if [[ -f "$skill_file" ]]; then
|
||||
@@ -56,27 +57,21 @@ check_skill_health() {
|
||||
echo -e " ${GREEN} FIELD${NC}: $field"
|
||||
else
|
||||
echo -e " ${RED} MISSING FIELD${NC}: $field"
|
||||
((issues++))
|
||||
((issues++)) || true
|
||||
fi
|
||||
done
|
||||
|
||||
# Check for Role section
|
||||
if grep -q "^## Role$" "$skill_file"; then
|
||||
echo -e " ${GREEN} SECTION${NC}: Role"
|
||||
else
|
||||
echo -e " ${YELLOW} MISSING SECTION${NC}: Role"
|
||||
((issues++))
|
||||
fi
|
||||
|
||||
# Check for Task section
|
||||
if grep -q "^## Task$" "$skill_file"; then
|
||||
echo -e " ${GREEN} SECTION${NC}: Task"
|
||||
else
|
||||
echo -e " ${YELLOW} MISSING SECTION${NC}: Task"
|
||||
((issues++))
|
||||
|
||||
# Check for LCBP3 context reference (speckit-* skills only)
|
||||
if [[ "$skill_name" == speckit-* ]]; then
|
||||
if grep -q '_LCBP3-CONTEXT\.md' "$skill_file"; then
|
||||
echo -e " ${GREEN} CONTEXT${NC}: LCBP3 appendix referenced"
|
||||
else
|
||||
echo -e " ${YELLOW} MISSING${NC}: LCBP3 context reference"
|
||||
((issues++)) || true
|
||||
fi
|
||||
fi
|
||||
fi
|
||||
|
||||
|
||||
return $issues
|
||||
}
|
||||
|
||||
@@ -84,7 +79,15 @@ check_skill_health() {
|
||||
get_skill_version() {
|
||||
local skill_file="$1"
|
||||
if [[ -f "$skill_file" ]]; then
|
||||
grep "^version:" "$skill_file" | head -1 | sed 's/version: *//' || echo "unknown"
|
||||
# Match 'version: X.Y.Z' (or quoted) at a LINE START only; ignore nested ` version:` fields.
|
||||
# Output: bare X.Y.Z with no quotes/whitespace.
|
||||
local raw
|
||||
raw=$(grep -E "^version:[[:space:]]*['\"]?[0-9]+\.[0-9]+\.[0-9]+" "$skill_file" | head -1 || true)
|
||||
if [[ -n "$raw" ]]; then
|
||||
printf '%s' "$raw" | sed -E "s/^version:[[:space:]]*['\"]?([0-9]+\.[0-9]+\.[0-9]+).*/\1/"
|
||||
else
|
||||
echo "unknown"
|
||||
fi
|
||||
else
|
||||
echo "no_file"
|
||||
fi
|
||||
@@ -114,15 +117,19 @@ SKILL_SUMMARY=()
|
||||
|
||||
for skill_dir in "${SKILL_DIRS[@]}"; do
|
||||
skill_name="$(basename "$skill_dir")"
|
||||
# Skip non-skill entries (e.g. _LCBP3-CONTEXT.md would not match here; safe)
|
||||
[[ "$skill_name" == _* ]] && continue
|
||||
echo "Auditing: $skill_name"
|
||||
echo "------------------------"
|
||||
|
||||
|
||||
set +e
|
||||
check_skill_health "$skill_dir"
|
||||
issues=$?
|
||||
|
||||
set -u
|
||||
|
||||
skill_version=$(get_skill_version "$skill_dir/SKILL.md")
|
||||
SKILL_SUMMARY+=("$skill_name:$issues:$skill_version")
|
||||
|
||||
|
||||
TOTAL_ISSUES=$((TOTAL_ISSUES + issues))
|
||||
echo
|
||||
done
|
||||
@@ -147,15 +154,15 @@ echo
|
||||
# Check skills.md version consistency
|
||||
SKILLS_VERSION_FILE="$SKILLS_DIR/VERSION"
|
||||
if [[ -f "$SKILLS_VERSION_FILE" ]]; then
|
||||
global_version=$(grep "^version:" "$SKILLS_VERSION_FILE" | sed 's/version: *//')
|
||||
global_version=$(grep "^version:" "$SKILLS_VERSION_FILE" | sed 's/version: *//' | tr -d '\r\n ')
|
||||
echo "Global skills version: v$global_version"
|
||||
echo
|
||||
|
||||
|
||||
# Check for version mismatches
|
||||
echo "Version Consistency Check:"
|
||||
echo "------------------------"
|
||||
VERSION_MISMATCHES=0
|
||||
|
||||
|
||||
for summary in "${SKILL_SUMMARY[@]}"; do
|
||||
IFS=':' read -r name issues version <<< "$summary"
|
||||
if [[ "$version" != "unknown" && "$version" != "no_file" && "$version" != "$global_version" ]]; then
|
||||
@@ -163,7 +170,7 @@ if [[ -f "$SKILLS_VERSION_FILE" ]]; then
|
||||
((VERSION_MISMATCHES++))
|
||||
fi
|
||||
done
|
||||
|
||||
|
||||
if [[ $VERSION_MISMATCHES -eq 0 ]]; then
|
||||
echo -e "${GREEN} All skills match global version${NC}"
|
||||
fi
|
||||
|
||||
@@ -12,11 +12,11 @@ YELLOW='\033[1;33m'
|
||||
NC='\033[0m' # No Color
|
||||
|
||||
# Base directory
|
||||
BASE_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")/../.." && pwd)"
|
||||
BASE_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")/../../.." && pwd)"
|
||||
AGENTS_DIR="$BASE_DIR/.agents"
|
||||
|
||||
# Expected version (should match LCBP3 version)
|
||||
EXPECTED_VERSION="1.8.6"
|
||||
EXPECTED_VERSION="1.8.9"
|
||||
|
||||
echo "=== .agents Version Validation ==="
|
||||
echo "Base directory: $BASE_DIR"
|
||||
@@ -27,7 +27,7 @@ echo
|
||||
extract_version() {
|
||||
local file="$1"
|
||||
local pattern="$2"
|
||||
|
||||
|
||||
if [[ -f "$file" ]]; then
|
||||
grep -o "$pattern" "$file" | head -1 | sed 's/.*\([0-9]\+\.[0-9]\+\.[0-9]\+\).*/\1/' || echo "NOT_FOUND"
|
||||
else
|
||||
@@ -37,10 +37,8 @@ extract_version() {
|
||||
|
||||
# Files to check
|
||||
declare -A FILES_TO_CHECK=(
|
||||
["$AGENTS_DIR/README.md"]="Version: \([0-9]\+\.[0-9]\+\.[0-9]\+\)"
|
||||
["$AGENTS_DIR/skills/VERSION"]="version: \([0-9]\+\.[0-9]\+\.[0-9]\+\)"
|
||||
["$AGENTS_DIR/rules/00-project-context.md"]="Version: \([0-9]\+\.[0-9]\+\.[0-9]\+\)"
|
||||
["$AGENTS_DIR/skills/skills.md"]="V\([0-9]\+\.[0-9]\+\.[0-9]\+\)"
|
||||
["$AGENTS_DIR/skills/skills.md"]="[Vv]\([0-9]\+\.[0-9]\+\.[0-9]\+\)"
|
||||
)
|
||||
|
||||
# Track issues
|
||||
@@ -52,9 +50,9 @@ echo
|
||||
for file in "${!FILES_TO_CHECK[@]}"; do
|
||||
pattern="${FILES_TO_CHECK[$file]}"
|
||||
relative_path="${file#$BASE_DIR/}"
|
||||
|
||||
|
||||
version=$(extract_version "$file" "$pattern")
|
||||
|
||||
|
||||
if [[ "$version" == "NOT_FOUND" ]] || [[ "$version" == "FILE_NOT_FOUND" ]]; then
|
||||
echo -e "${RED} ERROR${NC}: $relative_path - Version not found"
|
||||
((ISSUES++))
|
||||
|
||||
@@ -2,16 +2,16 @@
|
||||
# Part of LCBP3-DMS Phase 2 improvements
|
||||
|
||||
param(
|
||||
[string]$BaseDir = (Split-Path -Parent (Split-Path -Parent $PSScriptRoot))
|
||||
[string]$BaseDir = (Split-Path -Parent (Split-Path -Parent (Split-Path -Parent $PSScriptRoot)))
|
||||
)
|
||||
|
||||
# Colors for output
|
||||
# Map to ConsoleColor enum (Write-Host expects enum, not ANSI strings)
|
||||
$Colors = @{
|
||||
Red = "`e[0;31m"
|
||||
Green = "`e[0;32m"
|
||||
Yellow = "`e[1;33m"
|
||||
Blue = "`e[0;34m"
|
||||
NoColor = "`e[0m"
|
||||
Red = 'Red'
|
||||
Green = 'Green'
|
||||
Yellow = 'Yellow'
|
||||
Blue = 'Blue'
|
||||
NoColor = 'Gray'
|
||||
}
|
||||
|
||||
$AgentsDir = Join-Path $BaseDir ".agents"
|
||||
@@ -26,10 +26,10 @@ function Test-SkillHealth {
|
||||
param(
|
||||
[string]$SkillDir
|
||||
)
|
||||
|
||||
|
||||
$skillName = Split-Path $SkillDir -Leaf
|
||||
$issues = 0
|
||||
|
||||
|
||||
# Check for SKILL.md
|
||||
$skillFile = Join-Path $SkillDir "SKILL.md"
|
||||
if (Test-Path $skillFile) {
|
||||
@@ -38,7 +38,7 @@ function Test-SkillHealth {
|
||||
Write-Host " MISSING: $skillName/SKILL.md" -ForegroundColor $Colors.Red
|
||||
$issues++
|
||||
}
|
||||
|
||||
|
||||
# Check for templates directory (optional)
|
||||
$templatesDir = Join-Path $SkillDir "templates"
|
||||
if (Test-Path $templatesDir) {
|
||||
@@ -49,39 +49,34 @@ function Test-SkillHealth {
|
||||
Write-Host " EMPTY: $skillName/templates (no files)" -ForegroundColor $Colors.Yellow
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
# Check SKILL.md content if exists
|
||||
if (Test-Path $skillFile) {
|
||||
$content = Get-Content $skillFile -Raw
|
||||
|
||||
|
||||
# Check for required front matter fields
|
||||
$requiredFields = @("name", "description", "version")
|
||||
$requiredFields = @('name', 'description', 'version')
|
||||
foreach ($field in $requiredFields) {
|
||||
if ($content -match "^$field:") {
|
||||
$pattern = "(?m)^${field}:"
|
||||
if ($content -match $pattern) {
|
||||
Write-Host " FIELD: $field" -ForegroundColor $Colors.Green
|
||||
} else {
|
||||
Write-Host " MISSING FIELD: $field" -ForegroundColor $Colors.Red
|
||||
$issues++
|
||||
}
|
||||
}
|
||||
|
||||
# Check for Role section
|
||||
if ($content -match "^## Role$") {
|
||||
Write-Host " SECTION: Role" -ForegroundColor $Colors.Green
|
||||
} else {
|
||||
Write-Host " MISSING SECTION: Role" -ForegroundColor $Colors.Yellow
|
||||
$issues++
|
||||
}
|
||||
|
||||
# Check for Task section
|
||||
if ($content -match "^## Task$") {
|
||||
Write-Host " SECTION: Task" -ForegroundColor $Colors.Green
|
||||
} else {
|
||||
Write-Host " MISSING SECTION: Task" -ForegroundColor $Colors.Yellow
|
||||
$issues++
|
||||
|
||||
# Check for LCBP3 context reference (speckit-* skills)
|
||||
if ($skillName -like 'speckit-*') {
|
||||
if ($content -match '_LCBP3-CONTEXT\.md') {
|
||||
Write-Host " CONTEXT: LCBP3 appendix referenced" -ForegroundColor $Colors.Green
|
||||
} else {
|
||||
Write-Host " MISSING: LCBP3 context reference" -ForegroundColor $Colors.Yellow
|
||||
$issues++
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
return $issues
|
||||
}
|
||||
|
||||
@@ -90,11 +85,11 @@ function Get-SkillVersion {
|
||||
param(
|
||||
[string]$SkillFile
|
||||
)
|
||||
|
||||
|
||||
if (Test-Path $SkillFile) {
|
||||
try {
|
||||
$content = Get-Content $SkillFile -Raw
|
||||
if ($content -match "^version:\s*(.+)") {
|
||||
if ($content -match "(?m)^version:\s*['""]?([0-9]+\.[0-9]+\.[0-9]+)['""]?") {
|
||||
return $matches[1].Trim()
|
||||
}
|
||||
} catch {
|
||||
@@ -127,16 +122,16 @@ foreach ($skillDir in $skillDirs) {
|
||||
$skillName = $skillDir.Name
|
||||
Write-Host "Auditing: $skillName"
|
||||
Write-Host "------------------------"
|
||||
|
||||
|
||||
$issues = Test-SkillHealth -SkillDir $skillDir.FullName
|
||||
|
||||
|
||||
$skillVersion = Get-SkillVersion -SkillFile (Join-Path $skillDir.FullName "SKILL.md")
|
||||
$skillSummary += @{
|
||||
Name = $skillName
|
||||
Issues = $issues
|
||||
Version = $skillVersion
|
||||
}
|
||||
|
||||
|
||||
$totalIssues += $issues
|
||||
Write-Host ""
|
||||
}
|
||||
@@ -165,19 +160,19 @@ if (Test-Path $skillsVersionFile) {
|
||||
$globalVersion = $matches[1].Trim()
|
||||
Write-Host "Global skills version: v$globalVersion"
|
||||
Write-Host ""
|
||||
|
||||
|
||||
# Check for version mismatches
|
||||
Write-Host "Version Consistency Check:"
|
||||
Write-Host "------------------------"
|
||||
$versionMismatches = 0
|
||||
|
||||
|
||||
foreach ($summary in $skillSummary) {
|
||||
if ($summary.Version -ne "unknown" -and $summary.Version -ne "no_file" -and $summary.Version -ne $globalVersion) {
|
||||
Write-Host " MISMATCH: $($summary.Name) is v$($summary.Version), global is v$globalVersion" -ForegroundColor $Colors.Yellow
|
||||
$versionMismatches++
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
if ($versionMismatches -eq 0) {
|
||||
Write-Host " All skills match global version" -ForegroundColor $Colors.Green
|
||||
}
|
||||
|
||||
@@ -2,16 +2,16 @@
|
||||
# Part of LCBP3-DMS Phase 2 improvements
|
||||
|
||||
param(
|
||||
[string]$BaseDir = (Split-Path -Parent (Split-Path -Parent $PSScriptRoot)),
|
||||
[string]$ExpectedVersion = "1.8.6"
|
||||
[string]$BaseDir = (Split-Path -Parent (Split-Path -Parent (Split-Path -Parent $PSScriptRoot))),
|
||||
[string]$ExpectedVersion = "1.8.9"
|
||||
)
|
||||
|
||||
# Colors for output
|
||||
# Map to ConsoleColor enum (Write-Host expects enum, not ANSI)
|
||||
$Colors = @{
|
||||
Red = "`e[0;31m"
|
||||
Green = "`e[0;32m"
|
||||
Yellow = "`e[1;33m"
|
||||
NoColor = "`e[0m"
|
||||
Red = 'Red'
|
||||
Green = 'Green'
|
||||
Yellow = 'Yellow'
|
||||
NoColor = 'Gray'
|
||||
}
|
||||
|
||||
$AgentsDir = Join-Path $BaseDir ".agents"
|
||||
@@ -27,7 +27,7 @@ function Get-VersionFromFile {
|
||||
[string]$FilePath,
|
||||
[string]$Pattern
|
||||
)
|
||||
|
||||
|
||||
if (Test-Path $FilePath) {
|
||||
try {
|
||||
$content = Get-Content $FilePath -Raw
|
||||
@@ -46,9 +46,7 @@ function Get-VersionFromFile {
|
||||
|
||||
# Files to check
|
||||
$FilesToCheck = @{
|
||||
(Join-Path $AgentsDir "README.md") = "Version: ([0-9]+\.[0-9]+\.[0-9]+)"
|
||||
(Join-Path $AgentsDir "skills\VERSION") = "version: ([0-9]+\.[0-9]+\.[0-9]+)"
|
||||
(Join-Path $AgentsDir "rules\00-project-context.md") = "Version: ([0-9]+\.[0-9]+\.[0-9]+)"
|
||||
(Join-Path $AgentsDir "skills\skills.md") = "V([0-9]+\.[0-9]+\.[0-9]+)"
|
||||
}
|
||||
|
||||
@@ -61,9 +59,9 @@ Write-Host ""
|
||||
foreach ($file in $FilesToCheck.Keys) {
|
||||
$pattern = $FilesToCheck[$file]
|
||||
$relativePath = $file.Replace($BaseDir + "\", "")
|
||||
|
||||
|
||||
$version = Get-VersionFromFile -FilePath $file -Pattern $pattern
|
||||
|
||||
|
||||
if ($version -eq "NOT_FOUND" -or $version -eq "FILE_NOT_FOUND") {
|
||||
Write-Host " ERROR: $relativePath - Version not found" -ForegroundColor $Colors.Red
|
||||
$Issues++
|
||||
|
||||
@@ -0,0 +1,109 @@
|
||||
# `.agents/skills/` — LCBP3 Agent Skill Pack
|
||||
|
||||
**Version:** 1.8.9 | **Last Updated:** 2026-04-22 | **Total Skills:** 20
|
||||
|
||||
Agent skills for AI-assisted development in **Windsurf IDE** (and compatible agents: Codex CLI, opencode, Amp, Antigravity, AGENTS.md-aware tools).
|
||||
|
||||
---
|
||||
|
||||
## 📂 Layout
|
||||
|
||||
```
|
||||
.agents/skills/
|
||||
├── VERSION # Single source of truth for skill-pack version
|
||||
├── skills.md # Overview + dependency matrix + health monitoring
|
||||
├── _LCBP3-CONTEXT.md # Shared LCBP3 context injected into every speckit-* skill
|
||||
├── README.md # (this file)
|
||||
├── nestjs-best-practices/ # Backend rules (40 rules across 10 categories)
|
||||
├── next-best-practices/ # Frontend rules (Next.js 15+)
|
||||
└── speckit-*/ # 18 workflow skills (spec → plan → tasks → implement → …)
|
||||
```
|
||||
|
||||
Each skill directory contains:
|
||||
|
||||
- `SKILL.md` — frontmatter (`name`, `description`, `version: 1.8.9`, `scope`, `depends-on`, `handoffs`) + instructions
|
||||
- `templates/` _(optional)_ — artifact templates (spec/plan/tasks/checklist)
|
||||
- `rules/` _(nestjs only)_ — individual rule files grouped by prefix (`arch-`, `security-`, `db-`, etc.)
|
||||
|
||||
---
|
||||
|
||||
## 🚀 How Windsurf Invokes These Skills
|
||||
|
||||
Windsurf exposes two entry points:
|
||||
|
||||
1. **Skill tool** — Windsurf discovers skills by scanning `.agents/skills/*/SKILL.md` frontmatter. Skills marked `user-invocable: false` are used silently by Cascade.
|
||||
2. **Slash commands** — `.windsurf/workflows/*.md` wraps each skill as a slash command (e.g. `/04-speckit.plan`). The workflow file is short; the heavy lifting is delegated to the skill via `skill` tool.
|
||||
|
||||
Both paths end up executing the same `SKILL.md` instructions.
|
||||
|
||||
---
|
||||
|
||||
## 🧭 Typical Flow
|
||||
|
||||
```
|
||||
/01-speckit.constitution → AGENTS.md / product vision
|
||||
/02-speckit.specify → specs/feat-XXX/spec.md
|
||||
/03-speckit.clarify → updates spec.md (up to 5 targeted questions)
|
||||
/04-speckit.plan → specs/feat-XXX/plan.md + data-model.md + contracts/
|
||||
/05-speckit.tasks → specs/feat-XXX/tasks.md
|
||||
/06-speckit.analyze → cross-artifact consistency report (read-only)
|
||||
/07-speckit.implement → executes tasks with Ironclad Protocols (Blast Radius + Strangler + TDD)
|
||||
/08-speckit.checker → pnpm lint / typecheck / markdown-lint
|
||||
/09-speckit.tester → pnpm test + coverage gates (Backend 70%+, Business Logic 80%+)
|
||||
/10-speckit.reviewer → code review with Tier 1/2/3 classification
|
||||
/11-speckit.validate → UAT / acceptance-criteria.md
|
||||
```
|
||||
|
||||
Use `/00-speckit.all` to run specify → clarify → plan → tasks → analyze in one go.
|
||||
|
||||
---
|
||||
|
||||
## 🛠️ Helper Scripts
|
||||
|
||||
From repo root:
|
||||
|
||||
| Script | Purpose |
|
||||
| --- | --- |
|
||||
| `./.agents/scripts/bash/check-prerequisites.sh --json` | Emit `FEATURE_DIR` + `AVAILABLE_DOCS` for a feature branch |
|
||||
| `./.agents/scripts/bash/setup-plan.sh --json` | Emit `FEATURE_SPEC`, `IMPL_PLAN`, `SPECS_DIR`, `BRANCH` |
|
||||
| `./.agents/scripts/bash/update-agent-context.sh windsurf` | Append tech entries to `AGENTS.md` |
|
||||
| `./.agents/scripts/bash/audit-skills.sh` | Validate all `SKILL.md` frontmatter + presence |
|
||||
| `./.agents/scripts/bash/validate-versions.sh` | Version consistency check |
|
||||
| `./.agents/scripts/bash/sync-workflows.sh` | Verify every skill has a `.windsurf/workflows/*.md` wrapper |
|
||||
|
||||
All scripts mirror to `.agents/scripts/powershell/*.ps1` for Windows.
|
||||
|
||||
---
|
||||
|
||||
## ⚠️ Tier 1 Non-Negotiables (auto-enforced)
|
||||
|
||||
- ADR-019 — `publicId` exposed directly; no `parseInt` / `Number` / `+` on UUID; no `id ?? ''` fallback
|
||||
- ADR-009 — edit SQL schema directly, no TypeORM migrations
|
||||
- ADR-016 — JWT + CASL on every mutation; `Idempotency-Key` required; ClamAV two-phase upload
|
||||
- ADR-018 — AI via DMS API only (Ollama on Admin Desktop; no direct DB/storage)
|
||||
- ADR-007 — layered error classification (Validation / Business / System)
|
||||
- Zero `any`, zero `console.log` (use `Logger`)
|
||||
|
||||
See [`_LCBP3-CONTEXT.md`](./_LCBP3-CONTEXT.md) for the complete list.
|
||||
|
||||
---
|
||||
|
||||
## 🤝 Extending
|
||||
|
||||
To add a new skill:
|
||||
|
||||
1. Create `NAME/SKILL.md` with frontmatter: `name`, `description`, `version: 1.8.9`, `scope`, `depends-on`.
|
||||
2. Append an LCBP3 context reference pointing to `_LCBP3-CONTEXT.md`.
|
||||
3. Wrap with `.windsurf/workflows/NAME.md` so it becomes a slash command.
|
||||
4. Update [`skills.md`](./skills.md) dependency matrix.
|
||||
5. Run `./.agents/scripts/bash/audit-skills.sh` → must pass.
|
||||
|
||||
---
|
||||
|
||||
## 📚 References
|
||||
|
||||
- **Canonical rules:** `AGENTS.md` (repo root)
|
||||
- **Product vision:** `specs/00-Overview/00-03-product-vision.md`
|
||||
- **ADRs:** `specs/06-Decision-Records/`
|
||||
- **Engineering guidelines:** `specs/05-Engineering-Guidelines/`
|
||||
- **Contributing:** `CONTRIBUTING.md`
|
||||
+11
-2
@@ -1,10 +1,19 @@
|
||||
# Speckit Skills Version
|
||||
|
||||
version: 1.8.6
|
||||
release_date: 2026-04-14
|
||||
version: 1.8.9
|
||||
release_date: 2026-04-22
|
||||
|
||||
## Changelog
|
||||
|
||||
### 1.8.9 (2026-04-22)
|
||||
- Full LCBP3-native rebuild of `.agents/skills/`
|
||||
- Fixed ADR-019 drift (removed `@Expose({ name: 'id' })` and `id ?? ''` fallback patterns)
|
||||
- Replaced all dead references (`GEMINI.md` → `AGENTS.md`, v1.7.0 → v1.8.0 schema, `.specify/memory/` → `AGENTS.md`)
|
||||
- Added real helper scripts under `.agents/scripts/bash/` and `.agents/scripts/powershell/`
|
||||
- Added ADR-007/008/020/021 coverage
|
||||
- New rules: workflow-engine, file-two-phase-upload, ai-boundary, i18n, file-upload, workflow-banner
|
||||
- Standardized frontmatter across all 20 skills (`version: 1.8.9`)
|
||||
|
||||
### 1.8.6 (2026-04-14)
|
||||
- Version alignment with LCBP3-DMS v1.8.6
|
||||
- Complete skill implementations for all 20 skills
|
||||
|
||||
@@ -0,0 +1,91 @@
|
||||
# 🧭 LCBP3-DMS Context Appendix (Shared)
|
||||
|
||||
> This file is included/referenced by every Speckit skill as the authoritative project context.
|
||||
> Skills **must** load it (or the files it links to) before generating any artifact.
|
||||
|
||||
**Project:** NAP-DMS (LCBP3) — Laem Chabang Port Phase 3 Document Management System
|
||||
**Stack:** NestJS 11 + Next.js 16 + TypeScript + MariaDB 11.8 + Redis + BullMQ + Elasticsearch + Ollama (on-prem AI)
|
||||
**Version:** 1.8.9 (2026-04-18)
|
||||
|
||||
---
|
||||
|
||||
## 📌 Canonical Rule Sources (read in this order)
|
||||
|
||||
1. **`AGENTS.md`** (repo root) — primary rule file for AI agents; supersedes legacy `GEMINI.md`.
|
||||
2. **`specs/06-Decision-Records/`** — architectural decisions (22 ADRs); ADR priority > Engineering Guidelines.
|
||||
3. **`specs/05-Engineering-Guidelines/`** — backend/frontend/testing/i18n/git patterns.
|
||||
4. **`specs/00-Overview/00-02-glossary.md`** — domain terminology (Correspondence / RFA / Transmittal / Circulation).
|
||||
5. **`specs/00-Overview/00-03-product-vision.md`** — project constitution (Vision, Strategic Pillars, Guardrails).
|
||||
6. **`CONTRIBUTING.md`** — spec writing standards, PR template, review levels.
|
||||
7. **`README.md`** — technology stack + getting started.
|
||||
|
||||
---
|
||||
|
||||
## 🔴 Tier 1 Non-Negotiables
|
||||
|
||||
- **ADR-019 UUID:** `publicId: string` exposed directly — **no** `@Expose({ name: 'id' })` rename; **no** `parseInt`/`Number`/`+` on UUID; **no** `id ?? ''` fallback in frontend.
|
||||
- **ADR-009:** No TypeORM migrations — edit `specs/03-Data-and-Storage/lcbp3-v1.8.0-schema-02-tables.sql` or add a `deltas/*.sql` file.
|
||||
- **ADR-016 Security:** JWT + CASL 4-Level RBAC; `@UseGuards(JwtAuthGuard, CaslAbilityGuard)` on every mutation controller; `ThrottlerGuard` on auth; bcrypt 12 rounds; `Idempotency-Key` required on POST/PUT/PATCH.
|
||||
- **ADR-002 Document Numbering:** Redis Redlock + TypeORM `@VersionColumn` (double-lock). Never use application-side counter alone.
|
||||
- **ADR-008 Notifications:** BullMQ queue — never inline email/notification in a request thread.
|
||||
- **ADR-018 AI Boundary:** Ollama on Admin Desktop only; AI → DMS API → DB (never direct DB/storage). Human-in-the-loop validation required.
|
||||
- **ADR-007 Error Handling:** Layered (Validation / Business / System); `BusinessException` hierarchy; user-friendly `userMessage` + `recoveryAction`; technical stack only in logs.
|
||||
- **TypeScript Strict:** Zero `any`, zero `console.log` (use NestJS `Logger`).
|
||||
- **i18n:** No hardcoded Thai/English strings in components — use i18n keys (see `05-08-i18n-guidelines.md`).
|
||||
- **File Upload:** Two-phase (Temp → ClamAV → Permanent), whitelist `PDF/DWG/DOCX/XLSX/ZIP`, max 50MB, `StorageService` only.
|
||||
|
||||
---
|
||||
|
||||
## 🏷️ Domain Glossary (reject generic terms)
|
||||
|
||||
| ✅ Use | ❌ Don't Use |
|
||||
| --- | --- |
|
||||
| Correspondence | Letter, Communication, Document |
|
||||
| RFA | Approval Request, Submit for Approval |
|
||||
| Transmittal | Delivery Note, Cover Letter |
|
||||
| Circulation | Distribution, Routing |
|
||||
| Shop Drawing | Construction Drawing |
|
||||
| Contract Drawing | Design Drawing, Blueprint |
|
||||
| Workflow Engine | Approval Flow, Process Engine |
|
||||
| Document Numbering | Document ID, Auto Number |
|
||||
|
||||
---
|
||||
|
||||
## 📁 Key Files for Generating / Validating Artifacts
|
||||
|
||||
| When you need... | Read |
|
||||
| --- | --- |
|
||||
| A new feature spec | `.agents/skills/speckit-specify/templates/spec-template.md` + `specs/01-Requirements/01-06-edge-cases-and-rules.md` |
|
||||
| A plan | `.agents/skills/speckit-plan/templates/plan-template.md` + relevant ADRs |
|
||||
| Task breakdown | `.agents/skills/speckit-tasks/templates/tasks-template.md` + existing patterns in `specs/08-Tasks/` |
|
||||
| Acceptance criteria / UAT | `specs/01-Requirements/01-05-acceptance-criteria.md` |
|
||||
| Schema / table definition | `specs/03-Data-and-Storage/lcbp3-v1.8.0-schema-02-tables.sql` + `03-01-data-dictionary.md` |
|
||||
| RBAC / permissions | `specs/03-Data-and-Storage/lcbp3-v1.8.0-seed-permissions.sql` + `01-02-01-rbac-matrix.md` |
|
||||
| Release / hotfix | `specs/04-Infrastructure-OPS/04-08-release-management-policy.md` |
|
||||
|
||||
---
|
||||
|
||||
## 🛠️ Helper Scripts (real paths in this repo)
|
||||
|
||||
- `./.agents/scripts/bash/check-prerequisites.sh` / `powershell/*.ps1`
|
||||
- `./.agents/scripts/bash/setup-plan.sh`
|
||||
- `./.agents/scripts/bash/update-agent-context.sh windsurf`
|
||||
- `./.agents/scripts/bash/audit-skills.sh`
|
||||
- `./.agents/scripts/bash/validate-versions.sh`
|
||||
- `./.agents/scripts/bash/sync-workflows.sh`
|
||||
|
||||
---
|
||||
|
||||
## ✅ Commit Checklist (applied automatically by speckit-implement)
|
||||
|
||||
- [ ] UUID pattern verified (no `parseInt` / `Number` / `+` on UUID, no `id ?? ''` fallback)
|
||||
- [ ] No `any`, no `console.log` in committed code
|
||||
- [ ] Business comments in Thai, code identifiers in English
|
||||
- [ ] Schema changes via SQL directly (not migration)
|
||||
- [ ] Test coverage meets targets (Backend 70%+, Business Logic 80%+)
|
||||
- [ ] Relevant ADRs referenced (007/008/009/016/018/019/020/021)
|
||||
- [ ] Domain glossary terms used correctly
|
||||
- [ ] Error handling: `Logger` + `HttpException` / `BusinessException`
|
||||
- [ ] i18n keys used (no hardcode text)
|
||||
- [ ] Cache invalidation when data mutated
|
||||
- [ ] OWASP Top 10 review passed
|
||||
File diff suppressed because it is too large
Load Diff
File diff suppressed because it is too large
Load Diff
@@ -1,10 +1,12 @@
|
||||
---
|
||||
name: nestjs-best-practices
|
||||
description: NestJS best practices and architecture patterns for building production-ready applications. This skill should be used when writing, reviewing, or refactoring NestJS code to ensure proper patterns for modules, dependency injection, security, and performance.
|
||||
description: NestJS best practices and architecture patterns for building production-ready LCBP3-DMS backend code. Enforces ADR-009 (no TypeORM migrations), ADR-019 (hybrid UUID), ADR-016 (security), ADR-007 (error handling), ADR-008 (BullMQ), ADR-001/002 (workflow + numbering), ADR-018/020 (AI boundary), and ADR-021 (workflow context).
|
||||
version: 1.8.9
|
||||
scope: backend
|
||||
user-invocable: false
|
||||
license: MIT
|
||||
metadata:
|
||||
author: Kadajett
|
||||
version: '1.1.0'
|
||||
upstream: 'Kadajett/nestjs-best-practices v1.1.0 (forked + LCBP3-aligned)'
|
||||
---
|
||||
|
||||
# NestJS Best Practices
|
||||
@@ -110,6 +112,13 @@ Reference these guidelines when:
|
||||
- `devops-use-logging` - Structured logging
|
||||
- `devops-graceful-shutdown` - Zero-downtime deployments
|
||||
|
||||
### 11. LCBP3-Specific (CRITICAL — Project Overrides)
|
||||
|
||||
- `db-no-typeorm-migrations` — **CRITICAL** ADR-009: edit SQL directly
|
||||
- `lcbp3-workflow-engine` — **CRITICAL** ADR-001/002/021: DSL state machine + double-lock numbering + workflow context
|
||||
- `security-file-two-phase-upload` — **CRITICAL** ADR-016: Upload → Temp → ClamAV → Commit
|
||||
- `lcbp3-ai-boundary` — **CRITICAL** ADR-018/020: Ollama on-prem only, human-in-the-loop
|
||||
|
||||
## NAP-DMS Project-Specific Rules (MUST FOLLOW)
|
||||
|
||||
These rules override general NestJS best practices for the NAP-DMS project:
|
||||
@@ -120,21 +129,62 @@ These rules override general NestJS best practices for the NAP-DMS project:
|
||||
- แก้ไข schema โดยตรงที่: `specs/03-Data-and-Storage/lcbp3-v1.8.0-schema-02-tables.sql`
|
||||
- ใช้ n8n workflow สำหรับ data migration ถ้าจำเป็น
|
||||
|
||||
### ADR-019: Hybrid Identifier Strategy (CRITICAL)
|
||||
### ADR-019: Hybrid Identifier Strategy (CRITICAL — March 2026 Pattern)
|
||||
|
||||
> **Updated pattern:** `UuidBaseEntity` exposes `publicId` **directly**. ห้ามใช้ `@Expose({ name: 'id' })` — API จะคืน `publicId` เป็น field name ตรงๆ.
|
||||
|
||||
```typescript
|
||||
// ✅ CORRECT — ใช้ UuidBaseEntity
|
||||
@Entity()
|
||||
export class Project {
|
||||
@PrimaryGeneratedColumn()
|
||||
@Exclude() // ห้ามส่งออกทาง API
|
||||
id: number; // INT AUTO_INCREMENT - internal only
|
||||
export class Project extends UuidBaseEntity {
|
||||
// publicId (string UUIDv7) + id (INT, @Exclude) สืบทอดจาก UuidBaseEntity
|
||||
// API response → { publicId: "019505a1-7c3e-7000-8000-abc123..." }
|
||||
|
||||
@Column({ type: 'uuid' })
|
||||
@Expose({ name: 'id' }) // ส่งออกเป็น 'id' ทาง API
|
||||
publicId: string; // UUIDv7 - public API identifier
|
||||
@Column()
|
||||
projectCode: string;
|
||||
|
||||
@Column()
|
||||
projectName: string;
|
||||
}
|
||||
```
|
||||
|
||||
```typescript
|
||||
// ❌ WRONG — pattern เก่า ห้ามใช้
|
||||
@Entity()
|
||||
export class OldProject {
|
||||
@PrimaryGeneratedColumn()
|
||||
@Exclude()
|
||||
id: number;
|
||||
|
||||
@Column({ type: 'uuid' })
|
||||
@Expose({ name: 'id' }) // ❌ อย่า rename publicId เป็น 'id'
|
||||
publicId: string;
|
||||
}
|
||||
```
|
||||
|
||||
**DTO Input (รับ UUID จาก Frontend):**
|
||||
|
||||
```typescript
|
||||
export class CreateContractDto {
|
||||
@IsUUID('7')
|
||||
projectUuid: string; // รับ UUID string จาก client
|
||||
}
|
||||
|
||||
// Controller resolves UUID → INT internally
|
||||
@Post()
|
||||
async create(@Body() dto: CreateContractDto) {
|
||||
const projectId = await this.projectService.resolveInternalId(dto.projectUuid);
|
||||
return this.contractService.create({ ...dto, projectId });
|
||||
}
|
||||
```
|
||||
|
||||
**ห้ามเด็ดขาด (CI Blocker):**
|
||||
|
||||
- ❌ `parseInt(projectPublicId)` — "019505…" → 19 (silently wrong)
|
||||
- ❌ `Number(publicId)` / `+publicId` — NaN
|
||||
- ❌ `@Expose({ name: 'id' })` บน `publicId` (pattern เก่า)
|
||||
- ❌ Expose INT `id` ใน API response (ต้อง `@Exclude()` เสมอ)
|
||||
|
||||
### Two-Phase File Upload
|
||||
|
||||
```typescript
|
||||
|
||||
@@ -0,0 +1,24 @@
|
||||
{
|
||||
"version": "1.8.9",
|
||||
"organization": "**NAP-DMS / LCBP3** — Laem Chabang Port Phase 3 Document Management System",
|
||||
"date": "2026-04-22",
|
||||
"abstract": "Comprehensive NestJS best-practices guide compiled for the LCBP3-DMS backend. Contains 40+ rules across 11 categories (10 general + 1 project-specific), prioritized by impact. Forked from Kadajett/nestjs-best-practices (v1.1.0) and aligned to LCBP3 ADRs: ADR-001 (workflow engine), ADR-002 (document numbering), ADR-007 (error handling), ADR-008 (notifications/BullMQ), ADR-009 (no TypeORM migrations), ADR-016 (security), ADR-018/020 (AI boundary), ADR-019 (hybrid UUID identifier — March 2026 pattern), and ADR-021 (workflow context).\n\nThis document is the single, consolidated reference used by Cascade and other AI coding agents when writing, reviewing, or refactoring backend code in this repository. All LCBP3-specific overrides live in section 11.",
|
||||
"references": [
|
||||
"[AGENTS.md (root)](../../../AGENTS.md) — canonical AI agent rules",
|
||||
"[CONTRIBUTING.md](../../../CONTRIBUTING.md) — spec authoring + PR process",
|
||||
"[ADR-001 Unified Workflow Engine](../../../specs/06-Decision-Records/ADR-001-unified-workflow-engine.md)",
|
||||
"[ADR-002 Document Numbering Strategy](../../../specs/06-Decision-Records/ADR-002-document-numbering-strategy.md)",
|
||||
"[ADR-007 Error Handling Strategy](../../../specs/06-Decision-Records/ADR-007-error-handling-strategy.md)",
|
||||
"[ADR-008 Email/Notification Strategy](../../../specs/06-Decision-Records/ADR-008-email-notification-strategy.md)",
|
||||
"[ADR-009 Database Migration Strategy](../../../specs/06-Decision-Records/ADR-009-database-migration-strategy.md)",
|
||||
"[ADR-016 Security & Authentication](../../../specs/06-Decision-Records/ADR-016-security-authentication.md)",
|
||||
"[ADR-018 AI Boundary](../../../specs/06-Decision-Records/ADR-018-ai-boundary.md)",
|
||||
"[ADR-019 Hybrid Identifier Strategy](../../../specs/06-Decision-Records/ADR-019-hybrid-identifier-strategy.md)",
|
||||
"[ADR-020 AI Intelligence Integration](../../../specs/06-Decision-Records/ADR-020-ai-intelligence-integration.md)",
|
||||
"[ADR-021 Workflow Context](../../../specs/06-Decision-Records/ADR-021-workflow-context.md)",
|
||||
"[Backend Engineering Guidelines](../../../specs/05-Engineering-Guidelines/05-02-backend-guidelines.md)",
|
||||
"[Schema — v1.8.0 Tables](../../../specs/03-Data-and-Storage/lcbp3-v1.8.0-schema-02-tables.sql)",
|
||||
"[Data Dictionary](../../../specs/03-Data-and-Storage/03-01-data-dictionary.md)",
|
||||
"Upstream: [Kadajett/nestjs-best-practices](https://github.com/Kadajett/nestjs-best-practices) v1.1.0"
|
||||
]
|
||||
}
|
||||
@@ -5,20 +5,22 @@ impactDescription: Use INT PK internally + UUID for public API per project ADR-0
|
||||
tags: database, uuid, identifier, adr-019, api-design, typeorm
|
||||
---
|
||||
|
||||
## Hybrid Identifier Strategy (ADR-019)
|
||||
## Hybrid Identifier Strategy (ADR-019) — March 2026 Pattern
|
||||
|
||||
**This project follows ADR-019: INT Primary Key (internal) + UUIDv7 (public API)**
|
||||
|
||||
Unlike standard practices that use UUID as the primary key, this project uses a **hybrid approach** optimized for MariaDB performance and API consistency.
|
||||
|
||||
> **Updated pattern (March 2026):** Entities extend `UuidBaseEntity`. The `publicId` column is exposed **directly** in API responses — ห้ามใช้ `@Expose({ name: 'id' })` เพื่อ rename.
|
||||
|
||||
### The Strategy
|
||||
|
||||
| Layer | Field | Type | Usage |
|
||||
|-------|-------|------|-------|
|
||||
| **Database PK** | `id` | `INT AUTO_INCREMENT` | Internal foreign keys only |
|
||||
| **Public API** | `uuid` | `MariaDB UUID` (native) | External references, URLs |
|
||||
| **DTO Input** | `xxxUuid` | `string` | Accept UUID in create/update |
|
||||
| **DTO Output** | `id` | `string` | API returns UUID as `id` via `@Expose` |
|
||||
| Layer | Field | Type | Usage |
|
||||
| --------------- | ---------- | ----------------------------------- | ------------------------------------------------- |
|
||||
| **Database PK** | `id` | `INT AUTO_INCREMENT` | Internal foreign keys only (marked `@Exclude()`) |
|
||||
| **Public API** | `publicId` | `MariaDB UUID` (native, BINARY(16)) | External references, URLs — exposed as-is |
|
||||
| **DTO Input** | `xxxUuid` | `string` (UUIDv7) | Accept UUID in create/update DTOs |
|
||||
| **DTO Output** | `publicId` | `string` (UUIDv7) | API returns `publicId` field directly (no rename) |
|
||||
|
||||
### Why Hybrid IDs?
|
||||
|
||||
@@ -27,31 +29,51 @@ Unlike standard practices that use UUID as the primary key, this project uses a
|
||||
- **Compatibility**: UUID works well with distributed systems and external integrations
|
||||
- **MariaDB Native**: Uses MariaDB's native UUID type (stored as BINARY(16), auto-converts to string)
|
||||
|
||||
### Entity Definition
|
||||
### Entity Definition (Current Pattern)
|
||||
|
||||
```typescript
|
||||
import { Entity, PrimaryGeneratedColumn, Column, Index } from 'typeorm';
|
||||
import { Exclude, Expose } from 'class-transformer';
|
||||
import { Entity, Column } from 'typeorm';
|
||||
import { UuidBaseEntity } from '@/common/entities/uuid-base.entity';
|
||||
|
||||
@Entity('contracts')
|
||||
export class Contract {
|
||||
@PrimaryGeneratedColumn()
|
||||
@Exclude() // Never expose in API response
|
||||
id: number; // Internal INT PK - used for FK relationships
|
||||
|
||||
@Column({ type: 'uuid', unique: true })
|
||||
@Expose({ name: 'id' }) // Exposed as 'id' in API
|
||||
uuid: string; // Public UUIDv7 - what API consumers see
|
||||
export class Contract extends UuidBaseEntity {
|
||||
// publicId (string UUIDv7) + id (INT, @Exclude) สืบทอดจาก UuidBaseEntity
|
||||
// API response → { publicId: "019505a1-7c3e-7000-8000-abc123...", contractCode: ..., ... }
|
||||
|
||||
@Column()
|
||||
contractCode: string;
|
||||
|
||||
@Column()
|
||||
contractName: string;
|
||||
|
||||
@Column({ name: 'project_id' })
|
||||
  projectId: number; // INT FK — internal; mark @Exclude() here too if it must not appear in API responses
|
||||
}
|
||||
```
|
||||
|
||||
### DTO Pattern (Accept UUID, Resolve to INT)
|
||||
**`UuidBaseEntity` (shared base):**
|
||||
|
||||
```typescript
|
||||
import { PrimaryGeneratedColumn, Column, CreateDateColumn, UpdateDateColumn } from 'typeorm';
|
||||
import { Exclude } from 'class-transformer';
|
||||
|
||||
export abstract class UuidBaseEntity {
|
||||
@PrimaryGeneratedColumn()
|
||||
@Exclude() // ❗ CRITICAL: INT id must never leak to API
|
||||
id: number;
|
||||
|
||||
@Column({ type: 'uuid', unique: true, generated: 'uuid' })
|
||||
publicId: string; // UUIDv7, exposed as-is
|
||||
|
||||
@CreateDateColumn()
|
||||
createdAt: Date;
|
||||
|
||||
@UpdateDateColumn()
|
||||
updatedAt: Date;
|
||||
}
|
||||
```
|
||||
|
||||
### DTO Pattern (Accept UUID, Resolve to INT Internally)
|
||||
|
||||
```typescript
|
||||
// dto/create-contract.dto.ts
|
||||
@@ -59,8 +81,8 @@ import { IsUUID, IsNotEmpty } from 'class-validator';
|
||||
|
||||
export class CreateContractDto {
|
||||
@IsNotEmpty()
|
||||
@IsUUID('4')
|
||||
projectUuid: string; // Accept UUID from client
|
||||
@IsUUID('7') // UUIDv7 (MariaDB native)
|
||||
projectUuid: string; // Accept UUID from client
|
||||
|
||||
@IsNotEmpty()
|
||||
contractCode: string;
|
||||
@@ -69,48 +91,38 @@ export class CreateContractDto {
|
||||
contractName: string;
|
||||
}
|
||||
|
||||
// dto/contract-response.dto.ts
|
||||
import { Exclude, Expose } from 'class-transformer';
|
||||
|
||||
export class ContractResponseDto {
|
||||
@Expose({ name: 'id' })
|
||||
uuid: string; // Returned as 'id' field in JSON
|
||||
|
||||
contractCode: string;
|
||||
contractName: string;
|
||||
}
|
||||
// ❌ NO Response DTO with @Expose rename needed.
|
||||
// class-transformer (via the global TransformInterceptor) will serialize publicId directly.
|
||||
```
|
||||
|
||||
### Service/Controller Pattern
|
||||
|
||||
```typescript
|
||||
@Controller('contracts')
|
||||
@UseGuards(JwtAuthGuard, CaslAbilityGuard)
|
||||
export class ContractsController {
|
||||
constructor(
|
||||
private contractsService: ContractsService,
|
||||
private uuidResolver: UuidResolver, // Helper to convert UUID → INT
|
||||
private uuidResolver: UuidResolver
|
||||
) {}
|
||||
|
||||
@Post()
|
||||
async create(@Body() dto: CreateContractDto) {
|
||||
// Resolve UUID to INT PK for database operations
|
||||
// Resolve UUID → INT PK for FK relationship
|
||||
const projectId = await this.uuidResolver.resolveProject(dto.projectUuid);
|
||||
|
||||
// Create with INT FK
|
||||
|
||||
const contract = await this.contractsService.create({
|
||||
...dto,
|
||||
projectId, // INT for database
|
||||
projectId,
|
||||
});
|
||||
|
||||
// Response automatically transforms via @Expose
|
||||
// Response: TransformInterceptor + @Exclude on id → publicId exposed directly
|
||||
return contract;
|
||||
}
|
||||
|
||||
@Get(':id')
|
||||
async findOne(@Param('id') uuid: string) {
|
||||
// Controller receives UUID string
|
||||
// Service handles UUID → INT resolution internally
|
||||
return this.contractsService.findByUuid(uuid);
|
||||
@Get(':publicId')
|
||||
async findOne(@Param('publicId', ParseUuidPipe) publicId: string) {
|
||||
return this.contractsService.findOneByPublicId(publicId);
|
||||
}
|
||||
}
|
||||
```
|
||||
@@ -124,21 +136,21 @@ export class UuidResolver {
|
||||
@InjectRepository(Project)
|
||||
private projectRepo: Repository<Project>,
|
||||
@InjectRepository(Contract)
|
||||
private contractRepo: Repository<Contract>,
|
||||
private contractRepo: Repository<Contract>
|
||||
) {}
|
||||
|
||||
async resolveProject(uuid: string): Promise<number> {
|
||||
async resolveProject(publicId: string): Promise<number> {
|
||||
const project = await this.projectRepo.findOne({
|
||||
where: { uuid },
|
||||
select: ['id'], // Only fetch INT PK
|
||||
where: { publicId },
|
||||
select: ['id'], // Only INT PK for FK
|
||||
});
|
||||
if (!project) throw new NotFoundException('Project not found');
|
||||
return project.id;
|
||||
}
|
||||
|
||||
async resolveContract(uuid: string): Promise<number> {
|
||||
async resolveContract(publicId: string): Promise<number> {
|
||||
const contract = await this.contractRepo.findOne({
|
||||
where: { uuid },
|
||||
where: { publicId },
|
||||
select: ['id'],
|
||||
});
|
||||
if (!contract) throw new NotFoundException('Contract not found');
|
||||
@@ -147,20 +159,20 @@ export class UuidResolver {
|
||||
}
|
||||
```
|
||||
|
||||
### TransformInterceptor (Required)
|
||||
### TransformInterceptor (Required — register ONCE)
|
||||
|
||||
```typescript
|
||||
// Must be configured globally to handle @Exclude/@Expose
|
||||
// Register via APP_INTERCEPTOR in CommonModule — ห้ามซ้ำใน main.ts
|
||||
@Injectable()
|
||||
export class TransformInterceptor implements NestInterceptor {
|
||||
intercept(context: ExecutionContext, next: CallHandler): Observable<any> {
|
||||
return next.handle().pipe(
|
||||
map((data) => instanceToPlain(data)), // Applies class-transformer decorators
|
||||
map((data) => instanceToPlain(data)) // Applies @Exclude / @Expose
|
||||
);
|
||||
}
|
||||
}
|
||||
|
||||
// app.module.ts
|
||||
// common.module.ts
|
||||
@Module({
|
||||
providers: [
|
||||
{
|
||||
@@ -169,40 +181,42 @@ export class TransformInterceptor implements NestInterceptor {
|
||||
},
|
||||
],
|
||||
})
|
||||
export class AppModule {}
|
||||
export class CommonModule {}
|
||||
```
|
||||
|
||||
> **Warning:** ห้ามเรียก `app.useGlobalInterceptors(new TransformInterceptor())` ใน `main.ts` ซ้ำ — จะทำให้ response double-wrap `{ data: { data: ... } }`.
|
||||
|
||||
### Critical: NEVER ParseInt on UUID
|
||||
|
||||
```typescript
|
||||
// ❌ WRONG - parseInt on UUID gives garbage value
|
||||
const id = parseInt(projectUuid); // "0195a1b2-..." → 195 (wrong!)
|
||||
const id = parseInt(projectPublicId); // "0195a1b2-..." → 195 (wrong!)
|
||||
|
||||
// ❌ WRONG - Number() on UUID
|
||||
const id = Number(projectUuid); // NaN
|
||||
const id = Number(projectPublicId); // NaN
|
||||
|
||||
// ❌ WRONG - Unary plus on UUID
|
||||
const id = +projectUuid; // NaN
|
||||
const id = +projectPublicId; // NaN
|
||||
|
||||
// ✅ CORRECT - Resolve via database lookup
|
||||
const projectId = await uuidResolver.resolveProject(projectUuid);
|
||||
const projectId = await uuidResolver.resolveProject(projectPublicId);
|
||||
|
||||
// ✅ CORRECT - Use TypeORM find with UUID column
|
||||
const project = await projectRepo.findOne({ where: { uuid: projectUuid } });
|
||||
const id = project.id; // Get INT PK from entity
|
||||
// ✅ CORRECT - Use TypeORM find with publicId column
|
||||
const project = await projectRepo.findOne({ where: { publicId: projectPublicId } });
|
||||
const id = project.id; // Get INT PK from entity
|
||||
```
|
||||
|
||||
### Query with UUID (No Resolution Needed)
|
||||
### Query with publicId (No Resolution Needed)
|
||||
|
||||
```typescript
|
||||
// Direct UUID lookup in TypeORM
|
||||
const project = await this.projectRepo.findOne({
|
||||
where: { uuid: projectUuid }, // Query by UUID column
|
||||
where: { publicId: projectPublicId },
|
||||
});
|
||||
|
||||
// Relations use INT FK internally
|
||||
const contracts = await this.contractRepo.find({
|
||||
where: { projectId: project.id }, // INT for FK query
|
||||
where: { projectId: project.id }, // INT for FK query
|
||||
});
|
||||
```
|
||||
|
||||
|
||||
@@ -0,0 +1,100 @@
|
||||
---
|
||||
title: No TypeORM Migrations (ADR-009)
|
||||
impact: CRITICAL
|
||||
impactDescription: Edit SQL schema files directly; n8n handles data migration. Do not generate TypeORM migration files.
|
||||
tags: database, schema, migration, adr-009, sql, n8n
|
||||
---
|
||||
|
||||
## No TypeORM Migrations (ADR-009)
|
||||
|
||||
**This project does NOT use TypeORM migration files.**
|
||||
|
||||
All schema changes must be made **directly** in the canonical SQL file:
|
||||
|
||||
- `specs/03-Data-and-Storage/lcbp3-v1.8.0-schema-02-tables.sql`
|
||||
|
||||
Delta scripts (for incremental rollout to existing environments) go under:
|
||||
|
||||
- `specs/03-Data-and-Storage/deltas/YYYY-MM-DD-descriptive-name.sql`
|
||||
|
||||
Data migration (e.g., backfilling a new column) is handled by **n8n workflows**, not TypeORM's `QueryRunner`.
|
||||
|
||||
---
|
||||
|
||||
## Why No Migrations?
|
||||
|
||||
1. **Single source of truth** — The full SQL schema is always readable as one file. No need to replay a migration chain to understand current state.
|
||||
2. **Review friendly** — Schema diff = git diff on the SQL file. Reviewers see the complete picture.
|
||||
3. **Ops alignment** — DBAs and operators work in SQL, not TypeScript.
|
||||
4. **n8n for data** — Business-meaningful data transforms live in n8n where they can be versioned, retried, and orchestrated with monitoring.
|
||||
|
||||
---
|
||||
|
||||
## ✅ Workflow for a Schema Change
|
||||
|
||||
1. **Update Data Dictionary** first:
|
||||
- `specs/03-Data-and-Storage/03-01-data-dictionary.md` — add field meaning + business rules.
|
||||
2. **Update the canonical schema**:
|
||||
- Edit `lcbp3-v1.8.0-schema-02-tables.sql` — add/alter column, constraint, index.
|
||||
3. **Add a delta script** (if deploying to existing env):
|
||||
- `specs/03-Data-and-Storage/deltas/2026-04-22-add-rfa-revision-column.sql`
|
||||
|
||||
```sql
|
||||
-- Delta: Add revision column to rfa table
|
||||
ALTER TABLE rfa
|
||||
ADD COLUMN revision INT NOT NULL DEFAULT 1 AFTER status;
|
||||
|
||||
CREATE INDEX idx_rfa_revision ON rfa(revision);
|
||||
```
|
||||
4. **Update the Entity** (`backend/src/.../entities/rfa.entity.ts`):
|
||||
|
||||
```typescript
|
||||
@Column({ type: 'int', default: 1 })
|
||||
revision: number;
|
||||
```
|
||||
5. **If data backfill needed** → create n8n workflow, not TypeScript migration.
|
||||
|
||||
---
|
||||
|
||||
## ❌ Forbidden
|
||||
|
||||
```bash
|
||||
# ❌ DO NOT generate migrations
|
||||
pnpm typeorm migration:generate ./src/migrations/AddRevision
|
||||
|
||||
# ❌ DO NOT run migrations
|
||||
pnpm typeorm migration:run
|
||||
```
|
||||
|
||||
```typescript
|
||||
// ❌ DO NOT write migration classes
|
||||
export class AddRevision1730000000000 implements MigrationInterface {
|
||||
async up(queryRunner: QueryRunner): Promise<void> { /* ... */ }
|
||||
async down(queryRunner: QueryRunner): Promise<void> { /* ... */ }
|
||||
}
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
## ✅ TypeORM Config (runtime only)
|
||||
|
||||
```typescript
|
||||
// ormconfig.ts
|
||||
export default {
|
||||
type: 'mariadb',
|
||||
// ...
|
||||
synchronize: false, // ❗ NEVER true (would auto-sync entity ↔ schema)
|
||||
migrationsRun: false, // ❗ NEVER true
|
||||
// ❌ Do NOT specify `migrations:` entries
|
||||
};
|
||||
```
|
||||
|
||||
`synchronize: false` is mandatory because the canonical SQL file is authoritative — TypeORM should never mutate the schema.
|
||||
|
||||
---
|
||||
|
||||
## Reference
|
||||
|
||||
- [ADR-009 Database Migration Strategy](../../../../specs/06-Decision-Records/ADR-009-database-migration-strategy.md)
|
||||
- [Data Dictionary](../../../../specs/03-Data-and-Storage/03-01-data-dictionary.md)
|
||||
- [Schema Tables](../../../../specs/03-Data-and-Storage/lcbp3-v1.8.0-schema-02-tables.sql)
|
||||
@@ -0,0 +1,157 @@
|
||||
---
|
||||
title: AI Integration Boundary (ADR-018 / ADR-020)
|
||||
impact: CRITICAL
|
||||
impactDescription: AI runs on Admin Desktop only; AI → DMS API → DB (never direct); human-in-the-loop validation mandatory; full audit trail.
|
||||
tags: ai, ollama, boundary, adr-018, adr-020, privacy, audit
|
||||
---
|
||||
|
||||
## AI Integration Boundary
|
||||
|
||||
LCBP3 uses **on-premises AI only** (Ollama on Admin Desktop) with strict isolation from data layers.
|
||||
|
||||
---
|
||||
|
||||
## The Boundary
|
||||
|
||||
```
|
||||
┌────────────────────────────────────────────────────────────┐
|
||||
│ User Browser (Next.js) │
|
||||
└─────────────────────────┬──────────────────────────────────┘
|
||||
│ (authenticated HTTPS)
|
||||
┌─────────────────────────▼──────────────────────────────────┐
|
||||
│ DMS API (NestJS) ◀── enforces CASL, validation, audit │
|
||||
│ ├─ AiGateway (proxies to Ollama) │
|
||||
│ └─ DB + Storage (Elasticsearch, MariaDB, File System) │
|
||||
└─────────────────────────┬──────────────────────────────────┘
|
||||
│ (HTTP → Admin Desktop, internal)
|
||||
┌─────────────────────────▼──────────────────────────────────┐
|
||||
│ Admin Desktop (Desk-5439) │
|
||||
│ ├─ Ollama (Gemma 4) │
|
||||
│ ├─ PaddleOCR (Thai + English) │
|
||||
│ └─ n8n orchestration │
|
||||
└────────────────────────────────────────────────────────────┘
|
||||
```
|
||||
|
||||
**❗ Admin Desktop has NO network access to MariaDB, no SMB to storage, no shared secrets.** It receives base64-encoded file bytes over HTTPS and returns extracted text + suggestions.
|
||||
|
||||
---
|
||||
|
||||
## Required Patterns
|
||||
|
||||
### 1. AiGateway Module (backend)
|
||||
|
||||
```typescript
|
||||
@Module({
|
||||
controllers: [AiController],
|
||||
providers: [AiService, AiGateway, AiAuditLogger],
|
||||
exports: [AiService],
|
||||
})
|
||||
export class AiModule {}
|
||||
|
||||
@Injectable()
|
||||
export class AiService {
|
||||
async extractMetadata(fileId: number, user: User): Promise<ExtractedMetadata> {
|
||||
// 1. Authorize (CASL: user can read this file)
|
||||
await this.ability.ensureCan(user, 'read', File, fileId);
|
||||
|
||||
// 2. Load file (DMS API, inside the boundary)
|
||||
const fileBytes = await this.storageService.read(fileId);
|
||||
|
||||
// 3. Call Admin Desktop AI over HTTP
|
||||
const raw = await this.aiGateway.extract(fileBytes);
|
||||
|
||||
// 4. Validate AI output schema (Zod)
|
||||
const parsed = ExtractedMetadataSchema.parse(raw);
|
||||
|
||||
// 5. Audit log (who, what, when, model, confidence)
|
||||
await this.auditLogger.log({
|
||||
userId: user.id,
|
||||
action: 'ai.extract_metadata',
|
||||
fileId,
|
||||
model: raw.model,
|
||||
confidence: parsed.confidence,
|
||||
});
|
||||
|
||||
// 6. Return — frontend MUST render for human confirmation
|
||||
return parsed;
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
### 2. Human-in-the-Loop
|
||||
|
||||
AI output is **never persisted directly**. Users must confirm via `DocumentReviewForm`:
|
||||
|
||||
```tsx
|
||||
<DocumentReviewForm
|
||||
document={doc}
|
||||
aiSuggestions={suggestions}
|
||||
onConfirm={(reviewed) => saveMetadata(reviewed)} // user edits applied
|
||||
/>
|
||||
```
|
||||
|
||||
The `user_confirmed_at` timestamp and diff (AI suggestion → final value) are stored in the audit log.
|
||||
|
||||
### 3. Rate Limiting
|
||||
|
||||
```typescript
|
||||
@Post('ai/extract')
|
||||
@UseGuards(JwtAuthGuard, CaslAbilityGuard, ThrottlerGuard)
|
||||
@Throttle({ default: { limit: 10, ttl: 60_000 } }) // 10 req/min/user
|
||||
async extract(@Body() dto: ExtractDto) { /* ... */ }
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
## ❌ Forbidden
|
||||
|
||||
```typescript
|
||||
// ❌ AI container connecting to DB
|
||||
// docker-compose.yml inside ai-service:
|
||||
// environment:
|
||||
// DATABASE_URL: mysql://... ← NEVER
|
||||
|
||||
// ❌ AI SDK calling cloud API
|
||||
import OpenAI from 'openai'; // ❌ No cloud AI SDKs in production code
|
||||
const client = new OpenAI({ apiKey: ... });
|
||||
|
||||
// ❌ Persisting AI output without human confirm
|
||||
async extractAndSave(fileId: number) {
|
||||
const metadata = await this.ai.extract(fileId);
|
||||
await this.repo.save({ fileId, ...metadata }); // ❌ skips human review
|
||||
}
|
||||
|
||||
// ❌ Skipping audit log
|
||||
const result = await this.aiGateway.extract(bytes); // no logging
|
||||
return result;
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
## Audit Log Schema
|
||||
|
||||
```sql
|
||||
CREATE TABLE ai_audit_log (
|
||||
id INT AUTO_INCREMENT PRIMARY KEY,
|
||||
public_id UUID UNIQUE NOT NULL,
|
||||
user_id INT NOT NULL,
|
||||
action VARCHAR(64) NOT NULL, -- 'ai.extract_metadata', 'ai.classify', etc.
|
||||
file_id INT,
|
||||
model VARCHAR(64), -- 'gemma-4:7b', 'paddleocr-v3'
|
||||
confidence DECIMAL(4,3),
|
||||
input_hash CHAR(64), -- SHA-256 of input for replay detection
|
||||
output_summary JSON,
|
||||
human_confirmed_at DATETIME,
|
||||
created_at DATETIME DEFAULT CURRENT_TIMESTAMP,
|
||||
INDEX idx_user_created (user_id, created_at),
|
||||
INDEX idx_file (file_id)
|
||||
);
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
## Reference
|
||||
|
||||
- [ADR-018 AI Boundary](../../../../specs/06-Decision-Records/ADR-018-ai-boundary.md)
|
||||
- [ADR-020 AI Intelligence Integration](../../../../specs/06-Decision-Records/ADR-020-ai-intelligence-integration.md)
|
||||
- [ADR-017 Ollama Data Migration](../../../../specs/06-Decision-Records/ADR-017-ollama-data-migration.md)
|
||||
@@ -0,0 +1,181 @@
|
||||
---
|
||||
title: Workflow Engine + Document Numbering + Workflow Context (ADR-001 / 002 / 021)
|
||||
impact: CRITICAL
|
||||
impactDescription: DSL-based state machine; double-lock numbering; integrated workflow context exposed to clients.
|
||||
tags: workflow, numbering, redlock, version-column, adr-001, adr-002, adr-021
|
||||
---
|
||||
|
||||
## Workflow Engine + Numbering + Context
|
||||
|
||||
LCBP3 uses a **unified workflow engine** (DSL-based state machine) across RFA, Transmittal, Correspondence, Circulation, and Shop Drawing. Every state transition goes through the same engine — no per-type routing tables.
|
||||
|
||||
---
|
||||
|
||||
## ADR-001: Unified Workflow Engine
|
||||
|
||||
### State Transition Pattern
|
||||
|
||||
```typescript
|
||||
@Injectable()
|
||||
export class WorkflowEngine {
|
||||
async transition(
|
||||
instanceId: string,
|
||||
action: WorkflowAction,
|
||||
actor: User,
|
||||
context?: WorkflowContext,
|
||||
): Promise<WorkflowInstance> {
|
||||
// 1. Load current state from DB (never trust client-provided state)
|
||||
const instance = await this.repo.findOneByPublicId(instanceId);
|
||||
if (!instance) throw new NotFoundException();
|
||||
|
||||
// 2. Validate transition against DSL
|
||||
const dsl = await this.dslService.load(instance.workflowTypeId);
|
||||
const nextState = dsl.resolve(instance.currentState, action);
|
||||
if (!nextState) {
|
||||
throw new BusinessException(
|
||||
`Action ${action} not allowed from state ${instance.currentState}`,
|
||||
'ไม่สามารถดำเนินการนี้ได้ในสถานะปัจจุบัน',
|
||||
'กรุณาตรวจสอบขั้นตอนการอนุมัติ',
|
||||
'WF_INVALID_TRANSITION',
|
||||
);
|
||||
}
|
||||
|
||||
// 3. Apply transition atomically (optimistic lock via @VersionColumn)
|
||||
instance.currentState = nextState;
|
||||
await this.repo.save(instance); // throws OptimisticLockVersionMismatchError on race
|
||||
|
||||
// 4. Emit event for listeners (notifications via BullMQ — ADR-008)
|
||||
this.eventBus.publish(new WorkflowTransitionedEvent(instance, action, actor));
|
||||
|
||||
return instance;
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
### ❌ Anti-Patterns
|
||||
|
||||
- ❌ Hard-coded `switch (state)` in controllers/services
|
||||
- ❌ Trusting `currentState` from request body
|
||||
- ❌ Creating separate routing tables per document type
|
||||
|
||||
---
|
||||
|
||||
## ADR-002: Document Numbering (Double-Lock)
|
||||
|
||||
Concurrent requests for a new document number **must** use both:
|
||||
|
||||
1. **Redis Redlock** — distributed lock across app instances
|
||||
2. **TypeORM `@VersionColumn`** — optimistic lock on counter row
|
||||
|
||||
### Counter Entity
|
||||
|
||||
```typescript
|
||||
@Entity('document_number_counters')
|
||||
@Unique(['projectId', 'documentTypeId'])
|
||||
export class DocumentNumberCounter extends UuidBaseEntity {
|
||||
@Column({ name: 'project_id' })
|
||||
projectId: number;
|
||||
|
||||
@Column({ name: 'document_type_id' })
|
||||
documentTypeId: number;
|
||||
|
||||
@Column({ name: 'last_number', default: 0 })
|
||||
lastNumber: number;
|
||||
|
||||
@VersionColumn()
|
||||
version: number; // ❗ Optimistic lock — do not rename, do not remove
|
||||
}
|
||||
```
|
||||
|
||||
### Service Pattern
|
||||
|
||||
```typescript
|
||||
@Injectable()
|
||||
export class DocumentNumberingService {
|
||||
constructor(
|
||||
@InjectRepository(DocumentNumberCounter)
|
||||
private counterRepo: Repository<DocumentNumberCounter>,
|
||||
private redlock: RedlockService,
|
||||
private readonly logger: Logger,
|
||||
) {}
|
||||
|
||||
async generateNext(ctx: NumberingContext): Promise<string> {
|
||||
const lockKey = `doc_num:${ctx.projectId}:${ctx.documentTypeId}`;
|
||||
|
||||
// Distributed lock — 3s TTL, up to 5 retries
|
||||
const lock = await this.redlock.acquire([lockKey], 3000);
|
||||
|
||||
try {
|
||||
// Optimistic lock via @VersionColumn
|
||||
const counter = await this.counterRepo.findOne({
|
||||
where: { projectId: ctx.projectId, documentTypeId: ctx.documentTypeId },
|
||||
});
|
||||
|
||||
if (!counter) {
|
||||
throw new NotFoundException('Counter not initialized for this project/type');
|
||||
}
|
||||
|
||||
counter.lastNumber += 1;
|
||||
await this.counterRepo.save(counter); // may throw OptimisticLockVersionMismatchError
|
||||
|
||||
return this.formatNumber(ctx, counter.lastNumber);
|
||||
} catch (err) {
|
||||
if (err instanceof OptimisticLockVersionMismatchError) {
|
||||
this.logger.warn(`Numbering race detected for ${lockKey}, retrying`);
|
||||
// Let caller retry via BullMQ retry policy
|
||||
}
|
||||
throw err;
|
||||
} finally {
|
||||
await lock.release();
|
||||
}
|
||||
}
|
||||
|
||||
private formatNumber(ctx: NumberingContext, seq: number): string {
|
||||
// e.g. "LCBP3-RFA-0042"
|
||||
return `${ctx.projectCode}-${ctx.typeCode}-${String(seq).padStart(4, '0')}`;
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
### ❌ Anti-Patterns
|
||||
|
||||
- ❌ App-side counter only (`let counter = 0; counter++`)
|
||||
- ❌ Using `findOne` + `update` without `@VersionColumn`
|
||||
- ❌ Using only Redis lock without DB optimistic lock (race if Redis fails)
|
||||
|
||||
---
|
||||
|
||||
## ADR-021: Integrated Workflow Context
|
||||
|
||||
Every workflow-aware API response **must** expose:
|
||||
|
||||
```typescript
|
||||
export class WorkflowEnvelope<T> {
|
||||
data: T;
|
||||
|
||||
workflow: {
|
||||
instancePublicId: string;
|
||||
currentState: string; // e.g. 'pending_review'
|
||||
availableActions: string[]; // e.g. ['approve', 'reject', 'request-revision']
|
||||
canEdit: boolean; // computed from CASL + current state
|
||||
lastTransitionAt: string; // ISO 8601
|
||||
};
|
||||
|
||||
stepAttachments?: Array<{ // files produced by the current/previous step
|
||||
publicId: string;
|
||||
fileName: string;
|
||||
stepCode: string;
|
||||
downloadUrl: string;
|
||||
}>;
|
||||
}
|
||||
```
|
||||
|
||||
Frontend uses `workflow.availableActions` to render buttons — no client-side state machine logic.
|
||||
|
||||
---
|
||||
|
||||
## Reference
|
||||
|
||||
- [ADR-001 Unified Workflow Engine](../../../../specs/06-Decision-Records/ADR-001-unified-workflow-engine.md)
|
||||
- [ADR-002 Document Numbering Strategy](../../../../specs/06-Decision-Records/ADR-002-document-numbering-strategy.md)
|
||||
- [ADR-021 Workflow Context](../../../../specs/06-Decision-Records/ADR-021-workflow-context.md)
|
||||
@@ -0,0 +1,137 @@
|
||||
---
|
||||
title: Two-Phase File Upload + ClamAV (ADR-016)
|
||||
impact: CRITICAL
|
||||
impactDescription: Upload → Temp → ClamAV scan → Commit → Permanent. Whitelist + 50MB cap. StorageService only.
|
||||
tags: file-upload, clamav, security, adr-016, storage
|
||||
---
|
||||
|
||||
## Two-Phase File Upload (ADR-016)
|
||||
|
||||
**Never write uploaded files directly to permanent storage.** All uploads must go through:
|
||||
|
||||
```
|
||||
Client → Upload endpoint → Temp storage → ClamAV scan → Commit endpoint → Permanent storage
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
## Constraints (non-negotiable)
|
||||
|
||||
| Rule | Value |
|
||||
| --- | --- |
|
||||
| Allowed MIME types | `application/pdf`, `image/vnd.dwg`, `application/vnd.openxmlformats-officedocument.wordprocessingml.document`, `application/vnd.openxmlformats-officedocument.spreadsheetml.sheet`, `application/zip` |
|
||||
| Allowed extensions | `.pdf`, `.dwg`, `.docx`, `.xlsx`, `.zip` |
|
||||
| Max size | 50 MB |
|
||||
| Temp TTL | 24 h (purged by cron) |
|
||||
| Virus scan | ClamAV (blocking) |
|
||||
| Mover | `StorageService` only — never `fs.rename` directly from controller |
|
||||
|
||||
---
|
||||
|
||||
## Phase 1: Upload to Temp
|
||||
|
||||
```typescript
|
||||
@Post('upload')
|
||||
@UseGuards(JwtAuthGuard, ThrottlerGuard)
|
||||
@UseInterceptors(FileInterceptor('file', {
|
||||
limits: { fileSize: 50 * 1024 * 1024 }, // 50 MB
|
||||
}))
|
||||
async uploadTemp(
|
||||
@UploadedFile() file: Express.Multer.File,
|
||||
@CurrentUser() user: User,
|
||||
): Promise<{ tempId: string; expiresAt: string }> {
|
||||
// 1. Validate MIME + extension (defense in depth)
|
||||
this.fileValidator.assertAllowed(file);
|
||||
|
||||
// 2. Scan with ClamAV
|
||||
const scanResult = await this.clamavService.scan(file.buffer);
|
||||
if (!scanResult.clean) {
|
||||
throw new BusinessException(
|
||||
`ClamAV rejected: ${scanResult.signature}`,
|
||||
'ไฟล์ไม่ปลอดภัย ระบบตรวจพบความเสี่ยง',
|
||||
'กรุณาตรวจสอบไฟล์และลองใหม่อีกครั้ง',
|
||||
'FILE_INFECTED',
|
||||
);
|
||||
}
|
||||
|
||||
// 3. Save to temp (encrypted at rest)
|
||||
const tempId = await this.storageService.saveToTemp(file, user.id);
|
||||
|
||||
return {
|
||||
tempId,
|
||||
expiresAt: addHours(new Date(), 24).toISOString(),
|
||||
};
|
||||
}
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
## Phase 2: Commit in Transaction
|
||||
|
||||
The business operation (e.g., creating a Correspondence) promotes temp files to permanent **in the same DB transaction**.
|
||||
|
||||
```typescript
|
||||
async createCorrespondence(dto: CreateCorrespondenceDto, user: User) {
|
||||
return this.dataSource.transaction(async (manager) => {
|
||||
// 1. Create domain entity
|
||||
const entity = await manager.save(Correspondence, {
|
||||
...dto,
|
||||
createdById: user.id,
|
||||
});
|
||||
|
||||
// 2. Commit temp files → permanent (ACID together with entity)
|
||||
await this.storageService.commitFiles(
|
||||
dto.tempFileIds,
|
||||
{ entityId: entity.id, entityType: 'correspondence' },
|
||||
manager,
|
||||
);
|
||||
|
||||
return entity;
|
||||
});
|
||||
}
|
||||
```
|
||||
|
||||
If the transaction rolls back, temp files remain and expire in 24h — no orphaned permanent files.
|
||||
|
||||
---
|
||||
|
||||
## StorageService Contract
|
||||
|
||||
```typescript
|
||||
export interface StorageService {
|
||||
saveToTemp(file: Express.Multer.File, ownerId: number): Promise<string>;
|
||||
commitFiles(
|
||||
tempIds: string[],
|
||||
target: { entityId: number; entityType: string },
|
||||
manager: EntityManager,
|
||||
): Promise<FileRecord[]>;
|
||||
purgeExpiredTemp(): Promise<number>; // called by cron
|
||||
getPermanentPath(fileId: number): Promise<string>;
|
||||
}
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
## ❌ Forbidden
|
||||
|
||||
```typescript
|
||||
// ❌ Direct write to permanent
|
||||
fs.writeFileSync(`/var/storage/${file.originalname}`, file.buffer);
|
||||
|
||||
// ❌ Skip ClamAV
|
||||
await this.storageService.savePermanent(file);
|
||||
|
||||
// ❌ Non-whitelist MIME
|
||||
@UseInterceptors(FileInterceptor('file')) // no size or type limit
|
||||
|
||||
// ❌ Commit outside transaction
|
||||
const entity = await this.repo.save(...);
|
||||
await this.storageService.commitFiles(tempIds, ...); // race: entity exists, files may fail
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
## Reference
|
||||
|
||||
- [ADR-016 Security & Authentication](../../../../specs/06-Decision-Records/ADR-016-security-authentication.md)
|
||||
- [Edge Cases](../../../../specs/01-Requirements/01-06-edge-cases-and-rules.md) — file upload scenarios
|
||||
@@ -32,6 +32,7 @@ const CATEGORIES = [
|
||||
{ prefix: 'api-', name: 'API Design', impact: 'MEDIUM', section: 8 },
|
||||
{ prefix: 'micro-', name: 'Microservices', impact: 'MEDIUM', section: 9 },
|
||||
{ prefix: 'devops-', name: 'DevOps & Deployment', impact: 'LOW-MEDIUM', section: 10 },
|
||||
{ prefix: 'lcbp3-', name: 'LCBP3 Project-Specific', impact: 'CRITICAL', section: 11 },
|
||||
];
|
||||
|
||||
interface RuleFrontmatter {
|
||||
@@ -50,8 +51,10 @@ interface Rule {
|
||||
}
|
||||
|
||||
function parseFrontmatter(content: string): { frontmatter: RuleFrontmatter | null; body: string } {
|
||||
// Normalize CRLF → LF so the regex works on Windows-authored files
|
||||
const normalized = content.replace(/\r\n/g, '\n');
|
||||
const frontmatterRegex = /^---\n([\s\S]*?)\n---\n([\s\S]*)$/;
|
||||
const match = content.match(frontmatterRegex);
|
||||
const match = normalized.match(frontmatterRegex);
|
||||
|
||||
if (!match) {
|
||||
return { frontmatter: null, body: content };
|
||||
|
||||
@@ -1,6 +1,8 @@
|
||||
---
|
||||
name: next-best-practices
|
||||
description: Next.js best practices - file conventions, RSC boundaries, data patterns, async APIs, metadata, error handling, route handlers, image/font optimization, bundling
|
||||
description: Next.js best practices for LCBP3-DMS frontend. Enforces ADR-019 (publicId only, no parseInt/id fallback), TanStack Query + RHF + Zod, shadcn/ui, i18n, ADR-007 error UX, ADR-021 IntegratedBanner/WorkflowLifecycle, two-phase file upload.
|
||||
version: 1.8.9
|
||||
scope: frontend
|
||||
user-invocable: false
|
||||
---
|
||||
|
||||
@@ -157,6 +159,24 @@ See [parallel-routes.md](./parallel-routes.md) for:
|
||||
- `default.tsx` for fallbacks
|
||||
- Closing modals correctly with `router.back()`
|
||||
|
||||
## i18n (Thai / English)
|
||||
|
||||
See [i18n.md](./i18n.md) for:
|
||||
|
||||
- `useTranslations('namespace')` pattern
|
||||
- Key naming (kebab-case, feature-namespaced)
|
||||
- When Zod messages stay inline vs i18n
|
||||
- Server-side `userMessage` passthrough
|
||||
|
||||
## Two-Phase File Upload
|
||||
|
||||
See [two-phase-upload.md](./two-phase-upload.md) for:
|
||||
|
||||
- `useDropzone` + `useMutation` hook
|
||||
- `tempFileIds` form-state pattern
|
||||
- Whitelist MIME / max-size (must mirror backend)
|
||||
- Clear-on-submit / expired-temp handling
|
||||
|
||||
## Self-Hosting
|
||||
|
||||
See [self-hosting.md](./self-hosting.md) for:
|
||||
@@ -204,28 +224,38 @@ const form = useForm({
|
||||
});
|
||||
```
|
||||
|
||||
### ADR-019 UUID Handling (CRITICAL)
|
||||
### ADR-019 UUID Handling (CRITICAL — March 2026 Pattern)
|
||||
|
||||
> **Updated:** ใช้ `publicId` ตรงๆ — ห้ามใช้ `id ?? ''` fallback หรือ `uuid` ร่วม.
|
||||
|
||||
```tsx
|
||||
// Interface ต้องมีทั้ง id และ publicId
|
||||
// ✅ CORRECT — Interface มีแค่ publicId
|
||||
interface Contract {
|
||||
id?: number; // Internal (อาจ undefined)
|
||||
publicId?: string; // UUID - ใช้ตัวนี้
|
||||
publicId?: string; // UUID from API — ใช้ตัวนี้
|
||||
contractCode: string;
|
||||
contractName: string;
|
||||
}
|
||||
|
||||
// Select options - ใช้ pattern นี้เสมอ
|
||||
// ✅ CORRECT — Select options (ไม่มี fallback)
|
||||
const options = contracts.map((c) => ({
|
||||
label: `${c.contractName} (${c.contractCode})`,
|
||||
value: String(c.publicId ?? c.id ?? ''), // fallback pattern
|
||||
key: String(c.publicId ?? c.id ?? ''),
|
||||
value: c.publicId ?? '', // ใช้ publicId ล้วน
|
||||
key: c.publicId ?? c.contractCode, // fallback ไป business field ได้
|
||||
}));
|
||||
|
||||
// ❌ ห้ามใช้ parseInt บน UUID
|
||||
// const id = parseInt(projectId); // WRONG!
|
||||
// ❌ WRONG — pattern เก่า (ห้าม)
|
||||
interface OldContract {
|
||||
id?: number; // ❌ อย่า expose INT id
|
||||
uuid?: string; // ❌ ใช้ชื่อ uuid
|
||||
publicId?: string;
|
||||
}
|
||||
const oldValue = String(c.publicId ?? c.id ?? ''); // ❌ `id ?? ''` fallback ห้าม
|
||||
|
||||
// ✅ ส่ง UUID string ตรงๆ
|
||||
apiClient.get(`/projects/${projectId}`); // projectId is UUID string
|
||||
// ❌ NEVER parseInt on UUID
|
||||
// const badId = parseInt(projectPublicId); // "019505..." → 19 (WRONG!)
|
||||
|
||||
// ✅ ส่ง UUID string ตรงๆ ไป API
|
||||
apiClient.get(`/projects/${projectPublicId}`);
|
||||
```
|
||||
|
||||
### Naming Conventions
|
||||
@@ -312,13 +342,17 @@ apiClient.interceptors.request.use((config) => {
|
||||
|
||||
### Anti-Patterns (ห้ามทำ)
|
||||
|
||||
- ❌ Fetch data ใน useEffect โดยตรง
|
||||
- ❌ Fetch data ใน useEffect โดยตรง (ใช้ TanStack Query)
|
||||
- ❌ Props drilling ลึกเกิน 3 levels
|
||||
- ❌ Inline styles (ใช้ Tailwind)
|
||||
- ❌ console.log ใน production
|
||||
- ❌ parseInt() บน UUID values
|
||||
- ❌ `console.log` ใน committed code
|
||||
- ❌ `parseInt()` / `Number()` / `+` บน UUID values (ADR-019)
|
||||
- ❌ `id ?? ''` fallback บน `publicId` (ใช้ `publicId ?? ''` หรือ fallback ไป business field)
|
||||
- ❌ Expose `uuid` คู่กับ `publicId` ใน interface (ใช้ `publicId` อย่างเดียว)
|
||||
- ❌ ใช้ index เป็น key ใน list
|
||||
- ❌ Snake_case ใน form field names (ใช้ camelCase)
|
||||
- ❌ Hardcode Thai/English string ใน component (ใช้ i18n keys)
|
||||
- ❌ `any` type (strict mode)
|
||||
|
||||
---
|
||||
|
||||
|
||||
@@ -0,0 +1,79 @@
|
||||
# i18n (Thai / English)
|
||||
|
||||
LCBP3 frontend **must not** hardcode Thai or English UI strings in components.
|
||||
|
||||
## Rules
|
||||
|
||||
1. **All user-facing strings go through the i18n layer** (`next-intl` / `i18next` — check `frontend/package.json`).
|
||||
2. **Keys use kebab-case**, namespaced by feature:
|
||||
- `correspondence.list.title`
|
||||
- `correspondence.form.submit`
|
||||
- `common.actions.cancel`
|
||||
3. **Comments in code remain Thai** (business logic explanation); **only UI copy** goes through i18n.
|
||||
4. **Error messages** from backend (via ADR-007 `userMessage`) are already localized server-side — render them directly, don't translate client-side.
|
||||
|
||||
---
|
||||
|
||||
## ❌ Wrong
|
||||
|
||||
```tsx
|
||||
export function CorrespondenceHeader() {
|
||||
return <h1>รายการหนังสือติดต่อ</h1>; // ❌ hardcoded Thai
|
||||
}
|
||||
|
||||
toast.success('บันทึกสำเร็จ'); // ❌ hardcoded
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
## ✅ Right
|
||||
|
||||
```tsx
|
||||
import { useTranslations } from 'next-intl';
|
||||
|
||||
export function CorrespondenceHeader() {
|
||||
const t = useTranslations('correspondence.list');
|
||||
return <h1>{t('title')}</h1>;
|
||||
}
|
||||
|
||||
toast.success(t('save.success'));
|
||||
```
|
||||
|
||||
Translation files:
|
||||
|
||||
```json
|
||||
// messages/th.json
|
||||
{
|
||||
"correspondence": {
|
||||
"list": { "title": "รายการหนังสือติดต่อ" },
|
||||
"save": { "success": "บันทึกสำเร็จ" }
|
||||
}
|
||||
}
|
||||
|
||||
// messages/en.json
|
||||
{
|
||||
"correspondence": {
|
||||
"list": { "title": "Correspondence List" },
|
||||
"save": { "success": "Saved successfully" }
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
## Zod Error Messages
|
||||
|
||||
Zod error messages shown in forms **do** stay in Thai inline (per `specs/05-Engineering-Guidelines/05-03-frontend-guidelines.md`), because they're schema-bound and rarely need translation. If dual-language support becomes required, wrap with an i18n-aware resolver:
|
||||
|
||||
```ts
|
||||
const schema = z.object({
|
||||
projectUuid: z.string().uuid(t('validation.project.required')),
|
||||
});
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
## Reference
|
||||
|
||||
- [i18n Guidelines](../../../specs/05-Engineering-Guidelines/05-08-i18n-guidelines.md)
|
||||
- [Frontend Guidelines](../../../specs/05-Engineering-Guidelines/05-03-frontend-guidelines.md)
|
||||
@@ -0,0 +1,100 @@
|
||||
# Two-Phase File Upload (Frontend)
|
||||
|
||||
Pair with [backend two-phase upload rule](../nestjs-best-practices/rules/security-file-two-phase-upload.md).
|
||||
|
||||
## Flow
|
||||
|
||||
```
|
||||
User drops file
|
||||
→ POST /files/upload (temp) → { tempId, expiresAt }
|
||||
→ store tempId in form state
|
||||
→ user submits form
|
||||
→ POST /correspondences (with tempFileIds) → backend commits in transaction
|
||||
```
|
||||
|
||||
## Hook Pattern
|
||||
|
||||
```tsx
|
||||
'use client';
|
||||
|
||||
import { useDropzone } from 'react-dropzone';
|
||||
import { useMutation } from '@tanstack/react-query';
|
||||
|
||||
export function useTwoPhaseUpload() {
|
||||
const uploadTemp = useMutation({
|
||||
mutationFn: async (file: File) => {
|
||||
const fd = new FormData();
|
||||
fd.append('file', file);
|
||||
const { data } = await apiClient.post<{ tempId: string; expiresAt: string }>(
|
||||
'/files/upload',
|
||||
fd,
|
||||
);
|
||||
return data;
|
||||
},
|
||||
});
|
||||
|
||||
return uploadTemp;
|
||||
}
|
||||
```
|
||||
|
||||
## Form Integration (RHF)
|
||||
|
||||
```tsx
|
||||
export function CorrespondenceForm() {
|
||||
const form = useForm<FormData>({ resolver: zodResolver(schema) });
|
||||
const uploadTemp = useTwoPhaseUpload();
|
||||
const [tempFileIds, setTempFileIds] = useState<string[]>([]);
|
||||
|
||||
const { getRootProps, getInputProps } = useDropzone({
|
||||
accept: {
|
||||
'application/pdf': ['.pdf'],
|
||||
'image/vnd.dwg': ['.dwg'],
|
||||
'application/vnd.openxmlformats-officedocument.wordprocessingml.document': ['.docx'],
|
||||
'application/vnd.openxmlformats-officedocument.spreadsheetml.sheet': ['.xlsx'],
|
||||
'application/zip': ['.zip'],
|
||||
},
|
||||
maxSize: 50 * 1024 * 1024, // 50 MB — must match backend
|
||||
onDrop: async (files) => {
|
||||
const results = await Promise.all(files.map((f) => uploadTemp.mutateAsync(f)));
|
||||
setTempFileIds((prev) => [...prev, ...results.map((r) => r.tempId)]);
|
||||
},
|
||||
});
|
||||
|
||||
const onSubmit = async (values: FormData) => {
|
||||
await correspondenceService.create({
|
||||
...values,
|
||||
tempFileIds, // committed server-side in the same DB transaction
|
||||
});
|
||||
setTempFileIds([]);
|
||||
};
|
||||
|
||||
return (
|
||||
<form onSubmit={form.handleSubmit(onSubmit)}>
|
||||
<div {...getRootProps()} className="dropzone">
|
||||
<input {...getInputProps()} />
|
||||
<p>{t('upload.dragDrop')}</p>
|
||||
</div>
|
||||
{/* other fields */}
|
||||
</form>
|
||||
);
|
||||
}
|
||||
```
|
||||
|
||||
## Rules
|
||||
|
||||
- **Whitelist MIME types** — must mirror backend ADR-016 whitelist (`.pdf`, `.dwg`, `.docx`, `.xlsx`, `.zip`).
|
||||
- **50 MB cap** — enforce client-side too (better UX) plus server-side (authoritative).
|
||||
- **Show temp-file pills** with remove button — users see what will be attached.
|
||||
- **Clear `tempFileIds` on success/cancel** — prevent stale IDs on subsequent submits.
|
||||
- **No retry of expired temps** — if `expiresAt` passed, prompt re-upload.
|
||||
|
||||
## ❌ Forbidden
|
||||
|
||||
- ❌ Uploading directly to permanent storage endpoint (no commit phase)
|
||||
- ❌ Hardcoded MIME list in component (keep in shared constant file mirrored from backend)
|
||||
- ❌ Ignoring `maxSize` — backend will reject but UX suffers
|
||||
|
||||
## Reference
|
||||
|
||||
- [ADR-016 Security](../../../specs/06-Decision-Records/ADR-016-security-authentication.md)
|
||||
- Backend rule: [`security-file-two-phase-upload.md`](../nestjs-best-practices/rules/security-file-two-phase-upload.md)
|
||||
@@ -1,17 +1,19 @@
|
||||
# UUID Handling (ADR-019)
|
||||
# UUID Handling (ADR-019) — March 2026 Pattern
|
||||
|
||||
**Project-specific: Hybrid Identifier Strategy for NAP-DMS**
|
||||
|
||||
This project uses ADR-019: INT Primary Key (internal) + UUIDv7 (public API). Frontend code must handle this correctly.
|
||||
|
||||
> **Updated pattern:** Backend exposes `publicId` directly — ไม่มี `@Expose({ name: 'id' })` rename แล้ว. Frontend ใช้ `publicId` ตรงๆ — ห้าม fallback ไป `id`.
|
||||
|
||||
## The Pattern
|
||||
|
||||
| Source | Field Name | Type | Notes |
|
||||
|--------|------------|------|-------|
|
||||
| **API Response** | `id` | `string` (UUID) | Actually `publicId` exposed via `@Expose({ name: 'id' })` |
|
||||
| **TypeScript Interface** | `publicId?: string` | UUID string | Use this for all references |
|
||||
| **Fallback** | `id?: number` | INT (internal) | May be undefined due to `@Exclude()` |
|
||||
| **Form Values** | `xxxUuid` | `string` | DTO field names: `projectUuid`, `contractUuid` |
|
||||
| Source | Field Name | Type | Notes |
|
||||
| ------------------------ | ------------------- | ----------------- | ----------------------------------------------------------- |
|
||||
| **API Response** | `publicId` | `string` (UUIDv7) | Exposed directly (no rename) |
|
||||
| **TypeScript Interface** | `publicId?: string` | UUID string | ใช้ตัวนี้เท่านั้น |
|
||||
| **Form DTO** | `xxxUuid` | `string` | DTO field names: `projectUuid`, `contractUuid` (input only) |
|
||||
| **URL param** | `[publicId]` | `string` (UUID) | e.g. `/correspondences/[publicId]/page.tsx` |
|
||||
|
||||
## Critical Rules
|
||||
|
||||
@@ -31,22 +33,26 @@ const id = +projectId; // NaN
|
||||
apiClient.get(`/projects/${projectId}`); // projectId is already UUID string
|
||||
```
|
||||
|
||||
### 2. Use `publicId ?? id` Pattern
|
||||
### 2. Use `publicId` Only — NO `id ?? ''` Fallback
|
||||
|
||||
```tsx
|
||||
// types/project.ts
|
||||
// ✅ CORRECT — types/project.ts
|
||||
interface Project {
|
||||
id?: number; // Internal INT (may be undefined)
|
||||
publicId?: string; // UUID from API (use this)
|
||||
publicId?: string; // UUID from API — ใช้ตัวนี้เท่านั้น
|
||||
projectCode: string;
|
||||
projectName: string;
|
||||
}
|
||||
|
||||
// Component usage
|
||||
// ✅ CORRECT — Component usage
|
||||
const projectOptions = projects.map((p) => ({
|
||||
label: `${p.projectName} (${p.projectCode})`,
|
||||
value: String(p.publicId ?? p.id ?? ''), // ADR-019 pattern
|
||||
key: String(p.publicId ?? p.id ?? ''),
|
||||
value: p.publicId ?? '', // ADR-019 — ไม่ต้อง String() และไม่ไป id
|
||||
key: p.publicId ?? p.projectCode, // fallback ไป business field ได้
|
||||
}));
|
||||
|
||||
// ❌ WRONG — pattern เก่า
|
||||
const oldOptions = projects.map((p) => ({
|
||||
value: String(p.publicId ?? p.id ?? ''), // ❌ `id ?? ''` fallback
|
||||
}));
|
||||
```
|
||||
|
||||
@@ -84,14 +90,13 @@ export function ContractSelect({ contracts, value, onChange }: ContractSelectPro
|
||||
<SelectValue placeholder="เลือกสัญญา" />
|
||||
</SelectTrigger>
|
||||
<SelectContent>
|
||||
{contracts.map((c) => (
|
||||
<SelectItem
|
||||
key={String(c.publicId ?? c.id ?? '')}
|
||||
value={String(c.publicId ?? c.id ?? '')}
|
||||
>
|
||||
{c.contractName} ({c.contractCode})
|
||||
</SelectItem>
|
||||
))}
|
||||
{contracts
|
||||
.filter((c) => !!c.publicId) // กรอง contract ที่มี publicId เท่านั้น
|
||||
.map((c) => (
|
||||
<SelectItem key={c.publicId} value={c.publicId!}>
|
||||
{c.contractName} ({c.contractCode})
|
||||
</SelectItem>
|
||||
))}
|
||||
</SelectContent>
|
||||
</Select>
|
||||
);
|
||||
@@ -113,7 +118,9 @@ const columns: ColumnDef<Discipline>[] = [
|
||||
cell: ({ row }) => {
|
||||
const contract = row.original.contract;
|
||||
return contract ? (
|
||||
<span>{contract.contractName} ({contract.contractCode})</span>
|
||||
<span>
|
||||
{contract.contractName} ({contract.contractCode})
|
||||
</span>
|
||||
) : (
|
||||
<span className="text-muted-foreground">-</span>
|
||||
);
|
||||
@@ -153,10 +160,9 @@ export const contractService = {
|
||||
## TypeScript Interfaces
|
||||
|
||||
```tsx
|
||||
// types/entities.ts
|
||||
// ✅ CORRECT — types/entities.ts
|
||||
export interface BaseEntity {
|
||||
id?: number; // Internal INT - may be undefined
|
||||
publicId?: string; // UUID - use this for API calls
|
||||
publicId?: string; // UUID — ใช้ตัวนี้เท่านั้น (ไม่มี INT id ใน interface)
|
||||
createdAt?: string;
|
||||
updatedAt?: string;
|
||||
}
|
||||
@@ -170,14 +176,12 @@ export interface Project extends BaseEntity {
|
||||
export interface Contract extends BaseEntity {
|
||||
contractCode: string;
|
||||
contractName: string;
|
||||
projectId?: number; // Internal INT FK
|
||||
projectUuid?: string; // UUID for DTOs
|
||||
project?: Project; // Relation
|
||||
project?: Project; // Relation (nested entity)
|
||||
}
|
||||
|
||||
// DTOs
|
||||
// DTO (input only — รับ UUID จาก form)
|
||||
export interface CreateContractDto {
|
||||
projectUuid: string; // Accept UUID from form
|
||||
projectUuid: string; // UUID string from select
|
||||
contractCode: string;
|
||||
contractName: string;
|
||||
}
|
||||
@@ -215,9 +219,7 @@ export function ContractForm() {
|
||||
|
||||
return (
|
||||
<Form {...form}>
|
||||
<form onSubmit={form.handleSubmit(onSubmit)}>
|
||||
{/* Form fields */}
|
||||
</form>
|
||||
<form onSubmit={form.handleSubmit(onSubmit)}>{/* Form fields */}</form>
|
||||
</Form>
|
||||
);
|
||||
}
|
||||
@@ -231,19 +233,20 @@ export default async function ContractPage({ params }: { params: Promise<{ id: s
|
||||
const { id } = await params;
|
||||
// id is UUID string from URL
|
||||
const contract = await contractService.getById(id);
|
||||
|
||||
|
||||
return <ContractDetail contract={contract} />;
|
||||
}
|
||||
```
|
||||
|
||||
## Common Pitfalls
|
||||
|
||||
| Pitfall | Wrong | Right |
|
||||
|---------|-------|-------|
|
||||
| Assuming `entity.id` exists | `key={entity.id}` | `key={entity.publicId ?? entity.id}` |
|
||||
| parseInt on UUID | `parseInt(projectId)` | `projectId` (string) |
|
||||
| Field name mismatch | `name="project_id"` | `name="projectUuid"` |
|
||||
| Missing fallback | `value={entity.publicId}` | `value={entity.publicId ?? entity.id ?? ''}` |
|
||||
| Pitfall | ❌ Wrong | ✅ Right |
|
||||
| ---------------------------- | ------------------------------------------------ | --------------------------------- |
|
||||
| Using INT `id` | `key={entity.id}` | `key={entity.publicId}` |
|
||||
| parseInt on UUID | `parseInt(projectId)` | `projectId` (string) |
|
||||
| Field name mismatch | `name="project_id"` | `name="projectUuid"` |
|
||||
| `id ?? ''` fallback | `value={publicId ?? id ?? ''}` | `value={publicId ?? ''}` |
|
||||
| `uuid` + `publicId` together | `interface { uuid?: string; publicId?: string }` | `interface { publicId?: string }` |
|
||||
|
||||
## Reference
|
||||
|
||||
|
||||
+17
-14
@@ -1,17 +1,20 @@
|
||||
# 🧠 NAP-DMS Agent Skills (v1.8.6)
|
||||
# 🧠 NAP-DMS Agent Skills (v1.8.9)
|
||||
|
||||
ไฟล์นี้กำหนดทักษะและความสามารถเฉพาะทางของ Document Intelligence Engine สำหรับโครงการ LCBP3 v1.8.6 เพื่อรักษามาตรฐานสูงสุดด้าน Security และ Data Integrity
|
||||
ไฟล์นี้กำหนดทักษะและความสามารถเฉพาะทางของ Document Intelligence Engine สำหรับโครงการ LCBP3 v1.8.9 เพื่อรักษามาตรฐานสูงสุดด้าน Security และ Data Integrity
|
||||
|
||||
**Status**: Production Ready | **Last Updated**: 2026-04-14 | **Total Skills**: 20
|
||||
**Status**: Production Ready | **Last Updated**: 2026-04-22 | **Total Skills**: 20
|
||||
|
||||
> 📌 Shared context for all speckit-\* skills: see [`_LCBP3-CONTEXT.md`](./_LCBP3-CONTEXT.md).
|
||||
|
||||
---
|
||||
|
||||
## 🏗️ Architectural & Data Integrity
|
||||
|
||||
- **Identifier Strategy Mastery (ADR-019):**
|
||||
- บังคับใช้ **UUIDv7** เป็น Public ID ใน API และ URL เสมอ
|
||||
- ตรวจสอบและป้องกันการใช้ `parseInt()`, `Number()`, หรือตัวดำเนินการทางคณิตศาสตร์ (`+`) กับ UUID
|
||||
- ตรวจสอบว่า Entity มีการใช้ `@Exclude()` บน Primary Key ที่เป็น `INT AUTO_INCREMENT` เพื่อไม่ให้หลุดออกไปยัง API
|
||||
- **Identifier Strategy Mastery (ADR-019 — March 2026):**
|
||||
- บังคับใช้ **UUIDv7** เป็น Public ID; entity สืบทอดจาก `UuidBaseEntity` และเปิด `publicId` **ตรงๆ** (ห้ามใช้ `@Expose({ name: 'id' })` rename)
|
||||
- ตรวจสอบและป้องกันการใช้ `parseInt()`, `Number()`, หรือ `+` กับ UUID ทั้ง backend/frontend
|
||||
- ตรวจสอบว่า Entity มีการใช้ `@Exclude()` บน Primary Key `INT AUTO_INCREMENT` เพื่อไม่ให้หลุดออกไปยัง API
|
||||
- Frontend ใช้ `publicId` ตรงๆ — **ห้าม** `id ?? ''` fallback หรือมี `uuid?: string` คู่กับ `publicId` ใน interface
|
||||
- **Strict Validation Engine:**
|
||||
- บังคับใช้ **Zod** สำหรับการทำ Form Validation ฝั่ง Frontend
|
||||
- บังคับใช้ **class-validator** สำหรับ Backend DTOs
|
||||
@@ -81,22 +84,22 @@
|
||||
|
||||
## 🛠️ Skill Health Monitoring
|
||||
|
||||
### Health Check Scripts
|
||||
### Health Check Scripts (from repo root)
|
||||
|
||||
- **Bash**: `./scripts/bash/audit-skills.sh` - Comprehensive skill health audit
|
||||
- **PowerShell**: `./scripts/powershell/audit-skills.ps1` - Windows equivalent
|
||||
- **Bash**: `./.agents/scripts/bash/audit-skills.sh` - Comprehensive skill health audit
|
||||
- **PowerShell**: `./.agents/scripts/powershell/audit-skills.ps1` - Windows equivalent
|
||||
|
||||
### Validation Scripts
|
||||
|
||||
- **Version Check**: `./scripts/bash/validate-versions.sh` - Ensure version consistency
|
||||
- **Workflow Sync**: `./scripts/bash/sync-workflows.sh` - Verify workflow integration
|
||||
- **Version Check**: `./.agents/scripts/bash/validate-versions.sh` - Ensure version consistency
|
||||
- **Workflow Sync**: `./.agents/scripts/bash/sync-workflows.sh` - Verify workflow integration
|
||||
|
||||
### Health Metrics
|
||||
|
||||
- **Total Skills**: 20 implemented
|
||||
- **Version Alignment**: v1.8.6 across all skills
|
||||
- **Version Alignment**: v1.8.9 across all skills
|
||||
- **Template Coverage**: 100% for skills requiring templates
|
||||
- **Documentation**: Complete front matter and sections
|
||||
- **Documentation**: Complete front matter + shared `_LCBP3-CONTEXT.md` appendix
|
||||
|
||||
### Maintenance Schedule
|
||||
|
||||
|
||||
@@ -1,7 +1,7 @@
|
||||
---
|
||||
name: speckit-analyze
|
||||
description: Perform a non-destructive cross-artifact consistency and quality analysis across spec.md, plan.md, and tasks.md after task generation.
|
||||
version: 1.0.0
|
||||
version: 1.8.9
|
||||
depends-on:
|
||||
- speckit-tasks
|
||||
---
|
||||
@@ -28,7 +28,7 @@ Identify inconsistencies, duplications, ambiguities, and underspecified items ac
|
||||
|
||||
**STRICTLY READ-ONLY**: Do **not** modify any files. Output a structured analysis report. Offer an optional remediation plan (user must explicitly approve before any follow-up editing commands would be invoked manually).
|
||||
|
||||
**Constitution Authority**: The project constitution (`.specify/memory/constitution.md`) is **non-negotiable** within this analysis scope. Constitution conflicts are automatically CRITICAL and require adjustment of the spec, plan, or tasks—not dilution, reinterpretation, or silent ignoring of the principle. If a principle itself needs to change, that must occur in a separate, explicit constitution update outside `/speckit-analyze`.
|
||||
**Constitution Authority**: The project constitution (`AGENTS.md`) is **non-negotiable** within this analysis scope. Constitution conflicts are automatically CRITICAL and require adjustment of the spec, plan, or tasks—not dilution, reinterpretation, or silent ignoring of the principle. If a principle itself needs to change, that must occur in a separate, explicit constitution update outside `/speckit-analyze`.
|
||||
|
||||
### Steps
|
||||
|
||||
@@ -72,7 +72,7 @@ Load only the minimal necessary context from each artifact:
|
||||
|
||||
**From constitution:**
|
||||
|
||||
- Load `.specify/memory/constitution.md` for principle validation
|
||||
- Load `AGENTS.md` for principle validation
|
||||
|
||||
### 3. Build Semantic Models
|
||||
|
||||
@@ -192,3 +192,15 @@ Ask the user: "Would you like me to suggest concrete remediation edits for the t
|
||||
## Context
|
||||
|
||||
{{args}}
|
||||
|
||||
---
|
||||
|
||||
## LCBP3-DMS Context (MUST LOAD)
|
||||
|
||||
Before executing, load **[../_LCBP3-CONTEXT.md](../_LCBP3-CONTEXT.md)** to get:
|
||||
|
||||
- Canonical rule sources (AGENTS.md, specs/06-Decision-Records/, specs/05-Engineering-Guidelines/)
|
||||
- Tier 1 non-negotiables (ADR-019 UUID, ADR-009 schema, ADR-016 security, ADR-002 numbering, ADR-008 BullMQ, ADR-018/020 AI boundary, ADR-007 errors)
|
||||
- Domain glossary (Correspondence / RFA / Transmittal / Circulation)
|
||||
- Helper script real paths
|
||||
- Commit checklist
|
||||
@@ -1,7 +1,7 @@
|
||||
---
|
||||
name: speckit-checker
|
||||
description: Run static analysis tools and aggregate results.
|
||||
version: 1.0.0
|
||||
version: 1.8.9
|
||||
depends-on: []
|
||||
---
|
||||
|
||||
@@ -157,3 +157,15 @@ Auto-detect available tools, run them, and aggregate results into a prioritized
|
||||
- **Be Actionable**: Every issue should have a clear fix path
|
||||
- **Don't Duplicate**: Dedupe issues found by multiple tools
|
||||
- **Respect Configs**: Honor project's existing linter configs
|
||||
|
||||
---
|
||||
|
||||
## LCBP3-DMS Context (MUST LOAD)
|
||||
|
||||
Before executing, load **[../_LCBP3-CONTEXT.md](../_LCBP3-CONTEXT.md)** to get:
|
||||
|
||||
- Canonical rule sources (AGENTS.md, specs/06-Decision-Records/, specs/05-Engineering-Guidelines/)
|
||||
- Tier 1 non-negotiables (ADR-019 UUID, ADR-009 schema, ADR-016 security, ADR-002 numbering, ADR-008 BullMQ, ADR-018/020 AI boundary, ADR-007 errors)
|
||||
- Domain glossary (Correspondence / RFA / Transmittal / Circulation)
|
||||
- Helper script real paths
|
||||
- Commit checklist
|
||||
@@ -1,7 +1,7 @@
|
||||
---
|
||||
name: speckit-checklist
|
||||
description: Generate a custom checklist for the current feature based on user requirements.
|
||||
version: 1.0.0
|
||||
version: 1.8.9
|
||||
---
|
||||
|
||||
## Checklist Purpose: "Unit Tests for English"
|
||||
@@ -300,3 +300,15 @@ Sample items:
|
||||
- Correct: Validation of requirement quality
|
||||
- Wrong: "Does it do X?"
|
||||
- Correct: "Is X clearly specified?"
|
||||
|
||||
---
|
||||
|
||||
## LCBP3-DMS Context (MUST LOAD)
|
||||
|
||||
Before executing, load **[../_LCBP3-CONTEXT.md](../_LCBP3-CONTEXT.md)** to get:
|
||||
|
||||
- Canonical rule sources (AGENTS.md, specs/06-Decision-Records/, specs/05-Engineering-Guidelines/)
|
||||
- Tier 1 non-negotiables (ADR-019 UUID, ADR-009 schema, ADR-016 security, ADR-002 numbering, ADR-008 BullMQ, ADR-018/020 AI boundary, ADR-007 errors)
|
||||
- Domain glossary (Correspondence / RFA / Transmittal / Circulation)
|
||||
- Helper script real paths
|
||||
- Commit checklist
|
||||
@@ -1,7 +1,7 @@
|
||||
---
|
||||
name: speckit-clarify
|
||||
description: Identify underspecified areas in the current feature spec by asking up to 5 highly targeted clarification questions and encoding answers back into the spec.
|
||||
version: 1.0.0
|
||||
version: 1.8.9
|
||||
depends-on:
|
||||
- speckit-specify
|
||||
handoffs:
|
||||
@@ -189,3 +189,15 @@ Behavior rules:
|
||||
- If quota reached with unresolved high-impact categories remaining, explicitly flag them under Deferred with rationale.
|
||||
|
||||
Context for prioritization: {{args}}
|
||||
|
||||
---
|
||||
|
||||
## LCBP3-DMS Context (MUST LOAD)
|
||||
|
||||
Before executing, load **[../_LCBP3-CONTEXT.md](../_LCBP3-CONTEXT.md)** to get:
|
||||
|
||||
- Canonical rule sources (AGENTS.md, specs/06-Decision-Records/, specs/05-Engineering-Guidelines/)
|
||||
- Tier 1 non-negotiables (ADR-019 UUID, ADR-009 schema, ADR-016 security, ADR-002 numbering, ADR-008 BullMQ, ADR-018/020 AI boundary, ADR-007 errors)
|
||||
- Domain glossary (Correspondence / RFA / Transmittal / Circulation)
|
||||
- Helper script real paths
|
||||
- Commit checklist
|
||||
@@ -1,7 +1,7 @@
|
||||
---
|
||||
name: speckit-constitution
|
||||
description: Create or update the project constitution from interactive or provided principle inputs, ensuring all dependent templates stay in sync.
|
||||
version: 1.0.0
|
||||
version: 1.8.9
|
||||
handoffs:
|
||||
- label: Build Specification
|
||||
agent: speckit-specify
|
||||
@@ -24,11 +24,11 @@ You are the **Antigravity Governance Architect**. Your role is to establish and
|
||||
|
||||
### Outline
|
||||
|
||||
You are updating the project constitution at `.specify/memory/constitution.md`. This file is a TEMPLATE containing placeholder tokens in square brackets (e.g. `[PROJECT_NAME]`, `[PRINCIPLE_1_NAME]`). Your job is to (a) collect/derive concrete values, (b) fill the template precisely, and (c) propagate any amendments across dependent artifacts.
|
||||
You are updating the project constitution at `AGENTS.md`. This file is a TEMPLATE containing placeholder tokens in square brackets (e.g. `[PROJECT_NAME]`, `[PRINCIPLE_1_NAME]`). Your job is to (a) collect/derive concrete values, (b) fill the template precisely, and (c) propagate any amendments across dependent artifacts.
|
||||
|
||||
Follow this execution flow:
|
||||
|
||||
1. Load the existing constitution template at `memory/constitution.md`.
|
||||
1. Load the existing constitution template at `AGENTS.md`.
|
||||
- Identify every placeholder token of the form `[ALL_CAPS_IDENTIFIER]`.
|
||||
**IMPORTANT**: The user might require less or more principles than the ones used in the template. If a number is specified, respect that - follow the general template. You will update the doc accordingly.
|
||||
|
||||
@@ -49,10 +49,10 @@ Follow this execution flow:
|
||||
- Ensure Governance section lists amendment procedure, versioning policy, and compliance review expectations.
|
||||
|
||||
4. Consistency propagation checklist (convert prior checklist into active validations):
|
||||
- Read `.specify/templates/plan-template.md` and ensure any "Constitution Check" or rules align with updated principles.
|
||||
- Read `.specify/templates/spec-template.md` for scope/requirements alignment—update if constitution adds/removes mandatory sections or constraints.
|
||||
- Read `.specify/templates/tasks-template.md` and ensure task categorization reflects new or removed principle-driven task types (e.g., observability, versioning, testing discipline).
|
||||
- Read each command file in `.specify/templates/commands/*.md` (including this one) to verify no outdated references (agent-specific names like CLAUDE only) remain when generic guidance is required.
|
||||
- Read `.agents/skills/speckit-plan/templates/plan-template.md` and ensure any "Constitution Check" or rules align with updated principles.
|
||||
- Read `.agents/skills/speckit-specify/templates/spec-template.md` for scope/requirements alignment—update if constitution adds/removes mandatory sections or constraints.
|
||||
- Read `.agents/skills/speckit-tasks/templates/tasks-template.md` and ensure task categorization reflects new or removed principle-driven task types (e.g., observability, versioning, testing discipline).
|
||||
- Read each command file in `.agents/skills/*.md` (including this one) to verify no outdated references (agent-specific names like CLAUDE only) remain when generic guidance is required.
|
||||
- Read any runtime guidance docs (e.g., `README.md`, `docs/quickstart.md`, or agent-specific guidance files if present). Update references to principles changed.
|
||||
|
||||
5. Produce a Sync Impact Report (prepend as an HTML comment at top of the constitution file after update):
|
||||
@@ -69,7 +69,7 @@ Follow this execution flow:
|
||||
- Dates ISO format YYYY-MM-DD.
|
||||
- Principles are declarative, testable, and free of vague language ("should" → replace with MUST/SHOULD rationale where appropriate).
|
||||
|
||||
7. Write the completed constitution back to `.specify/memory/constitution.md` (overwrite).
|
||||
7. Write the completed constitution back to `AGENTS.md` (overwrite).
|
||||
|
||||
8. Output a final summary to the user with:
|
||||
- New version and bump rationale.
|
||||
@@ -87,4 +87,16 @@ If the user supplies partial updates (e.g., only one principle revision), still
|
||||
|
||||
If critical info missing (e.g., ratification date truly unknown), insert `TODO(<FIELD_NAME>): explanation` and include in the Sync Impact Report under deferred items.
|
||||
|
||||
Do not create a new template; always operate on the existing `.specify/memory/constitution.md` file.
|
||||
Do not create a new template; always operate on the existing `AGENTS.md` file.
|
||||
|
||||
---
|
||||
|
||||
## LCBP3-DMS Context (MUST LOAD)
|
||||
|
||||
Before executing, load **[../_LCBP3-CONTEXT.md](../_LCBP3-CONTEXT.md)** to get:
|
||||
|
||||
- Canonical rule sources (AGENTS.md, specs/06-Decision-Records/, specs/05-Engineering-Guidelines/)
|
||||
- Tier 1 non-negotiables (ADR-019 UUID, ADR-009 schema, ADR-016 security, ADR-002 numbering, ADR-008 BullMQ, ADR-018/020 AI boundary, ADR-007 errors)
|
||||
- Domain glossary (Correspondence / RFA / Transmittal / Circulation)
|
||||
- Helper script real paths
|
||||
- Commit checklist
|
||||
@@ -1,7 +1,7 @@
|
||||
---
|
||||
name: speckit-diff
|
||||
description: Compare two versions of a spec or plan to highlight changes.
|
||||
version: 1.0.0
|
||||
version: 1.8.9
|
||||
depends-on: []
|
||||
---
|
||||
|
||||
@@ -84,3 +84,15 @@ Compare two versions of a specification artifact and produce a structured diff r
|
||||
- **Highlight Impact**: Explain what each change means for implementation
|
||||
- **Flag Breaking Changes**: Any change that invalidates existing work
|
||||
- **Ignore Whitespace**: Focus on semantic changes, not formatting
|
||||
|
||||
---
|
||||
|
||||
## LCBP3-DMS Context (MUST LOAD)
|
||||
|
||||
Before executing, load **[../_LCBP3-CONTEXT.md](../_LCBP3-CONTEXT.md)** to get:
|
||||
|
||||
- Canonical rule sources (AGENTS.md, specs/06-Decision-Records/, specs/05-Engineering-Guidelines/)
|
||||
- Tier 1 non-negotiables (ADR-019 UUID, ADR-009 schema, ADR-016 security, ADR-002 numbering, ADR-008 BullMQ, ADR-018/020 AI boundary, ADR-007 errors)
|
||||
- Domain glossary (Correspondence / RFA / Transmittal / Circulation)
|
||||
- Helper script real paths
|
||||
- Commit checklist
|
||||
@@ -1,7 +1,7 @@
|
||||
---
|
||||
name: speckit-implement
|
||||
description: Execute the implementation plan by processing and executing all tasks defined in tasks.md (with Ironclad Anti-Regression Protocols)
|
||||
version: 1.0.0
|
||||
version: 1.8.9
|
||||
depends-on:
|
||||
- speckit-tasks
|
||||
---
|
||||
@@ -81,7 +81,7 @@ At the start of execution and after every 3 modifications:
|
||||
|
||||
### Outline
|
||||
|
||||
1. Run `.specify/scripts/bash/check-prerequisites.sh --json --require-tasks --include-tasks` from repo root and parse FEATURE_DIR and AVAILABLE_DOCS list. All paths must be absolute. For single quotes in args like "I'm Groot", use escape syntax: e.g 'I'\\''m Groot' (or double-quote if possible: "I'm Groot").
|
||||
1. Run `../scripts/bash/check-prerequisites.sh --json --require-tasks --include-tasks` from repo root and parse FEATURE_DIR and AVAILABLE_DOCS list. All paths must be absolute. For single quotes in args like "I'm Groot", use escape syntax: e.g 'I'\\''m Groot' (or double-quote if possible: "I'm Groot").
|
||||
|
||||
2. **Check checklists status** (if FEATURE_DIR/checklists/ exists):
|
||||
- Scan all checklist files in the checklists/ directory
|
||||
@@ -246,3 +246,15 @@ At the start of execution and after every 3 modifications:
|
||||
---
|
||||
|
||||
Note: This command assumes a complete task breakdown exists in tasks.md. If tasks are incomplete or missing, suggest running `/speckit-tasks` first to regenerate the task list.
|
||||
|
||||
---
|
||||
|
||||
## LCBP3-DMS Context (MUST LOAD)
|
||||
|
||||
Before executing, load **[../_LCBP3-CONTEXT.md](../_LCBP3-CONTEXT.md)** to get:
|
||||
|
||||
- Canonical rule sources (AGENTS.md, specs/06-Decision-Records/, specs/05-Engineering-Guidelines/)
|
||||
- Tier 1 non-negotiables (ADR-019 UUID, ADR-009 schema, ADR-016 security, ADR-002 numbering, ADR-008 BullMQ, ADR-018/020 AI boundary, ADR-007 errors)
|
||||
- Domain glossary (Correspondence / RFA / Transmittal / Circulation)
|
||||
- Helper script real paths
|
||||
- Commit checklist
|
||||
@@ -1,7 +1,7 @@
|
||||
---
|
||||
name: speckit-migrate
|
||||
description: Migrate existing projects into the speckit structure by generating spec.md, plan.md, and tasks.md from existing code.
|
||||
version: 1.0.0
|
||||
version: 1.8.9
|
||||
depends-on: []
|
||||
---
|
||||
|
||||
@@ -116,3 +116,15 @@ Analyze an existing codebase and generate speckit artifacts (spec.md, plan.md, t
|
||||
- **Preserve Intent**: Use code comments and naming to understand purpose
|
||||
- **Flag TODOs**: Any TODO/FIXME/HACK in code becomes an open task
|
||||
- **Be Conservative**: When unsure, ask rather than assume
|
||||
|
||||
---
|
||||
|
||||
## LCBP3-DMS Context (MUST LOAD)
|
||||
|
||||
Before executing, load **[../_LCBP3-CONTEXT.md](../_LCBP3-CONTEXT.md)** to get:
|
||||
|
||||
- Canonical rule sources (AGENTS.md, specs/06-Decision-Records/, specs/05-Engineering-Guidelines/)
|
||||
- Tier 1 non-negotiables (ADR-019 UUID, ADR-009 schema, ADR-016 security, ADR-002 numbering, ADR-008 BullMQ, ADR-018/020 AI boundary, ADR-007 errors)
|
||||
- Domain glossary (Correspondence / RFA / Transmittal / Circulation)
|
||||
- Helper script real paths
|
||||
- Commit checklist
|
||||
@@ -1,7 +1,7 @@
|
||||
---
|
||||
name: speckit-plan
|
||||
description: Execute the implementation planning workflow using the plan template to generate design artifacts.
|
||||
version: 1.0.0
|
||||
version: 1.8.9
|
||||
depends-on:
|
||||
- speckit-specify
|
||||
handoffs:
|
||||
@@ -32,7 +32,7 @@ You are the **Antigravity System Architect**. Your role is to bridge the gap bet
|
||||
|
||||
1. **Setup**: Run `../scripts/bash/setup-plan.sh --json` from repo root and parse JSON for FEATURE_SPEC, IMPL_PLAN, SPECS_DIR, BRANCH. For single quotes in args like "I'm Groot", use escape syntax: e.g 'I'\\''m Groot' (or double-quote if possible: "I'm Groot").
|
||||
|
||||
2. **Load context**: Read FEATURE_SPEC and `.specify/memory/constitution.md`. Load IMPL_PLAN template from `templates/plan-template.md`.
|
||||
2. **Load context**: Read FEATURE_SPEC and `AGENTS.md`. Load IMPL_PLAN template from `templates/plan-template.md`.
|
||||
|
||||
3. **Execute plan workflow**: Follow the structure in IMPL_PLAN template to:
|
||||
- Fill Technical Context (mark unknowns as "NEEDS CLARIFICATION")
|
||||
@@ -85,7 +85,7 @@ You are the **Antigravity System Architect**. Your role is to bridge the gap bet
|
||||
- Output OpenAPI/GraphQL schema to `/contracts/`
|
||||
|
||||
3. **Agent context update**:
|
||||
- Run `../scripts/bash/update-agent-context.sh gemini`
|
||||
- Run `../scripts/bash/update-agent-context.sh windsurf`
|
||||
- These scripts detect which AI agent is in use
|
||||
- Update the appropriate agent-specific context file
|
||||
- Add only new technology from current plan
|
||||
@@ -97,3 +97,15 @@ You are the **Antigravity System Architect**. Your role is to bridge the gap bet
|
||||
|
||||
- Use absolute paths
|
||||
- ERROR on gate failures or unresolved clarifications
|
||||
|
||||
---
|
||||
|
||||
## LCBP3-DMS Context (MUST LOAD)
|
||||
|
||||
Before executing, load **[../_LCBP3-CONTEXT.md](../_LCBP3-CONTEXT.md)** to get:
|
||||
|
||||
- Canonical rule sources (AGENTS.md, specs/06-Decision-Records/, specs/05-Engineering-Guidelines/)
|
||||
- Tier 1 non-negotiables (ADR-019 UUID, ADR-009 schema, ADR-016 security, ADR-002 numbering, ADR-008 BullMQ, ADR-018/020 AI boundary, ADR-007 errors)
|
||||
- Domain glossary (Correspondence / RFA / Transmittal / Circulation)
|
||||
- Helper script real paths
|
||||
- Commit checklist
|
||||
@@ -3,7 +3,7 @@
|
||||
**Branch**: `[###-feature-name]` | **Date**: [DATE] | **Spec**: [link]
|
||||
**Input**: Feature specification from `/specs/[###-feature-name]/spec.md`
|
||||
|
||||
**Note**: This template is filled in by the `/speckit-plan` command. See `.specify/templates/commands/plan.md` for the execution workflow.
|
||||
**Note**: This template is filled in by the `/speckit-plan` command. See `.agents/skills/plan.md` for the execution workflow.
|
||||
|
||||
## Summary
|
||||
|
||||
|
||||
@@ -1,7 +1,7 @@
|
||||
---
|
||||
name: speckit-quizme
|
||||
description: Challenge the specification with Socratic questioning to identify logical gaps, unhandled edge cases, and robustness issues.
|
||||
version: 1.0.0
|
||||
version: 1.8.9
|
||||
handoffs:
|
||||
- label: Clarify Spec Requirements
|
||||
agent: speckit-clarify
|
||||
@@ -65,3 +65,15 @@ Execution steps:
|
||||
- **Be a Skeptic**: Don't assume the happy path works.
|
||||
- **Focus on "When" and "If"**: When high load, If network drops, When concurrent edits.
|
||||
- **Don't be annoying**: Focus on _critical_ flaws, not nitpicks.
|
||||
|
||||
---
|
||||
|
||||
## LCBP3-DMS Context (MUST LOAD)
|
||||
|
||||
Before executing, load **[../_LCBP3-CONTEXT.md](../_LCBP3-CONTEXT.md)** to get:
|
||||
|
||||
- Canonical rule sources (AGENTS.md, specs/06-Decision-Records/, specs/05-Engineering-Guidelines/)
|
||||
- Tier 1 non-negotiables (ADR-019 UUID, ADR-009 schema, ADR-016 security, ADR-002 numbering, ADR-008 BullMQ, ADR-018/020 AI boundary, ADR-007 errors)
|
||||
- Domain glossary (Correspondence / RFA / Transmittal / Circulation)
|
||||
- Helper script real paths
|
||||
- Commit checklist
|
||||
@@ -1,7 +1,7 @@
|
||||
---
|
||||
name: speckit-reviewer
|
||||
description: Perform code review with actionable feedback and suggestions.
|
||||
version: 1.0.0
|
||||
version: 1.8.9
|
||||
depends-on: []
|
||||
---
|
||||
|
||||
@@ -142,3 +142,15 @@ Review code changes and provide structured feedback with severity levels.
|
||||
- **Be Balanced**: Mention what's good, not just what's wrong
|
||||
- **Prioritize**: Focus on real issues, not style nitpicks
|
||||
- **Be Educational**: Explain WHY something is an issue
|
||||
|
||||
---
|
||||
|
||||
## LCBP3-DMS Context (MUST LOAD)
|
||||
|
||||
Before executing, load **[../_LCBP3-CONTEXT.md](../_LCBP3-CONTEXT.md)** to get:
|
||||
|
||||
- Canonical rule sources (AGENTS.md, specs/06-Decision-Records/, specs/05-Engineering-Guidelines/)
|
||||
- Tier 1 non-negotiables (ADR-019 UUID, ADR-009 schema, ADR-016 security, ADR-002 numbering, ADR-008 BullMQ, ADR-018/020 AI boundary, ADR-007 errors)
|
||||
- Domain glossary (Correspondence / RFA / Transmittal / Circulation)
|
||||
- Helper script real paths
|
||||
- Commit checklist
|
||||
@@ -1,7 +1,7 @@
|
||||
---
|
||||
name: speckit-security-audit
|
||||
description: Perform a security-focused audit of the codebase against OWASP Top 10, CASL authorization, and LCBP3-DMS security requirements.
|
||||
version: 1.0.0
|
||||
version: 1.8.9
|
||||
depends-on:
|
||||
- speckit-checker
|
||||
---
|
||||
@@ -12,16 +12,16 @@ You are the **Antigravity Security Sentinel**. Your mission is to identify secur
|
||||
|
||||
## Task
|
||||
|
||||
Perform a comprehensive security audit covering OWASP Top 10, CASL permission enforcement, file upload safety, and project-specific security rules defined in `specs/06-Decision-Records/ADR-016-security.md`.
|
||||
Perform a comprehensive security audit covering OWASP Top 10, CASL permission enforcement, file upload safety, and project-specific security rules defined in `specs/06-Decision-Records/ADR-016-security-authentication.md`.
|
||||
|
||||
## Context Loading
|
||||
|
||||
Before auditing, load the security context:
|
||||
|
||||
1. Read `specs/06-Decision-Records/ADR-016-security.md` for project security decisions
|
||||
1. Read `specs/06-Decision-Records/ADR-016-security-authentication.md` for project security decisions
|
||||
2. Read `specs/05-Engineering-Guidelines/05-02-backend-guidelines.md` for backend security patterns
|
||||
3. Read `specs/03-Data-and-Storage/lcbp3-v1.7.0-seed-permissions.sql` for CASL permission definitions
|
||||
4. Read `GEMINI.md` for security rules (Section: Security & Integrity Rules)
|
||||
3. Read `specs/03-Data-and-Storage/lcbp3-v1.8.0-seed-permissions.sql` for CASL permission definitions
|
||||
4. Read `AGENTS.md` for security rules (Section: Security Rules Non-Negotiable + Security & Integrity Audit Protocol)
|
||||
|
||||
## Execution Steps
|
||||
|
||||
@@ -44,7 +44,7 @@ Scan the `backend/src/` directory for each OWASP category:
|
||||
|
||||
### Phase 2: CASL Authorization Audit
|
||||
|
||||
1. **Load permission matrix** from `specs/03-Data-and-Storage/lcbp3-v1.7.0-seed-permissions.sql`
|
||||
1. **Load permission matrix** from `specs/03-Data-and-Storage/lcbp3-v1.8.0-seed-permissions.sql`
|
||||
2. **Scan all controllers** for `@UseGuards(CaslAbilityGuard)` coverage:
|
||||
|
||||
```bash
|
||||
@@ -197,3 +197,15 @@ Generate a structured report:
|
||||
- **No False Confidence**: If a check is inconclusive, mark it as "⚠️ Needs Manual Review" rather than passing.
|
||||
- **LCBP3-Specific**: Prioritize project-specific rules (idempotency, ClamAV, Redlock) over generic checks.
|
||||
- **Frontend Too**: If scope includes frontend, also check for XSS in React components, unescaped user data, and exposed API keys.
|
||||
|
||||
---
|
||||
|
||||
## LCBP3-DMS Context (MUST LOAD)
|
||||
|
||||
Before executing, load **[../_LCBP3-CONTEXT.md](../_LCBP3-CONTEXT.md)** to get:
|
||||
|
||||
- Canonical rule sources (AGENTS.md, specs/06-Decision-Records/, specs/05-Engineering-Guidelines/)
|
||||
- Tier 1 non-negotiables (ADR-019 UUID, ADR-009 schema, ADR-016 security, ADR-002 numbering, ADR-008 BullMQ, ADR-018/020 AI boundary, ADR-007 errors)
|
||||
- Domain glossary (Correspondence / RFA / Transmittal / Circulation)
|
||||
- Helper script real paths
|
||||
- Commit checklist
|
||||
@@ -1,7 +1,7 @@
|
||||
---
|
||||
name: speckit-specify
|
||||
description: Create or update the feature specification from a natural language feature description.
|
||||
version: 1.0.0
|
||||
version: 1.8.9
|
||||
handoffs:
|
||||
- label: Build Technical Plan
|
||||
agent: speckit-plan
|
||||
@@ -64,8 +64,8 @@ Given that feature description, do this:
|
||||
|
||||
d. Run the script `../scripts/bash/create-new-feature.sh --json "{{args}}"` with the calculated number and short-name:
|
||||
- Pass `--number N+1` and `--short-name "your-short-name"` along with the feature description
|
||||
- Bash example: `.specify/scripts/bash/create-new-feature.sh --json "{{args}}" --json --number 5 --short-name "user-auth" "Add user authentication"`
|
||||
- PowerShell example: `.specify/scripts/bash/create-new-feature.sh --json "{{args}}" -Json -Number 5 -ShortName "user-auth" "Add user authentication"`
|
||||
- Bash example: `.agents/scripts/bash/create-new-feature.sh --json "{{args}}" --number 5 --short-name "user-auth" "Add user authentication"`
|
||||
- PowerShell example: `.agents/scripts/powershell/create-new-feature.ps1 -Json -Args '{{args}}' -Number 5 -ShortName "user-auth" "Add user authentication"`
|
||||
|
||||
**IMPORTANT**:
|
||||
- Check all three sources (remote branches, local branches, specs directories) to find the highest number
|
||||
@@ -262,3 +262,15 @@ Success criteria must be:
|
||||
- "Database can handle 1000 TPS" (implementation detail, use user-facing metric)
|
||||
- "React components render efficiently" (framework-specific)
|
||||
- "Redis cache hit rate above 80%" (technology-specific)
|
||||
|
||||
---
|
||||
|
||||
## LCBP3-DMS Context (MUST LOAD)
|
||||
|
||||
Before executing, load **[../\_LCBP3-CONTEXT.md](../_LCBP3-CONTEXT.md)** to get:
|
||||
|
||||
- Canonical rule sources (AGENTS.md, specs/06-Decision-Records/, specs/05-Engineering-Guidelines/)
|
||||
- Tier 1 non-negotiables (ADR-019 UUID, ADR-009 schema, ADR-016 security, ADR-002 numbering, ADR-008 BullMQ, ADR-018/020 AI boundary, ADR-007 errors)
|
||||
- Domain glossary (Correspondence / RFA / Transmittal / Circulation)
|
||||
- Helper script real paths
|
||||
- Commit checklist
|
||||
|
||||
@@ -1,7 +1,7 @@
|
||||
---
|
||||
name: speckit-status
|
||||
description: Display a dashboard showing feature status, completion percentage, and blockers.
|
||||
version: 1.0.0
|
||||
version: 1.8.9
|
||||
depends-on: []
|
||||
---
|
||||
|
||||
@@ -109,3 +109,15 @@ Generate a dashboard view of all features and their completion status.
|
||||
- **Be Visual**: Use progress bars and tables
|
||||
- **Be Actionable**: Every status should have a "next action"
|
||||
- **Be Fast**: Cache nothing, always recalculate
|
||||
|
||||
---
|
||||
|
||||
## LCBP3-DMS Context (MUST LOAD)
|
||||
|
||||
Before executing, load **[../_LCBP3-CONTEXT.md](../_LCBP3-CONTEXT.md)** to get:
|
||||
|
||||
- Canonical rule sources (AGENTS.md, specs/06-Decision-Records/, specs/05-Engineering-Guidelines/)
|
||||
- Tier 1 non-negotiables (ADR-019 UUID, ADR-009 schema, ADR-016 security, ADR-002 numbering, ADR-008 BullMQ, ADR-018/020 AI boundary, ADR-007 errors)
|
||||
- Domain glossary (Correspondence / RFA / Transmittal / Circulation)
|
||||
- Helper script real paths
|
||||
- Commit checklist
|
||||
@@ -1,7 +1,7 @@
|
||||
---
|
||||
name: speckit-tasks
|
||||
description: Generate an actionable, dependency-ordered tasks.md for the feature based on available design artifacts.
|
||||
version: 1.0.0
|
||||
version: 1.8.9
|
||||
depends-on:
|
||||
- speckit-plan
|
||||
handoffs:
|
||||
@@ -145,3 +145,15 @@ Every task MUST strictly follow this format:
|
||||
- Within each story: Tests (if requested) → Models → Services → Endpoints → Integration
|
||||
- Each phase should be a complete, independently testable increment
|
||||
- **Final Phase**: Polish & Cross-Cutting Concerns
|
||||
|
||||
---
|
||||
|
||||
## LCBP3-DMS Context (MUST LOAD)
|
||||
|
||||
Before executing, load **[../_LCBP3-CONTEXT.md](../_LCBP3-CONTEXT.md)** to get:
|
||||
|
||||
- Canonical rule sources (AGENTS.md, specs/06-Decision-Records/, specs/05-Engineering-Guidelines/)
|
||||
- Tier 1 non-negotiables (ADR-019 UUID, ADR-009 schema, ADR-016 security, ADR-002 numbering, ADR-008 BullMQ, ADR-018/020 AI boundary, ADR-007 errors)
|
||||
- Domain glossary (Correspondence / RFA / Transmittal / Circulation)
|
||||
- Helper script real paths
|
||||
- Commit checklist
|
||||
@@ -1,7 +1,7 @@
|
||||
---
|
||||
name: speckit-taskstoissues
|
||||
description: Convert existing tasks into actionable, dependency-ordered issues for the feature based on available design artifacts.
|
||||
version: 1.1.0
|
||||
version: 1.8.9
|
||||
depends-on:
|
||||
- speckit-tasks
|
||||
tools: ['github/github-mcp-server/issue_write']
|
||||
@@ -204,3 +204,15 @@ Convert all tasks from `tasks.md` into well-structured issues on the appropriate
|
||||
- **Label Consistency**: Use a consistent label taxonomy across all issues
|
||||
- **Platform Safety**: Never create issues on repos that don't match the git remote
|
||||
- **Dry Run Support**: Always support `--dry-run` to preview before creating
|
||||
|
||||
---
|
||||
|
||||
## LCBP3-DMS Context (MUST LOAD)
|
||||
|
||||
Before executing, load **[../_LCBP3-CONTEXT.md](../_LCBP3-CONTEXT.md)** to get:
|
||||
|
||||
- Canonical rule sources (AGENTS.md, specs/06-Decision-Records/, specs/05-Engineering-Guidelines/)
|
||||
- Tier 1 non-negotiables (ADR-019 UUID, ADR-009 schema, ADR-016 security, ADR-002 numbering, ADR-008 BullMQ, ADR-018/020 AI boundary, ADR-007 errors)
|
||||
- Domain glossary (Correspondence / RFA / Transmittal / Circulation)
|
||||
- Helper script real paths
|
||||
- Commit checklist
|
||||
@@ -1,7 +1,7 @@
|
||||
---
|
||||
name: speckit-tester
|
||||
description: Execute tests, measure coverage, and report results.
|
||||
version: 1.0.0
|
||||
version: 1.8.9
|
||||
depends-on: []
|
||||
---
|
||||
|
||||
@@ -120,3 +120,15 @@ Detect the project's test framework, execute tests, and generate a comprehensive
|
||||
- **Preserve Output**: Keep full test output for debugging
|
||||
- **Be Helpful**: Suggest fixes for common failure patterns
|
||||
- **Respect Timeouts**: Set reasonable timeout (5 min default)
|
||||
|
||||
---
|
||||
|
||||
## LCBP3-DMS Context (MUST LOAD)
|
||||
|
||||
Before executing, load **[../_LCBP3-CONTEXT.md](../_LCBP3-CONTEXT.md)** to get:
|
||||
|
||||
- Canonical rule sources (AGENTS.md, specs/06-Decision-Records/, specs/05-Engineering-Guidelines/)
|
||||
- Tier 1 non-negotiables (ADR-019 UUID, ADR-009 schema, ADR-016 security, ADR-002 numbering, ADR-008 BullMQ, ADR-018/020 AI boundary, ADR-007 errors)
|
||||
- Domain glossary (Correspondence / RFA / Transmittal / Circulation)
|
||||
- Helper script real paths
|
||||
- Commit checklist
|
||||
@@ -1,7 +1,7 @@
|
||||
---
|
||||
name: speckit-validate
|
||||
description: Validate that implementation matches specification requirements.
|
||||
version: 1.0.0
|
||||
version: 1.8.9
|
||||
depends-on:
|
||||
- speckit-implement
|
||||
---
|
||||
@@ -92,3 +92,15 @@ Post-implementation validation that compares code against spec requirements.
|
||||
- **Be Fair**: Semantic matching, not just keyword matching
|
||||
- **Be Actionable**: Every gap should have a clear fix recommendation
|
||||
- **Don't Block on Style**: Focus on functional coverage, not code style
|
||||
|
||||
---
|
||||
|
||||
## LCBP3-DMS Context (MUST LOAD)
|
||||
|
||||
Before executing, load **[../_LCBP3-CONTEXT.md](../_LCBP3-CONTEXT.md)** to get:
|
||||
|
||||
- Canonical rule sources (AGENTS.md, specs/06-Decision-Records/, specs/05-Engineering-Guidelines/)
|
||||
- Tier 1 non-negotiables (ADR-019 UUID, ADR-009 schema, ADR-016 security, ADR-002 numbering, ADR-008 BullMQ, ADR-018/020 AI boundary, ADR-007 errors)
|
||||
- Domain glossary (Correspondence / RFA / Transmittal / Circulation)
|
||||
- Helper script real paths
|
||||
- Commit checklist
|
||||
@@ -2,7 +2,7 @@ name: CI / CD Pipeline
|
||||
|
||||
on:
|
||||
push:
|
||||
branches: [main, develop]
|
||||
branches: [ main, develop ]
|
||||
pull_request:
|
||||
workflow_dispatch:
|
||||
|
||||
@@ -11,10 +11,10 @@ jobs:
|
||||
# JOB 1 : CI & Quality Gate
|
||||
# ============================================================
|
||||
build:
|
||||
runs-on: ubuntu-latest
|
||||
runs-on: self-hosted
|
||||
timeout-minutes: 15
|
||||
steps:
|
||||
- name: 📥 Checkout
|
||||
- name: Checkout (HTTPS)
|
||||
uses: actions/checkout@v4
|
||||
|
||||
# ── [1] pnpm ต้องมาก่อน setup-node เสมอ ────────────────
|
||||
@@ -28,7 +28,7 @@ jobs:
|
||||
node-version: 20
|
||||
# ลบ cache: "pnpm" ออก — ใช้ volume mount บน runner แทน
|
||||
|
||||
# ── [2] ชี้ store ไปที่ volume ที่ mount ไว้ ─────────────
|
||||
# ── [2] ชี้ store ไปที่ volume ที่ mount ไว้ ─────────────
|
||||
- name: 🔧 Set pnpm store path
|
||||
run: pnpm config set store-dir /root/.local/share/pnpm
|
||||
|
||||
@@ -68,87 +68,34 @@ jobs:
|
||||
if: github.ref == 'refs/heads/main'
|
||||
runs-on: self-hosted
|
||||
steps:
|
||||
- name: � Checkout
|
||||
- name: " Checkout"
|
||||
uses: actions/checkout@v4
|
||||
|
||||
- name: 🔐 Setup SSH and Deploy to QNAP
|
||||
- name: "🚀 Deploy to QNAP"
|
||||
run: |
|
||||
# Ensure sshpass is available (install if needed)
|
||||
if ! command -v sshpass &> /dev/null; then
|
||||
apt-get update -qq && apt-get install -y -qq sshpass
|
||||
fi
|
||||
mkdir -p ~/.ssh
|
||||
echo "${{ secrets.SSH_KEY }}" > ~/.ssh/id_rsa
|
||||
chmod 600 ~/.ssh/id_rsa
|
||||
ssh-keyscan -p ${{ secrets.PORT }} ${{ secrets.HOST }} >> ~/.ssh/known_hosts 2>/dev/null
|
||||
|
||||
# Create remote deployment script
|
||||
REMOTE_SCRIPT=$(cat << 'SCRIPT_EOF'
|
||||
set -e
|
||||
export PATH="/share/CACHEDEV1_DATA/.qpkg/container-station/bin:/opt/bin:/usr/local/bin:/usr/bin:/bin:$PATH"
|
||||
ssh -o StrictHostKeyChecking=no \
|
||||
-o ConnectTimeout=30 \
|
||||
-o BatchMode=yes \
|
||||
-o ServerAliveInterval=30 \
|
||||
-o ServerAliveCountMax=10 \
|
||||
-i ~/.ssh/id_rsa \
|
||||
-p ${{ secrets.PORT }} ${{ secrets.USERNAME }}@${{ secrets.HOST }} bash << 'REMOTE_EOF'
|
||||
set -e
|
||||
export PATH="/share/CACHEDEV1_DATA/.qpkg/container-station/bin:/opt/bin:/usr/local/bin:/usr/bin:/bin:$PATH"
|
||||
|
||||
echo "=========================================="
|
||||
echo "Starting QNAP Deployment Process"
|
||||
echo "=========================================="
|
||||
cd /share/np-dms/app/source/lcbp3
|
||||
[ -d .git ] || { echo "✗ Git repo not found"; exit 1; }
|
||||
|
||||
# Verify Docker is accessible
|
||||
if ! docker version > /dev/null 2>&1; then
|
||||
echo "✗ Docker not accessible. Check Container Station."
|
||||
exit 1
|
||||
fi
|
||||
echo "✓ Docker accessible"
|
||||
git fetch origin main
|
||||
git reset --hard origin/main
|
||||
chmod +x scripts/deploy.sh scripts/rollback.sh 2>/dev/null || true
|
||||
mkdir -p /share/np-dms/app/logs
|
||||
|
||||
# Sync scripts first
|
||||
echo "📂 Syncing deployment scripts..."
|
||||
cd /share/np-dms/app/source/lcbp3
|
||||
|
||||
# Check if directory exists
|
||||
if [ ! -d ".git" ]; then
|
||||
echo "✗ Git repository not found at expected path"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
git fetch origin main
|
||||
git reset --hard origin/main
|
||||
echo "✓ Code synced"
|
||||
|
||||
# Ensure scripts are executable
|
||||
chmod +x scripts/deploy.sh scripts/rollback.sh 2>/dev/null || true
|
||||
|
||||
mkdir -p /share/np-dms/app/logs
|
||||
|
||||
# Note: Docker build cache is preserved for faster builds
|
||||
# Only prune cache manually when needed: docker builder prune -f
|
||||
|
||||
echo "🚀 Executing deployment..."
|
||||
./scripts/deploy.sh
|
||||
|
||||
echo "✓ Deployment completed successfully"
|
||||
SCRIPT_EOF
|
||||
)
|
||||
|
||||
# Retry logic for SSH connection
|
||||
max_attempts=3
|
||||
attempt=1
|
||||
|
||||
while [ $attempt -le $max_attempts ]; do
|
||||
echo "🚀 Deployment attempt $attempt/$max_attempts..."
|
||||
|
||||
if echo "$REMOTE_SCRIPT" | sshpass -p "${{ secrets.PASSWORD }}" ssh -o StrictHostKeyChecking=no \
|
||||
-o ConnectTimeout=60 \
|
||||
-o ServerAliveInterval=30 \
|
||||
-o ServerAliveCountMax=60 \
|
||||
-o TCPKeepAlive=yes \
|
||||
-p ${{ secrets.PORT }} ${{ secrets.USERNAME }}@${{ secrets.HOST }} 'bash -s'; then
|
||||
echo "✅ Deployment successful!"
|
||||
exit 0
|
||||
else
|
||||
echo "⚠️ Attempt $attempt failed"
|
||||
if [ $attempt -lt $max_attempts ]; then
|
||||
echo "⏳ Retrying in 10 seconds..."
|
||||
sleep 10
|
||||
fi
|
||||
fi
|
||||
|
||||
attempt=$((attempt + 1))
|
||||
done
|
||||
|
||||
echo "❌ All deployment attempts failed"
|
||||
exit 1
|
||||
./scripts/deploy.sh
|
||||
REMOTE_EOF
|
||||
timeout-minutes: 20
|
||||
|
||||
@@ -10,27 +10,27 @@ This meta-workflow orchestrates the **complete development lifecycle**, from spe
|
||||
## Preparation Phase (Steps 1-5)
|
||||
|
||||
1. **Specify** (`/speckit.specify`):
|
||||
- Use the `view_file` tool to read: `.agents/skills/speckit.specify/SKILL.md`
|
||||
- Use the `view_file` tool to read: `.agents/skills/speckit-specify/SKILL.md`
|
||||
- Execute with user's feature description
|
||||
- Creates: `spec.md`
|
||||
|
||||
2. **Clarify** (`/speckit.clarify`):
|
||||
- Use the `view_file` tool to read: `.agents/skills/speckit.clarify/SKILL.md`
|
||||
- Use the `view_file` tool to read: `.agents/skills/speckit-clarify/SKILL.md`
|
||||
- Execute to resolve ambiguities
|
||||
- Updates: `spec.md`
|
||||
|
||||
3. **Plan** (`/speckit.plan`):
|
||||
- Use the `view_file` tool to read: `.agents/skills/speckit.plan/SKILL.md`
|
||||
- Use the `view_file` tool to read: `.agents/skills/speckit-plan/SKILL.md`
|
||||
- Execute to create technical design
|
||||
- Creates: `plan.md`
|
||||
|
||||
4. **Tasks** (`/speckit.tasks`):
|
||||
- Use the `view_file` tool to read: `.agents/skills/speckit.tasks/SKILL.md`
|
||||
- Use the `view_file` tool to read: `.agents/skills/speckit-tasks/SKILL.md`
|
||||
- Execute to generate task breakdown
|
||||
- Creates: `tasks.md`
|
||||
|
||||
5. **Analyze** (`/speckit.analyze`):
|
||||
- Use the `view_file` tool to read: `.agents/skills/speckit.analyze/SKILL.md`
|
||||
- Use the `view_file` tool to read: `.agents/skills/speckit-analyze/SKILL.md`
|
||||
- Execute to validate consistency across spec, plan, and tasks
|
||||
- Output: Analysis report
|
||||
- **Gate**: If critical issues found, stop and fix before proceeding
|
||||
@@ -38,29 +38,29 @@ This meta-workflow orchestrates the **complete development lifecycle**, from spe
|
||||
## Implementation Phase (Steps 6-7)
|
||||
|
||||
6. **Implement** (`/speckit.implement`):
|
||||
- Use the `view_file` tool to read: `.agents/skills/speckit.implement/SKILL.md`
|
||||
- Use the `view_file` tool to read: `.agents/skills/speckit-implement/SKILL.md`
|
||||
- Execute all tasks from `tasks.md` with anti-regression protocols
|
||||
- Output: Working implementation
|
||||
|
||||
7. **Check** (`/speckit.checker`):
|
||||
- Use the `view_file` tool to read: `.agents/skills/speckit.checker/SKILL.md`
|
||||
- Use the `view_file` tool to read: `.agents/skills/speckit-checker/SKILL.md`
|
||||
- Run static analysis (linters, type checkers, security scanners)
|
||||
- Output: Checker report
|
||||
|
||||
## Verification Phase (Steps 8-10)
|
||||
|
||||
8. **Test** (`/speckit.tester`):
|
||||
- Use the `view_file` tool to read: `.agents/skills/speckit.tester/SKILL.md`
|
||||
- Use the `view_file` tool to read: `.agents/skills/speckit-tester/SKILL.md`
|
||||
- Run tests with coverage
|
||||
- Output: Test + coverage report
|
||||
|
||||
9. **Review** (`/speckit.reviewer`):
|
||||
- Use the `view_file` tool to read: `.agents/skills/speckit.reviewer/SKILL.md`
|
||||
- Use the `view_file` tool to read: `.agents/skills/speckit-reviewer/SKILL.md`
|
||||
- Perform code review
|
||||
- Output: Review report with findings
|
||||
|
||||
10. **Validate** (`/speckit.validate`):
|
||||
- Use the `view_file` tool to read: `.agents/skills/speckit.validate/SKILL.md`
|
||||
- Use the `view_file` tool to read: `.agents/skills/speckit-validate/SKILL.md`
|
||||
- Verify implementation matches spec requirements
|
||||
- Output: Validation report (pass/fail)
|
||||
|
||||
|
||||
@@ -9,7 +9,7 @@ description: Create or update the project constitution from interactive or provi
|
||||
- The user has provided an input prompt. Treat this as the primary input for the skill.
|
||||
|
||||
2. **Load Skill**:
|
||||
- Use the `view_file` tool to read the skill file at: `.agents/skills/speckit.constitution/SKILL.md`
|
||||
- Use the `view_file` tool to read the skill file at: `.agents/skills/speckit-constitution/SKILL.md`
|
||||
|
||||
3. **Execute**:
|
||||
- Follow the instructions in the `SKILL.md` exactly.
|
||||
|
||||
@@ -10,7 +10,7 @@ description: Create or update the feature specification from a natural language
|
||||
- This is typically the starting point of a new feature.
|
||||
|
||||
2. **Load Skill**:
|
||||
- Use the `view_file` tool to read the skill file at: `.agents/skills/speckit.specify/SKILL.md`
|
||||
- Use the `view_file` tool to read the skill file at: `.agents/skills/speckit-specify/SKILL.md`
|
||||
|
||||
3. **Execute**:
|
||||
- Follow the instructions in the `SKILL.md` exactly.
|
||||
|
||||
@@ -9,7 +9,7 @@ description: Identify underspecified areas in the current feature spec by asking
|
||||
- The user has provided an input prompt. Treat this as the primary input for the skill.
|
||||
|
||||
2. **Load Skill**:
|
||||
- Use the `view_file` tool to read the skill file at: `.agents/skills/speckit.clarify/SKILL.md`
|
||||
- Use the `view_file` tool to read the skill file at: `.agents/skills/speckit-clarify/SKILL.md`
|
||||
|
||||
3. **Execute**:
|
||||
- Follow the instructions in the `SKILL.md` exactly.
|
||||
|
||||
@@ -9,7 +9,7 @@ description: Execute the implementation planning workflow using the plan templat
|
||||
- The user has provided an input prompt. Treat this as the primary input for the skill.
|
||||
|
||||
2. **Load Skill**:
|
||||
- Use the `view_file` tool to read the skill file at: `.agents/skills/speckit.plan/SKILL.md`
|
||||
- Use the `view_file` tool to read the skill file at: `.agents/skills/speckit-plan/SKILL.md`
|
||||
|
||||
3. **Execute**:
|
||||
- Follow the instructions in the `SKILL.md` exactly.
|
||||
|
||||
@@ -9,7 +9,7 @@ description: Generate an actionable, dependency-ordered tasks.md for the feature
|
||||
- The user has provided an input prompt. Treat this as the primary input for the skill.
|
||||
|
||||
2. **Load Skill**:
|
||||
- Use the `view_file` tool to read the skill file at: `.agents/skills/speckit.tasks/SKILL.md`
|
||||
- Use the `view_file` tool to read the skill file at: `.agents/skills/speckit-tasks/SKILL.md`
|
||||
|
||||
3. **Execute**:
|
||||
- Follow the instructions in the `SKILL.md` exactly.
|
||||
|
||||
@@ -11,7 +11,7 @@ description: Perform a non-destructive cross-artifact consistency and quality an
|
||||
- The user has provided an input prompt. Treat this as the primary input for the skill.
|
||||
|
||||
2. **Load Skill**:
|
||||
- Use the `view_file` tool to read the skill file at: `.agents/skills/speckit.analyze/SKILL.md`
|
||||
- Use the `view_file` tool to read the skill file at: `.agents/skills/speckit-analyze/SKILL.md`
|
||||
|
||||
3. **Execute**:
|
||||
- Follow the instructions in the `SKILL.md` exactly.
|
||||
|
||||
@@ -9,7 +9,7 @@ description: Execute the implementation plan by processing and executing all tas
|
||||
- The user has provided an input prompt. Treat this as the primary input for the skill.
|
||||
|
||||
2. **Load Skill**:
|
||||
- Use the `view_file` tool to read the skill file at: `.agents/skills/speckit.implement/SKILL.md`
|
||||
- Use the `view_file` tool to read the skill file at: `.agents/skills/speckit-implement/SKILL.md`
|
||||
|
||||
3. **Execute**:
|
||||
- Follow the instructions in the `SKILL.md` exactly.
|
||||
|
||||
@@ -11,7 +11,7 @@ description: Run static analysis tools and aggregate results.
|
||||
- The user may specify paths to check or run on entire project.
|
||||
|
||||
2. **Load Skill**:
|
||||
- Use the `view_file` tool to read the skill file at: `.agents/skills/speckit.checker/SKILL.md`
|
||||
- Use the `view_file` tool to read the skill file at: `.agents/skills/speckit-checker/SKILL.md`
|
||||
|
||||
3. **Execute**:
|
||||
- Follow the instructions in the `SKILL.md` exactly.
|
||||
|
||||
@@ -11,7 +11,7 @@ description: Execute tests, measure coverage, and report results.
|
||||
- The user may specify test paths, options, or just run all tests.
|
||||
|
||||
2. **Load Skill**:
|
||||
- Use the `view_file` tool to read the skill file at: `.agents/skills/speckit.tester/SKILL.md`
|
||||
- Use the `view_file` tool to read the skill file at: `.agents/skills/speckit-tester/SKILL.md`
|
||||
|
||||
3. **Execute**:
|
||||
- Follow the instructions in the `SKILL.md` exactly.
|
||||
|
||||
@@ -9,7 +9,7 @@ description: Perform code review with actionable feedback and suggestions.
|
||||
- The user may specify files to review, "staged" for git staged changes, or "branch" for branch diff.
|
||||
|
||||
2. **Load Skill**:
|
||||
- Use the `view_file` tool to read the skill file at: `.agents/skills/speckit.reviewer/SKILL.md`
|
||||
- Use the `view_file` tool to read the skill file at: `.agents/skills/speckit-reviewer/SKILL.md`
|
||||
|
||||
3. **Execute**:
|
||||
- Follow the instructions in the `SKILL.md` exactly.
|
||||
|
||||
@@ -9,7 +9,7 @@ description: Validate that implementation matches specification requirements.
|
||||
- The user has provided an input prompt. Treat this as the primary input for the skill.
|
||||
|
||||
2. **Load Skill**:
|
||||
- Use the `view_file` tool to read the skill file at: `.agents/skills/speckit.validate/SKILL.md`
|
||||
- Use the `view_file` tool to read the skill file at: `.agents/skills/speckit-validate/SKILL.md`
|
||||
|
||||
3. **Execute**:
|
||||
- Follow the instructions in the `SKILL.md` exactly.
|
||||
|
||||
@@ -0,0 +1,22 @@
|
||||
---
|
||||
auto_execution_mode: 0
|
||||
description: Perform a security-focused audit of the codebase against OWASP Top 10, CASL authorization, and LCBP3-DMS security requirements.
|
||||
---
|
||||
|
||||
# Workflow: speckit.security-audit
|
||||
|
||||
1. **Context Analysis**:
|
||||
- The user may pass a scope hint: `backend`, `frontend`, `both`, or specific module paths (defaults to `both`).
|
||||
|
||||
2. **Load Skill**:
|
||||
- Use the `view_file` tool to read the skill file at: `.agents/skills/speckit-security-audit/SKILL.md`
|
||||
- Also load `.agents/skills/_LCBP3-CONTEXT.md` for project-specific rules.
|
||||
|
||||
3. **Execute**:
|
||||
- Follow the instructions in the `SKILL.md` exactly.
|
||||
- This is READ-ONLY — never modify code during the audit.
|
||||
- Output a structured report with Critical / High / Medium / Low severity.
|
||||
|
||||
4. **On Error**:
|
||||
- If scope unclear: Default to `both` (backend + frontend)
|
||||
- If `specs/06-Decision-Records/ADR-016-security-authentication.md` missing: Warn and proceed with OWASP Top 10 + CASL checks only
|
||||
@@ -9,20 +9,20 @@ This workflow orchestrates the sequential execution of the Speckit preparation p
|
||||
|
||||
1. **Step 1: Specify (Skill 02)**
|
||||
- Goal: Create or update the `spec.md` based on user input.
|
||||
- Action: Read and execute `.agents/skills/speckit.specify/SKILL.md`.
|
||||
- Action: Read and execute `.agents/skills/speckit-specify/SKILL.md`.
|
||||
|
||||
2. **Step 2: Clarify (Skill 03)**
|
||||
- Goal: Refine the `spec.md` by identifying and resolving ambiguities.
|
||||
- Action: Read and execute `.agents/skills/speckit.clarify/SKILL.md`.
|
||||
- Action: Read and execute `.agents/skills/speckit-clarify/SKILL.md`.
|
||||
|
||||
3. **Step 3: Plan (Skill 04)**
|
||||
- Goal: Generate `plan.md` from the finalized spec.
|
||||
- Action: Read and execute `.agents/skills/speckit.plan/SKILL.md`.
|
||||
- Action: Read and execute `.agents/skills/speckit-plan/SKILL.md`.
|
||||
|
||||
4. **Step 4: Tasks (Skill 05)**
|
||||
- Goal: Generate actionable `tasks.md` from the plan.
|
||||
- Action: Read and execute `.agents/skills/speckit.tasks/SKILL.md`.
|
||||
- Action: Read and execute `.agents/skills/speckit-tasks/SKILL.md`.
|
||||
|
||||
5. **Step 5: Analyze (Skill 06)**
|
||||
- Goal: Validate consistency across all design artifacts (spec, plan, tasks).
|
||||
- Action: Read and execute `.agents/skills/speckit.analyze/SKILL.md`.
|
||||
- Action: Read and execute `.agents/skills/speckit-analyze/SKILL.md`.
|
||||
|
||||
@@ -9,7 +9,7 @@ description: Generate a custom checklist for the current feature based on user r
|
||||
- The user has provided an input prompt. Treat this as the primary input for the skill.
|
||||
|
||||
2. **Load Skill**:
|
||||
- Use the `view_file` tool to read the skill file at: `.agents/skills/speckit.checklist/SKILL.md`
|
||||
- Use the `view_file` tool to read the skill file at: `.agents/skills/speckit-checklist/SKILL.md`
|
||||
|
||||
3. **Execute**:
|
||||
- Follow the instructions in the `SKILL.md` exactly.
|
||||
|
||||
@@ -9,7 +9,7 @@ description: Compare two versions of a spec or plan to highlight changes.
|
||||
- The user has provided an input prompt (optional file paths or version references).
|
||||
|
||||
2. **Load Skill**:
|
||||
- Use the `view_file` tool to read the skill file at: `.agents/skills/speckit.diff/SKILL.md`
|
||||
- Use the `view_file` tool to read the skill file at: `.agents/skills/speckit-diff/SKILL.md`
|
||||
|
||||
3. **Execute**:
|
||||
- Follow the instructions in the `SKILL.md` exactly.
|
||||
|
||||
@@ -9,7 +9,7 @@ description: Migrate existing projects into the speckit structure by generating
|
||||
- The user has provided an input prompt (path to analyze, feature name).
|
||||
|
||||
2. **Load Skill**:
|
||||
- Use the `view_file` tool to read the skill file at: `.agents/skills/speckit.migrate/SKILL.md`
|
||||
- Use the `view_file` tool to read the skill file at: `.agents/skills/speckit-migrate/SKILL.md`
|
||||
|
||||
3. **Execute**:
|
||||
- Follow the instructions in the `SKILL.md` exactly.
|
||||
|
||||
@@ -11,7 +11,7 @@ description: Challenge the specification with Socratic questioning to identify l
|
||||
- The user has provided an input prompt. Treat this as the primary input for the skill.
|
||||
|
||||
2. **Load Skill**:
|
||||
- Use the `view_file` tool to read the skill file at: `.agents/skills/speckit.quizme/SKILL.md`
|
||||
- Use the `view_file` tool to read the skill file at: `.agents/skills/speckit-quizme/SKILL.md`
|
||||
|
||||
3. **Execute**:
|
||||
- Follow the instructions in the `SKILL.md` exactly.
|
||||
|
||||
@@ -11,7 +11,7 @@ description: Display a dashboard showing feature status, completion percentage,
|
||||
- The user may optionally specify a feature to focus on.
|
||||
|
||||
2. **Load Skill**:
|
||||
- Use the `view_file` tool to read the skill file at: `.agents/skills/speckit.status/SKILL.md`
|
||||
- Use the `view_file` tool to read the skill file at: `.agents/skills/speckit-status/SKILL.md`
|
||||
|
||||
3. **Execute**:
|
||||
- Follow the instructions in the `SKILL.md` exactly.
|
||||
|
||||
@@ -0,0 +1,23 @@
|
||||
---
|
||||
auto_execution_mode: 0
|
||||
description: Convert existing tasks into actionable, dependency-ordered issues on Gitea for the current feature.
|
||||
---
|
||||
|
||||
# Workflow: speckit.taskstoissues
|
||||
|
||||
1. **Context Analysis**:
|
||||
- The user may pass filters (e.g., phase, priority). Default: convert all pending tasks.
|
||||
|
||||
2. **Load Skill**:
|
||||
- Use the `view_file` tool to read the skill file at: `.agents/skills/speckit-taskstoissues/SKILL.md`
|
||||
- Also load `.agents/skills/_LCBP3-CONTEXT.md` for project conventions (labels, commit format).
|
||||
|
||||
3. **Execute**:
|
||||
- Follow the instructions in the `SKILL.md` exactly.
|
||||
- Use Gitea API (not GitHub) — target `git.np-dms.work/np-dms/lcbp3`.
|
||||
- Apply LCBP3 labels: `spec`, `adr`, `security`, `ux`, `backend`, `frontend`, `schema`, etc.
|
||||
- Use commit-format-compatible issue titles (per `specs/05-Engineering-Guidelines/05-05-git-conventions.md`).
|
||||
|
||||
4. **On Error**:
|
||||
- If `tasks.md` missing: Run `/05-speckit.tasks` first
|
||||
- If Gitea credentials missing: Report to user and provide manual issue-creation template
|
||||
@@ -1,8 +1,9 @@
|
||||
# NAP-DMS Project Context & Rules
|
||||
|
||||
- For: Windsurf Cascade (and compatible: Codex CLI, opencode, Amp, Antigravity, AGENTS.md tools)
|
||||
- Version: 1.8.7 | Last synced from repo: 2026-04-14
|
||||
- Version: 1.8.9 | Last synced from repo: 2026-04-22
|
||||
- Repo: [https://git.np-dms.work/np-dms/lcbp3](https://git.np-dms.work/np-dms/lcbp3)
|
||||
- Skill pack: `.agents/skills/` (v1.8.9, 20 skills) — see [`skills/README.md`](./.agents/skills/README.md) + [`skills/_LCBP3-CONTEXT.md`](./.agents/skills/_LCBP3-CONTEXT.md)
|
||||
|
||||
---
|
||||
|
||||
@@ -380,26 +381,30 @@ This file is a **quick reference**. For detailed information:
|
||||
|
||||
- **Architecture:** `specs/02-architecture/`
|
||||
- **Requirements:** `specs/01-requirements/`
|
||||
- **Data & Storage:** `specs/03-Data-and-Storage/`
|
||||
- **Data & Storage:** `specs/03-Data-and-Storage/` (canonical schema + `deltas/` incremental SQL per ADR-009)
|
||||
- **Engineering Guidelines:** `specs/05-Engineering-Guidelines/`
|
||||
- **Decision Records:** `specs/06-Decision-Records/`
|
||||
- **Infrastructure:** `specs/04-Infrastructure-OPS/`
|
||||
- **Agent Skill Pack:** `.agents/skills/` (NestJS/Next.js rules + 18 Speckit workflow skills)
|
||||
- **Helper Scripts:** `.agents/scripts/{bash,powershell}/` (audit, validate, prerequisites, setup-plan)
|
||||
|
||||
---
|
||||
|
||||
## 🔄 Change Log
|
||||
|
||||
| Version | Date | Changes | Updated By |
|
||||
| ------- | ---------- | ------------------------------------------------------------------------------------------------------------------------------------------- | -------------- |
|
||||
| 1.8.7 | 2026-04-14 | + ADR-021 Workflow Context integration, + ADR-021 Integration Work tier, + Transmittal/Circulation context triggers, updated ADR-020 status | Windsurf AI |
|
||||
| 1.8.6 | 2026-04-10 | + DMS Workflow Engine Protocol, + Security & Integrity Audit Protocol, + 2 Context-Aware Triggers, ADR Status column, Forbidden Why column | Human Dev |
|
||||
| 1.8.5 | 2026-04-04 | Added ADR-007 error handling, ADR-020 AI integration, updated security rules | Windsurf AI |
|
||||
| 1.8.4 | 2026-03-24 | Phase 5.4→✅ DONE, Tailwind 3.4.3, ADR count(16), MariaDB UUID note | Windsurf AI |
|
||||
| 1.8.3 | 2026-03-21 | + Rule Enforcement Tiers (🔴🟡🟢), + Tiered Development Flow | Human Dev + AI |
|
||||
| 1.8.2 | 2026-03-21 | + Context Triggers, + Code Snippets, + Error Handling, + i18n | Human Dev + AI |
|
||||
| 1.8.1 | 2026-03-21 | + ADR-019 UUID patterns, + Phase 5.4 pending files | Claude Sonnet |
|
||||
| 1.8.0 | 2026-03-19 | + Security overrides, + UAT criteria reference | Human Dev |
|
||||
| 1.7.2 | 2026-03-15 | + AI Boundary rules (ADR-018) | Gemini Pro |
|
||||
| Version | Date | Changes | Updated By |
|
||||
| ------- | ---------- | ------------------------------------------------------------------------------------------------------------------------------------------------- | -------------- |
|
||||
| 1.8.9 | 2026-04-22 | `.agents/skills/` LCBP3-native rebuild (20 skills @ v1.8.9) + `_LCBP3-CONTEXT.md` appendix + `specs/03-Data-and-Storage/deltas/` + AGENTS.md sync | Windsurf AI |
|
||||
| 1.8.8 | 2026-04-14 | Workflow attachments (ADR-021) + step-attachment envelope fields | Windsurf AI |
|
||||
| 1.8.7 | 2026-04-14 | + ADR-021 Workflow Context integration, + ADR-021 Integration Work tier, + Transmittal/Circulation context triggers, updated ADR-020 status | Windsurf AI |
|
||||
| 1.8.6 | 2026-04-10 | + DMS Workflow Engine Protocol, + Security & Integrity Audit Protocol, + 2 Context-Aware Triggers, ADR Status column, Forbidden Why column | Human Dev |
|
||||
| 1.8.5 | 2026-04-04 | Added ADR-007 error handling, ADR-020 AI integration, updated security rules | Windsurf AI |
|
||||
| 1.8.4 | 2026-03-24 | Phase 5.4→✅ DONE, Tailwind 3.4.3, ADR count(16), MariaDB UUID note | Windsurf AI |
|
||||
| 1.8.3 | 2026-03-21 | + Rule Enforcement Tiers (🔴🟡🟢), + Tiered Development Flow | Human Dev + AI |
|
||||
| 1.8.2 | 2026-03-21 | + Context Triggers, + Code Snippets, + Error Handling, + i18n | Human Dev + AI |
|
||||
| 1.8.1 | 2026-03-21 | + ADR-019 UUID patterns, + Phase 5.4 pending files | Claude Sonnet |
|
||||
| 1.8.0 | 2026-03-19 | + Security overrides, + UAT criteria reference | Human Dev |
|
||||
| 1.7.2 | 2026-03-15 | + AI Boundary rules (ADR-018) | Gemini Pro |
|
||||
|
||||
---
|
||||
|
||||
|
||||
@@ -1,5 +1,91 @@
|
||||
# Version History
|
||||
|
||||
## 1.8.9 (2026-04-18)
|
||||
|
||||
### chore(infra): Docker Compose security hardening — 27 findings (C1–S4) addressed
|
||||
|
||||
#### Summary
|
||||
|
||||
Full security audit and hardening of the production Docker Compose stacks on QNAP and ASUSTOR. 27 findings resolved across 4 phases (Critical / High / Medium / Low + Suggestions), 11 compose files modified, 12 new files created, **zero secrets remain committed**. See `specs/04-Infrastructure-OPS/04-00-docker-compose/SECURITY-MIGRATION-v1.8.6.md` for the complete runbook.
|
||||
|
||||
#### **Phase 1 — Critical (C1–C6) + H6**
|
||||
|
||||
- **C1**: Extracted all secrets from `.env.template` and inline `environment:` blocks → `env_file: .env` + `${VAR:?...}` substitution with `CHANGE_ME_*` placeholders
|
||||
- **C2**: Split `JWT_SECRET` (backend-only) from `AUTH_SECRET` (Next.js NextAuth) — no more identical values
|
||||
- **C3**: Redis enforced `--requirepass $REDIS_PASSWORD` on the server (not just client env)
|
||||
- **C4**: Elasticsearch bound to internal `lcbp3` network only, removed LAN `ports:` exposure
|
||||
- **C5**: MariaDB root and app user split; host loopback bind; `MARIADB_RANDOM_ROOT_PASSWORD` fallback documented
|
||||
- **C6**: ClamAV service added upstream of backend file uploads (ADR-016)
|
||||
- **H6**: Renamed deprecated `QNAP/service/docker-compse.yml` → `docker-compose.yml`
|
||||
|
||||
#### **Phase 2 — High (H1–H5, H7)**
|
||||
|
||||
- **H1**: Backend-only env verified (no `JWT_REFRESH_SECRET` leakage to frontend)
|
||||
- **H2**: n8n + n8n-db secrets moved to `${N8N_DB_PASSWORD}` / `${N8N_ENCRYPTION_KEY}`
|
||||
- **H3**: Removed `/var/run/docker.sock` mount on n8n; added `tecnativa/docker-socket-proxy` (read-only `CONTAINERS/IMAGES/INFO/VERSION` only); n8n uses `DOCKER_HOST=tcp://docker-socket-proxy:2375`
|
||||
- **H4**: ASUSTOR cAdvisor port mapping corrected to `8088:8080`
|
||||
- **H5**: QNAP exporters use `expose:` only (no host ports); resource limits + healthchecks applied
|
||||
- **H7**: All `:latest` tags pinned to verified semver: `gitea:1.22.3-rootless`, `n8n:1.66.0`, `tika:2.9.2.1-full`, `postgres:16.4-alpine`, `mongo:7.0.14`, `rocket.chat:6.10.5`, `nginx-proxy-manager:2.11.3`, `registry-ui:2.5.7`, `act_runner:0.2.11`, `node-exporter:v1.8.2`, `cadvisor:v0.49.1`; app images templated `${BACKEND_IMAGE_TAG:-latest}` / `${FRONTEND_IMAGE_TAG:-latest}` for CI
|
||||
|
||||
#### **Phase 3 — Medium (M1–M9)**
|
||||
|
||||
- **M1**: Removed obsolete `version:` keys from remaining compose files
|
||||
- **M2**: Healthchecks added to `mongodb` (authed mongosh ping), `rocketchat` (`/api/info`), `tika` (`/tika`), `landing`, `registry-ui`, `npm`, `gitea`, `docker-socket-proxy`
|
||||
- **M3**: Resource `reservations` + `limits` filled in on all services
|
||||
- **M4**: Backend / Frontend / ClamAV hardened — `security_opt: [no-new-privileges:true]`, `cap_drop: [ALL]`, `read_only: true` + `tmpfs`, non-root `user:` (`node` / `nextjs`)
|
||||
- **M5**: Elasticsearch `ulimits.memlock: -1` verified (Phase 1)
|
||||
- **M6**: Docker Registry enforces `REGISTRY_AUTH=htpasswd` with mounted `/auth/htpasswd`
|
||||
- **M7**: phpMyAdmin host port `89:80` removed → `expose: 80` only (access via NPM)
|
||||
- **M8**: MongoDB runs with `--auth --keyFile=/etc/mongo/keyfile`; `mongo-init-replica` creates root + limited `rocketchat` user; RocketChat uses authenticated `MONGO_URL` / `MONGO_OPLOG_URL`
|
||||
- **M9**: `x-restart` / `x-logging` anchors applied uniformly
|
||||
|
||||
#### **Phase 4 — Low + Suggestions (L1–L5 + S1–S4)**
|
||||
|
||||
- **L1**: Removed `stdin_open: true` + `tty: true` from all production services
|
||||
- **L2**: Filename strategy documented; existing `docker-compose-*.yml` names kept to not break ops scripts
|
||||
- **L3**: Stale `v1_7_0` / `v1_8_0` version markers bumped to `v1.8.6` (stack-internal)
|
||||
- **L4**: Trimmed ~50 lines of legacy ACL/ops comments from `npm` and `gitea` compose files
|
||||
- **L5**: Documented promtail `user: '0:0'` requirement (reads `/var/lib/docker/containers` read-only)
|
||||
- **S1**: Secret-manager roadmap added (Docker Swarm secrets → Infisical/Vault → SOPS)
|
||||
- **S2**: Created `x-base.yml` with shared YAML anchors for Compose V2.20+ `include:`
|
||||
- **S3**: Per-stack `.env.example` created for 9 stacks (app, service, mariadb, npm, n8n, gitea, rocketchat, ASUSTOR monitoring, ASUSTOR registry)
|
||||
- **S4**: ClamAV scan service already delivered in C6 ✓
|
||||
|
||||
#### **New Documentation**
|
||||
|
||||
- `specs/04-Infrastructure-OPS/04-00-docker-compose/README.md` — stack overview + secret roadmap
|
||||
- `specs/04-Infrastructure-OPS/04-00-docker-compose/SECURITY-MIGRATION-v1.8.6.md` — full migration runbook (Phase 1–4 verification checklists, MongoDB keyfile + Registry htpasswd ops steps, breaking-change notices)
|
||||
- `specs/04-Infrastructure-OPS/04-00-docker-compose/x-base.yml` — shared anchors
|
||||
|
||||
#### **Ops Actions Required (Post-Merge)**
|
||||
|
||||
1. **Rotate** every secret that ever appeared in git history (JWT, DB, Redis, Grafana, n8n, Mongo, Registry)
|
||||
2. Populate per-stack `.env` files on QNAP/ASUSTOR from the new `.env.example` + root `.env.template`
|
||||
3. Generate MongoDB keyfile: `openssl rand -base64 756 > /share/np-dms/rocketchat/mongo-keyfile && chmod 400 && chown 999:999`
|
||||
4. Generate Registry htpasswd: `docker run --rm --entrypoint htpasswd httpd:2 -Bbn $USER $PASS > /volume1/np-dms/registry/auth/htpasswd`
|
||||
5. `ALTER USER 'n8n'@'%' IDENTIFIED BY '<new>';` in MariaDB before recreating n8n-db container
|
||||
6. Update CI pipelines to pass `BACKEND_IMAGE_TAG=$GITHUB_SHA` / `FRONTEND_IMAGE_TAG=$GITHUB_SHA`
|
||||
7. Verify backend/frontend work under `read_only: true` (tmpfs covers `/tmp`, `/app/.next/cache`)
|
||||
|
||||
#### **Breaking Changes**
|
||||
|
||||
- **MongoDB**: requires keyfile + data migration (`mongodump` → wipe → `mongorestore` with new auth) before restart
|
||||
- **Frontend `read_only`**: Next.js image must not write outside `/tmp` or `/app/.next/cache`
|
||||
- **Backend `user: node`**: image must have `node` user with write access to `/app/logs`
|
||||
- **Registry auth**: existing CI runners need new credentials; pushes fail with 401 otherwise
|
||||
- **phpMyAdmin**: direct-port `:89` users must switch to `https://pma.np-dms.work` via NPM
|
||||
|
||||
#### **Files Modified**
|
||||
|
||||
`QNAP/app/docker-compose-app.yml`, `QNAP/mariadb/docker-compose-lcbp3-db.yml`, `QNAP/service/docker-compose.yml`, `QNAP/npm/docker-compose.yml`, `QNAP/gitea/docker-compose.yml`, `QNAP/n8n/docker-compose.yml`, `QNAP/rocketchat/docker-compose.yml`, `QNAP/monitoring/docker-compose.yml`, `ASUSTOR/registry/docker-compose.yml`, `ASUSTOR/gitea-runner/docker-compose.yml`, `ASUSTOR/monitoring/docker-compose.yml`
|
||||
|
||||
#### **Root/Docs Updates**
|
||||
|
||||
- `README.md` — version badge 1.8.9, added "Infrastructure" row + Roadmap entry
|
||||
- `CONTRIBUTING.md` — version history table + compose folder entry
|
||||
- `specs/README.md` — version bump, added Infra Hardening to Critical Files table
|
||||
- `specs/04-Infrastructure-OPS/README.md` — refreshed with hardened stack layout + new Guiding Principles (§5 Secret Hygiene, §6 Container Hardening)
|
||||
|
||||
## 1.8.8 (2026-04-14)
|
||||
|
||||
### feat(workflow): ADR-021 Integrated Workflow Context & Step-specific Attachments
|
||||
|
||||
+57
-7
@@ -58,10 +58,14 @@ specs/
|
||||
│ ├── lcbp3-v1.8.0-seed-basic.sql # Master Data Seed
|
||||
│ ├── lcbp3-v1.8.0-seed-permissions.sql # RBAC Permissions Seed
|
||||
│ ├── 03-01-data-dictionary.md
|
||||
│ └── 03-06-migration-business-scope.md # Gap 7: Migration Scope [★ NEW]
|
||||
│ ├── 03-06-migration-business-scope.md # Gap 7: Migration Scope [★ NEW]
|
||||
│ └── deltas/ # Incremental SQL (ADR-009) [★ v1.8.9]
|
||||
│
|
||||
├── 04-Infrastructure-OPS/ # Deployment & Operations (8 docs)
|
||||
├── 04-Infrastructure-OPS/ # Deployment & Operations (9 docs)
|
||||
│ ├── README.md
|
||||
│ ├── 04-00-docker-compose/ # 🔒 Live compose stacks [★ v1.8.9 hardened]
|
||||
│ │ ├── SECURITY-MIGRATION-v1.8.6.md # 27-finding hardening runbook
|
||||
│ │ └── README.md # Stack overview + secret roadmap
|
||||
│ ├── 04-01-docker-compose.md
|
||||
│ ├── 04-03-monitoring.md
|
||||
│ ├── 04-04-deployment-guide.md
|
||||
@@ -550,14 +554,16 @@ graph LR
|
||||
| ------- | ---------- | ---------- | ----------------------------------------------------------------- |
|
||||
| 1.0.0 | 2025-01-15 | John Doe | Initial version |
|
||||
| 1.1.0 | 2025-02-20 | Jane Smith | Add CC support |
|
||||
| 1.8.7 | 2026-04-14 | Tech Lead | ADR-021 integration complete (22 ADRs), workflow context features |
|
||||
| 1.8.5 | 2026-04-10 | Tech Lead | ADR registry complete (21 ADRs), spec documentation updates |
|
||||
| 1.8.1 | 2026-03-21 | Tech Lead | Security hardening, numbering fixes, dependency updates |
|
||||
| 1.8.5 | 2026-04-10 | Tech Lead | ADR registry complete (21 ADRs), spec documentation updates |
|
||||
| 1.8.7 | 2026-04-14 | Tech Lead | ADR-021 integration complete (22 ADRs), workflow context features |
|
||||
| 1.8.8 | 2026-04-14 | Tech Lead | Step-specific attachments, IntegratedBanner, WorkflowLifecycle |
|
||||
| 1.8.9 | 2026-04-18 | Tech Lead | Docker Compose hardening — 27 findings (C1–S4) addressed |
|
||||
|
||||
**Current Version**: 1.8.7
|
||||
**Current Version**: 1.8.9
|
||||
**Status**: Approved
|
||||
**Last Updated**: 2026-04-14
|
||||
**Security**: 0 vulnerabilities (backend)
|
||||
**Last Updated**: 2026-04-18
|
||||
**Security**: 0 vulnerabilities (backend) + Compose stack hardened (27 findings → 0)
|
||||
**Workflow Engine**: ADR-021 Integrated Context complete
|
||||
```
|
||||
|
||||
@@ -708,6 +714,50 @@ Create `.markdownlint.json`:
|
||||
|
||||
---
|
||||
|
||||
## 🤖 AI-Assisted Contributions
|
||||
|
||||
โปรเจกต์นี้รองรับ AI agents (Windsurf Cascade, Codex CLI, opencode, Amp, Antigravity) ในการเขียน / review / refactor โค้ด — ผ่านคู่มือกลางคือ [`AGENTS.md`](./AGENTS.md) และชุดทักษะใน [`.agents/skills/`](./.agents/skills/)
|
||||
|
||||
### Canonical Rule Sources (อ่านตามลำดับนี้)
|
||||
|
||||
1. **[`AGENTS.md`](./AGENTS.md)** — quick-reference rules + change log (supersedes legacy `GEMINI.md`)
|
||||
2. **[`.agents/skills/_LCBP3-CONTEXT.md`](./.agents/skills/_LCBP3-CONTEXT.md)** — shared context loaded by every speckit-\* skill
|
||||
3. **[`.agents/skills/README.md`](./.agents/skills/README.md)** — skill-pack layout + Windsurf invocation guide
|
||||
4. `specs/06-Decision-Records/` (โดยเฉพาะ ADR-019 — UUID **March 2026 pattern**)
|
||||
5. `specs/05-Engineering-Guidelines/` (backend / frontend / testing / i18n / git conventions)
|
||||
|
||||
### Invocation (Windsurf)
|
||||
|
||||
ใช้ slash commands ด้านล่าง — `.windsurf/workflows/*.md` ห่อหุ้ม [`.agents/skills/speckit-*`](./.agents/skills/) ไว้ให้:
|
||||
|
||||
- `/02-speckit.specify` → spec.md
|
||||
- `/04-speckit.plan` → plan.md + data-model.md + contracts/
|
||||
- `/05-speckit.tasks` → tasks.md
|
||||
- `/07-speckit.implement` → execute tasks (with Ironclad Anti-Regression Protocols)
|
||||
- `/10-speckit.reviewer` → code review (Tier 1/2/3 classification)
|
||||
- `/12-speckit.security-audit` → OWASP + CASL + LCBP3-specific
|
||||
|
||||
### Health Checks
|
||||
|
||||
```bash
|
||||
# Version + frontmatter consistency
|
||||
bash ./.agents/scripts/bash/validate-versions.sh
|
||||
pwsh ./.agents/scripts/powershell/validate-versions.ps1
|
||||
|
||||
# Full skill audit (20 skills)
|
||||
bash ./.agents/scripts/bash/audit-skills.sh
|
||||
pwsh ./.agents/scripts/powershell/audit-skills.ps1
|
||||
```
|
||||
|
||||
### 🔴 Tier 1 Non-Negotiables (AI must enforce)
|
||||
|
||||
- **ADR-019 UUID** — `publicId` exposed directly; ห้าม `parseInt`/`Number`/`+` บน UUID; ห้าม `id ?? ''` fallback; ห้ามใช้ `@Expose({ name: 'id' })` rename
|
||||
- **ADR-009 Schema** — แก้ `lcbp3-v1.8.0-schema-02-tables.sql` โดยตรง + เพิ่ม delta ที่ `specs/03-Data-and-Storage/deltas/`; ห้าม TypeORM migrations
|
||||
- **ADR-016 Security** — CASL + `Idempotency-Key` + ClamAV two-phase upload
|
||||
- **ADR-018/020 AI Boundary** — Ollama on Admin Desktop only; human-in-the-loop validation
|
||||
|
||||
---
|
||||
|
||||
## 🎯 Quality Standards
|
||||
|
||||
### Definition of Done (DoD) สำหรับ Spec Changes
|
||||
|
||||
@@ -3,27 +3,30 @@
|
||||
> **Laem Chabang Port Phase 3 - Document Management System**
|
||||
> ระบบบริหารจัดการเอกสารโครงการแบบครบวงจร สำหรับโครงการก่อสร้างท่าเรือแหลมฉบังระยะที่ 3
|
||||
|
||||
[](./CHANGELOG.md)
|
||||
[](./CHANGELOG.md)
|
||||
[]()
|
||||
[]()
|
||||
[](./specs/00-Overview/README.md)
|
||||
|
||||
---
|
||||
|
||||
## 📈 Current Status (As of 2026-04-14)
|
||||
## 📈 Current Status (As of 2026-04-18)
|
||||
|
||||
**Version 1.8.7 — ADR-021 Integration Complete, Production Ready (22 ADRs)**
|
||||
**Version 1.8.9 — Infrastructure Hardening Complete (27 findings → 0)**
|
||||
|
||||
| Area | Status | หมายเหตุ |
|
||||
| ---------------------- | ------------------------ | -------------------------------------------------- |
|
||||
| 🔧 **Backend** | ✅ Production Ready | NestJS 11, Express v5, 0 Vulnerabilities |
|
||||
| 🎨 **Frontend** | ✅ 100% Complete | Next.js 16.2.0, React 19.2.4, ESLint 9 |
|
||||
| 💾 **Database** | ✅ Schema v1.8.0 Stable | MariaDB 11.8, No-migration Policy |
|
||||
| 📘 **Documentation** | ✅ **10/10 Gaps Closed** | Product Vision → Release Policy |
|
||||
| 🤖 **AI Migration** | 🔄 Pre-migration Setup | n8n + Ollama (ADR-017/018) |
|
||||
| 🔄 **Workflow Engine** | ✅ ADR-021 Integrated | Transmittals & Circulation with Integrated Context |
|
||||
| 🧪 **Testing** | 🔄 UAT Preparation | E2E + Acceptance Criteria ready |
|
||||
| 🚀 **Deployment** | 📋 Pending Go-Live Gate | Blue-Green on QNAP Container Station |
|
||||
> v1.8.7 (ADR-021 Integration) + v1.8.8 (Workflow Attachments) shipped Apr 14; v1.8.9 (Compose stack hardening) shipped Apr 18.
|
||||
|
||||
| Area | Status | หมายเหตุ |
|
||||
| ---------------------- | ------------------------ | ------------------------------------------------------------------ |
|
||||
| 🔧 **Backend** | ✅ Production Ready | NestJS 11, Express v5, 0 Vulnerabilities |
|
||||
| 🎨 **Frontend** | ✅ 100% Complete | Next.js 16.2.0, React 19.2.4, ESLint 9 |
|
||||
| 💾 **Database** | ✅ Schema v1.8.0 Stable | MariaDB 11.8, No-migration Policy |
|
||||
| 📘 **Documentation** | ✅ **10/10 Gaps Closed** | Product Vision → Release Policy |
|
||||
| 🤖 **AI Migration** | 🔄 Pre-migration Setup | n8n + Ollama (ADR-017/018) |
|
||||
| 🔄 **Workflow Engine** | ✅ ADR-021 Integrated | Transmittals & Circulation with Integrated Context |
|
||||
| 🧪 **Testing** | 🔄 UAT Preparation | E2E + Acceptance Criteria ready |
|
||||
| 🚀 **Deployment** | 📋 Pending Go-Live Gate | Blue-Green on QNAP Container Station |
|
||||
| 🔒 **Infrastructure** | ✅ Hardened (v1.8.9) | Compose stacks audited; secrets, auth, container hardening applied |
|
||||
|
||||
---
|
||||
|
||||
@@ -322,9 +325,9 @@ lcbp3-dms/
|
||||
├── .vscode/ # VS Code settings and extensions
|
||||
├── .husky/ # Git hooks
|
||||
│
|
||||
├── AGENTS.md # AI agent rules & project context
|
||||
├── GEMINI.md # AI coding guidelines
|
||||
├── CONTRIBUTING.md # Contribution guidelines
|
||||
├── AGENTS.md # AI agent rules & project context (v1.8.9) [★ primary]
|
||||
├── GEMINI.md # AI coding guidelines [legacy — kept for backward compat]
|
||||
├── CONTRIBUTING.md # Contribution guidelines (+ AI-Assisted section)
|
||||
├── CHANGELOG.md # Version history
|
||||
├── README.md # This file
|
||||
├── package.json # Root package.json (monorepo)
|
||||
@@ -627,13 +630,14 @@ pnpm test:e2e # Playwright E2E
|
||||
|
||||
### Security Features
|
||||
|
||||
- ✅ **JWT Authentication** - Access & Refresh Tokens
|
||||
- ✅ **JWT Authentication** - Access & Refresh Tokens (separate `AUTH_SECRET`)
|
||||
- ✅ **RBAC 4-Level** - Global, Organization, Project, Contract
|
||||
- ✅ **Rate Limiting** - ป้องกัน Brute-force
|
||||
- ✅ **Virus Scanning** - ClamAV สำหรับไฟล์ที่อัปโหลด
|
||||
- ✅ **Virus Scanning** - ClamAV สำหรับไฟล์ที่อัปโหลด (mandatory)
|
||||
- ✅ **Input Validation** - ป้องกัน SQL Injection, XSS, CSRF
|
||||
- ✅ **Idempotency** - ป้องกันการทำรายการซ้ำ
|
||||
- ✅ **Audit Logging** - บันทึกการกระทำทั้งหมด
|
||||
- ✅ **Container Hardening (v1.8.9)** - `read_only`, `cap_drop: [ALL]`, `no-new-privileges`, non-root `user:`, pinned image tags, MongoDB + Registry auth
|
||||
|
||||
### Security Best Practices
|
||||
|
||||
@@ -735,6 +739,22 @@ docker-compose -f docker-compose.yml up -d
|
||||
- Development Process
|
||||
- Pull Request Process
|
||||
- Coding Standards
|
||||
- **AI-Assisted Contributions** (AGENTS.md + `.agents/skills/` skill pack + Windsurf slash commands)
|
||||
|
||||
### 🤖 For AI Agents (Windsurf Cascade, Codex CLI, opencode, Amp, Antigravity)
|
||||
|
||||
ไฟล์กลางสำหรับ AI assistants:
|
||||
|
||||
| Priority | File | Purpose |
|
||||
| -------- | ------------------------------------------------------------------------ | --------------------------------------------------------------------------------------------- |
|
||||
| 1 | [`AGENTS.md`](./AGENTS.md) | Quick-reference rules (Tier 1/2/3 enforcement, ADR-019 March 2026 pattern, forbidden actions) |
|
||||
| 2 | [`.agents/skills/_LCBP3-CONTEXT.md`](./.agents/skills/_LCBP3-CONTEXT.md) | Shared context appendix injected into every speckit-\* skill |
|
||||
| 3 | [`.agents/skills/README.md`](./.agents/skills/README.md) | Skill-pack layout + slash-command invocation guide |
|
||||
| 4 | `specs/06-Decision-Records/` | 22 ADRs (architectural decisions) |
|
||||
|
||||
**Slash commands:** `/02-speckit.specify` → `/04-speckit.plan` → `/05-speckit.tasks` → `/07-speckit.implement` → `/10-speckit.reviewer` → `/12-speckit.security-audit`
|
||||
|
||||
**Health checks:** `bash ./.agents/scripts/bash/audit-skills.sh` or `pwsh ./.agents/scripts/powershell/audit-skills.ps1`
|
||||
|
||||
---
|
||||
|
||||
@@ -765,6 +785,28 @@ This project is **Internal Use Only** - ลิขสิทธิ์เป็น
|
||||
|
||||
## 🗺️ Roadmap
|
||||
|
||||
### ✅ Version 1.8.9 (Apr 2026) — Infrastructure Hardening + Agent Skill Pack Rebuild
|
||||
|
||||
**Agent skill pack rebuilt (`.agents/skills/` @ v1.8.9) — 2026-04-22:**
|
||||
|
||||
- ✅ 20 skills standardized (2 best-practices + 18 speckit-\*) — shared `_LCBP3-CONTEXT.md` appendix
|
||||
- ✅ ADR-019 drift removed: `publicId` exposed directly (no `@Expose({ name: 'id' })` rename); `id ?? ''` fallback eliminated
|
||||
- ✅ Dead references cleaned: `GEMINI.md` → `AGENTS.md`; `.specify/memory/` → `AGENTS.md`; `v1.7.0` → `v1.8.0` schema
|
||||
- ✅ New rules: workflow-engine (ADR-001/002/021), file-two-phase-upload (ADR-016), ai-boundary (ADR-018/020), no-typeorm-migrations (ADR-009), i18n, two-phase-upload (frontend)
|
||||
- ✅ `.windsurf/workflows/` path fixes (18 files) + 2 new wrappers (`12-speckit.security-audit`, `util-speckit.taskstoissues`)
|
||||
- ✅ `specs/03-Data-and-Storage/deltas/` directory bootstrapped (ADR-009 incremental SQL)
|
||||
- ✅ Regenerated `nestjs-best-practices/AGENTS.md` (188KB, 45 rules × 11 categories incl. LCBP3 project-specific)
|
||||
- ✅ Helper scripts fixed (bash + pwsh): BASE_DIR, CRLF, color enum, version extraction
|
||||
|
||||
**Docker Compose stacks fully hardened — 27 findings across 4 phases:**
|
||||
|
||||
- ✅ **Phase 1 (C1–C6 + H6):** Secrets extracted to `env_file`; JWT_SECRET/AUTH_SECRET split; Redis `--requirepass`; Elasticsearch internal-only; MariaDB root/app user split; ClamAV service added; filename typo fixed
|
||||
- ✅ **Phase 2 (H1–H5, H7):** n8n docker-socket-proxy (read-only); ASUSTOR cAdvisor port fix; QNAP exporters expose-only; all `:latest` tags pinned to verified semver
|
||||
- ✅ **Phase 3 (M1–M9):** Healthchecks + resource limits on all services; backend/frontend `read_only` + `cap_drop: [ALL]` + non-root `user`; MongoDB `--auth --keyFile`; Registry htpasswd auth; phpMyAdmin via NPM only
|
||||
- ✅ **Phase 4 (L1–L5 + S1–S4):** Removed `stdin_open`/`tty` from production services; trimmed legacy comments; shared `x-base.yml` anchors; per-stack `.env.example`; secret-manager roadmap (Swarm / Infisical / SOPS)
|
||||
|
||||
**New files:** `specs/04-Infrastructure-OPS/04-00-docker-compose/README.md`, `SECURITY-MIGRATION-v1.8.6.md`, `x-base.yml`, 9 per-stack `.env.example` files.
|
||||
|
||||
### ✅ Version 1.8.7 (Apr 2026) — ADR-021 Integration Complete
|
||||
|
||||
- ✅ ADR-021 (Integrated Workflow Context) — Transmittals & Circulation workflow integration
|
||||
|
||||
@@ -0,0 +1,57 @@
|
||||
# ========================================
|
||||
# LCBP3 Backend — Environment Variables
|
||||
# Copy to .env and fill in real values
|
||||
# หมายเหตุ: ค่า DB_PASSWORD, REDIS_PASSWORD,
|
||||
# ELASTICSEARCH_PASSWORD ต้องตรงกับที่ตั้งไว้ใน
|
||||
# services stack (MariaDB/Redis/Elasticsearch
|
||||
# ดูจาก .env ของ services stack ที่รันอยู่แล้วบน QNAP
|
||||
# ========================================
|
||||
|
||||
# Database
|
||||
DB_HOST=localhost
|
||||
DB_PORT=3306
|
||||
DB_USERNAME=admin
|
||||
DB_PASSWORD=your-db-password-here
|
||||
DB_DATABASE=lcbp3_dev
|
||||
|
||||
# Redis
|
||||
REDIS_HOST=localhost
|
||||
REDIS_PORT=16379
|
||||
REDIS_PASSWORD=your-redis-password-here
|
||||
|
||||
# JWT
|
||||
JWT_SECRET=change-me-in-production
|
||||
JWT_EXPIRES_IN=7d
|
||||
|
||||
# File Storage
|
||||
UPLOAD_DEST=./uploads
|
||||
MAX_FILE_SIZE=52428800
|
||||
|
||||
# ClamAV
|
||||
CLAMAV_HOST=localhost
|
||||
CLAMAV_PORT=3310
|
||||
|
||||
# ========================================
|
||||
# ADR-022 RAG — Retrieval-Augmented Generation
|
||||
# ========================================
|
||||
|
||||
# Qdrant vector store (local docker-compose or QNAP)
|
||||
QDRANT_URL=http://localhost:6333
|
||||
|
||||
# Ollama (Admin Desktop Desk-5439 — ADR-018 AI boundary)
|
||||
OLLAMA_EMBED_MODEL=nomic-embed-text
|
||||
OLLAMA_RAG_MODEL=gemma3:12b
|
||||
OLLAMA_URL=http://192.168.10.100:11434
|
||||
|
||||
# Thai preprocessing microservice (PyThaiNLP — Admin Desktop)
|
||||
THAI_PREPROCESS_URL=http://192.168.10.100:8765
|
||||
|
||||
# Typhoon API (cloud LLM — PUBLIC/INTERNAL only, never CONFIDENTIAL)
|
||||
TYPHOON_API_KEY=your-typhoon-api-key-here
|
||||
TYPHOON_API_URL=https://api.opentyphoon.ai/v1
|
||||
|
||||
# RAG query config
|
||||
RAG_TOPK=20
|
||||
RAG_FINAL_K=5
|
||||
RAG_TIMEOUT_MS=5000
|
||||
RAG_QUERY_CACHE_TTL=300
|
||||
@@ -54,10 +54,24 @@ services:
|
||||
- esdata:/usr/share/elasticsearch/data
|
||||
networks:
|
||||
- lcbp3-net
|
||||
# ADR-022 RAG: Qdrant vector store (tiered multitenancy for project isolation)
|
||||
qdrant:
|
||||
image: qdrant/qdrant:v1.16.1
|
||||
container_name: lcbp3-qdrant-local
|
||||
restart: always
|
||||
ports:
|
||||
- '6333:6333' # REST API
|
||||
- '6334:6334' # gRPC
|
||||
volumes:
|
||||
- qdrant_data:/qdrant/storage
|
||||
networks:
|
||||
- lcbp3-net
|
||||
|
||||
volumes:
|
||||
db_data:
|
||||
redis_data: # เพิ่ม Volume
|
||||
esdata:
|
||||
qdrant_data: # ADR-022 RAG vector store
|
||||
|
||||
networks:
|
||||
lcbp3-net:
|
||||
|
||||
File diff suppressed because one or more lines are too long
File diff suppressed because it is too large
Load Diff
@@ -24,19 +24,19 @@
|
||||
"seed": "ts-node -r tsconfig-paths/register src/database/seeds/run-seed.ts"
|
||||
},
|
||||
"dependencies": {
|
||||
"@casl/ability": "^6.7.5",
|
||||
"@casl/ability": "6.8.0",
|
||||
"@elastic/elasticsearch": "^8.13.0",
|
||||
"@nestjs-modules/ioredis": "^2.0.2",
|
||||
"@nestjs/axios": "^4.0.1",
|
||||
"@nestjs/bullmq": "^11.0.4",
|
||||
"@nestjs/cache-manager": "^3.0.1",
|
||||
"@nestjs/common": "^11.0.1",
|
||||
"@nestjs/common": "^11.1.19",
|
||||
"@nestjs/config": "^4.0.2",
|
||||
"@nestjs/core": "^11.0.1",
|
||||
"@nestjs/core": "^11.1.19",
|
||||
"@nestjs/elasticsearch": "^11.1.0",
|
||||
"@nestjs/jwt": "^11.0.1",
|
||||
"@nestjs/passport": "^11.0.5",
|
||||
"@nestjs/platform-express": "^11.0.1",
|
||||
"@nestjs/platform-express": "^11.1.19",
|
||||
"@nestjs/platform-socket.io": "^11.1.9",
|
||||
"@nestjs/schedule": "^6.0.1",
|
||||
"@nestjs/swagger": "^11.2.3",
|
||||
@@ -44,12 +44,13 @@
|
||||
"@nestjs/throttler": "^6.4.0",
|
||||
"@nestjs/typeorm": "^11.0.0",
|
||||
"@nestjs/websockets": "^11.1.9",
|
||||
"@qdrant/js-client-rest": "^1.17.0",
|
||||
"@types/nodemailer": "^7.0.4",
|
||||
"@willsoto/nestjs-prometheus": "^6.0.2",
|
||||
"ajv": "^8.17.1",
|
||||
"ajv-formats": "^3.0.1",
|
||||
"async-retry": "^1.3.3",
|
||||
"axios": "^1.13.2",
|
||||
"axios": "^1.15.0",
|
||||
"bcrypt": "^6.0.0",
|
||||
"bullmq": "^5.63.2",
|
||||
"cache-manager": "^7.2.5",
|
||||
@@ -81,7 +82,7 @@
|
||||
"zod": "^4.1.13"
|
||||
},
|
||||
"devDependencies": {
|
||||
"@compodoc/compodoc": "^1.1.32",
|
||||
"@compodoc/compodoc": "^1.1.23",
|
||||
"@eslint/eslintrc": "^3.2.0",
|
||||
"@eslint/js": "^9.18.0",
|
||||
"@nestjs/cli": "^11.0.0",
|
||||
|
||||
@@ -52,6 +52,7 @@ import { SearchModule } from './modules/search/search.module';
|
||||
import { AuditLogModule } from './modules/audit-log/audit-log.module';
|
||||
import { MigrationModule } from './modules/migration/migration.module';
|
||||
import { AiModule } from './modules/ai/ai.module';
|
||||
import { RagModule } from './modules/rag/rag.module';
|
||||
|
||||
@Module({
|
||||
imports: [
|
||||
@@ -189,6 +190,7 @@ import { AiModule } from './modules/ai/ai.module';
|
||||
AuditLogModule,
|
||||
MigrationModule,
|
||||
AiModule,
|
||||
RagModule,
|
||||
],
|
||||
controllers: [AppController],
|
||||
providers: [
|
||||
|
||||
@@ -2,7 +2,7 @@ import { Test, TestingModule } from '@nestjs/testing';
|
||||
import { AbilityFactory, ScopeContext } from './ability.factory';
|
||||
import { User } from '../../../modules/user/entities/user.entity';
|
||||
import { UserAssignment } from '../../../modules/user/entities/user-assignment.entity';
|
||||
import { Role } from '../../../modules/auth/entities/role.entity';
|
||||
import { Role } from '../../../modules/user/entities/role.entity';
|
||||
|
||||
describe('AbilityFactory', () => {
|
||||
let factory: AbilityFactory;
|
||||
|
||||
@@ -14,6 +14,7 @@ export enum ErrorType {
|
||||
DATABASE_ERROR = 'DATABASE_ERROR',
|
||||
EXTERNAL_SERVICE = 'EXTERNAL_SERVICE',
|
||||
INFRASTRUCTURE = 'INFRASTRUCTURE',
|
||||
SERVICE_UNAVAILABLE = 'SERVICE_UNAVAILABLE', // 503 — ระบบไม่พร้อมให้บริการชั่วคราว (Redlock fail, Redis down)
|
||||
}
|
||||
|
||||
// ระดับความรุนแรงของ Error
|
||||
@@ -49,6 +50,8 @@ export function getStatusCode(type: ErrorType): number {
|
||||
case ErrorType.EXTERNAL_SERVICE:
|
||||
case ErrorType.INFRASTRUCTURE:
|
||||
return HttpStatus.INTERNAL_SERVER_ERROR;
|
||||
case ErrorType.SERVICE_UNAVAILABLE:
|
||||
return HttpStatus.SERVICE_UNAVAILABLE; // 503
|
||||
default:
|
||||
return HttpStatus.INTERNAL_SERVER_ERROR;
|
||||
}
|
||||
@@ -233,3 +236,27 @@ export class DatabaseException extends BaseException {
|
||||
);
|
||||
}
|
||||
}
|
||||
|
||||
// Service Unavailable (503) - ระบบไม่พร้อมให้บริการชั่วคราว
|
||||
// ADR-021 C1: Redlock Fail-closed — retry ครบแล้ว ยัง acquire ไม่ได้
|
||||
export class ServiceUnavailableException extends BaseException {
|
||||
constructor(
|
||||
code: string,
|
||||
message: string,
|
||||
userMessage?: string,
|
||||
recoveryActions?: string[]
|
||||
) {
|
||||
super(
|
||||
ErrorType.SERVICE_UNAVAILABLE,
|
||||
code,
|
||||
message,
|
||||
userMessage || 'ระบบยุ่งชั่วคราว กรุณาลองใหม่ภายหลัง',
|
||||
ErrorSeverity.HIGH,
|
||||
undefined,
|
||||
recoveryActions || [
|
||||
'รอสักครู่แล้วลองใหม่',
|
||||
'แจ้งผู้ดูแลระบบหากยังพบปัญหา',
|
||||
]
|
||||
);
|
||||
}
|
||||
}
|
||||
|
||||
@@ -14,6 +14,7 @@ export {
|
||||
WorkflowException,
|
||||
SystemException,
|
||||
DatabaseException,
|
||||
ServiceUnavailableException,
|
||||
} from './base.exception';
|
||||
|
||||
export type { ValidationErrorDetail, ErrorPayload } from './base.exception';
|
||||
|
||||
@@ -1,5 +1,6 @@
|
||||
import { Module } from '@nestjs/common';
|
||||
import { TypeOrmModule } from '@nestjs/typeorm';
|
||||
import { BullModule } from '@nestjs/bullmq';
|
||||
import { ScheduleModule } from '@nestjs/schedule'; // ✅ Import
|
||||
import { FileStorageService } from './file-storage.service.js';
|
||||
import { FileStorageController } from './file-storage.controller.js';
|
||||
@@ -12,6 +13,7 @@ import { UserModule } from '../../modules/user/user.module';
|
||||
TypeOrmModule.forFeature([Attachment]),
|
||||
ScheduleModule.forRoot(), // ✅ เปิดใช้งาน Cron Job],
|
||||
UserModule,
|
||||
BullModule.registerQueue({ name: 'rag-ocr' }),
|
||||
],
|
||||
controllers: [FileStorageController],
|
||||
providers: [
|
||||
|
||||
@@ -4,10 +4,13 @@ import {
|
||||
NotFoundException,
|
||||
BadRequestException,
|
||||
Logger,
|
||||
Optional,
|
||||
} from '@nestjs/common';
|
||||
import { InjectRepository } from '@nestjs/typeorm';
|
||||
import { Repository, In } from 'typeorm';
|
||||
import { ConfigService } from '@nestjs/config';
|
||||
import { InjectQueue } from '@nestjs/bullmq';
|
||||
import { Queue } from 'bullmq';
|
||||
import * as fs from 'fs-extra';
|
||||
import * as path from 'path';
|
||||
import * as crypto from 'crypto';
|
||||
@@ -24,7 +27,8 @@ export class FileStorageService {
|
||||
constructor(
|
||||
@InjectRepository(Attachment)
|
||||
private attachmentRepository: Repository<Attachment>,
|
||||
private configService: ConfigService
|
||||
private configService: ConfigService,
|
||||
@Optional() @InjectQueue('rag-ocr') private readonly ragOcrQueue?: Queue
|
||||
) {
|
||||
// ใช้ env vars จาก docker-compose สำหรับ Production
|
||||
// ถ้าไม่ได้กำหนดจะ fallback เป็น ./uploads/temp และ ./uploads/permanent
|
||||
@@ -90,7 +94,18 @@ export class FileStorageService {
|
||||
*/
|
||||
async commit(
|
||||
tempIds: string[],
|
||||
options?: { issueDate?: Date; documentType?: string }
|
||||
options?: {
|
||||
issueDate?: Date;
|
||||
documentType?: string;
|
||||
ragMeta?: {
|
||||
docType: string;
|
||||
docNumber: string | null;
|
||||
revision: string | null;
|
||||
projectCode: string;
|
||||
projectPublicId: string;
|
||||
classification: 'PUBLIC' | 'INTERNAL' | 'CONFIDENTIAL';
|
||||
};
|
||||
}
|
||||
): Promise<Attachment[]> {
|
||||
if (!tempIds || tempIds.length === 0) {
|
||||
return [];
|
||||
@@ -149,7 +164,27 @@ export class FileStorageService {
|
||||
att.expiresAt = undefined; // เคลียร์วันหมดอายุ
|
||||
att.referenceDate = effectiveDate; // Save reference date
|
||||
|
||||
committedAttachments.push(await this.attachmentRepository.save(att));
|
||||
const saved = await this.attachmentRepository.save(att);
|
||||
committedAttachments.push(saved);
|
||||
|
||||
if (this.ragOcrQueue && options?.ragMeta) {
|
||||
await this.ragOcrQueue
|
||||
.add(
|
||||
'ocr',
|
||||
{
|
||||
attachmentPublicId: saved.publicId,
|
||||
filePath: saved.filePath,
|
||||
...options.ragMeta,
|
||||
},
|
||||
{ jobId: saved.publicId }
|
||||
)
|
||||
.catch((err: unknown) => {
|
||||
this.logger.error(
|
||||
`Failed to enqueue rag-ocr for ${saved.publicId}`,
|
||||
err instanceof Error ? err.stack : String(err)
|
||||
);
|
||||
});
|
||||
}
|
||||
} else {
|
||||
this.logger.error(`File missing during commit: ${oldPath}`);
|
||||
throw new NotFoundException(
|
||||
|
||||
@@ -10,10 +10,15 @@ import { of, lastValueFrom } from 'rxjs';
|
||||
import { Request } from 'express';
|
||||
import type { Socket } from 'net';
|
||||
|
||||
type MockAuditLogRepo = {
|
||||
create: jest.Mock;
|
||||
save: jest.Mock;
|
||||
};
|
||||
|
||||
describe('AuditLogInterceptor', () => {
|
||||
let interceptor: AuditLogInterceptor;
|
||||
let reflector: Reflector;
|
||||
let auditLogRepo: jest.Mocked<Partial<typeof AuditLog.prototype.constructor>>;
|
||||
let auditLogRepo: MockAuditLogRepo;
|
||||
|
||||
const createMockUser = (userId: number): User => {
|
||||
const user = new User();
|
||||
@@ -55,7 +60,7 @@ describe('AuditLogInterceptor', () => {
|
||||
});
|
||||
|
||||
beforeEach(async () => {
|
||||
const mockRepository = {
|
||||
const mockRepository: MockAuditLogRepo = {
|
||||
create: jest.fn().mockReturnValue({}),
|
||||
save: jest.fn().mockResolvedValue({}),
|
||||
};
|
||||
@@ -78,7 +83,7 @@ describe('AuditLogInterceptor', () => {
|
||||
|
||||
interceptor = module.get<AuditLogInterceptor>(AuditLogInterceptor);
|
||||
reflector = module.get<Reflector>(Reflector);
|
||||
auditLogRepo = module.get(getRepositoryToken(AuditLog));
|
||||
auditLogRepo = module.get<MockAuditLogRepo>(getRepositoryToken(AuditLog));
|
||||
});
|
||||
|
||||
afterEach(() => {
|
||||
|
||||
@@ -99,7 +99,17 @@ describe('CorrespondenceController', () => {
|
||||
mockResult
|
||||
);
|
||||
|
||||
const mockReq = { user: { user_id: 1, roles: [] } };
|
||||
const mockReq = {
|
||||
user: {
|
||||
user_id: 1,
|
||||
username: 'testuser',
|
||||
password: 'hashedpassword',
|
||||
email: 'test@example.com',
|
||||
publicId: '019505a1-7c3e-7000-8000-abc123def456',
|
||||
createdAt: new Date(),
|
||||
updatedAt: new Date(),
|
||||
},
|
||||
};
|
||||
(mockCorrespondenceService.findOneByUuid as jest.Mock).mockResolvedValue({
|
||||
id: 1,
|
||||
uuid: 'a0eebc99-9c0b-4ef8-bb6d-6bb9bd380a11',
|
||||
|
||||
@@ -2,6 +2,7 @@ import { Test, TestingModule } from '@nestjs/testing';
|
||||
import { MigrationController } from './migration.controller';
|
||||
import { MigrationService } from './migration.service';
|
||||
import { ImportCorrespondenceDto } from './dto/import-correspondence.dto';
|
||||
import { User } from '../user/entities/user.entity';
|
||||
|
||||
describe('MigrationController', () => {
|
||||
let controller: MigrationController;
|
||||
@@ -32,17 +33,29 @@ describe('MigrationController', () => {
|
||||
|
||||
it('should call importCorrespondence on service', async () => {
|
||||
const dto: ImportCorrespondenceDto = {
|
||||
document_number: 'DOC-001',
|
||||
documentNumber: 'DOC-001',
|
||||
subject: 'Legacy Record',
|
||||
category: 'Correspondence',
|
||||
source_file_path: '/staging_ai/test.pdf',
|
||||
migrated_by: 'SYSTEM_IMPORT',
|
||||
batch_id: 'batch1',
|
||||
project_id: 1,
|
||||
sourceFilePath: '/staging_ai/test.pdf',
|
||||
migratedBy: 'SYSTEM_IMPORT',
|
||||
batchId: 'batch1',
|
||||
projectId: 1,
|
||||
};
|
||||
|
||||
const idempotencyKey = 'key123';
|
||||
const user = { userId: 5 };
|
||||
const user: User = {
|
||||
user_id: 5,
|
||||
username: 'testuser',
|
||||
password: 'hashedpassword',
|
||||
email: 'test@example.com',
|
||||
publicId: '019505a1-7c3e-7000-8000-abc123def456',
|
||||
createdAt: new Date(),
|
||||
updatedAt: new Date(),
|
||||
isActive: true,
|
||||
failedAttempts: 0,
|
||||
primaryOrganizationPublicId: undefined,
|
||||
generatePublicId: jest.fn(),
|
||||
};
|
||||
|
||||
const result = await controller.importCorrespondence(
|
||||
dto,
|
||||
|
||||
@@ -0,0 +1,86 @@
|
||||
import { Test, TestingModule } from '@nestjs/testing';
|
||||
|
||||
import { IngestionService } from '../ingestion.service';
|
||||
|
||||
const QUEUE_TOKEN = 'BullQueue_rag-ocr';
|
||||
|
||||
const mockOcrQueue = {
|
||||
getJob: jest.fn(),
|
||||
add: jest.fn(),
|
||||
};
|
||||
|
||||
const baseJobData = {
|
||||
attachmentPublicId: 'att-uuid-001',
|
||||
filePath: '/uploads/permanent/CORR/2026/04/file.pdf',
|
||||
docType: 'CORR',
|
||||
docNumber: 'REF-001',
|
||||
revision: null,
|
||||
projectCode: 'PRJ-001',
|
||||
projectPublicId: 'proj-uuid-001',
|
||||
classification: 'INTERNAL' as const,
|
||||
};
|
||||
|
||||
describe('IngestionService', () => {
|
||||
let service: IngestionService;
|
||||
|
||||
beforeEach(async () => {
|
||||
const module: TestingModule = await Test.createTestingModule({
|
||||
providers: [
|
||||
IngestionService,
|
||||
{ provide: QUEUE_TOKEN, useValue: mockOcrQueue },
|
||||
],
|
||||
}).compile();
|
||||
|
||||
service = module.get<IngestionService>(IngestionService);
|
||||
jest.clearAllMocks();
|
||||
});
|
||||
|
||||
it('should enqueue rag-ocr job with attachmentPublicId as jobId', async () => {
|
||||
mockOcrQueue.getJob.mockResolvedValue(null);
|
||||
mockOcrQueue.add.mockResolvedValue({ id: baseJobData.attachmentPublicId });
|
||||
|
||||
await service.enqueue(baseJobData);
|
||||
|
||||
expect(mockOcrQueue.add).toHaveBeenCalledWith('ocr', baseJobData, {
|
||||
jobId: baseJobData.attachmentPublicId,
|
||||
});
|
||||
});
|
||||
|
||||
it('EC-RAG-001: duplicate enqueue when job is active → second call is no-op (log only)', async () => {
|
||||
const mockJob = { getState: jest.fn().mockResolvedValue('active') };
|
||||
mockOcrQueue.getJob.mockResolvedValue(mockJob);
|
||||
|
||||
await service.enqueue(baseJobData);
|
||||
|
||||
expect(mockOcrQueue.add).not.toHaveBeenCalled();
|
||||
});
|
||||
|
||||
it('EC-RAG-001: duplicate enqueue when job is waiting → second call is no-op', async () => {
|
||||
const mockJob = { getState: jest.fn().mockResolvedValue('waiting') };
|
||||
mockOcrQueue.getJob.mockResolvedValue(mockJob);
|
||||
|
||||
await service.enqueue(baseJobData);
|
||||
|
||||
expect(mockOcrQueue.add).not.toHaveBeenCalled();
|
||||
});
|
||||
|
||||
it('should re-enqueue if job exists but is completed (state=completed)', async () => {
|
||||
const mockJob = { getState: jest.fn().mockResolvedValue('completed') };
|
||||
mockOcrQueue.getJob.mockResolvedValue(mockJob);
|
||||
mockOcrQueue.add.mockResolvedValue({ id: baseJobData.attachmentPublicId });
|
||||
|
||||
await service.enqueue(baseJobData);
|
||||
|
||||
expect(mockOcrQueue.add).toHaveBeenCalledTimes(1);
|
||||
});
|
||||
|
||||
it('should re-enqueue if job exists but is failed (state=failed)', async () => {
|
||||
const mockJob = { getState: jest.fn().mockResolvedValue('failed') };
|
||||
mockOcrQueue.getJob.mockResolvedValue(mockJob);
|
||||
mockOcrQueue.add.mockResolvedValue({ id: baseJobData.attachmentPublicId });
|
||||
|
||||
await service.enqueue(baseJobData);
|
||||
|
||||
expect(mockOcrQueue.add).toHaveBeenCalledTimes(1);
|
||||
});
|
||||
});
|
||||
@@ -0,0 +1,206 @@
|
||||
import { Test, TestingModule } from '@nestjs/testing';
|
||||
import { ServiceUnavailableException } from '@nestjs/common';
|
||||
import { getRepositoryToken } from '@nestjs/typeorm';
|
||||
|
||||
import { RagService } from '../rag.service';
|
||||
import { QdrantService } from '../qdrant.service';
|
||||
import { EmbeddingService } from '../embedding.service';
|
||||
import { TyphoonService } from '../typhoon.service';
|
||||
import { IngestionService } from '../ingestion.service';
|
||||
import { DocumentChunk } from '../entities/document-chunk.entity';
|
||||
|
||||
const DEFAULT_REDIS_TOKEN = 'default_IORedisModuleConnectionToken';
|
||||
|
||||
const mockQdrant = {
|
||||
isReady: jest.fn(),
|
||||
hybridSearch: jest.fn(),
|
||||
deleteByDocumentId: jest.fn(),
|
||||
};
|
||||
|
||||
const mockEmbedding = {
|
||||
embed: jest.fn(),
|
||||
};
|
||||
|
||||
const mockTyphoon = {
|
||||
generate: jest.fn(),
|
||||
sanitizeInput: jest.fn((t: string) => t),
|
||||
};
|
||||
|
||||
const mockIngestion = { enqueue: jest.fn() };
|
||||
|
||||
const mockChunkRepo = {
|
||||
count: jest.fn(),
|
||||
delete: jest.fn(),
|
||||
manager: {
|
||||
query: jest.fn(),
|
||||
},
|
||||
};
|
||||
|
||||
const mockRedis = {
|
||||
get: jest.fn(),
|
||||
setex: jest.fn(),
|
||||
};
|
||||
|
||||
describe('RagService', () => {
|
||||
let service: RagService;
|
||||
|
||||
beforeEach(async () => {
|
||||
const module: TestingModule = await Test.createTestingModule({
|
||||
providers: [
|
||||
RagService,
|
||||
{ provide: QdrantService, useValue: mockQdrant },
|
||||
{ provide: EmbeddingService, useValue: mockEmbedding },
|
||||
{ provide: TyphoonService, useValue: mockTyphoon },
|
||||
{ provide: IngestionService, useValue: mockIngestion },
|
||||
{ provide: getRepositoryToken(DocumentChunk), useValue: mockChunkRepo },
|
||||
{ provide: DEFAULT_REDIS_TOKEN, useValue: mockRedis },
|
||||
],
|
||||
}).compile();
|
||||
|
||||
service = module.get<RagService>(RagService);
|
||||
jest.clearAllMocks();
|
||||
});
|
||||
|
||||
describe('query()', () => {
|
||||
const dto = {
|
||||
question: 'เอกสารเกี่ยวกับอะไร?',
|
||||
projectPublicId: 'proj-uuid-1234',
|
||||
};
|
||||
const memberPerms: string[] = [];
|
||||
const adminPerms = ['system.manage_all'];
|
||||
|
||||
it('should return answer with citations on PUBLIC cache miss → write cache', async () => {
|
||||
mockQdrant.isReady.mockReturnValue(true);
|
||||
mockRedis.get.mockResolvedValue(null);
|
||||
mockEmbedding.embed.mockResolvedValue(new Array(768).fill(0.1));
|
||||
mockQdrant.hybridSearch.mockResolvedValue([
|
||||
{
|
||||
chunkId: 'chunk-1',
|
||||
publicId: 'att-1',
|
||||
docType: 'CORR',
|
||||
docNumber: 'REF-001',
|
||||
revision: null,
|
||||
projectCode: 'PRJ-001',
|
||||
contentPreview: 'เนื้อหาเอกสาร',
|
||||
score: 0.92,
|
||||
},
|
||||
]);
|
||||
mockTyphoon.generate.mockResolvedValue({
|
||||
answer: 'คำตอบ',
|
||||
usedFallbackModel: false,
|
||||
});
|
||||
|
||||
const result = await service.query(dto, memberPerms);
|
||||
|
||||
expect(result.answer).toBe('คำตอบ');
|
||||
expect(result.citations).toHaveLength(1);
|
||||
expect(result.usedFallbackModel).toBe(false);
|
||||
expect(mockRedis.setex).toHaveBeenCalledTimes(1);
|
||||
});
|
||||
|
||||
it('should return cached result without calling Qdrant on cache hit', async () => {
|
||||
mockQdrant.isReady.mockReturnValue(true);
|
||||
const cached = JSON.stringify({
|
||||
answer: 'cached answer',
|
||||
citations: [],
|
||||
confidence: 0.9,
|
||||
usedFallbackModel: false,
|
||||
});
|
||||
mockRedis.get.mockResolvedValue(cached);
|
||||
|
||||
const result = await service.query(dto, memberPerms);
|
||||
|
||||
expect(result.answer).toBe('cached answer');
|
||||
expect(mockQdrant.hybridSearch).not.toHaveBeenCalled();
|
||||
expect(mockEmbedding.embed).not.toHaveBeenCalled();
|
||||
});
|
||||
|
||||
it('CONFIDENTIAL: must use Ollama only, skip cache read and write', async () => {
|
||||
mockQdrant.isReady.mockReturnValue(true);
|
||||
mockEmbedding.embed.mockResolvedValue(new Array(768).fill(0.1));
|
||||
mockQdrant.hybridSearch.mockResolvedValue([]);
|
||||
mockTyphoon.generate.mockResolvedValue({
|
||||
answer: 'ลับมาก',
|
||||
usedFallbackModel: true,
|
||||
});
|
||||
|
||||
const result = await service.query(dto, adminPerms);
|
||||
|
||||
expect(mockRedis.get).not.toHaveBeenCalled();
|
||||
expect(mockRedis.setex).not.toHaveBeenCalled();
|
||||
expect(mockTyphoon.generate).toHaveBeenCalledWith(
|
||||
expect.any(String),
|
||||
true
|
||||
);
|
||||
expect(result.usedFallbackModel).toBe(true);
|
||||
});
|
||||
|
||||
it('collectionReady=false → throw ServiceUnavailableException RAG_NOT_READY', async () => {
|
||||
mockQdrant.isReady.mockReturnValue(false);
|
||||
|
||||
await expect(service.query(dto, memberPerms)).rejects.toThrow(
|
||||
ServiceUnavailableException
|
||||
);
|
||||
});
|
||||
|
||||
it('cross-project cache isolation: same question different projectPublicId → different cache key', async () => {
|
||||
mockQdrant.isReady.mockReturnValue(true);
|
||||
mockRedis.get.mockResolvedValue(null);
|
||||
mockEmbedding.embed.mockResolvedValue(new Array(768).fill(0.1));
|
||||
mockQdrant.hybridSearch.mockResolvedValue([]);
|
||||
mockTyphoon.generate.mockResolvedValue({
|
||||
answer: 'A',
|
||||
usedFallbackModel: false,
|
||||
});
|
||||
|
||||
await service.query(
|
||||
{ question: 'Q?', projectPublicId: 'proj-A' },
|
||||
memberPerms
|
||||
);
|
||||
await service.query(
|
||||
{ question: 'Q?', projectPublicId: 'proj-B' },
|
||||
memberPerms
|
||||
);
|
||||
|
||||
const calls = mockRedis.setex.mock.calls as [string, ...unknown[]][];
|
||||
expect(calls[0][0]).not.toBe(calls[1][0]);
|
||||
});
|
||||
|
||||
it('classification ceiling derived from role, not from request body', async () => {
|
||||
mockQdrant.isReady.mockReturnValue(true);
|
||||
mockRedis.get.mockResolvedValue(null);
|
||||
mockEmbedding.embed.mockResolvedValue(new Array(768).fill(0.1));
|
||||
mockQdrant.hybridSearch.mockResolvedValue([]);
|
||||
mockTyphoon.generate.mockResolvedValue({
|
||||
anwer: 'ok',
|
||||
usedFallbackModel: false,
|
||||
});
|
||||
|
||||
await service.query(dto, memberPerms);
|
||||
expect(mockQdrant.hybridSearch).toHaveBeenCalledWith(
|
||||
expect.any(Array),
|
||||
dto.projectPublicId,
|
||||
'INTERNAL',
|
||||
20
|
||||
);
|
||||
|
||||
jest.clearAllMocks();
|
||||
mockQdrant.isReady.mockReturnValue(true);
|
||||
mockRedis.get.mockResolvedValue(null);
|
||||
mockEmbedding.embed.mockResolvedValue(new Array(768).fill(0.1));
|
||||
mockQdrant.hybridSearch.mockResolvedValue([]);
|
||||
mockTyphoon.generate.mockResolvedValue({
|
||||
answer: 'ok',
|
||||
usedFallbackModel: true,
|
||||
});
|
||||
|
||||
await service.query(dto, adminPerms);
|
||||
expect(mockQdrant.hybridSearch).toHaveBeenCalledWith(
|
||||
expect.any(Array),
|
||||
dto.projectPublicId,
|
||||
'CONFIDENTIAL',
|
||||
20
|
||||
);
|
||||
});
|
||||
});
|
||||
});
|
||||
@@ -0,0 +1,11 @@
|
||||
import { IsNotEmpty, IsString, IsUUID, MaxLength } from 'class-validator';
|
||||
|
||||
export class RagQueryDto {
|
||||
@IsString()
|
||||
@IsNotEmpty()
|
||||
@MaxLength(500)
|
||||
question!: string;
|
||||
|
||||
@IsUUID()
|
||||
projectPublicId!: string;
|
||||
}
|
||||
@@ -0,0 +1,16 @@
|
||||
export interface RagCitation {
|
||||
chunkId: string;
|
||||
docNumber: string | null;
|
||||
docType: string;
|
||||
revision: string | null;
|
||||
snippet: string;
|
||||
score: number;
|
||||
}
|
||||
|
||||
export class RagResponseDto {
|
||||
answer!: string;
|
||||
citations!: RagCitation[];
|
||||
confidence!: number;
|
||||
usedFallbackModel!: boolean;
|
||||
cachedAt?: string;
|
||||
}
|
||||
@@ -0,0 +1,46 @@
|
||||
import { Injectable, Logger } from '@nestjs/common';
|
||||
import { ConfigService } from '@nestjs/config';
|
||||
import axios from 'axios';
|
||||
|
||||
@Injectable()
|
||||
export class EmbeddingService {
|
||||
private readonly logger = new Logger(EmbeddingService.name);
|
||||
private readonly ollamaUrl: string;
|
||||
private readonly model: string;
|
||||
|
||||
constructor(private readonly configService: ConfigService) {
|
||||
this.ollamaUrl = this.configService.get<string>(
|
||||
'OLLAMA_URL',
|
||||
'http://localhost:11434'
|
||||
);
|
||||
this.model = this.configService.get<string>(
|
||||
'OLLAMA_EMBED_MODEL',
|
||||
'nomic-embed-text'
|
||||
);
|
||||
}
|
||||
|
||||
async embed(text: string): Promise<number[]> {
|
||||
try {
|
||||
const response = await axios.post<{ embedding: number[] }>(
|
||||
`${this.ollamaUrl}/api/embeddings`,
|
||||
{ model: this.model, prompt: text },
|
||||
{ timeout: 30000 }
|
||||
);
|
||||
return response.data.embedding;
|
||||
} catch (err) {
|
||||
this.logger.error(
|
||||
'Embedding failed',
|
||||
err instanceof Error ? err.stack : String(err)
|
||||
);
|
||||
throw err;
|
||||
}
|
||||
}
|
||||
|
||||
async embedBatch(texts: string[]): Promise<number[][]> {
|
||||
return Promise.all(texts.map((t) => this.embed(t)));
|
||||
}
|
||||
|
||||
getModelName(): string {
|
||||
return this.model;
|
||||
}
|
||||
}
|
||||
@@ -0,0 +1,47 @@
|
||||
import { Column, CreateDateColumn, Entity, PrimaryColumn } from 'typeorm';
|
||||
|
||||
/**
 * Relational copy of one embedded document chunk.
 *
 * Each row mirrors a point stored in the vector index: the chunk text
 * plus the document/project metadata carried in the vector payload, and
 * the name of the model that produced the embedding (so stale vectors
 * can be found after a model change).
 */
@Entity('document_chunks')
export class DocumentChunk {
  // char(36) — presumably a UUID string; TODO confirm against the writer.
  @PrimaryColumn({ type: 'char', length: 36 })
  id!: string;

  // Public id of the source document/attachment this chunk came from.
  @Column({ type: 'char', length: 36, name: 'document_id' })
  documentId!: string;

  // Zero-based position of this chunk within the source document.
  @Column({ name: 'chunk_index' })
  chunkIndex!: number;

  // Full chunk text (unbounded; previews live in the vector payload).
  @Column({ type: 'text' })
  content!: string;

  // Document category code, e.g. used for filtering search results.
  @Column({ length: 20, name: 'doc_type' })
  docType!: string;

  // Human-facing document number; not all documents have one.
  @Column({ type: 'varchar', length: 100, name: 'doc_number', nullable: true })
  docNumber!: string | null;

  // Document revision label, when the source document is revisioned.
  @Column({ type: 'varchar', length: 20, nullable: true })
  revision!: string | null;

  // Short project code the document belongs to.
  @Column({ length: 50, name: 'project_code' })
  projectCode!: string;

  // Public id of the owning project (36 chars — presumably a UUID).
  @Column({ length: 36, name: 'project_public_id' })
  projectPublicId!: string;

  // Access level copied onto every chunk so search can filter by it.
  @Column({
    type: 'enum',
    enum: ['PUBLIC', 'INTERNAL', 'CONFIDENTIAL'],
    default: 'INTERNAL',
  })
  classification!: 'PUBLIC' | 'INTERNAL' | 'CONFIDENTIAL';

  // Optional document version label (distinct from `revision`).
  @Column({ type: 'varchar', length: 20, nullable: true })
  version!: string | null;

  // Model that produced the embedding for this chunk.
  @Column({ length: 100, name: 'embedding_model', default: 'nomic-embed-text' })
  embeddingModel!: string;

  // Insertion timestamp, millisecond precision; set by TypeORM.
  @CreateDateColumn({ name: 'created_at', precision: 3 })
  createdAt!: Date;
}
|
||||
@@ -0,0 +1,30 @@
|
||||
import { Injectable, Logger } from '@nestjs/common';
|
||||
import { InjectQueue } from '@nestjs/bullmq';
|
||||
import { Queue } from 'bullmq';
|
||||
|
||||
import { OcrJobData } from './processors/ocr.processor';
|
||||
|
||||
@Injectable()
|
||||
export class IngestionService {
|
||||
private readonly logger = new Logger(IngestionService.name);
|
||||
|
||||
constructor(@InjectQueue('rag-ocr') private readonly ocrQueue: Queue) {}
|
||||
|
||||
async enqueue(data: OcrJobData): Promise<void> {
|
||||
const jobId = data.attachmentPublicId;
|
||||
|
||||
const existing = await this.ocrQueue.getJob(jobId);
|
||||
if (existing) {
|
||||
const state = await existing.getState();
|
||||
if (state === 'active' || state === 'waiting' || state === 'delayed') {
|
||||
this.logger.log(
|
||||
`rag-ocr job already queued for ${jobId} (state: ${state})`
|
||||
);
|
||||
return;
|
||||
}
|
||||
}
|
||||
|
||||
await this.ocrQueue.add('ocr', data, { jobId });
|
||||
this.logger.log(`Enqueued rag-ocr for attachment ${jobId}`);
|
||||
}
|
||||
}
|
||||
@@ -0,0 +1,110 @@
|
||||
import { Processor, WorkerHost } from '@nestjs/bullmq';
|
||||
import { Logger } from '@nestjs/common';
|
||||
import { InjectRepository } from '@nestjs/typeorm';
|
||||
import { Repository } from 'typeorm';
|
||||
import { Job } from 'bullmq';
|
||||
import { v4 as uuidv4 } from 'uuid';
|
||||
|
||||
import { EmbeddingService } from '../embedding.service';
|
||||
import { QdrantService, VectorMetadata } from '../qdrant.service';
|
||||
import { DocumentChunk } from '../entities/document-chunk.entity';
|
||||
import { EmbeddingJobData } from './thai-preprocess.processor';
|
||||
|
||||
const CHUNK_SIZE = 512;
|
||||
const CHUNK_OVERLAP = 50;
|
||||
|
||||
@Processor('rag-embedding')
|
||||
export class EmbeddingProcessor extends WorkerHost {
|
||||
private readonly logger = new Logger(EmbeddingProcessor.name);
|
||||
|
||||
constructor(
|
||||
private readonly embeddingService: EmbeddingService,
|
||||
private readonly qdrantService: QdrantService,
|
||||
@InjectRepository(DocumentChunk)
|
||||
private readonly chunkRepo: Repository<DocumentChunk>
|
||||
) {
|
||||
super();
|
||||
}
|
||||
|
||||
async process(job: Job<EmbeddingJobData>): Promise<void> {
|
||||
const {
|
||||
attachmentPublicId,
|
||||
normalizedText,
|
||||
docType,
|
||||
docNumber,
|
||||
revision,
|
||||
projectCode,
|
||||
projectPublicId,
|
||||
classification,
|
||||
} = job.data;
|
||||
|
||||
const chunks = this.chunkText(normalizedText);
|
||||
const model = this.embeddingService.getModelName();
|
||||
|
||||
const upsertPoints: Parameters<QdrantService['upsertBatch']>[0] = [];
|
||||
const chunkEntities: DocumentChunk[] = [];
|
||||
|
||||
for (let i = 0; i < chunks.length; i++) {
|
||||
const chunkId = uuidv4();
|
||||
const vector = await this.embeddingService.embed(chunks[i]);
|
||||
|
||||
const payload: VectorMetadata = {
|
||||
chunk_id: chunkId,
|
||||
public_id: attachmentPublicId,
|
||||
project_public_id: projectPublicId,
|
||||
doc_type: docType,
|
||||
doc_number: docNumber,
|
||||
revision,
|
||||
project_code: projectCode,
|
||||
classification,
|
||||
content_preview: chunks[i].slice(0, 500),
|
||||
embedding_model: model,
|
||||
};
|
||||
|
||||
upsertPoints.push({ id: chunkId, vector, payload });
|
||||
|
||||
const entity = this.chunkRepo.create({
|
||||
id: chunkId,
|
||||
documentId: attachmentPublicId,
|
||||
chunkIndex: i,
|
||||
content: chunks[i],
|
||||
docType,
|
||||
docNumber,
|
||||
revision,
|
||||
projectCode,
|
||||
projectPublicId,
|
||||
classification,
|
||||
embeddingModel: model,
|
||||
});
|
||||
chunkEntities.push(entity);
|
||||
}
|
||||
|
||||
if (upsertPoints.length > 0) {
|
||||
await this.qdrantService.upsertBatch(upsertPoints);
|
||||
await this.chunkRepo.save(chunkEntities);
|
||||
}
|
||||
|
||||
await this.chunkRepo.manager.query(
|
||||
`UPDATE attachments SET rag_status = 'INDEXED', rag_last_error = NULL WHERE public_id = ?`,
|
||||
[attachmentPublicId]
|
||||
);
|
||||
|
||||
this.logger.log(
|
||||
`Embedded ${chunks.length} chunks for ${attachmentPublicId}`
|
||||
);
|
||||
}
|
||||
|
||||
private chunkText(text: string): string[] {
|
||||
const words = text.split(/\s+/);
|
||||
const chunks: string[] = [];
|
||||
let start = 0;
|
||||
|
||||
while (start < words.length) {
|
||||
const end = Math.min(start + CHUNK_SIZE, words.length);
|
||||
chunks.push(words.slice(start, end).join(' '));
|
||||
start += CHUNK_SIZE - CHUNK_OVERLAP;
|
||||
}
|
||||
|
||||
return chunks.filter((c) => c.trim().length > 0);
|
||||
}
|
||||
}
|
||||
Some files were not shown because too many files have changed in this diff Show More
Reference in New Issue
Block a user