diff --git a/backend/src/modules/correspondence/correspondence.module.ts b/backend/src/modules/correspondence/correspondence.module.ts index f100fda..140f888 100644 --- a/backend/src/modules/correspondence/correspondence.module.ts +++ b/backend/src/modules/correspondence/correspondence.module.ts @@ -19,6 +19,7 @@ import { JsonSchemaModule } from '../json-schema/json-schema.module'; import { UserModule } from '../user/user.module'; import { WorkflowEngineModule } from '../workflow-engine/workflow-engine.module'; import { SearchModule } from '../search/search.module'; +import { FileStorageModule } from '../../common/file-storage/file-storage.module'; /** * CorrespondenceModule @@ -42,6 +43,7 @@ import { SearchModule } from '../search/search.module'; UserModule, WorkflowEngineModule, SearchModule, + FileStorageModule, ], controllers: [CorrespondenceController], providers: [CorrespondenceService, CorrespondenceWorkflowService], diff --git a/backend/src/modules/correspondence/correspondence.service.spec.ts b/backend/src/modules/correspondence/correspondence.service.spec.ts index 50cead4..9fca407 100644 --- a/backend/src/modules/correspondence/correspondence.service.spec.ts +++ b/backend/src/modules/correspondence/correspondence.service.spec.ts @@ -14,6 +14,7 @@ import { JsonSchemaService } from '../json-schema/json-schema.service'; import { WorkflowEngineService } from '../workflow-engine/workflow-engine.service'; import { UserService } from '../user/user.service'; import { SearchService } from '../search/search.service'; +import { FileStorageService } from '../../common/file-storage/file-storage.service'; describe('CorrespondenceService', () => { let service: CorrespondenceService; @@ -118,6 +119,10 @@ describe('CorrespondenceService', () => { provide: SearchService, useValue: { indexDocument: jest.fn() }, }, + { + provide: FileStorageService, + useValue: { commit: jest.fn().mockResolvedValue([]) }, + }, ], }).compile(); diff --git 
a/backend/src/modules/correspondence/correspondence.service.ts b/backend/src/modules/correspondence/correspondence.service.ts index 4d38b8f..ba1f8d0 100644 --- a/backend/src/modules/correspondence/correspondence.service.ts +++ b/backend/src/modules/correspondence/correspondence.service.ts @@ -34,6 +34,7 @@ import { JsonSchemaService } from '../json-schema/json-schema.service'; import { WorkflowEngineService } from '../workflow-engine/workflow-engine.service'; import { UserService } from '../user/user.service'; import { SearchService } from '../search/search.service'; +import { FileStorageService } from '../../common/file-storage/file-storage.service'; /** * CorrespondenceService - Document management (CRUD) @@ -64,7 +65,8 @@ export class CorrespondenceService { private workflowEngine: WorkflowEngineService, private userService: UserService, private dataSource: DataSource, - private searchService: SearchService + private searchService: SearchService, + private fileStorageService: FileStorageService ) {} async create(createDto: CreateCorrespondenceDto, user: User) { @@ -180,6 +182,12 @@ export class CorrespondenceService { body: createDto.body, remarks: createDto.remarks, dueDate: createDto.dueDate ? new Date(createDto.dueDate) : undefined, + documentDate: createDto.documentDate + ? new Date(createDto.documentDate) + : undefined, + issuedDate: createDto.issuedDate + ? new Date(createDto.issuedDate) + : undefined, description: createDto.description, details: createDto.details, createdBy: user.user_id, @@ -199,6 +207,20 @@ export class CorrespondenceService { await queryRunner.manager.save(recipients); } + // Commit attachments from Temp → Permanent (Two-Phase Storage) + if (createDto.attachmentTempIds?.length) { + const issueDate = createDto.issuedDate + ? new Date(createDto.issuedDate) + : createDto.documentDate + ? 
new Date(createDto.documentDate) + : undefined; + + await this.fileStorageService.commit(createDto.attachmentTempIds, { + issueDate, + documentType: 'Correspondence', + }); + } + await queryRunner.commitTransaction(); // Start Workflow Instance (non-blocking) @@ -457,6 +479,10 @@ export class CorrespondenceService { if (updateDto.remarks) revisionUpdate.remarks = updateDto.remarks; // Format Date correctly if string if (updateDto.dueDate) revisionUpdate.dueDate = new Date(updateDto.dueDate); + if (updateDto.documentDate) + revisionUpdate.documentDate = new Date(updateDto.documentDate); + if (updateDto.issuedDate) + revisionUpdate.issuedDate = new Date(updateDto.issuedDate); if (updateDto.description) revisionUpdate.description = updateDto.description; if (updateDto.details) revisionUpdate.details = updateDto.details; @@ -465,6 +491,20 @@ export class CorrespondenceService { await this.revisionRepo.update(revision.id, revisionUpdate); } + // 4.5 Commit new attachments from Temp → Permanent (Two-Phase Storage) + if (updateDto.attachmentTempIds?.length) { + const issueDate = updateDto.issuedDate + ? new Date(updateDto.issuedDate) + : updateDto.documentDate + ? new Date(updateDto.documentDate) + : revision.issuedDate || revision.documentDate || undefined; + + await this.fileStorageService.commit(updateDto.attachmentTempIds, { + issueDate: issueDate ? new Date(issueDate) : undefined, + documentType: 'Correspondence', + }); + } + // 5. 
Update Recipients if provided if (updateDto.recipients) { const recipientRepo = this.dataSource.getRepository( diff --git a/backend/src/modules/correspondence/dto/create-correspondence.dto.ts b/backend/src/modules/correspondence/dto/create-correspondence.dto.ts index 9bcf3c4..503207f 100644 --- a/backend/src/modules/correspondence/dto/create-correspondence.dto.ts +++ b/backend/src/modules/correspondence/dto/create-correspondence.dto.ts @@ -84,6 +84,30 @@ export class CreateCorrespondenceDto { @IsOptional() isInternal?: boolean; + @ApiPropertyOptional({ + description: 'Document Date (วันที่เอกสาร)', + example: '2025-12-06', + }) + @IsDateString() + @IsOptional() + documentDate?: string; + + @ApiPropertyOptional({ + description: 'Issued Date (วันที่ออกเอกสาร) — ใช้จัดเก็บไฟล์ตาม YYYY/MM', + example: '2025-12-06T00:00:00Z', + }) + @IsDateString() + @IsOptional() + issuedDate?: string; + + @ApiPropertyOptional({ + description: 'Attachment temp IDs from upload phase (Two-Phase Storage)', + example: ['uuid-temp-1', 'uuid-temp-2'], + }) + @IsArray() + @IsOptional() + attachmentTempIds?: string[]; + // ✅ เพิ่ม Field สำหรับ Impersonation (เลือกองค์กรผู้ส่ง) @ApiPropertyOptional({ description: 'Originator Organization ID (for impersonation)', diff --git a/backend/src/modules/drawing/contract-drawing.service.ts b/backend/src/modules/drawing/contract-drawing.service.ts index 8c993da..a21ad09 100644 --- a/backend/src/modules/drawing/contract-drawing.service.ts +++ b/backend/src/modules/drawing/contract-drawing.service.ts @@ -11,6 +11,7 @@ import { Repository, DataSource, In, Brackets } from 'typeorm'; import { ContractDrawing } from './entities/contract-drawing.entity'; import { Attachment } from '../../common/file-storage/entities/attachment.entity'; import { User } from '../user/entities/user.entity'; +import { Contract } from '../contract/entities/contract.entity'; // DTOs import { CreateContractDrawingDto } from './dto/create-contract-drawing.dto'; @@ -29,10 +30,23 @@ 
export class ContractDrawingService { private drawingRepo: Repository<ContractDrawing>, @InjectRepository(Attachment) private attachmentRepo: Repository<Attachment>, +@InjectRepository(Contract) + private contractRepo: Repository<Contract>, private fileStorageService: FileStorageService, private dataSource: DataSource ) {} + /** + * Resolve issueDate from contract.startDate for file storage path + * Fallback: contract.startDate → current date + */ + private async resolveIssueDateByProject(projectId: number): Promise<Date> { + const contract = await this.contractRepo.findOne({ + where: { projectId }, + }); + return contract?.startDate ?? new Date(); + } + /** * สร้างแบบสัญญาใหม่ (Create Contract Drawing) * - ตรวจสอบเลขที่ซ้ำในโปรเจกต์ @@ -84,9 +98,12 @@ export class ContractDrawingService { // 4. Commit Files (ย้ายไฟล์จริง) if (createDto.attachmentIds?.length) { // ✅ FIX TS2345: แปลง number[] เป็น string[] ก่อนส่ง + const issueDate = await this.resolveIssueDateByProject( + createDto.projectId + ); await this.fileStorageService.commit( createDto.attachmentIds.map(String), - { documentType: 'ContractDrawing' } + { issueDate, documentType: 'ContractDrawing' } ); } @@ -225,10 +242,14 @@ export class ContractDrawingService { drawing.attachments = newAttachments; // Commit new files + // ✅ FIX TS2345: แปลง number[] เป็น string[] ก่อนส่ง + const issueDate = await this.resolveIssueDateByProject( + drawing.projectId + ); await this.fileStorageService.commit( updateDto.attachmentIds.map(String), - { documentType: 'ContractDrawing' } + { issueDate, documentType: 'ContractDrawing' } ); } diff --git a/backend/src/modules/drawing/drawing.module.ts b/backend/src/modules/drawing/drawing.module.ts index 287c915..0b19846 100644 --- a/backend/src/modules/drawing/drawing.module.ts +++ b/backend/src/modules/drawing/drawing.module.ts @@ -20,6 +20,7 @@ import { ShopDrawingSubCategory } from './entities/shop-drawing-sub-category.ent // Common Entities import { Attachment } from '../../common/file-storage/entities/attachment.entity';
+import { Contract } from '../contract/entities/contract.entity'; // Services import { ShopDrawingService } from './shop-drawing.service'; @@ -57,6 +58,7 @@ import { UserModule } from '../user/user.module'; // Common Attachment, + Contract, ]), FileStorageModule, UserModule, diff --git a/n8n-workflow-lcbp3.json b/n8n-workflow-lcbp3.json index 18c952a..4b6abf0 100644 --- a/n8n-workflow-lcbp3.json +++ b/n8n-workflow-lcbp3.json @@ -38,23 +38,23 @@ }, "options": {} }, - "id": "8ae8102d-0de5-4646-87c9-ed4bb619614d", + "id": "347a7bdb-b681-45dc-b9eb-2fe15e9d7eb3", "name": "Form Trigger", "type": "n8n-nodes-base.formTrigger", "typeVersion": 2.2, - "position": [-1360, -27472], - "webhookId": "5cb2ee58-164a-4db4-a107-46cf1a51009f", + "position": [3952, -26304], + "webhookId": "8c87176d-fa61-4a82-ab2a-1c14615e720c", "notes": "เปิด URL เพื่อเลือก Model ก่อนรัน" }, { "parameters": { "jsCode": "// Read model selected from Form Trigger dropdown\nconst formData = $('Form Trigger').first()?.json || {};\nconst selectedModelLabel = String(formData['Ollama Model (Primary)'] || '');\n\n// Extract just the model ID (before the space in the label)\nconst MODEL_MAP = {\n 'qwen2.5:7b-instruct-q4_K_M (สมดุล - แนะนำ)': 'qwen2.5:7b-instruct-q4_K_M',\n 'scb10x/typhoon2.1-gemma3-4b (เร็ว + ไทยดี)': 'scb10x/typhoon2.1-gemma3-4b',\n 'promptnow/openthaigpt1.5-7b-instruct-q4_k_m (ไทยเฉพาะทาง)': 'promptnow/openthaigpt1.5-7b-instruct-q4_k_m'\n};\nconst selectedModel = MODEL_MAP[selectedModelLabel] || 'scb10x/typhoon2.1-gemma3-4b';\n\nconst batchSizeInput = parseInt(formData['Batch Size'] || '0');\nconst excelFileInput = String(formData['Excel File Path'] || '').trim();\n\nconst CONFIG = {\n // Ollama Settings\n OLLAMA_HOST: 'http://192.168.20.100:11434',\n // Model selected from Form UI\n OLLAMA_MODEL_PRIMARY: selectedModel,\n // Fallback\n OLLAMA_MODEL_FALLBACK: 'mistral:7b-instruct-q4_K_M',\n \n // Backend Settings\n BACKEND_URL: 'https://backend.np-dms.work',\n MIGRATION_TOKEN: 'Bearer 
eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJ1c2VybmFtZSI6Im1pZ3JhdGlvbl9ib3QiLCJzdWIiOjUsInNjb3BlIjoiR2xvYmFsIiwiaWF0IjoxNzcyNzc0MzI5LCJleHAiOjQ5Mjg1MzQzMjl9.TtA8zoHy7G9J5jPgYQPv7yw-9X--B_hl-Nv-c9V4PaA',\n \n // Batch Settings\n BATCH_SIZE: batchSizeInput > 0 ? batchSizeInput : 2,\n BATCH_ID: (() => { const d = new Date(Date.now() + 7 * 3600000); const s = d.toISOString(); return s.substring(0,10).replace(/-/g,'') + ':' + s.substring(11,16).replace(/:/g,''); })(),\n DELAY_MS: 2000,\n \n // Thresholds\n CONFIDENCE_HIGH: 0.85,\n CONFIDENCE_LOW: 0.60,\n MAX_RETRY: 3,\n FALLBACK_THRESHOLD: 5,\n \n // Source Definitions - แก้ไขโฟลเดอร์และไฟล์ทำงานที่นี่\n EXCEL_FILE: excelFileInput || '/home/node/.n8n-files/staging_ai/C22024.xlsx',\n SOURCE_PDF_DIR: '/home/node/.n8n-files/staging_ai/Incoming/08C.2/2567',\n LOG_PATH: '/home/node/.n8n-files/migration_logs',\n PROJECT_ID: 1\n};\n\nreturn { config: CONFIG };" }, - "id": "f6d94e21-daa6-4dcc-ba37-e822ded168d6", + "id": "20824a92-7433-4644-be1a-22ddc665bb44", "name": "Set Configuration", "type": "n8n-nodes-base.code", "typeVersion": 2, - "position": [-1184, -27584], + "position": [4112, -26304], "notes": "กำหนดค่า Configuration ทั้งหมด - แก้ไขที่นี่ก่อนรัน" }, { @@ -73,11 +73,11 @@ "timeout": 10000 } }, - "id": "9f8bd98a-997d-4471-a80d-d93abe64888f", + "id": "8a1b921e-def2-4302-bf77-3e0e717bfc11", "name": "Fetch Categories", "type": "n8n-nodes-base.httpRequest", "typeVersion": 4.1, - "position": [-1184, -27392], + "position": [4000, -26128], "notes": "ดึง Categories จาก Backend" }, { @@ -96,37 +96,22 @@ "timeout": 10000 } }, - "id": "c40d67a5-de33-4898-8d2d-cf77bd89fa20", + "id": "cac43f4f-eef6-4783-a622-968d15a672b0", "name": "Fetch Tags", "type": "n8n-nodes-base.httpRequest", "typeVersion": 4.1, - "position": [-1008, -27392], + "position": [4144, -26128], "notes": "ดึง Tags ที่มีอยู่แล้วจาก Backend" }, - { - "parameters": { - "url": "={{$('Set Configuration').first().json.config.BACKEND_URL}}/health", - "options": { - "timeout": 
5000 - } - }, - "id": "02ef2241-b2b9-436e-98b2-7bbad7da67e6", - "name": "Check Backend Health", - "type": "n8n-nodes-base.httpRequest", - "typeVersion": 4.1, - "position": [-816, -27584], - "onError": "continueErrorOutput", - "notes": "ตรวจสอบ Backend พร้อมใช้งาน" - }, { "parameters": { "jsCode": "const fs = require('fs');\nconst config = $('Set Configuration').first().json.config;\n\n// Check file mount and inputs\ntry {\n if (!fs.existsSync(config.EXCEL_FILE)) {\n throw new Error(`Excel file not found at: ${config.EXCEL_FILE}`);\n }\n if (!fs.existsSync(config.SOURCE_PDF_DIR)) {\n throw new Error(`PDF Source directory not found at: ${config.SOURCE_PDF_DIR}`);\n }\n \n const files = fs.readdirSync(config.SOURCE_PDF_DIR);\n \n // Check write permission to log path\n if (!fs.existsSync(config.LOG_PATH)) {\n fs.mkdirSync(config.LOG_PATH, { recursive: true });\n }\n fs.writeFileSync(`${config.LOG_PATH}/.preflight_ok`, new Date().toISOString());\n \n // Grab categories out of the previous node (Fetch Categories) if available\n // API returns raw array — each item becomes a separate n8n item\n let categories = ['Correspondence','RFA','Drawing','Transmittal','Report','Other'];\n try {\n const upstreamItems = $('Fetch Categories').all().map(i => i.json);\n if (upstreamItems && upstreamItems.length > 0) {\n categories = upstreamItems.map(c => c.typeName || c.typeCode || c); \n }\n } catch(e) {}\n \n // Grab existing tags from Fetch Tags node\n // API returns raw array — each item becomes a separate n8n item\n let existingTags = [];\n try {\n const tagItems = $('Fetch Tags').all().map(i => i.json);\n existingTags = Array.isArray(tagItems) ? 
tagItems.map(t => t.tag_name || t.name || '').filter(Boolean) : [];\n } catch(e) {}\n \n return [{ json: { \n preflight_ok: true, \n pdf_count_in_source: files.length,\n excel_target: config.EXCEL_FILE,\n system_categories: categories,\n existing_tags: existingTags,\n timestamp: new Date().toISOString()\n }}];\n} catch (err) {\n throw new Error(`Pre-flight check failed: ${err.message}`);\n}" }, - "id": "2204f397-a2bf-4eaa-88b6-d0242d06fad7", + "id": "5878a59f-a287-4658-90ff-5f47fb2dcf9f", "name": "File Mount Check", "type": "n8n-nodes-base.code", "typeVersion": 2, - "position": [-816, -27392], + "position": [4288, -26128], "notes": "ตรวจสอบ File System มีไฟล์ Excel และ Folder ตามตั้งค่า" }, { @@ -135,11 +120,11 @@ "query": "SELECT last_processed_index, status FROM migration_progress WHERE batch_id = '{{$('Set Configuration').first().json.config.BATCH_ID}}' LIMIT 1", "options": {} }, - "id": "a1630364-d44b-4ef4-bc51-f0f75adf20f9", + "id": "96d5491d-d2ad-4f5f-bcb0-899509edae9f", "name": "Read Checkpoint", "type": "n8n-nodes-base.mySql", "typeVersion": 2.4, - "position": [-768, -27168], + "position": [4320, -25936], "alwaysOutputData": true, "credentials": { "mySql": { @@ -155,33 +140,33 @@ "fileSelector": "={{ $json.excel_target }}", "options": {} }, - "id": "71e94a39-e01d-46c1-8f35-c36da02211dc", + "id": "dc51fe19-c41b-4272-9fad-da9977ee3683", "name": "Read Excel Binary", "type": "n8n-nodes-base.readWriteFile", "typeVersion": 1, - "position": [-1168, -27168], + "position": [4000, -25936], "notes": "ดึงไฟล์ Excel ขึ้นมาไว้ในหน่วยความจำ" }, { "parameters": { "options": {} }, - "id": "023aaed1-8c33-480c-ab43-8241702ac17d", + "id": "6d6fba87-9a86-4cb2-bd02-0340a8dab023", "name": "Read Excel", "type": "n8n-nodes-base.spreadsheetFile", "typeVersion": 2, - "position": [-976, -27168], + "position": [4160, -25936], "notes": "แปลงข้อมูล Excel เป็น JSON Data" }, { "parameters": { - "jsCode": "const cpJson = $input.first()?.json || {};\nconst startIndex = 
cpJson.last_processed_index || 0;\nconst config = $('Set Configuration').first().json.config;\n\nconst allItems = $('Read Excel').all().map(i => i.json);\nconst remaining = allItems.slice(startIndex);\nconst currentBatch = remaining.slice(0, config.BATCH_SIZE);\n\n// Encoding Normalization\nconst normalize = (str) => {\n if (!str) return '';\n return String(str).normalize('NFC').trim();\n};\n\nreturn currentBatch.map((item, i) => {\n const getVal = (possibleKeys) => {\n const exactMatch = possibleKeys.find(k => item[k] !== undefined);\n if (exactMatch) return item[exactMatch];\n const lowerTrimmedKeys = Object.keys(item).map(k => ({ original: k, parsed: k.toLowerCase().trim() }));\n for (const pk of possibleKeys) {\n const found = lowerTrimmedKeys.find(k => k.parsed === pk.toLowerCase().trim());\n if (found) return item[found.original];\n }\n return '';\n };\n\n const docNum = getVal(['document_number', 'correspondence_number', 'Document Number', 'Corr. No.']);\n const excelFileName = getVal(['File name', 'file_name', 'File Name', 'filename']);\n \n if (!excelFileName) {\n throw new Error(`Missing 'File name' column for row ${i + startIndex + 1}, document: ${docNum}`);\n }\n \n return {\n json: {\n document_number: normalize(docNum),\n subject: normalize(getVal(['Subject', 'subject', 'Title', 'title'])),\n legacy_number: normalize(getVal(['legacy_number', 'Legacy Number', 'Response Doc.'])),\n excel_revision: getVal(['revision', 'Revision', 'rev']) || 1,\n original_index: startIndex + i,\n batch_id: config.BATCH_ID,\n file_name: normalize(excelFileName),\n issued_date: normalize(getVal(['issued_date', 'Issued_date', 'Issued Date', 'date', 'Date', 'document_date'])),\n received_date: normalize(getVal(['received_date', 'Received_date', 'Received Date'])),\n correspondence_type: getVal(['correspondence_type', 'type', 'Type', 'Category']),\n sender: normalize(getVal(['sender', 'Sender', 'From', 'from'])),\n receiver: normalize(getVal(['receiver', 'Receiver', 'To', 
'to'])),\n project_code: normalize(getVal(['project', 'Project', 'project_code']))\n }\n };\n});" + "jsCode": "const cpJson = $input.first()?.json || {};\nconst startIndex = cpJson.last_processed_index || 0;\nconst config = $('Set Configuration').first().json.config;\n\nconst allItems = $('Read Excel').all().map(i => i.json);\nconst remaining = allItems.slice(startIndex);\nconst currentBatch = remaining.slice(0, config.BATCH_SIZE);\n\n// Encoding Normalization\nconst normalize = (str) => {\n if (!str) return '';\n return String(str).normalize('NFC').trim();\n};\n\nreturn currentBatch.map((item, i) => {\n const getVal = (possibleKeys) => {\n const exactMatch = possibleKeys.find(k => item[k] !== undefined);\n if (exactMatch) return item[exactMatch];\n const lowerTrimmedKeys = Object.keys(item).map(k => ({ original: k, parsed: k.toLowerCase().trim() }));\n for (const pk of possibleKeys) {\n const found = lowerTrimmedKeys.find(k => k.parsed === pk.toLowerCase().trim());\n if (found) return item[found.original];\n }\n return '';\n };\n\n const docNum = getVal(['document_number', 'correspondence_number', 'Document Number', 'Corr. 
No.']);\n const excelFileName = getVal(['File name', 'file_name', 'File Name', 'filename']);\n \n if (!excelFileName) {\n throw new Error(`Missing 'File name' column for row ${i + startIndex + 1}, document: ${docNum}`);\n }\n \n return {\n json: {\n document_number: normalize(docNum),\n subject: normalize(getVal(['Subject', 'subject', 'Title', 'title'])),\n remarks: normalize(getVal(['remarks'])),\n excel_revision: getVal(['revision', 'Revision', 'rev']) || 1,\n original_index: startIndex + i,\n batch_id: config.BATCH_ID,\n file_name: normalize(excelFileName),\n issued_date: normalize(getVal(['issued_date', 'Issued_date', 'Issued Date', 'date', 'Date', 'document_date'])),\n received_date: normalize(getVal(['received_date', 'Received_date', 'Received Date'])),\n correspondence_type: getVal(['correspondence_type', 'type', 'Type', 'Category']),\n sender: normalize(getVal(['sender', 'Sender', 'From', 'from'])),\n receiver: normalize(getVal(['receiver', 'Receiver', 'To', 'to'])),\n project_code: normalize(getVal(['project', 'Project', 'project_code']))\n }\n };\n});" }, - "id": "0b6f8817-fe02-4482-b707-64c58692d77b", + "id": "1f115c9e-ecf0-4c8b-b035-876d6fc2da4f", "name": "Process Batch + Encoding", "type": "n8n-nodes-base.code", "typeVersion": 2, - "position": [-560, -27584], + "position": [4560, -25952], "alwaysOutputData": true, "notes": "ตัด Batch + Normalize UTF-8" }, @@ -189,11 +174,11 @@ "parameters": { "jsCode": "const fs = require('fs');\nconst path = require('path');\nconst config = $('Set Configuration').first().json.config;\n\nconst items = $input.all();\nif (!items || items.length === 0) return [];\n\nconst validated = [];\nconst errors = [];\n\nfor (const item of items) {\n const fileName = item.json?.file_name;\n if (!fileName) {\n errors.push({\n ...item,\n json: { ...item.json, file_valid: false, error: 'file_name is missing', error_type: 'FILE_NOT_FOUND', file_exists: false }\n });\n continue;\n }\n \n let safeName = 
path.basename(String(fileName)).normalize('NFC');\n if (!safeName.toLowerCase().endsWith('.pdf')) {\n safeName += '.pdf';\n }\n const filePath = path.resolve(config.SOURCE_PDF_DIR, safeName);\n \n if (!filePath.startsWith(path.resolve(config.SOURCE_PDF_DIR))) {\n errors.push({\n ...item,\n json: { ...item.json, file_valid: false, error: 'Path traversal detected', error_type: 'SECURITY', file_exists: false }\n });\n continue;\n }\n \n try {\n if (fs.existsSync(filePath)) {\n const stats = fs.statSync(filePath);\n validated.push({\n ...item,\n json: { ...item.json, file_valid: true, file_exists: true, file_size: stats.size, file_path: filePath }\n });\n } else {\n errors.push({\n ...item,\n json: { ...item.json, file_valid: false, error: `File not found: ${safeName}`, error_type: 'FILE_NOT_FOUND', file_exists: false }\n });\n }\n } catch (err) {\n errors.push({\n ...item,\n json: { ...item.json, file_valid: false, error: err.message, error_type: 'UNKNOWN', file_exists: false }\n });\n }\n}\n\n// Log errors inline to CSV (single-output node — errors don't flow downstream)\nif (errors.length > 0) {\n const csvPath = `${config.LOG_PATH}/error_log.csv`;\n const header = 'timestamp,document_number,error_type,error_message\\n';\n const esc = (s) => `\"${String(s || '').replace(/\"/g, '\"\"')}\"`;\n if (!fs.existsSync(config.LOG_PATH)) fs.mkdirSync(config.LOG_PATH, { recursive: true });\n if (!fs.existsSync(csvPath)) fs.writeFileSync(csvPath, header, 'utf8');\n for (const e of errors) {\n const line = [new Date().toISOString(), esc(e.json.document_number), esc(e.json.error_type), esc(e.json.error)].join(',') + '\\n';\n fs.appendFileSync(csvPath, line, 'utf8');\n }\n}\n\nreturn validated;" }, - "id": "254e8f42-e32a-486f-a5d8-c77bfcb5ee44", + "id": "8fb3d45a-eecb-4415-aee0-841420d3779f", "name": "File Validator", "type": "n8n-nodes-base.code", "typeVersion": 2, - "position": [-384, -27584], + "position": [4704, -25952], "notes": "ตรวจสอบไฟล์ PDF ตัวชี้ใน Directory จาก Config" 
}, { @@ -202,11 +187,11 @@ "query": "SELECT is_fallback_active, recent_error_count FROM migration_fallback_state WHERE batch_id = '{{$('Set Configuration').first().json.config.BATCH_ID}}' LIMIT 1", "options": {} }, - "id": "c86229c0-493d-4138-a450-db65e0bc1d5d", + "id": "81bb9e8f-5e11-4787-92ac-7cc660796f06", "name": "Check Fallback State", "type": "n8n-nodes-base.mySql", "typeVersion": 2.4, - "position": [-560, -27184], + "position": [5056, -26336], "alwaysOutputData": true, "credentials": { "mySql": { @@ -219,13 +204,13 @@ }, { "parameters": { - "jsCode": "const config = $('Set Configuration').first().json.config;\nconst fallbackState = $('Check Fallback State').first()?.json || { is_fallback_active: false, recent_error_count: 0 };\nconst isFallback = fallbackState.is_fallback_active || false;\nconst model = isFallback ? config.OLLAMA_MODEL_FALLBACK : config.OLLAMA_MODEL_PRIMARY;\n\nconst dbContext = $('Fetch DB Context').all().map(i => i.json);\nconst dbProjects = dbContext.filter(d => d.type === 'projects').map(d => ({id: d.id, code: d.text1, name: d.text2}));\nconst dbDisciplines = dbContext.filter(d => d.type === 'disciplines').map(d => ({id: d.id, th: d.text1, en: d.text2}));\nconst dbOrgs = dbContext.filter(d => d.type === 'organizations').map(d => ({id: d.id, name: d.text1, code: d.text2}));\nconst dbTags = dbContext.filter(d => d.type === 'tags').map(d => ({id: d.id, name: d.text1, description: d.text2 || ''}));\nconst dbCorrTypes = dbContext.filter(d => d.type === 'correspondence_types').map(d => ({id: d.id, code: d.text1, name: d.text2}));\n\nlet systemCategories = ['Correspondence','RFA','Drawing','Transmittal','Report','Other'];\ntry { systemCategories = $('File Mount Check').first().json.system_categories || systemCategories; } catch (e) {}\n\nconst pdfItems = $('Extract PDF Text').all();\nconst metaItems = $('File Validator').all();\n\nreturn pdfItems.map((pdfItem, i) => {\n const item = metaItems[i] || pdfItem;\n\n const docNum = 
String(item.json.document_number || '');\n const subject = String(item.json.subject || '');\n const projectCode = String(item.json.project_code || '');\n const legacyNum = String(item.json.legacy_number || '');\n const issuedDate = String(item.json.issued_date || '');\n const receivedDate = String(item.json.received_date || '');\n const corrType = String(item.json.correspondence_type || '');\n const senderCode = String(item.json.sender || '');\n const receiverCode = String(item.json.receiver || '');\n\n const prompt = `Validate and summarize this document. Respond in JSON.\nDocument Number: ${docNum}\nOriginal Subject: ${subject}\nExtracted Text: ${(pdfItem.json.response || pdfItem.json.data || '').substring(0, 4000)}\n\nExisting Projects: ${JSON.stringify(dbProjects)}\nExisting Disciplines: ${JSON.stringify(dbDisciplines)}\nExisting Orgs: ${JSON.stringify(dbOrgs)}\nExisting Categories: ${JSON.stringify(systemCategories)}\nExisting Tags: ${JSON.stringify(dbTags)}\n\nAnalyze the content to provide:\n1. Validate the Subject and Dates against PDF text.\n2. Write a detailed summary (4-5 sentences) for the body field.\n3. Suggest 1-5 tags. Prefer Existing Tags when applicable. 
Each tag MUST have tag_name and description.\n\nRespond ONLY with this exact JSON structure:\n{\n \"is_valid\": true,\n \"confidence\": 0.9,\n \"category\": \"Correspondence\",\n \"subject\": \"Verified or corrected subject line\",\n \"body\": \"Detailed 4-5 sentence summary of the document content for archival.\",\n \"discipline_id\": 64,\n \"tags\": [{\"tag_name\": \"TagName\", \"description\": \"Why this tag applies\"}],\n \"key_points\": [\"...\"],\n \"document_date\": \"YYYY-MM-DD\",\n \"issued_date\": \"YYYY-MM-DD\",\n \"received_date\": \"YYYY-MM-DD\"\n}`;\n\n return {\n json: {\n ...item.json,\n ollama_payload: {\n model: model,\n prompt: prompt,\n stream: false,\n format: \"json\",\n options: { temperature: 0.2, num_ctx: 8192 }\n },\n system_categories: systemCategories,\n pre_mapped: {\n project_id: (projectCode && dbProjects.find(p => p.code === projectCode)?.id) || dbProjects.find(p => docNum.includes(p.code))?.id || config.PROJECT_ID,\n sender_id: dbOrgs.find(o => senderCode.includes(o.code) || senderCode.includes(o.name))?.id,\n receiver_id: dbOrgs.find(o => receiverCode.includes(o.code) || receiverCode.includes(o.name))?.id\n }\n }\n };\n});" + "jsCode": "const config = $('Set Configuration').first().json.config;\nconst fallbackState = $('Check Fallback State').first()?.json || { is_fallback_active: false, recent_error_count: 0 };\nconst isFallback = fallbackState.is_fallback_active || false;\nconst model = isFallback ? 
config.OLLAMA_MODEL_FALLBACK : config.OLLAMA_MODEL_PRIMARY;\n\nconst dbContext = $('Fetch DB Context').all().map(i => i.json);\nconst dbProjects = dbContext.filter(d => d.type === 'projects').map(d => ({id: d.id, code: d.text1, name: d.text2}));\nconst dbDisciplines = dbContext.filter(d => d.type === 'disciplines').map(d => ({id: d.id, th: d.text1, en: d.text2}));\nconst dbOrgs = dbContext.filter(d => d.type === 'organizations').map(d => ({id: d.id, name: d.text1, code: d.text2}));\nconst dbTags = dbContext.filter(d => d.type === 'tags').map(d => ({id: d.id, name: d.text1, description: d.text2 || ''}));\nconst dbCorrTypes = dbContext.filter(d => d.type === 'correspondence_types').map(d => ({id: d.id, code: d.text1, name: d.text2}));\n\nlet systemCategories = ['Correspondence','RFA','Drawing','Transmittal','Report','Other'];\ntry { systemCategories = $('File Mount Check').first().json.system_categories || systemCategories; } catch (e) {}\n\nconst pdfItems = $('Extract PDF Text').all();\nconst metaItems = $('File Validator').all();\n\nreturn pdfItems.map((pdfItem, i) => {\n const item = metaItems[i] || pdfItem;\n\n const docNum = String(item.json.document_number || '');\n const subject = String(item.json.subject || '');\n const projectCode = String(item.json.project_code || '');\n const remarks = String(item.json.remarks || '');\n const issuedDate = String(item.json.issued_date || '');\n const receivedDate = String(item.json.received_date || '');\n const corrType = String(item.json.correspondence_type || '');\n const senderCode = String(item.json.sender || '');\n const receiverCode = String(item.json.receiver || '');\n\n const prompt = `Validate and summarize this document. 
Respond in JSON.\\nDocument Number: ${docNum}\\nOriginal Subject: ${subject}\\nExtracted Text: ${(pdfItem.json.response || pdfItem.json.data || '').substring(0, 4000)}\\n\\nExisting Projects: ${JSON.stringify(dbProjects)}\\nExisting Disciplines: ${JSON.stringify(dbDisciplines)}\\nExisting Orgs: ${JSON.stringify(dbOrgs)}\\nExisting Categories: ${JSON.stringify(systemCategories)}\\nExisting Tags: ${JSON.stringify(dbTags)}\\n\\nAnalyze the content to provide:\\n1. Validate the Subject and Dates against PDF text.\\n2. Write a detailed summary (4-5 sentences) for the body field.\\n3. Suggest 1-5 tags. Prefer Existing Tags when applicable. Each tag MUST have tag_name and description.\\n\\nRespond ONLY with this exact JSON structure:\\n{\\n \\\"is_valid\\\": true,\\n \\\"confidence\\\": 0.9,\\n \\\"category\\\": \\\"Correspondence\\\",\\n \\\"subject\\\": \\\"Verified or corrected subject line\\\",\\n \\\"body\\\": \\\"Detailed 4-5 sentence summary of the document content for archival.\\\",\\n \\\"discipline_id\\\": 64,\\n \\\"tags\\\": [{\\\"tag_name\\\": \\\"TagName\\\", \\\"description\\\": \\\"Why this tag applies\\\"}],\\n \\\"key_points\\\": [\\\"...\\\"],\\n \\\"document_date\\\": \\\"YYYY-MM-DD\\\",\\n \\\"issued_date\\\": \\\"YYYY-MM-DD\\\",\\n \\\"received_date\\\": \\\"YYYY-MM-DD\\\"\\n}`;\n\n return {\n json: {\n ...item.json,\n ollama_payload: {\n model: model,\n prompt: prompt,\n stream: false,\n format: \"json\",\n options: { temperature: 0.2, num_ctx: 8192 }\n },\n system_categories: systemCategories,\n pre_mapped: {\n project_id: (projectCode && dbProjects.find(p => p.code === projectCode)?.id) || dbProjects.find(p => docNum.includes(p.code))?.id || config.PROJECT_ID,\n sender_id: dbOrgs.find(o => senderCode.includes(o.code) || senderCode.includes(o.name))?.id,\n receiver_id: dbOrgs.find(o => receiverCode.includes(o.code) || receiverCode.includes(o.name))?.id\n }\n }\n };\n});" }, - "id": "81346da7-a5b0-4a5f-9dc5-34bfc61ebac2", + "id": 
"08cc5940-194e-486a-bffe-d4ed6a00e252", "name": "Build AI Prompt", "type": "n8n-nodes-base.code", "typeVersion": 2, - "position": [-192, -27200], + "position": [4736, -26144], "notes": "สร้าง Prompt โดยใช้ Categories จาก System" }, { @@ -239,22 +224,22 @@ "timeout": 120000 } }, - "id": "8634f965-41f4-485e-9c01-22640b42d8cd", + "id": "8e757be4-1b86-45f6-8ae4-45d6d573bcdb", "name": "Ollama AI Analysis", "type": "n8n-nodes-base.httpRequest", "typeVersion": 4.1, - "position": [-560, -26992], + "position": [4912, -26144], "notes": "เรียก Ollama วิเคราะห์เอกสาร" }, { "parameters": { "jsCode": "const ollamaItems = $input.all();\nconst originalItems = $('Build AI Prompt').all();\nconst results = [];\n\nconst CATEGORY_TO_TYPE_CODE = {\n 'Correspondence': 'LETTER',\n 'RFA': 'RFA',\n 'Transmittal': 'TRANSMITTAL',\n 'Drawing': 'OTHER',\n 'Report': 'OTHER',\n 'Other': 'OTHER',\n};\n\nfor (let i = 0; i < ollamaItems.length; i++) {\n const ollamaItem = ollamaItems[i];\n const originalItem = originalItems[i];\n if (!originalItem) continue;\n const baseJson = originalItem.json;\n\n try {\n let raw = ollamaItem.json.response || '';\n raw = raw.replace(/`{3}json/gi, '').replace(/`{3}/g, '').trim();\n if (!raw) throw new Error('Empty response from AI');\n\n const result = JSON.parse(raw);\n const systemCategories = baseJson.system_categories || [];\n let finalCategory = result.category;\n if (!systemCategories.includes(finalCategory)) {\n finalCategory = String(baseJson.document_number || '').includes('-RFA-') ? 
'RFA' : 'Correspondence';\n }\n\n const typeCode = CATEGORY_TO_TYPE_CODE[finalCategory] || 'LETTER';\n const preMapped = baseJson.pre_mapped || {};\n\n results.push({\n json: {\n ...baseJson,\n ai_result: {\n suggested_category: finalCategory,\n type_code: typeCode,\n confidence: result.confidence || 0.8,\n project_id: preMapped.project_id || null,\n discipline_id: result.discipline_id || 64,\n sender_id: preMapped.sender_id || null,\n receiver_id: preMapped.receiver_id || null,\n subject: result.subject || baseJson.subject || '',\n body: result.body || result.summary || '',\n issued_date: result.issued_date || baseJson.issued_date,\n received_date: result.received_date || baseJson.received_date,\n summary: result.summary || result.body || '',\n key_points: result.key_points || [],\n tags: (result.tags || []).map(t => (typeof t === 'string' ? { tag_name: t, description: '' } : { tag_name: t.tag_name || t.name || '', description: t.description || '' })).filter(t => t.tag_name),\n is_valid: result.is_valid !== false\n }\n }\n });\n } catch (err) {\n results.push({\n json: {\n ...baseJson,\n parse_error: err.message,\n raw_ai_response: ollamaItem.json.response\n }\n });\n }\n}\n\nreturn results;" }, - "id": "2154cfc3-e4d1-499f-b918-923e78f442e9", + "id": "9ff12b75-03c6-43f0-8654-855d5da42e56", "name": "Parse & Validate AI Response", "type": "n8n-nodes-base.code", "typeVersion": 2, - "position": [-352, -26992], + "position": [5104, -26144], "notes": "Parse JSON + Validate Schema + Enum Check" }, { @@ -263,11 +248,11 @@ "query": "INSERT INTO migration_fallback_state (batch_id, recent_error_count, is_fallback_active) VALUES ('{{$('Set Configuration').first().json.config.BATCH_ID}}', 1, FALSE) ON DUPLICATE KEY UPDATE recent_error_count = recent_error_count + 1, is_fallback_active = CASE WHEN recent_error_count + 1 >= {{$('Set Configuration').first().json.config.FALLBACK_THRESHOLD}} THEN TRUE ELSE is_fallback_active END, updated_at = NOW()", "options": {} }, - "id": 
"a1adab22-8382-4336-ba74-15a4721d51f4", + "id": "eb703a14-2670-4cf4-b7d0-762e148bf4f7", "name": "Update Fallback State", "type": "n8n-nodes-base.mySql", "typeVersion": 2.4, - "position": [0, -27488], + "position": [4928, -25952], "credentials": { "mySql": { "id": "CHHfbKhMacNo03V4", @@ -280,33 +265,33 @@ "parameters": { "jsCode": "const config = $('Set Configuration').first().json.config;\nconst items = $('Parse & Validate AI Response').all();\n\nconst results = [];\n\nfor (const item of items) {\n const data = item.json;\n let resultItem = { json: { ...data } };\n \n if (data.parse_error || !data.ai_result) {\n resultItem.json.route_index = 3;\n results.push(resultItem);\n continue;\n }\n \n const ai = data.ai_result;\n \n if (ai.confidence >= config.CONFIDENCE_HIGH) {\n resultItem.json.route_index = 0;\n resultItem.json.staging_status = 'PENDING';\n resultItem.json.staging_remarks = 'Ready for auto-ingest (High Confidence)';\n } else if (ai.confidence >= config.CONFIDENCE_LOW) {\n resultItem.json.route_index = 1;\n resultItem.json.staging_status = 'PENDING';\n resultItem.json.staging_remarks = 'Flagged for human review (Medium Confidence)';\n } else {\n resultItem.json.route_index = 2;\n resultItem.json.staging_status = 'REJECTED';\n resultItem.json.staging_remarks = ai.is_valid === false ? 
'AI marked invalid' : `Rejected for human review (Low Confidence: ${ai.confidence.toFixed(2)})`;\n }\n results.push(resultItem);\n}\n\nreturn results;" }, - "id": "34288a94-82da-4642-88b1-b0929f921eeb", + "id": "a8b9d938-39a0-49c2-98ca-8defcec0d5ab", "name": "Confidence Router", "type": "n8n-nodes-base.code", "typeVersion": 2, - "position": [-192, -26992], + "position": [5312, -26304], "notes": "แยกตาม Confidence: Auto(≥0.85) / Review(≥0.60) / Reject(<0.60)" }, { "parameters": { "jsCode": "const fs = require('fs');\nconst item = $input.first();\nconst config = $('Set Configuration').first().json.config;\n\nconst csvPath = `${config.LOG_PATH}/reject_log.csv`;\nconst header = 'timestamp,document_number,title,reject_reason,ai_confidence,key_points\\n';\nconst esc = (s) => `\"${String(s || '').replace(/\"/g, '\"\"')}\"`;\n\nif (!fs.existsSync(config.LOG_PATH)) {\n fs.mkdirSync(config.LOG_PATH, { recursive: true });\n}\n\nif (!fs.existsSync(csvPath)) {\n fs.writeFileSync(csvPath, header, 'utf8');\n}\n\nconst line = [\n new Date().toISOString(),\n esc(item.json.document_number),\n esc(item.json.title),\n esc(item.json.staging_remarks),\n item.json.ai_result?.confidence ?? 
'N/A',\n esc(JSON.stringify(item.json.ai_result?.key_points || []))\n].join(',') + '\\n';\n\nfs.appendFileSync(csvPath, line, 'utf8');\n\nreturn [$input.first()];" }, - "id": "e12e7219-0f80-4414-8d4a-aa9363ec1ee9", + "id": "f8f5484e-5f2f-4124-a3f3-f405e1fc9972", "name": "Log Reject to CSV", "type": "n8n-nodes-base.code", "typeVersion": 2, - "position": [304, -27216], + "position": [5680, -25920], "notes": "บันทึกรายการที่ถูกปฏิเสธลง CSV" }, { "parameters": { "jsCode": "const fs = require('fs');\nconst items = $input.all();\nconst config = $('Set Configuration').first().json.config;\n\nconst csvPath = `${config.LOG_PATH}/error_log.csv`;\nconst header = 'timestamp,document_number,error_type,error_message,raw_ai_response\\n';\nconst esc = (s) => `\"${String(s || '').replace(/\"/g, '\"\"')}\"`;\n\nif (!fs.existsSync(config.LOG_PATH)) {\n fs.mkdirSync(config.LOG_PATH, { recursive: true });\n}\n\nif (!fs.existsSync(csvPath)) {\n fs.writeFileSync(csvPath, header, 'utf8');\n}\n\nfor (const item of items) {\n const line = [\n new Date().toISOString(),\n esc(item.json.document_number),\n esc(item.json.error_type || 'UNKNOWN'),\n esc(item.json.error || item.json.parse_error),\n esc(item.json.raw_ai_response || '')\n ].join(',') + '\\n';\n \n fs.appendFileSync(csvPath, line, 'utf8');\n}\n\nreturn items;" }, - "id": "a2526431-176a-4246-a9ad-3b2dbfec574a", + "id": "2625fd9a-6623-45f4-9a49-bba0c911bb0f", "name": "Log Error to CSV", "type": "n8n-nodes-base.code", "typeVersion": 2, - "position": [144, -27120], + "position": [5488, -25856], "notes": "บันทึก Error ลง CSV (จาก File Validator)" }, { @@ -329,11 +314,11 @@ "timeout": 10000 } }, - "id": "7d620e96-a067-430e-af53-06c4492c11e4", + "id": "b61ff3b1-f11c-4308-a81e-b12d8540d058", "name": "Log Error to DB", "type": "n8n-nodes-base.httpRequest", "typeVersion": 4.1, - "position": [464, -27104], + "position": [5872, -25856], "onError": "continueErrorOutput", "notes": "บันทึก Error ผ่าน Backend API (ป้องกัน SQL Injection)" }, @@ 
-342,11 +327,11 @@ "amount": "={{$('Set Configuration').first().json.config.DELAY_MS / 1000}}", "unit": "seconds" }, - "id": "5909f64b-67a5-40e7-b7b0-3e031376e5ad", + "id": "86de1e7b-9142-41ec-9e5e-817677273603", "name": "Delay", "type": "n8n-nodes-base.wait", "typeVersion": 1, - "position": [704, -27152], + "position": [6096, -25920], "webhookId": "38e97a99-4dcc-4b63-977a-a02945a1c369", "notes": "หน่วงเวลาระหว่าง Batches" }, @@ -454,22 +439,22 @@ }, "options": {} }, - "id": "0558f317-fc7a-460a-bcfd-60ea541fc2b9", + "id": "6d178e98-238c-419d-bc17-f464a4ea98b1", "name": "Route by Confidence", "type": "n8n-nodes-base.switch", "typeVersion": 3.2, - "position": [-16, -27312] + "position": [5328, -26144] }, { "parameters": { "fileSelector": "={{ $json.file_path }}", "options": {} }, - "id": "319a9a0b-2837-4f1c-8e5a-58a0e95dfed5", + "id": "7ac25b43-80c6-457d-8c40-6fa32de32fae", "name": "Read PDF File", "type": "n8n-nodes-base.readWriteFile", "typeVersion": 1, - "position": [-224, -27584], + "position": [4560, -26304], "onError": "continueErrorOutput" }, { @@ -500,22 +485,22 @@ "timeout": 60000 } }, - "id": "6db11a13-cd5f-4e85-9263-085590e0b07f", + "id": "6a0329ce-99d8-45ee-b997-22f7f6420310", "name": "Upload to Backend", "type": "n8n-nodes-base.httpRequest", "typeVersion": 4.1, - "position": [-544, -27376], + "position": [5712, -26336], "notes": "Upload PDF to Backend Temp Storage" }, { "parameters": { "jsCode": "const item = $input.first();\nconst binaryData = $('Read PDF File').first().binary.data;\n\nreturn {\n json: { ...item.json },\n binary: { data: binaryData }\n};" }, - "id": "eeea6259-6bb0-42cc-8812-d6dd22e8fa1c", + "id": "fd2c0b17-ba48-488c-89ce-1d95e37e1f75", "name": "Restore Binary", "type": "n8n-nodes-base.code", "typeVersion": 2, - "position": [-400, -27376], + "position": [5536, -26336], "notes": "Re-attach PDF binary จาก Read PDF File เพื่อส่ง Upload (หลัง AI ตรวจแล้ว)" }, { @@ -555,11 +540,11 @@ "timeout": 600000 } }, - "id": 
"0b34e1a9-9d83-4c8a-9d59-2d9be4b23123", + "id": "a9ccd66d-ece4-48cb-8fac-6daa9257904a", "name": "Extract PDF Text", "type": "n8n-nodes-base.httpRequest", "typeVersion": 4.2, - "position": [-240, -27376], + "position": [4848, -26320], "onError": "continueErrorOutput" }, { @@ -568,11 +553,11 @@ "query": "SELECT 'projects' as type, id, project_code as text1, project_name as text2 FROM projects\nUNION ALL\nSELECT 'disciplines' as type, id, code_name_th as text1, code_name_en as text2 FROM disciplines\nUNION ALL\nSELECT 'organizations' as type, id, organization_name as text1, organization_code as text2 FROM organizations\nUNION ALL\nSELECT 'tags' as type, id, tag_name as text1, description as text2 FROM tags\nUNION ALL\nSELECT 'correspondence_types' as type, id, type_code as text1, type_name as text2 FROM correspondence_types", "options": {} }, - "id": "5f785556-e8f5-40ae-8fd6-786c46ed7090", + "id": "3b941922-b7a0-4ce7-99c7-c16b72ba3b04", "name": "Fetch DB Context", "type": "n8n-nodes-base.mySql", "typeVersion": 2.4, - "position": [-336, -27200], + "position": [4576, -26144], "alwaysOutputData": true, "credentials": { "mySql": { @@ -584,13 +569,13 @@ }, { "parameters": { - "jsCode": "const items = $input.all();\nconst config = $('Set Configuration').first().json.config;\n\nreturn items.map(itemWrapper => {\n const item = itemWrapper.json;\n const ai = item.ai_result || {};\n\n return {\n json: {\n ...item,\n enqueue_payload: {\n document_number: String(item.document_number || ''),\n subject: String(ai.subject || item.subject || ''),\n original_subject: String(item.subject || ''),\n category: ai.suggested_category || 'Correspondence',\n body: String(ai.body || ai.summary || ''),\n ai_summary: ai.summary || ai.body || '',\n project_id: Number(ai.project_id || config.PROJECT_ID),\n sender_org_id: ai.sender_id || null,\n receiver_org_id: ai.receiver_id || null,\n issued_date: ai.issued_date || item.issued_date || '',\n received_date: ai.received_date || item.received_date 
|| '',\n remarks: item.staging_remarks || '',\n extracted_tags: ai.tags || [],\n details: { tags: ai.tags || [] },\n temp_attachment_id: $('Upload to Backend').first()?.json?.id || item.temp_attachment_id || null,\n is_valid: ai.is_valid !== false,\n confidence: ai.confidence || 0.0,\n ai_issues: ai.key_points || []\n }\n }\n };\n});" + "jsCode": "const items = $input.all();\nconst config = $('Set Configuration').first().json.config;\n\nreturn items.map(itemWrapper => {\n const item = itemWrapper.json;\n const ai = item.ai_result || {};\n\n return {\n json: {\n ...item,\n enqueue_payload: {\n document_number: String(item.document_number || ''),\n subject: String(ai.subject || item.subject || ''),\n original_subject: String(item.subject || ''),\n category: ai.suggested_category || 'Correspondence',\n body: String(ai.body || ai.summary || ''),\n ai_summary: ai.summary || ai.body || '',\n project_id: Number(ai.project_id || config.PROJECT_ID),\n sender_org_id: ai.sender_id || null,\n receiver_org_id: ai.receiver_id || null,\n issued_date: ai.issued_date || item.issued_date || '',\n received_date: ai.received_date || item.received_date || '',\n remarks: item.remarks ? item.remarks + (item.staging_remarks ? 
' [System: ' + item.staging_remarks + ']' : '') : (item.staging_remarks || ''),\n extracted_tags: ai.tags || [],\n details: { tags: ai.tags || [] },\n temp_attachment_id: $('Upload to Backend').first()?.json?.id || item.temp_attachment_id || null,\n is_valid: ai.is_valid !== false,\n confidence: ai.confidence || 0.0,\n ai_issues: ai.key_points || []\n }\n }\n };\n});" }, - "id": "6bc2f3a0-9094-4bfd-a0a0-ba9a8effb53a", + "id": "51613de7-db28-41bd-bdb3-f7ba12b9186e", "name": "Build Enqueue Payload", "typeVersion": 2, "type": "n8n-nodes-base.code", - "position": [192, -27408], + "position": [5888, -26336], "notes": "สร้าง payload สำหรับ Enqueue Migration" }, { @@ -613,11 +598,11 @@ "timeout": 30000 } }, - "id": "69152618-eed4-4b2b-a34f-e03cb630649a", + "id": "aecb004d-846b-4aea-8174-7b9c7aa5c39f", "name": "Enqueue to Review Queue", "type": "n8n-nodes-base.httpRequest", "typeVersion": 4.1, - "position": [368, -27408], + "position": [5600, -26144], "notes": "ส่งข้อมูลเข้า Staging Queue" }, { @@ -626,11 +611,11 @@ "query": "INSERT INTO migration_progress (batch_id, last_processed_index, status) VALUES ('{{$('Set Configuration').first().json.config.BATCH_ID}}', {{$json.original_index || 0}}, 'RUNNING') ON DUPLICATE KEY UPDATE last_processed_index = {{$json.original_index || 0}}, updated_at = NOW()", "options": {} }, - "id": "a7926600-01e6-49d3-9f29-ef7bab1c79c0", + "id": "748b1ee9-b3d8-4900-92a1-4444ded15c61", "name": "Save Checkpoint", "type": "n8n-nodes-base.mySql", "typeVersion": 2.4, - "position": [560, -27312], + "position": [5792, -26144], "credentials": { "mySql": { "id": "CHHfbKhMacNo03V4", @@ -638,6 +623,86 @@ } }, "notes": "บันทึกความคืบหน้าลง Database" + }, + { + "parameters": { + "content": "## Initialization & Preflight", + "height": 368, + "width": 544, + "color": 4 + }, + "type": "n8n-nodes-base.stickyNote", + "position": [3936, -26352], + "typeVersion": 1, + "id": "9eb3cfbd-2fe4-4237-a7ee-9387aa909efb", + "name": "Sticky Note" + }, + { + "parameters": { + 
"url": "={{$('Set Configuration').first().json.config.BACKEND_URL}}/health", + "options": { + "timeout": 5000 + } + }, + "id": "6d29f005-ae65-42e8-8d3d-55992927a13a", + "name": "Check Backend Health", + "type": "n8n-nodes-base.httpRequest", + "typeVersion": 4.1, + "position": [4288, -26304], + "onError": "continueErrorOutput", + "notes": "ตรวจสอบ Backend พร้อมใช้งาน" + }, + { + "parameters": { + "content": "## Data Ingestion & Batching", + "height": 256, + "width": 928, + "color": 5 + }, + "type": "n8n-nodes-base.stickyNote", + "position": [3936, -25968], + "typeVersion": 1, + "id": "f2daf117-9cf2-477e-9c69-56a92503c783", + "name": "Sticky Note1" + }, + { + "parameters": { + "content": "## Text Extraction & AI Analysis", + "height": 368, + "width": 752, + "color": 6 + }, + "type": "n8n-nodes-base.stickyNote", + "position": [4496, -26352], + "typeVersion": 1, + "id": "0b628ba8-de1a-40c6-8722-fc0e2411d666", + "name": "Sticky Note2" + }, + { + "parameters": { + "content": "## Error Logging", + "height": 224, + "width": 800, + "color": 3 + }, + "type": "n8n-nodes-base.stickyNote", + "position": [5264, -25936], + "typeVersion": 1, + "id": "fee4de9d-be2f-4e92-aea6-2a11ee89af8c", + "name": "Sticky Note3" + }, + { + "parameters": { + "content": "## Routing & Ingestion", + "height": 416, + "width": 784, + "color": 2 + }, + "type": "n8n-nodes-base.stickyNote", + "position": [5264, -26352], + "typeVersion": 1, + "id": "23619681-d936-4608-92ba-bfb10c062789", + "name": "Sticky Note4" } ], "pinData": {}, @@ -664,17 +729,6 @@ ] ] }, - "Check Backend Health": { - "main": [ - [ - { - "node": "Fetch Categories", - "type": "main", - "index": 0 - } - ] - ] - }, "Fetch Categories": { "main": [ [ @@ -988,6 +1042,17 @@ } ] ] + }, + "Check Backend Health": { + "main": [ + [ + { + "node": "Fetch Categories", + "type": "main", + "index": 0 + } + ] + ] } }, "active": false, @@ -996,7 +1061,7 @@ "binaryMode": "separate", "availableInMCP": false }, - "versionId": 
"1a305c6e-35fe-43cb-b5fa-bda279a36500", + "versionId": "3fea0965-f77e-4bba-a4be-00ed7c232899", "meta": { "instanceId": "9e70e47c1eaf3bac72f497ddfbde0983f840f7d0f059537f7e37dd70de18ecb7" }, diff --git a/specs/03-Data-and-Storage/0.md b/specs/03-Data-and-Storage/0.md index 9025b92..2d500ae 100644 --- a/specs/03-Data-and-Storage/0.md +++ b/specs/03-Data-and-Storage/0.md @@ -1,975 +1,46 @@ - ผมจะสร้างไฟล์ JSON สำหรับ n8n Free Plan (ไม่ใช้ Environment Variables) และปรับปรุงเอกสารคู่มือให้สอดคล้อง โดยใช้ **Static Parameters** และ **Set Node** แทนการอ่านจาก Environment - ---- - -## 📝 ไฟล์ `n8n.workflow` (Free Plan Compatible) - -```json -{ - "name": "LCBP3 Migration Workflow v1.8.0", - "meta": { - "instanceId": "lcbp3-migration-free" - }, - "settings": { - "executionOrder": "v1" - }, - "nodes": [ - { - "parameters": { - "rule": { - "interval": [ - { - "field": "cronExpression", - "expression": "0 22 * * *" - } - ] - } - }, - "id": "trigger-schedule", - "name": "Schedule Trigger (22:00)", - "type": "n8n-nodes-base.scheduleTrigger", - "typeVersion": 1.1, - "position": [0, 0], - "notes": "เริ่มรันอัตโนมัติทุกวันเวลา 22:00" - }, - { - "parameters": { - "jsCode": "// ============================================\n// CONFIGURATION - แก้ไขค่าที่นี่\n// ============================================\nconst CONFIG = {\n // Ollama Settings\n OLLAMA_HOST: 'http://192.168.20.100:11434',\n OLLAMA_MODEL_PRIMARY: 'llama3.2:3b',\n OLLAMA_MODEL_FALLBACK: 'mistral:7b-instruct-q4_K_M',\n \n // Backend Settings\n BACKEND_URL: 'https://api.np-dms.work',\n MIGRATION_TOKEN: 'Bearer YOUR_MIGRATION_TOKEN_HERE',\n \n // Batch Settings\n BATCH_SIZE: 10,\n BATCH_ID: 'migration_20260226',\n DELAY_MS: 2000,\n \n // Thresholds\n CONFIDENCE_HIGH: 0.85,\n CONFIDENCE_LOW: 0.60,\n MAX_RETRY: 3,\n FALLBACK_THRESHOLD: 5,\n \n // Paths\n STAGING_PATH: '/share/np-dms/staging_ai',\n LOG_PATH: '/share/np-dms/n8n/migration_logs',\n \n // Database\n DB_HOST: '192.168.1.100',\n DB_PORT: 3306,\n DB_NAME: 
'lcbp3_production',\n DB_USER: 'migration_bot',\n DB_PASSWORD: 'YOUR_DB_PASSWORD_HERE'\n};\n\n// Store in global workflow data\n$workflow.staticData = $workflow.staticData || {};\n$workflow.staticData.config = CONFIG;\n\nreturn [{ json: { config_loaded: true, timestamp: new Date().toISOString(), config: CONFIG } }];" - }, - "id": "config-setter", - "name": "Set Configuration", - "type": "n8n-nodes-base.code", - "typeVersion": 2, - "position": [200, 0], - "notes": "กำหนดค่า Configuration ทั้งหมด - แก้ไขที่นี่ก่อนรัน" - }, - { - "parameters": { - "method": "GET", - "url": "={{$workflow.staticData.config.BACKEND_URL}}/api/meta/categories", - "sendHeaders": true, - "headerParameters": { - "parameters": [ - { - "name": "Authorization", - "value": "={{$workflow.staticData.config.MIGRATION_TOKEN}}" - } - ] - }, - "options": { - "timeout": 10000 - } - }, - "id": "preflight-categories", - "name": "Fetch Categories", - "type": "n8n-nodes-base.httpRequest", - "typeVersion": 4.1, - "position": [400, 0], - "notes": "ดึง Categories จาก Backend" - }, - { - "parameters": { - "method": "GET", - "url": "={{$workflow.staticData.config.BACKEND_URL}}/api/health", - "options": { - "timeout": 5000 - } - }, - "id": "preflight-health", - "name": "Check Backend Health", - "type": "n8n-nodes-base.httpRequest", - "typeVersion": 4.1, - "position": [400, 200], - "notes": "ตรวจสอบ Backend พร้อมใช้งาน", - "onError": "continueErrorOutput" - }, - { - "parameters": { - "jsCode": "const fs = require('fs');\nconst config = $workflow.staticData.config;\n\n// Check file mount\ntry {\n const files = fs.readdirSync(config.STAGING_PATH);\n if (files.length === 0) throw new Error('staging_ai is empty');\n \n // Check write permission to log path\n fs.writeFileSync(`${config.LOG_PATH}/.preflight_ok`, new Date().toISOString());\n \n // Store categories\n const categories = $input.first().json.categories || \n ['Correspondence','RFA','Drawing','Transmittal','Report','Other'];\n 
$workflow.staticData.systemCategories = categories;\n \n return [{ json: { \n preflight_ok: true, \n file_count: files.length,\n system_categories: categories,\n timestamp: new Date().toISOString()\n }}];\n} catch (err) {\n throw new Error(`Pre-flight check failed: ${err.message}`);\n}" - }, - "id": "preflight-check", - "name": "File Mount Check", - "type": "n8n-nodes-base.code", - "typeVersion": 2, - "position": [600, 0], - "notes": "ตรวจสอบ File System และเก็บ Categories" - }, - { - "parameters": { - "operation": "executeQuery", -n "host": "={{$workflow.staticData.config.DB_HOST}}", - "port": "={{$workflow.staticData.config.DB_PORT}}", - "database": "={{$workflow.staticData.config.DB_NAME}}", - "user": "={{$workflow.staticData.config.DB_USER}}", - "password": "={{$workflow.staticData.config.DB_PASSWORD}}", - "query": "SELECT last_processed_index, status FROM migration_progress WHERE batch_id = '{{$workflow.staticData.config.BATCH_ID}}' LIMIT 1", - "options": {} - }, - "id": "checkpoint-read", - "name": "Read Checkpoint", - "type": "n8n-nodes-base.mySql", - "typeVersion": 2.4, - "position": [800, 0], - "notes": "อ่านตำแหน่งล่าสุดที่ประมวลผล", - "onError": "continueErrorOutput" - }, - { - "parameters": { - "operation": "toData", - "binaryProperty": "data", - "options": { - "sheetName": "Sheet1" - } - }, - "id": "excel-reader", - "name": "Read Excel", - "type": "n8n-nodes-base.spreadsheetFile", - "typeVersion": 2, - "position": [800, 200], - "notes": "อ่านไฟล์ Excel รายการเอกสาร" - }, - { - "parameters": { - "jsCode": "const checkpoint = $input.first().json[0] || { last_processed_index: 0, status: 'NEW' };\nconst startIndex = checkpoint.last_processed_index || 0;\nconst config = $workflow.staticData.config;\n\nconst allItems = $('Read Excel').all()[0].json.data || [];\nconst remaining = allItems.slice(startIndex);\nconst currentBatch = remaining.slice(0, config.BATCH_SIZE);\n\n// Encoding Normalization\nconst normalize = (str) => {\n if (!str) return '';\n return 
String(str).normalize('NFC').trim();\n};\n\nreturn currentBatch.map((item, i) => ({\n json: {\n document_number: normalize(item.document_number || item['Document Number']),\n title: normalize(item.title || item.Title || item['Subject']),\n legacy_number: normalize(item.legacy_number || item['Legacy Number']),\n excel_revision: item.revision || item.Revision || 1,\n original_index: startIndex + i,\n batch_id: config.BATCH_ID,\n file_name: `${normalize(item.document_number)}.pdf`\n }\n}));" - }, - "id": "batch-processor", - "name": "Process Batch + Encoding", - "type": "n8n-nodes-base.code", - "typeVersion": 2, - "position": [1000, 0], - "notes": "ตัด Batch + Normalize UTF-8" - }, - { - "parameters": { - "jsCode": "const fs = require('fs');\nconst path = require('path');\nconst config = $workflow.staticData.config;\n\nconst items = $input.all();\nconst validated = [];\nconst errors = [];\n\nfor (const item of items) {\n const docNum = item.json.document_number;\n \n // Sanitize filename\n const safeName = path.basename(String(docNum).replace(/[^a-zA-Z0-9\\-_.]/g, '_')).normalize('NFC');\n const filePath = path.resolve(config.STAGING_PATH, `${safeName}.pdf`);\n \n // Path traversal check\n if (!filePath.startsWith(config.STAGING_PATH)) {\n errors.push({\n ...item,\n json: { ...item.json, error: 'Path traversal detected', error_type: 'SECURITY', file_exists: false }\n });\n continue;\n }\n \n try {\n if (fs.existsSync(filePath)) {\n const stats = fs.statSync(filePath);\n validated.push({\n ...item,\n json: { ...item.json, file_exists: true, file_size: stats.size, file_path: filePath }\n });\n } else {\n errors.push({\n ...item,\n json: { ...item.json, error: `File not found: ${safeName}.pdf`, error_type: 'FILE_NOT_FOUND', file_exists: false }\n });\n }\n } catch (err) {\n errors.push({\n ...item,\n json: { ...item.json, error: err.message, error_type: 'FILE_ERROR', file_exists: false }\n });\n }\n}\n\n// Output 0: Validated, Output 1: Errors\nreturn [validated, 
errors];" - }, - "id": "file-validator", - "name": "File Validator", - "type": "n8n-nodes-base.code", - "typeVersion": 2, - "position": [1200, 0], - "notes": "ตรวจสอบไฟล์ PDF มีอยู่จริง + Sanitize path" - }, - { - "parameters": { - "operation": "executeQuery", - "host": "={{$workflow.staticData.config.DB_HOST}}", - "port": "={{$workflow.staticData.config.DB_PORT}}", - "database": "={{$workflow.staticData.config.DB_NAME}}", - "user": "={{$workflow.staticData.config.DB_USER}}", - "password": "={{$workflow.staticData.config.DB_PASSWORD}}", - "query": "SELECT is_fallback_active, recent_error_count FROM migration_fallback_state WHERE batch_id = '{{$workflow.staticData.config.BATCH_ID}}' LIMIT 1", - "options": {} - }, - "id": "fallback-check", - "name": "Check Fallback State", - "type": "n8n-nodes-base.mySql", - "typeVersion": 2.4, - "position": [1400, -200], - "notes": "ตรวจสอบว่าต้องใช้ Fallback Model หรือไม่", - "onError": "continueErrorOutput" - }, - { - "parameters": { - "jsCode": "const config = $workflow.staticData.config;\nconst fallbackState = $input.first().json[0] || { is_fallback_active: false, recent_error_count: 0 };\n\nconst isFallback = fallbackState.is_fallback_active || false;\nconst model = isFallback ? config.OLLAMA_MODEL_FALLBACK : config.OLLAMA_MODEL_PRIMARY;\n\nconst systemCategories = $workflow.staticData.systemCategories || \n ['Correspondence','RFA','Drawing','Transmittal','Report','Other'];\n\nconst items = $('File Validator').all();\n\nreturn items.map(item => {\n const systemPrompt = `You are a Document Controller for a large construction project.\nYour task is to validate document metadata.\nYou MUST respond ONLY with valid JSON. No explanation, no markdown, no extra text.\nIf there are no issues, \"detected_issues\" must be an empty array [].`;\n\n const userPrompt = `Validate this document metadata and respond in JSON:\n\nDocument Number: ${item.json.document_number}\nTitle: ${item.json.title}\nExpected Pattern: [ORG]-[TYPE]-[SEQ] e.g. 
\"TCC-COR-0001\"\nCategory List (MUST match system enum exactly): ${JSON.stringify(systemCategories)}\n\nRespond ONLY with this exact JSON structure:\n{\n \"is_valid\": true | false,\n \"confidence\": 0.0 to 1.0,\n \"suggested_category\": \"\",\n \"detected_issues\": [\"\"],\n \"suggested_title\": \"\"\n}`;\n\n return {\n json: {\n ...item.json,\n active_model: model,\n is_fallback: isFallback,\n system_categories: systemCategories,\n ollama_payload: {\n model: model,\n prompt: `${systemPrompt}\\n\\n${userPrompt}`,\n stream: false,\n format: 'json'\n }\n }\n };\n});" - }, - "id": "prompt-builder", - "name": "Build AI Prompt", - "type": "n8n-nodes-base.code", - "typeVersion": 2, - "position": [1400, 0], - "notes": "สร้าง Prompt โดยใช้ Categories จาก System" - }, - { - "parameters": { - "method": "POST", - "url": "={{$workflow.staticData.config.OLLAMA_HOST}}/api/generate", - "sendBody": true, - "specifyBody": "json", - "jsonBody": "={{ $json.ollama_payload }}", - "options": { - "timeout": 30000 - } - }, - "id": "ollama-call", - "name": "Ollama AI Analysis", - "type": "n8n-nodes-base.httpRequest", - "typeVersion": 4.1, - "position": [1600, 0], - "notes": "เรียก Ollama วิเคราะห์เอกสาร" - }, - { - "parameters": { - "jsCode": "const items = $input.all();\nconst parsed = [];\nconst parseErrors = [];\n\nfor (const item of items) {\n try {\n let raw = item.json.response || '';\n \n // Clean markdown\n raw = raw.replace(/```json/gi, '').replace(/```/g, '').trim();\n const result = JSON.parse(raw);\n \n // Schema Validation\n if (typeof result.is_valid !== 'boolean') throw new Error('is_valid must be boolean');\n if (typeof result.confidence !== 'number' || result.confidence < 0 || result.confidence > 1) {\n throw new Error('confidence must be float 0.0-1.0');\n }\n if (!Array.isArray(result.detected_issues)) throw new Error('detected_issues must be array');\n \n // Enum Validation\n const systemCategories = item.json.system_categories || [];\n if 
(!systemCategories.includes(result.suggested_category)) {\n throw new Error(`Category \"${result.suggested_category}\" not in system enum`);\n }\n \n parsed.push({\n ...item,\n json: { ...item.json, ai_result: result, parse_error: null }\n });\n } catch (err) {\n parseErrors.push({\n ...item,\n json: {\n ...item.json,\n ai_result: null,\n parse_error: err.message,\n raw_ai_response: item.json.response,\n error_type: 'AI_PARSE_ERROR'\n }\n });\n }\n}\n\nreturn [parsed, parseErrors];" - }, - "id": "json-parser", - "name": "Parse & Validate AI Response", - "type": "n8n-nodes-base.code", - "typeVersion": 2, - "position": [1800, 0], - "notes": "Parse JSON + Validate Schema + Enum Check" - }, - { - "parameters": { - "operation": "executeQuery", - "host": "={{$workflow.staticData.config.DB_HOST}}", - "port": "={{$workflow.staticData.config.DB_PORT}}", - "database": "={{$workflow.staticData.config.DB_NAME}}", - "user": "={{$workflow.staticData.config.DB_USER}}", - "password": "={{$workflow.staticData.config.DB_PASSWORD}}", - "query": "INSERT INTO migration_fallback_state (batch_id, recent_error_count, is_fallback_active) VALUES ('{{$workflow.staticData.config.BATCH_ID}}', 1, FALSE) ON DUPLICATE KEY UPDATE recent_error_count = recent_error_count + 1, is_fallback_active = CASE WHEN recent_error_count + 1 >= {{$workflow.staticData.config.FALLBACK_THRESHOLD}} THEN TRUE ELSE is_fallback_active END, updated_at = NOW()", - "options": {} -n }, - "id": "fallback-update", - "name": "Update Fallback State", - "type": "n8n-nodes-base.mySql", - "typeVersion": 2.4, - "position": [2000, 200], - "notes": "เพิ่ม Error count และตรวจสอบ Fallback threshold" - }, - { - "parameters": { - "jsCode": "const config = $workflow.staticData.config;\nconst items = $('Parse & Validate AI Response').all();\n\nconst autoIngest = [];\nconst reviewQueue = [];\nconst rejectLog = [];\nconst errorLog = [];\n\nfor (const item of items) {\n if (item.json.parse_error || !item.json.ai_result) {\n 
errorLog.push(item);\n continue;\n }\n \n const ai = item.json.ai_result;\n \n // Revision Drift Protection (ถ้ามีข้อมูลจาก DB)\n if (item.json.current_db_revision !== undefined) {\n const expectedRev = item.json.current_db_revision + 1;\n if (parseInt(item.json.excel_revision) !== expectedRev) {\n reviewQueue.push({\n ...item,\n json: { ...item.json, review_reason: `Revision drift: Excel=${item.json.excel_revision}, Expected=${expectedRev}` }\n });\n continue;\n }\n }\n \n // Confidence Routing\n if (ai.confidence >= config.CONFIDENCE_HIGH && ai.is_valid === true) {\n autoIngest.push(item);\n } else if (ai.confidence >= config.CONFIDENCE_LOW) {\n reviewQueue.push({\n ...item,\n json: { ...item.json, review_reason: `Confidence ${ai.confidence.toFixed(2)} < ${config.CONFIDENCE_HIGH}` }\n });\n } else {\n rejectLog.push({\n ...item,\n json: { ...item.json, reject_reason: ai.is_valid === false ? 'AI marked invalid' : `Confidence ${ai.confidence.toFixed(2)} < ${config.CONFIDENCE_LOW}` }\n });\n }\n}\n\n// Output 0: Auto, 1: Review, 2: Reject, 3: Error\nreturn [autoIngest, reviewQueue, rejectLog, errorLog];" - }, - "id": "confidence-router", - "name": "Confidence Router", - "type": "n8n-nodes-base.code", - "typeVersion": 2, - "position": [2000, 0], - "notes": "แยกตาม Confidence: Auto(≥0.85) / Review(≥0.60) / Reject(<0.60)" - }, - { - "parameters": { - "method": "POST", - "url": "={{$workflow.staticData.config.BACKEND_URL}}/api/correspondences/import", - "sendHeaders": true, - "headerParameters": { - "parameters": [ - { - "name": "Authorization", - "value": "={{$workflow.staticData.config.MIGRATION_TOKEN}}" - }, - { - "name": "Idempotency-Key", - "value": "={{$json.document_number}}:{{$workflow.staticData.config.BATCH_ID}}" - } - ] - }, - "sendBody": true, - "specifyBody": "json", - "jsonBody": "={\n \"document_number\": \"{{$json.document_number}}\",\n \"title\": \"{{$json.ai_result.suggested_title || $json.title}}\",\n \"category\": 
\"{{$json.ai_result.suggested_category}}\",\n \"source_file_path\": \"{{$json.file_path}}\",\n \"ai_confidence\": {{$json.ai_result.confidence}},\n \"ai_issues\": {{JSON.stringify($json.ai_result.detected_issues)}},\n \"migrated_by\": \"SYSTEM_IMPORT\",\n \"batch_id\": \"{{$workflow.staticData.config.BATCH_ID}}\",\n \"details\": {\n \"legacy_number\": \"{{$json.legacy_number}}\"\n }\n}", - "options": { - "timeout": 30000 - } - }, - "id": "backend-import", - "name": "Import to Backend", - "type": "n8n-nodes-base.httpRequest", - "typeVersion": 4.1, - "position": [2200, -200], - "notes": "ส่งข้อมูลเข้า LCBP3 Backend พร้อม Idempotency-Key" - }, - { - "parameters": { - "jsCode": "const item = $input.first();\nconst shouldCheckpoint = item.json.original_index % 10 === 0;\n\nreturn [{\n json: {\n ...item.json,\n should_update_checkpoint: shouldCheckpoint,\n checkpoint_index: item.json.original_index,\n import_status: 'success',\n timestamp: new Date().toISOString()\n }\n}];" - }, - "id": "checkpoint-flag", - "name": "Flag Checkpoint", - "type": "n8n-nodes-base.code", - "typeVersion": 2, - "position": [2400, -200], - "notes": "กำหนดว่าจะบันทึก Checkpoint หรือไม่ (ทุก 10 records)" - }, - { - "parameters": { - "operation": "executeQuery", - "host": "={{$workflow.staticData.config.DB_HOST}}", - "port": "={{$workflow.staticData.config.DB_PORT}}", - "database": "={{$workflow.staticData.config.DB_NAME}}", - "user": "={{$workflow.staticData.config.DB_USER}}", - "password": "={{$workflow.staticData.config.DB_PASSWORD}}", - "query": "INSERT INTO migration_progress (batch_id, last_processed_index, status) VALUES ('{{$workflow.staticData.config.BATCH_ID}}', {{$json.checkpoint_index}}, 'RUNNING') ON DUPLICATE KEY UPDATE last_processed_index = {{$json.checkpoint_index}}, updated_at = NOW()", - "options": {} - }, - "id": "checkpoint-save", - "name": "Save Checkpoint", - "type": "n8n-nodes-base.mySql", - "typeVersion": 2.4, - "position": [2600, -200], - "notes": "บันทึกความคืบหน้าลง 
Database" - }, - { - "parameters": { - "operation": "executeQuery", - "host": "={{$workflow.staticData.config.DB_HOST}}", - "port": "={{$workflow.staticData.config.DB_PORT}}", - "database": "={{$workflow.staticData.config.DB_NAME}}", - "user": "={{$workflow.staticData.config.DB_USER}}", - "password": "={{$workflow.staticData.config.DB_PASSWORD}}", - "query": "INSERT INTO migration_review_queue (document_number, title, original_title, ai_suggested_category, ai_confidence, ai_issues, review_reason, status, created_at) VALUES ('{{$json.document_number}}', '{{$json.ai_result.suggested_title || $json.title}}', '{{$json.title}}', '{{$json.ai_result.suggested_category}}', {{$json.ai_result.confidence}}, '{{JSON.stringify($json.ai_result.detected_issues)}}', '{{$json.review_reason}}', 'PENDING', NOW()) ON DUPLICATE KEY UPDATE status = 'PENDING', review_reason = '{{$json.review_reason}}', created_at = NOW()", - "options": {} - }, - "id": "review-queue-insert", - "name": "Insert Review Queue", - "type": "n8n-nodes-base.mySql", - "typeVersion": 2.4", - "position": [2200, 0], - "notes": "บันทึกรายการที่ต้องตรวจสอบโดยคน (ไม่สร้าง Correspondence)" - }, - { - "parameters": { - "jsCode": "const fs = require('fs');\nconst item = $input.first();\nconst config = $workflow.staticData.config;\n\nconst csvPath = `${config.LOG_PATH}/reject_log.csv`;\nconst header = 'timestamp,document_number,title,reject_reason,ai_confidence,ai_issues\\n';\nconst esc = (s) => `\"${String(s || '').replace(/\"/g, '\"\"')}\"`;\n\nif (!fs.existsSync(csvPath)) {\n fs.writeFileSync(csvPath, header, 'utf8');\n}\n\nconst line = [\n new Date().toISOString(),\n esc(item.json.document_number),\n esc(item.json.title),\n esc(item.json.reject_reason),\n item.json.ai_result?.confidence ?? 
'N/A',\n esc(JSON.stringify(item.json.ai_result?.detected_issues || []))\n].join(',') + '\\n';\n\nfs.appendFileSync(csvPath, line, 'utf8');\n\nreturn [$input.first()];" - }, - "id": "reject-logger", - "name": "Log Reject to CSV", - "type": "n8n-nodes-base.code", - "typeVersion": 2, - "position": [2200, 200], - "notes": "บันทึกรายการที่ถูกปฏิเสธลง CSV" - }, - { - "parameters": { - "jsCode": "const fs = require('fs');\nconst items = $input.all();\nconst config = $workflow.staticData.config;\n\nconst csvPath = `${config.LOG_PATH}/error_log.csv`;\nconst header = 'timestamp,document_number,error_type,error_message,raw_ai_response\\n';\nconst esc = (s) => `\"${String(s || '').replace(/\"/g, '\"\"')}\"`;\n\nif (!fs.existsSync(csvPath)) {\n fs.writeFileSync(csvPath, header, 'utf8');\n}\n\nfor (const item of items) {\n const line = [\n new Date().toISOString(),\n esc(item.json.document_number),\n esc(item.json.error_type || 'UNKNOWN'),\n esc(item.json.error || item.json.parse_error),\n esc(item.json.raw_ai_response || '')\n ].join(',') + '\\n';\n \n fs.appendFileSync(csvPath, line, 'utf8');\n}\n\nreturn items;" - }, - "id": "error-logger-csv", - "name": "Log Error to CSV", - "type": "n8n-nodes-base.code", - "typeVersion": 2, - "position": [1400, 400], - "notes": "บันทึก Error ลง CSV (จาก File Validator)" - }, - { - "parameters": { - "operation": "executeQuery", - "host": "={{$workflow.staticData.config.DB_HOST}}", - "port": "={{$workflow.staticData.config.DB_PORT}}", - "database": "={{$workflow.staticData.config.DB_NAME}}", - "user": "={{$workflow.staticData.config.DB_USER}}", - "password": "={{$workflow.staticData.config.DB_PASSWORD}}", - "query": "INSERT INTO migration_errors (batch_id, document_number, error_type, error_message, raw_ai_response, created_at) VALUES ('{{$workflow.staticData.config.BATCH_ID}}', '{{$json.document_number}}', '{{$json.error_type || \"UNKNOWN\"}}', '{{$json.error || $json.parse_error}}', '{{$json.raw_ai_response || \"\"}}', NOW())", - 
"options": {} - }, - "id": "error-logger-db", - "name": "Log Error to DB", - "type": "n8n-nodes-base.mySql", - "typeVersion": 2.4", - "position": [2000, 400], - "notes": "บันทึก Error ลง MariaDB" - }, - { - "parameters": { - "amount": "={{$workflow.staticData.config.DELAY_MS}}", - "unit": "milliseconds" - }, - "id": "delay-node", - "name": "Delay", - "type": "n8n-nodes-base.wait", - "typeVersion": 1, - "position": [2800, 0], - "notes": "หน่วงเวลาระหว่าง Batches" - } - ], - "connections": { - "Schedule Trigger (22:00)": { - "main": [ - [ - { - "node": "Set Configuration", - "type": "main", - "index": 0 - } - ] - ] - }, - "Set Configuration": { - "main": [ - [ - { - "node": "Fetch Categories", - "type": "main", - "index": 0 - } - ] - ] - }, - "Fetch Categories": { - "main": [ - [ - { - "node": "File Mount Check", - "type": "main", - "index": 0 - } - ] - ] - }, - "File Mount Check": { - "main": [ - [ - { - "node": "Read Checkpoint", - "type": "main", - "index": 0 - } - ] - ] - }, - "Read Checkpoint": { - "main": [ - [ - { - "node": "Process Batch + Encoding", - "type": "main", - "index": 0 - } - ] - ] - }, - "Process Batch + Encoding": { - "main": [ - [ - { - "node": "File Validator", - "type": "main", - "index": 0 - } - ] - ] - }, - "File Validator": { - "main": [ - [ - { - "node": "Build AI Prompt", - "type": "main", - "index": 0 - } - ], - [ - { - "node": "Log Error to CSV", - "type": "main", - "index": 0 - } - ] - ] - }, - "Build AI Prompt": { - "main": [ - [ - { - "node": "Ollama AI Analysis", - "type": "main", - "index": 0 - } - ] - ] - }, - "Ollama AI Analysis": { - "main": [ - [ - { - "node": "Parse & Validate AI Response", - "type": "main", - "index": 0 - } - ] - ] - }, - "Parse & Validate AI Response": { - "main": [ - [ - { - "node": "Confidence Router", - "type": "main", - "index": 0 - } - ], - [ - { - "node": "Update Fallback State", - "type": "main", - "index": 0 - } - ] - ] - }, - "Confidence Router": { - "main": [ - [ - { - "node": "Import to Backend", 
- "type": "main", - "index": 0 - } - ], - [ - { - "node": "Insert Review Queue", - "type": "main", - "index": 0 - } - ], - [ - { - "node": "Log Reject to CSV", - "type": "main", - "index": 0 - } - ], - [ - { - "node": "Log Error to DB", - "type": "main", - "index": 0 - } - ] - ] - }, - "Import to Backend": { - "main": [ - [ - { - "node": "Flag Checkpoint", - "type": "main", - "index": 0 - } - ] - ] - }, - "Flag Checkpoint": { - "main": [ - [ - { - "node": "Save Checkpoint", - "type": "main", - "index": 0 - } - ] - ] - }, - "Log Error to CSV": { - "main": [ - [ - { - "node": "Log Error to DB", - "type": "main", - "index": 0 - } - ] - ] - } - } -} -``` - ---- - -## 📄 เอกสารปรับปรุง `03-05-n8n-migration-setup-guide.md` (Free Plan Edition) - -```markdown -# 📋 คู่มือการตั้งค่า n8n สำหรับ Legacy Data Migration (Free Plan Edition) - -> **สำหรับ n8n Free Plan (Self-hosted)** - ไม่ใช้ Environment Variables -> **Version:** 1.8.0-free | **Last Updated:** 2026-03-03 - ---- - -## ⚠️ ความแตกต่างจากเวอร์ชัน Enterprise - -| ฟีเจอร์ | Enterprise | Free Plan (นี้) | -|---------|-----------|----------------| -| Environment Variables | ✅ ใช้ `$env` | ❌ ใช้ `Set Node` + `staticData` | -| External Secrets | ✅ Vault/Secrets Manager | ❌ Hardcode ใน Set Node | -| Multiple Workflows | ✅ Unlimited | ⚠️ รวมเป็น Workflow เดียว | -| Error Handling | ✅ Advanced | ⚠️ Manual Retry | -| Webhook Triggers | ✅ | ✅ ใช้ได้ | - ---- - -## 🏗️ สถาปัตยกรรมใหม่สำหรับ Free Plan - -``` -┌─────────────────────────────────────────────────────────────┐ -│ MIGRATION WORKFLOW v1.8.0-FREE │ -├─────────────────────────────────────────────────────────────┤ -│ │ -│ [Schedule Trigger 22:00] │ -│ │ │ -│ ▼ │ -│ ┌─────────────┐ ค่า Config ทั้งหมดอยู่ที่นี่ │ -│ │ Set Config │ (แก้ไขใน Code Node นี้เท่านั้น) │ -│ │ (Node 0) │ │ -│ └──────┬──────┘ │ -│ │ │ -│ ┌──────▼──────┐ ┌──────────────┐ ┌──────────────┐ │ -│ │Pre-flight │───▶│Fetch Categories│──▶│File Validator│ │ -│ │Checks │ │from Backend │ │+ Sanitize │ │ -│ 
└─────────────┘ └──────────────┘ └──────┬───────┘ │ -│ │ │ -│ ┌────────────────────────────┤ │ -│ │ │ │ -│ Valid │ Error │ │ -│ ▼ ▼ │ -│ ┌─────────────────┐ ┌─────────────────┐ │ -│ │ AI Analysis │ │ Error Logger │ │ -│ │ (Ollama) │ │ (CSV + DB) │ │ -│ └────────┬────────┘ └─────────────────┘ │ -│ │ │ -│ ┌────────▼────────┐ │ -│ │ Confidence │ │ -│ │ Router │ │ -│ │ (4 outputs) │ │ -│ └────┬───┬───┬────┘ │ -│ │ │ │ │ -│ ┌─────────┘ │ └─────────┐ │ -│ ▼ ▼ ▼ │ -│ ┌──────┐ ┌──────────┐ ┌────────┐ │ -│ │Auto │ │ Review │ │Reject │ │ -│ │Ingest│ │ Queue │ │Log │ │ -│ │+Chkpt│ │(DB only) │ │(CSV) │ │ -│ └──────┘ └──────────┘ └────────┘ │ -│ │ -└─────────────────────────────────────────────────────────────┘ -``` - ---- - -## 📝 การตั้งค่า Configuration (สำคัญมาก) - -### ขั้นตอนที่ 1: แก้ไข Node "Set Configuration" - -**เปิด Workflow → คลิก Node "Set Configuration" → แก้ไข Code:** - -```javascript -// ============================================ -// CONFIGURATION - แก้ไขค่าที่นี่เท่านั้น -// ============================================ -const CONFIG = { - // 🔴 สำคัญ: เปลี่ยนทุกค่าที่มี <...> - - // Ollama Settings - OLLAMA_HOST: 'http://192.168.20.100:11434', - OLLAMA_MODEL_PRIMARY: 'llama3.2:3b', - OLLAMA_MODEL_FALLBACK: 'mistral:7b-instruct-q4_K_M', - - // Backend Settings - BACKEND_URL: 'https://api.np-dms.work', - MIGRATION_TOKEN: 'Bearer YOUR_MIGRATION_TOKEN_HERE', // 🔴 เปลี่ยน - - // Batch Settings - BATCH_SIZE: 10, - BATCH_ID: 'migration_20260226', - DELAY_MS: 2000, - - // Thresholds - CONFIDENCE_HIGH: 0.85, - CONFIDENCE_LOW: 0.60, - MAX_RETRY: 3, - FALLBACK_THRESHOLD: 5, - - // Paths (QNAP NAS) - STAGING_PATH: '/share/np-dms/staging_ai', - LOG_PATH: '/share/np-dms/n8n/migration_logs', - - // Database (MariaDB) - DB_HOST: '192.168.1.100', - DB_PORT: 3306, - DB_NAME: 'lcbp3_production', - DB_USER: 'migration_bot', - DB_PASSWORD: 'YOUR_DB_PASSWORD_HERE' // 🔴 เปลี่ยน -}; - -// อย่าแก้โค้ดด้านล่างนี้ -$workflow.staticData = $workflow.staticData || {}; 
-$workflow.staticData.config = CONFIG; - -return [{ json: { config_loaded: true, timestamp: new Date().toISOString() }}]; -``` - -### ขั้นตอนที่ 2: ตั้งค่า Credentials ใน n8n UI - -เนื่องจาก Free Plan ไม่สามารถซ่อน Sensitive Data ได้ทั้งหมด แนะนำให้: - -1. **สร้าง Dedicated User สำหรับ Migration เท่านั้น** -2. **ใช้ Token ที่มีสิทธิ์จำกัด** (เฉพาะ API ที่จำเป็น) -3. **Rotate Token ทันทีหลัง Migration เสร็จ** - -**การตั้งค่า Credentials (ถ้าใช้):** - -| Credential | Type | ใช้ใน Node | -|-----------|------|-----------| -| Ollama API | HTTP Request | Ollama AI Analysis | -| LCBP3 Backend | HTTP Request | Import to Backend, Fetch Categories | -| MariaDB | MySQL | ทุก Database Node | - ---- - -## 🗄️ การเตรียม Database (เหมือนเดิม) - -รัน SQL นี้บน MariaDB **ก่อน** เริ่มใช้งาน: - -```sql --- Checkpoint -CREATE TABLE IF NOT EXISTS migration_progress ( - batch_id VARCHAR(50) PRIMARY KEY, - last_processed_index INT DEFAULT 0, - status ENUM('RUNNING','COMPLETED','FAILED') DEFAULT 'RUNNING', - updated_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP -); - --- Review Queue -CREATE TABLE IF NOT EXISTS migration_review_queue ( - id INT AUTO_INCREMENT PRIMARY KEY, - document_number VARCHAR(100) NOT NULL, - title TEXT, - original_title TEXT, - ai_suggested_category VARCHAR(50), - ai_confidence DECIMAL(4,3), - ai_issues JSON, - review_reason VARCHAR(255), - status ENUM('PENDING','APPROVED','REJECTED') DEFAULT 'PENDING', - reviewed_by VARCHAR(100), - reviewed_at TIMESTAMP NULL, - created_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP, - UNIQUE KEY uq_doc_number (document_number) -); - --- Error Log -CREATE TABLE IF NOT EXISTS migration_errors ( - id INT AUTO_INCREMENT PRIMARY KEY, - batch_id VARCHAR(50), - document_number VARCHAR(100), - error_type ENUM('FILE_NOT_FOUND','AI_PARSE_ERROR','API_ERROR','DB_ERROR','SECURITY','UNKNOWN'), - error_message TEXT, - raw_ai_response TEXT, - created_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP, - INDEX idx_batch_id (batch_id), - INDEX 
idx_error_type (error_type) -); - --- Fallback State -CREATE TABLE IF NOT EXISTS migration_fallback_state ( - id INT AUTO_INCREMENT PRIMARY KEY, - batch_id VARCHAR(50) UNIQUE, - recent_error_count INT DEFAULT 0, - is_fallback_active BOOLEAN DEFAULT FALSE, - updated_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP -); - --- Idempotency -CREATE TABLE IF NOT EXISTS import_transactions ( - id INT AUTO_INCREMENT PRIMARY KEY, - idempotency_key VARCHAR(255) UNIQUE NOT NULL, - document_number VARCHAR(100), - batch_id VARCHAR(100), - status_code INT DEFAULT 201, - created_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP, - INDEX idx_idem_key (idempotency_key) -); -``` - ---- - -## 🐳 Docker Compose สำหรับ QNAP (Free Plan) - -```yaml -version: '3.8' - -services: - n8n: - image: n8nio/n8n:1.78.0 - container_name: n8n-lcbp3 - restart: unless-stopped - ports: - - "5678:5678" - environment: - - TZ=Asia/Bangkok - - NODE_ENV=production - - N8N_BASIC_AUTH_ACTIVE=true - - N8N_BASIC_AUTH_USER=admin - - N8N_BASIC_AUTH_PASSWORD=YOUR_N8N_PASSWORD_HERE - - N8N_ENCRYPTION_KEY=YOUR_ENCRYPTION_KEY_HERE - volumes: - - /share/np-dms/n8n:/home/node/.n8n - - /share/np-dms/n8n/cache:/home/node/.cache - # อ่านอย่างเดียว: ไฟล์ต้นฉบับ - - /share/np-dms/staging_ai:/share/np-dms/staging_ai:ro - # เขียนได้: Logs และ CSV - - /share/np-dms/n8n/migration_logs:/share/np-dms/n8n/migration_logs:rw - networks: - - lcbp3-network - -networks: - lcbp3-network: - external: true -``` - -> **หมายเหตุ:** Free Plan ไม่ต้องใช้ PostgreSQL สำหรับ n8n (ใช้ SQLite ได้) - ---- - -## 🔄 การทำงานของแต่ละ Node - -### Node 0: Set Configuration -- เก็บค่า Config ทั้งหมดใน `$workflow.staticData.config` -- อ่านผ่าน `$workflow.staticData.config.KEY` ใน Node อื่น - -### Node 1-2: Pre-flight Checks -- ตรวจสอบ Backend Health -- ดึง Categories จาก `/api/meta/categories` -- ตรวจ File Mount (Read-only) -- เก็บ Categories ใน `$workflow.staticData.systemCategories` - -### Node 3: Read Checkpoint -- อ่าน `last_processed_index` จาก 
`migration_progress` -- ถ้าไม่มี เริ่มจาก 0 - -### Node 4: Process Batch -- อ่าน Excel -- Normalize UTF-8 (NFC) -- ตัด Batch ตาม `BATCH_SIZE` - -### Node 5: File Validator -- Sanitize filename (replace special chars) -- Path traversal check -- ตรวจสอบไฟล์มีอยู่จริง -- **Output 2 ทาง**: Valid → AI, Error → Log - -### Node 6: Build AI Prompt -- ดึง Categories จาก `staticData` (ไม่ hardcode) -- เลือก Model ตาม Fallback State -- สร้าง Prompt ตาม Template - -### Node 7: Ollama AI Analysis -- เรียก `POST /api/generate` -- Timeout 30 วินาที -- Retry 3 ครั้ง (n8n built-in) - -### Node 8: Parse & Validate -- Parse JSON Response -- Schema Validation (is_valid, confidence, detected_issues) -- Enum Validation (ตรวจ Category ว่าอยู่ใน List หรือไม่) -- **Output 2 ทาง**: Success → Router, Error → Fallback - -### Node 9: Confidence Router -- **4 Outputs**: - 1. Auto Ingest (confidence ≥ 0.85 && is_valid) - 2. Review Queue (0.60 ≤ confidence < 0.85) - 3. Reject Log (confidence < 0.60 หรือ is_valid = false) - 4. Error Log (parse error) - -### Node 10A: Auto Ingest -- POST `/api/correspondences/import` -- Header: `Idempotency-Key: {doc_num}:{batch_id}` -- บันทึก Checkpoint ทุก 10 records - -### Node 10B: Review Queue -- INSERT เข้า `migration_review_queue` เท่านั้น -- ยังไม่สร้าง Correspondence - -### Node 10C: Reject Log -- เขียน CSV ที่ `/share/np-dms/n8n/migration_logs/reject_log.csv` - -### Node 10D: Error Log -- เขียน CSV + INSERT เข้า `migration_errors` - ---- - -## 🚨 ข้อควรระวังสำหรับ Free Plan - -### 1. Security -- **อย่า Commit ไฟล์นี้เข้า Git** ถ้ามี Password/Token -- ใช้ `.gitignore` สำหรับไฟล์ JSON ที่มี Config -- Rotate Token ทันทีหลังใช้งาน - -### 2. Limitations -- **Execution Timeout**: ตรวจสอบ n8n execution timeout (default 5 นาที) -- **Memory**: จำกัดที่ 2GB (ตาม Docker Compose) -- **Concurrent**: รัน Batch ต่อเนื่อง ไม่ parallel - -### 3. 
Backup -- สำรอง SQLite database ของ n8n ที่ `/home/node/.n8n` -- สำรอง Logs ที่ `/share/np-dms/n8n/migration_logs` - ---- - -## ✅ Pre-Production Checklist (Free Plan) - -| ลำดับ | รายการ | วิธีตรวจสอบ | -|-------|--------|-------------| -| 1 | Config ถูกต้อง | รัน Test Execution ดูผลลัพธ์ Node 0 | -| 2 | Database Connect ได้ | Test Step ใน Node Read Checkpoint | -| 3 | Ollama พร้อม | `curl http:///api/tags` | -| 4 | Backend Token ใช้ได้ | Test Step ใน Node Fetch Categories | -| 5 | File Mount RO ถูกต้อง | `docker exec n8n ls /share/np-dms/staging_ai` | -| 6 | Log Mount RW ถูกต้อง | `docker exec n8n touch /share/np-dms/n8n/migration_logs/test` | -| 7 | Categories ไม่ hardcode | ดูผลลัพธ์ Node Fetch Categories | -| 8 | Idempotency Key ถูกต้อง | ตรวจ Header ใน Node Import | -| 9 | Checkpoint บันทึก | ตรวจสอบ `migration_progress` หลังรัน | -| 10 | Error Log สร้างไฟล์ | ตรวจสอบ `error_log.csv` | - ---- - -## 🔧 การแก้ไขปัญหาเฉพาะหน้า - -### ปัญหา: Config ไม่ถูกต้อง -**แก้ไข:** แก้ที่ Node "Set Configuration" แล้ว Save → Execute Workflow ใหม่ - -### ปัญหา: Database Connection Error -**ตรวจสอบ:** -```javascript -// ใส่ใน Code Node ชั่วคราวเพื่อ Debug -const config = $workflow.staticData.config; -return [{ json: { - host: config.DB_HOST, - port: config.DB_PORT, - // อย่าแสดง password ใน Production! - test: 'Config loaded: ' + (config ? 
'YES' : 'NO') -}}]; -``` - -### ปัญหา: Ollama Timeout -**แก้ไข:** -- เพิ่ม `DELAY_MS` เป็น 3000 หรือ 5000 -- ลด `BATCH_SIZE` เหลือ 5 -- ตรวจสอบ GPU/CPU ของ Ollama Server - ---- - -## 📊 การ Monitor (Manual) - -เนื่องจาก Free Plan ไม่มี Advanced Monitoring: - -```bash -# ดู Progress ล่าสุด -docker exec n8n-lcbp3 sh -c "tail -5 /share/np-dms/n8n/migration_logs/reject_log.csv" - -# ดู Error ล่าสุด -docker exec n8n-lcbp3 sh -c "tail -10 /share/np-dms/n8n/migration_logs/error_log.csv" - -# ดู Checkpoint ใน DB -mysql -h -u migration_bot -p -e "SELECT * FROM migration_progress WHERE batch_id = 'migration_20260226'" -``` - ---- - -**เอกสารฉบับนี้จัดทำขึ้นสำหรับ n8n Free Plan (Self-hosted)** -**Version:** 1.8.0-free | **Last Updated:** 2026-03-03 -``` - ---- - -## 📥 วิธี Import Workflow - -1. บันทึก JSON ด้านบนเป็นไฟล์ `lcbp3-migration-free.json` -2. เข้า n8n UI → **Workflows** → **Import from File** -3. เลือกไฟล์ `lcbp3-migration-free.json` -4. เปิด Workflow → แก้ไข Node **"Set Configuration"** ตามข้อมูลจริง -5. ตั้งค่า **Schedule Trigger** หรือเปลี่ยนเป็น **Manual Trigger** สำหรับทดสอบ -6. **Save** → **Execute Workflow** เพื่อทดสอบ - -ต้องการให้ช่วยปรับแต่งเพิ่มเติมหรือไม่ครับ? 
เช่น: -- เพิ่ม Node สำหรับส่ง Email แจ้งเตือน -- เพิ่ม Rollback Workflow -- ปรับ Batch Size อัตโนมัติตาม Error Rate +ตารางสรุปหน้าที่และผลลัพธ์ (Output) ของแต่ละ Node ใน LCBP3 Migration Workflow v1.8.1 แบ่งกลุ่มตามขั้นตอนการทำงานเพื่อให้เข้าใจได้ง่ายขึ้นครับ: +## 🚀 กลุ่มที่ 1: จุดเริ่มต้นและเตรียมการ (Initialization & Preflight) +| ชื่อ Node | หน้าที่ (Function) | ผลลัพธ์ (Output) | +| -------------------- | ------------------------------------------------------------------------------ | --------------------------------------------------- | +| Form Trigger | จุดเริ่มต้นของ Workflow แสดงฟอร์มให้ผู้ใช้เลือกโมเดล AI, ขนาด Batch และระบุตำแหน่งไฟล์ Excel | ข้อมูลจากผู้ใช้ (Model, Batch Size, Excel Path) | +| Set Configuration | ตั้งค่าตัวแปรระบบ (Config) เช่น URL, Token, โฟลเดอร์ทำงาน และเกณฑ์การตัดสินใจของ AI | ชุดตัวแปร config ไว้ใช้ตลอด Workflow | +| Check Backend Health | เรียก API ทดสอบว่าระบบ Backend พร้อมทำงานหรือไม่ | สถานะ HTTP 200 (OK) | +| Fetch Categories | ดึงข้อมูล Master Data หมวดหมู่เอกสารจาก Backend | รายการหมวดหมู่เอกสารทั้งหมดในระบบ | +| Fetch Tags | ดึงข้อมูล Master Data แท็กที่มีอยู่จาก Backend | รายชื่อแท็กทั้งหมดในระบบ | +| File Mount Check | ตรวจสอบว่าไฟล์ Excel และโฟลเดอร์ PDF มีอยู่จริง และเช็คสิทธิ์การเขียนไฟล์ Log | สถานะ preflight_ok: true พร้อมรายชื่อหมวดหมู่/แท็กที่ดึงมาได้ | +## 📂 กลุ่มที่ 2: เตรียมข้อมูลและการแบ่งชุด (Data Ingestion & Batching) +| ชื่อ Node | หน้าที่ (Function) | ผลลัพธ์ (Output) | +| ------------------------ | -------------------------------------------------------------------------------------- | ----------------------------------------------------------- | +| Read Checkpoint | อ่านฐานข้อมูลว่าครั้งที่แล้วประมวลผล Excel ถึงบรรทัดที่เท่าไหร่ (Resume capability) | ตัวเลข last_processed_index ล่าสุด | +| Read Excel Binary | อ่านไฟล์ Excel ต้นฉบับขึ้นมาเป็นข้อมูลไบนารี | ข้อมูล Binary ของ Excel | +| Read Excel | แปลงข้อมูล Binary ให้เป็นตารางข้อมูล JSON | JSON Array ของข้อมูลเอกสารทุกแถว | +| Process Batch + Encoding | 
ตัดแบ่งแถวตามจำนวน BATCH_SIZE เริ่มจากจุด Checkpoint และแปลง Encoding ให้รองรับภาษาไทย (UTF-8) | ข้อมูลเอกสาร 1 ชุด (เช่น 2 รายการ) ที่พร้อมทำงาน | +| File Validator | ตรวจสอบว่าไฟล์ PDF ที่ระบุใน Excel มีอยู่จริงในโฟลเดอร์ ป้องกัน Path Traversal | เฉพาะรายการที่มีไฟล์ PDF อยู่จริง (รายการ Error จะถูกตัดและส่งไป Log) | +## 🧠 กลุ่มที่ 3: สกัดข้อความและวิเคราะห์ด้วย AI (Text Extraction & AI Analysis) +| ชื่อ Node | หน้าที่ (Function) | ผลลัพธ์ (Output) | +| ---------------------------- | -------------------------------------------------------------------------------- | ------------------------------------------------------------ | +| Read PDF File | อ่านไฟล์ PDF ของรายการที่ผ่านเข้ามาเป็นข้อมูลไบนารี | ข้อมูล Binary ของไฟล์ PDF | +| Extract PDF Text | ส่งไฟล์ PDF ให้ Apache Tika ทำ OCR / สกัดตัวอักษร | ข้อความดิบ (Text) ที่อ่านได้จากหน้า PDF | +| Check Fallback State | ตรวจสอบใน DB ว่าระบบกำลังอยู่ในโหมดใช้โมเดล AI สำรองหรือไม่ | สถานะ is_fallback_active | +| Fetch DB Context | ดึงข้อมูลโปรเจกต์ แผนก และองค์กร เพื่อใช้เป็นบริบทให้ AI อ้างอิง | ข้อมูลอ้างอิงรหัสและชื่อต่างๆ จากระบบเก่า | +| Build AI Prompt | ประกอบร่างข้อความ (Prompt) โดยรวมข้อมูลจาก Excel, ข้อความใน PDF และบริบท เพื่อสั่งงาน AI | คำสั่งในฟิลด์ ollama_payload | +| Ollama AI Analysis | ส่ง Prompt ยิงเข้า Server Ollama เพื่อให้ AI วิเคราะห์ จัดหมวดหมู่ และสรุปข้อมูล | ข้อความอธิบายหรือ JSON ที่ AI ตอบกลับมา | +| Parse & Validate AI Response | แปลงคำตอบ AI เป็น JSON Object ตรวจสอบว่าโครงสร้างถูกต้อง และจัดรูปแบบให้ตรงกับ Backend | ข้อมูลเดิม + ผลลัพธ์ ai_result (หรือ parse_error ถ้า AI ตอบผิดรูปแบบ) | +| Update Fallback State | นับจำนวน Error ลง DB หาก AI ทำงานพลาดหลายครั้ง ระบบจะสลับไปใช้ Fallback Model โดยอัตโนมัติ | อัปเดตตาราง migration_fallback_state สำเร็จ | +## 🔀 กลุ่มที่ 4: การตัดสินใจและการนำเข้าระบบ (Routing & Ingestion) +| ชื่อ Node | หน้าที่ (Function) | ผลลัพธ์ (Output) | +| ------------------------------- | ------------------------------------------------------------------------------ | 
---------------------------------------------------------------- | +| Confidence Router | ตรวจประเมินคะแนนความมั่นใจ (Confidence) จาก AI และกำหนดเส้นทาง route_index ให้เอกสาร | สถานะชั่วคราวและค่า route_index (0, 1, 2, 3) | +| Route by Confidence Switch Node | แบ่งเส้นทางข้อมูลออกเป็น 4 ขา ตามค่าจาก Router | กระจายข้อมูลไปทาง Staging(High), Staging(Review), Reject หรือ Error | +| Restore Binary | (หลังแยกสาย) ดึงข้อมูล Binary ของ PDF กลับมาแนบกับข้อมูลอีกครั้งเตรียมอัปโหลด | JSON + Binary PDF ของไฟล์นั้นๆ | +| Upload to Backend | ยิง API นำไฟล์ PDF ฝากไว้ที่ Temp Storage ของ Backend DMS | รหัสไฟล์ temp_attachment_id ของ Backend | +| Build Enqueue Payload | ประกอบร่างข้อมูลผลวิเคราะห์ AI เข้ากับรหัสไฟล์ เพื่อเตรียมโยนเข้าคิว Migration | โครงสร้าง JSON ที่พร้อมส่งเข้า API Queue (enqueue_payload) | +| Enqueue to Review Queue | ยิงข้อมูลเข้า API Backend เพื่อบันทึกเข้าสู่ Review Queue ระบบ DMS | สถานะสำเร็จจากการรับข้อมูลของ Backend | +| Save Checkpoint | บันทึกประวัติลง Database ว่าประมวลผลผ่านเอกสารชุดนี้เรียบร้อยแล้ว | อัปเดต last_processed_index สำเร็จ | +| Delay | หน่วงเวลา (เช่น 2 วินาที) ก่อนวนรอบขึ้นไปทำข้อมูล Batch ถัดไป | วนลูปกลับไปที่จุด Read Checkpoint | +## 🚨 กลุ่มที่ 5: การจัดการข้อผิดพลาด (Error Logging) +| ชื่อ Node | หน้าที่ (Function) | ผลลัพธ์ (Output) | +| ----------------- | ------------------------------------------------------------------ | ----------------------------------- | +| Log Reject to CSV | หาก AI ให้คะแนนต่ำกว่าเกณฑ์ จะบันทึกเหตุผลทิ้งไว้ในไฟล์ CSV | บรรทัดข้อมูลใน reject_log.csv | +| Log Error to CSV | หากเกิดข้อผิดพลาดในการประมวลผล (เช่น หาไฟล์ไม่เจอ, AI หลอน) จะบันทึกลง CSV | บรรทัดข้อมูลใน error_log.csv | +| Log Error to DB | ยิง API ของ Backend เพื่อบันทึก Error เข้าสู่ Database ส่วนกลาง | ข้อมูล Error ในตาราง migration_errors | diff --git a/specs/04-Infrastructure-OPS/04-00-docker-compose/docker-compose-app.yml b/specs/04-Infrastructure-OPS/04-00-docker-compose/docker-compose-app.yml index d252f18..878fc66 100644 --- 
a/specs/04-Infrastructure-OPS/04-00-docker-compose/docker-compose-app.yml +++ b/specs/04-Infrastructure-OPS/04-00-docker-compose/docker-compose-app.yml @@ -79,8 +79,8 @@ services: - lcbp3 volumes: # Two-Phase Storage: จัดเก็บไฟล์นอก container - - '/share/np-dms/data/uploads/temp:/app/uploads/temp' - - '/share/np-dms/data/uploads/permanent:/app/uploads/permanent' + - '/share/np-dms-as/data/uploads/temp:/app/uploads/temp' + - '/share/np-dms-as/data/uploads/permanent:/app/uploads/permanent' - '/share/np-dms/data/logs/backend:/app/logs' # Mount legacy staging folder to match n8n's output path - '/share/np-dms-as/Legacy:/home/node/.n8n-files/staging_ai:ro'