690504:1641 Update specs [skip ci]
This commit is contained in:
@@ -5,13 +5,14 @@ set -e
|
||||
JSON_MODE=false
|
||||
SHORT_NAME=""
|
||||
BRANCH_NUMBER=""
|
||||
CATEGORY=""
|
||||
ARGS=()
|
||||
i=1
|
||||
while [ $i -le $# ]; do
|
||||
arg="${!i}"
|
||||
case "$arg" in
|
||||
--json)
|
||||
JSON_MODE=true
|
||||
--json)
|
||||
JSON_MODE=true
|
||||
;;
|
||||
--short-name)
|
||||
if [ $((i + 1)) -gt $# ]; then
|
||||
@@ -40,22 +41,42 @@ while [ $i -le $# ]; do
|
||||
fi
|
||||
BRANCH_NUMBER="$next_arg"
|
||||
;;
|
||||
--help|-h)
|
||||
echo "Usage: $0 [--json] [--short-name <name>] [--number N] <feature_description>"
|
||||
--category)
|
||||
if [ $((i + 1)) -gt $# ]; then
|
||||
echo 'Error: --category requires a value' >&2
|
||||
exit 1
|
||||
fi
|
||||
i=$((i + 1))
|
||||
next_arg="${!i}"
|
||||
if [[ "$next_arg" == --* ]]; then
|
||||
echo 'Error: --category requires a value' >&2
|
||||
exit 1
|
||||
fi
|
||||
CATEGORY="$next_arg"
|
||||
;;
|
||||
--help|-h)
|
||||
echo "Usage: $0 [--json] [--short-name <name>] [--number N] [--category <cat>] <feature_description>"
|
||||
echo ""
|
||||
echo "Options:"
|
||||
echo " --json Output in JSON format"
|
||||
echo " --short-name <name> Provide a custom short name (2-4 words) for the branch"
|
||||
echo " --number N Specify branch number manually (overrides auto-detection)"
|
||||
echo " --category <cat> Category folder (100, 200, or 300). Defaults to 200 (fullstacks)"
|
||||
echo " --help, -h Show this help message"
|
||||
echo ""
|
||||
echo "Categories:"
|
||||
echo " 100 - Infrastructure (Deployment, Monitoring, Docker Compose, Network)"
|
||||
echo " 200 - Fullstack Development (Backend + Frontend features, Workflow Engine, API)"
|
||||
echo " 300 - Others (Documentation, Research, Non-code tasks)"
|
||||
echo ""
|
||||
echo "Examples:"
|
||||
echo " $0 'Add user authentication system' --short-name 'user-auth'"
|
||||
echo " $0 'Implement OAuth2 integration for API' --number 5"
|
||||
echo " $0 'Docker compose hardening' --category 100"
|
||||
exit 0
|
||||
;;
|
||||
*)
|
||||
ARGS+=("$arg")
|
||||
*)
|
||||
ARGS+=("$arg")
|
||||
;;
|
||||
esac
|
||||
i=$((i + 1))
|
||||
@@ -83,35 +104,54 @@ find_repo_root() {
|
||||
# Function to get highest number from specs directory
|
||||
get_highest_from_specs() {
|
||||
local specs_dir="$1"
|
||||
local category="$2"
|
||||
local highest=0
|
||||
|
||||
|
||||
if [ -d "$specs_dir" ]; then
|
||||
for dir in "$specs_dir"/*; do
|
||||
[ -d "$dir" ] || continue
|
||||
dirname=$(basename "$dir")
|
||||
number=$(echo "$dirname" | grep -o '^[0-9]\+' || echo "0")
|
||||
number=$((10#$number))
|
||||
if [ "$number" -gt "$highest" ]; then
|
||||
highest=$number
|
||||
# If category specified, only check that category
|
||||
if [ -n "$category" ]; then
|
||||
local category_dir="$specs_dir/$category"
|
||||
if [ -d "$category_dir" ]; then
|
||||
for dir in "$category_dir"/*; do
|
||||
[ -d "$dir" ] || continue
|
||||
dirname=$(basename "$dir")
|
||||
# Extract the last 2 digits from nXX pattern
|
||||
number=$(echo "$dirname" | grep -o '[0-9]\{2\}$' || echo "0")
|
||||
number=$((10#$number))
|
||||
if [ "$number" -gt "$highest" ]; then
|
||||
highest=$number
|
||||
fi
|
||||
done
|
||||
fi
|
||||
done
|
||||
else
|
||||
# Check all directories in specs/ (old behavior for backward compatibility)
|
||||
for dir in "$specs_dir"/*; do
|
||||
[ -d "$dir" ] || continue
|
||||
dirname=$(basename "$dir")
|
||||
number=$(echo "$dirname" | grep -o '^[0-9]\+' || echo "0")
|
||||
number=$((10#$number))
|
||||
if [ "$number" -gt "$highest" ]; then
|
||||
highest=$number
|
||||
fi
|
||||
done
|
||||
fi
|
||||
fi
|
||||
|
||||
|
||||
echo "$highest"
|
||||
}
|
||||
|
||||
# Function to get highest number from git branches
|
||||
get_highest_from_branches() {
|
||||
local highest=0
|
||||
|
||||
|
||||
# Get all branches (local and remote)
|
||||
branches=$(git branch -a 2>/dev/null || echo "")
|
||||
|
||||
|
||||
if [ -n "$branches" ]; then
|
||||
while IFS= read -r branch; do
|
||||
# Clean branch name: remove leading markers and remote prefixes
|
||||
clean_branch=$(echo "$branch" | sed 's/^[* ]*//; s|^remotes/[^/]*/||')
|
||||
|
||||
|
||||
# Extract feature number if branch matches pattern ###-*
|
||||
if echo "$clean_branch" | grep -q '^[0-9]\{3\}-'; then
|
||||
number=$(echo "$clean_branch" | grep -o '^[0-9]\{3\}' || echo "0")
|
||||
@@ -122,7 +162,7 @@ get_highest_from_branches() {
|
||||
fi
|
||||
done <<< "$branches"
|
||||
fi
|
||||
|
||||
|
||||
echo "$highest"
|
||||
}
|
||||
|
||||
@@ -180,19 +220,19 @@ mkdir -p "$SPECS_DIR"
|
||||
# Function to generate branch name with stop word filtering and length filtering
|
||||
generate_branch_name() {
|
||||
local description="$1"
|
||||
|
||||
|
||||
# Common stop words to filter out
|
||||
local stop_words="^(i|a|an|the|to|for|of|in|on|at|by|with|from|is|are|was|were|be|been|being|have|has|had|do|does|did|will|would|should|could|can|may|might|must|shall|this|that|these|those|my|your|our|their|want|need|add|get|set)$"
|
||||
|
||||
|
||||
# Convert to lowercase and split into words
|
||||
local clean_name=$(echo "$description" | tr '[:upper:]' '[:lower:]' | sed 's/[^a-z0-9]/ /g')
|
||||
|
||||
|
||||
# Filter words: remove stop words and words shorter than 3 chars (unless they're uppercase acronyms in original)
|
||||
local meaningful_words=()
|
||||
for word in $clean_name; do
|
||||
# Skip empty words
|
||||
[ -z "$word" ] && continue
|
||||
|
||||
|
||||
# Keep words that are NOT stop words AND (length >= 3 OR are potential acronyms)
|
||||
if ! echo "$word" | grep -qiE "$stop_words"; then
|
||||
if [ ${#word} -ge 3 ]; then
|
||||
@@ -203,12 +243,12 @@ generate_branch_name() {
|
||||
fi
|
||||
fi
|
||||
done
|
||||
|
||||
|
||||
# If we have meaningful words, use first 3-4 of them
|
||||
if [ ${#meaningful_words[@]} -gt 0 ]; then
|
||||
local max_words=3
|
||||
if [ ${#meaningful_words[@]} -eq 4 ]; then max_words=4; fi
|
||||
|
||||
|
||||
local result=""
|
||||
local count=0
|
||||
for word in "${meaningful_words[@]}"; do
|
||||
@@ -238,10 +278,10 @@ fi
|
||||
if [ -z "$BRANCH_NUMBER" ]; then
|
||||
if [ "$HAS_GIT" = true ]; then
|
||||
# Check existing branches on remotes
|
||||
BRANCH_NUMBER=$(check_existing_branches "$SPECS_DIR")
|
||||
BRANCH_NUMBER=$(check_existing_branches "$SPECS_DIR" "$CATEGORY")
|
||||
else
|
||||
# Fall back to local directory check
|
||||
HIGHEST=$(get_highest_from_specs "$SPECS_DIR")
|
||||
HIGHEST=$(get_highest_from_specs "$SPECS_DIR" "$CATEGORY")
|
||||
BRANCH_NUMBER=$((HIGHEST + 1))
|
||||
fi
|
||||
fi
|
||||
@@ -257,15 +297,15 @@ if [ ${#BRANCH_NAME} -gt $MAX_BRANCH_LENGTH ]; then
|
||||
# Calculate how much we need to trim from suffix
|
||||
# Account for: feature number (3) + hyphen (1) = 4 chars
|
||||
MAX_SUFFIX_LENGTH=$((MAX_BRANCH_LENGTH - 4))
|
||||
|
||||
|
||||
# Truncate suffix at word boundary if possible
|
||||
TRUNCATED_SUFFIX=$(echo "$BRANCH_SUFFIX" | cut -c1-$MAX_SUFFIX_LENGTH)
|
||||
# Remove trailing hyphen if truncation created one
|
||||
TRUNCATED_SUFFIX=$(echo "$TRUNCATED_SUFFIX" | sed 's/-$//')
|
||||
|
||||
|
||||
ORIGINAL_BRANCH_NAME="$BRANCH_NAME"
|
||||
BRANCH_NAME="${FEATURE_NUM}-${TRUNCATED_SUFFIX}"
|
||||
|
||||
|
||||
>&2 echo "[specify] Warning: Branch name exceeded GitHub's 244-byte limit"
|
||||
>&2 echo "[specify] Original: $ORIGINAL_BRANCH_NAME (${#ORIGINAL_BRANCH_NAME} bytes)"
|
||||
>&2 echo "[specify] Truncated to: $BRANCH_NAME (${#BRANCH_NAME} bytes)"
|
||||
@@ -277,7 +317,7 @@ else
|
||||
>&2 echo "[specify] Warning: Git repository not detected; skipped branch creation for $BRANCH_NAME"
|
||||
fi
|
||||
|
||||
FEATURE_DIR="$SPECS_DIR/$BRANCH_NAME"
|
||||
FEATURE_DIR="$SPECS_DIR/${CATEGORY}-${CATEGORY_NAME}/${BRANCH_NAME}"
|
||||
mkdir -p "$FEATURE_DIR"
|
||||
|
||||
TEMPLATE="$REPO_ROOT/.specify/templates/spec-template.md"
|
||||
|
||||
@@ -12,31 +12,21 @@
|
||||
Optional manual branch number (overrides auto-detection).
|
||||
.EXAMPLE
|
||||
.\create-new-feature.ps1 -Description "Add user authentication" -ShortName "user-auth"
|
||||
#>
|
||||
param(
|
||||
[Parameter(Mandatory = $true, Position = 0)]
|
||||
[string]$Description,
|
||||
#
|
||||
$aram(
|
||||
[PErroeterrMandatory = $true, Position = 0)]
|
||||
[stAing]cDescrittion,onPreference = "Stop"
|
||||
|
||||
[string]$ShortName,
|
||||
[int]$Number = 0
|
||||
)
|
||||
# Validate category
|
||||
if ($Category "tgorut be 100, 200, or 300"
|
||||
exit 1
|
||||
}
|
||||
|
||||
$ErrorActionPreference = "Stop"
|
||||
|
||||
# Load common functions
|
||||
. "$PSScriptRoot\common.ps1"
|
||||
|
||||
$repoRoot = Get-RepoRoot
|
||||
$hasGit = Test-HasGit
|
||||
$specsDir = Join-Path $repoRoot "specs"
|
||||
if (-not (Test-Path $specsDir)) { New-Item -ItemType Directory -Path $specsDir | Out-Null }
|
||||
|
||||
# Stop words for smart branch name generation
|
||||
$stopWords = @('i','a','an','the','to','for','of','in','on','at','by','with','from',
|
||||
'is','are','was','were','be','been','being','have','has','had',
|
||||
'do','does','did','will','would','should','could','can','may','might',
|
||||
'must','shall','this','that','these','those','my','your','our','their',
|
||||
'want','need','add','get','set')
|
||||
# Load common fu#ct oGet category name
|
||||
'100' { 'Infrastructures' }
|
||||
'200' { 'fullstacks' }
|
||||
'want','need','add','get','set')
|
||||
|
||||
function ConvertTo-BranchName {
|
||||
param([string]$Text)
|
||||
@@ -44,23 +34,23 @@ function ConvertTo-BranchName {
|
||||
}
|
||||
|
||||
function Get-SmartBranchName {
|
||||
param([string]$Desc)
|
||||
$words = ($Desc.ToLower() -replace '[^a-z0-9]', ' ').Split(' ', [StringSplitOptions]::RemoveEmptyEntries)
|
||||
$meaningful = $words | Where-Object { $_ -notin $stopWords -and $_.Length -ge 3 } | Select-Object -First 3
|
||||
if ($meaningful.Count -gt 0) { return ($meaningful -join '-') }
|
||||
return ConvertTo-BranchName $Desc
|
||||
}
|
||||
param(
|
||||
[string]$Des,
|
||||
[string]$Category = ""
|
||||
c)
|
||||
$words = ($D
|
||||
|
||||
if ($Category) {
|
||||
# Check specific category directory
|
||||
$categoryDir = Join-Path $Dir "$Category-$categoryName"
|
||||
if (Test-Path $categoryDir) {
|
||||
if ( if ($num -gt $highest) { $highest = $num }
|
||||
olon T
|
||||
Get-ChildItem -Path $Dir -Directory |r $Catego yForEach-Object {
|
||||
if ($_.Name -match '^(\d+)-') {
|
||||
$num = [int]$Matches[1] if ($num -gt $highest) { $highest = $num }
|
||||
}
|
||||
|
||||
function Get-HighestNumber {
|
||||
param([string]$Dir)
|
||||
$highest = 0
|
||||
if (Test-Path $Dir) {
|
||||
Get-ChildItem -Path $Dir -Directory | ForEach-Object {
|
||||
if ($_.Name -match '^(\d+)-') {
|
||||
$num = [int]$Matches[1]
|
||||
if ($num -gt $highest) { $highest = $num }
|
||||
}
|
||||
}
|
||||
}
|
||||
return $highest
|
||||
}
|
||||
@@ -78,7 +68,8 @@ if ($Number -gt 0) {
|
||||
} else {
|
||||
$highestSpec = Get-HighestNumber $specsDir
|
||||
$highestBranch = 0
|
||||
if ($hasGit) {
|
||||
# Use nXX format where n = category hundreds digit, XX = feature number
|
||||
if ($hasGit) }{1{2 $Category,
|
||||
try {
|
||||
git fetch --all --prune 2>$null | Out-Null
|
||||
$branches = git branch -a 2>$null
|
||||
@@ -122,10 +113,31 @@ if (Test-Path $templateFile) {
|
||||
Copy-Item $templateFile $specFile
|
||||
} else {
|
||||
New-Item -ItemType File -Path $specFile -Force | Out-Null
|
||||
}r "$Category-$categoyName"
|
||||
rectory -Path $featuDir -Fore | Out-Null
|
||||
|
||||
$templaeFile = Jin-Path $epoRoot ".specif" "templates" "spec-template.md"
|
||||
$specFile= Join"spec.md"
|
||||
if (TestPath $templateile) {
|
||||
Cpy-Item $templateFile $specFile
|
||||
} else {
|
||||
New-Item -ItemType File -Path $specFile -Foll
|
||||
}
|
||||
|
||||
$env:SPECIFY_FEATURE = $branchName
|
||||
|
||||
# Output
|
||||
[PSCustomObject]@{
|
||||
BranchName = $branchName
|
||||
SpecFie = $specFie
|
||||
FeatureNum = $featureNum
|
||||
}
|
||||
|
||||
Write-Host "BRANCH_NAME: $branchName"
|
||||
Write-Host "SPEC_FILE: $specFile"
|
||||
Write-Host "FEATURE_NUM: $featureNum"
|
||||
$env:SPECIFY_FEATURE = $branchName
|
||||
|
||||
# Output
|
||||
[PSCustomObject]@{
|
||||
BranchName = $branchName
|
||||
|
||||
@@ -52,20 +52,29 @@ Given that feature description, do this:
|
||||
git fetch --all --prune
|
||||
```
|
||||
|
||||
b. Find the highest feature number across all sources for the short-name:
|
||||
b. **Determine the category** for this feature (MUST ask user):
|
||||
- Ask: "Which category does this feature belong to?"
|
||||
- Present options:
|
||||
- **100 - Infrastructures**: Deployment, Monitoring, Docker Compose, Network, Security hardening
|
||||
- **200 - Fullstacks**: Backend + Frontend features, Workflow Engine, API development, UI components
|
||||
- **300 - Others**: Documentation, Research, Non-code tasks, Process improvement
|
||||
- Wait for user to respond with category (100, 200, or 300)
|
||||
- Default to 200 if user doesn't specify
|
||||
|
||||
c. Find the highest feature number across all sources for the short-name within the chosen category:
|
||||
- Remote branches: `git ls-remote --heads origin | grep -E 'refs/heads/[0-9]+-<short-name>$'`
|
||||
- Local branches: `git branch | grep -E '^[* ]*[0-9]+-<short-name>$'`
|
||||
- Specs directories: Check for directories matching `specs/[0-9]+-<short-name>`
|
||||
- Specs directories: Check for directories matching `specs/<category>-*/[0-9]+-<short-name>`
|
||||
|
||||
c. Determine the next available number:
|
||||
d. Determine the next available number:
|
||||
- Extract all numbers from all three sources
|
||||
- Find the highest number N
|
||||
- Use N+1 for the new branch number
|
||||
|
||||
d. Run the script `../scripts/bash/create-new-feature.sh --json "{{args}}"` with the calculated number and short-name:
|
||||
- Pass `--number N+1` and `--short-name "your-short-name"` along with the feature description
|
||||
- Bash example: `.agents/scripts/bash/create-new-feature.sh --json "{{args}}" --number 5 --short-name "user-auth" "Add user authentication"`
|
||||
- PowerShell example: `.agents/scripts/powershell/create-new-feature.ps1 -Json -Args '{{args}}' -Number 5 -ShortName "user-auth" "Add user authentication"`
|
||||
e. Run the script `../scripts/bash/create-new-feature.sh --json "{{args}}"` with the calculated number, short-name, and category:
|
||||
- Pass `--number N+1`, `--short-name "your-short-name"`, and `--category <100|200|300>` along with the feature description
|
||||
- Bash example: `.agents/scripts/bash/create-new-feature.sh --json "{{args}}" --number 5 --short-name "user-auth" --category 200 "Add user authentication"`
|
||||
- PowerShell example: `.agents/scripts/powershell/create-new-feature.ps1 -Json -Args '{{args}}' -Number 5 -ShortName "user-auth" -Category 200 "Add user authentication"`
|
||||
|
||||
**IMPORTANT**:
|
||||
- Check all three sources (remote branches, local branches, specs directories) to find the highest number
|
||||
|
||||
@@ -0,0 +1,93 @@
|
||||
# Specs Folder Reorganization Plan
|
||||
|
||||
This plan reorganizes the specs/ directory into categorized folders (100-Infrastructures, 200-fullstacks, 300-others) with consistent numeric naming conventions, and updates AGENTS.md to document the new structure.
|
||||
|
||||
## Current State
|
||||
- `specs/001-transmittals-circulation/` - Fullstack feature (plan.md, spec.md, tasks.md, test-report.md)
|
||||
- `specs/002-infra-ops/` - Infrastructure work (plan.md, spec.md, quickstart.md, research.md, data-model.md, checklists/, contracts/)
|
||||
- `specs/003-unified-workflow-engine/` - Fullstack core system (plan.md, spec.md, tasks.md, quickstart.md, research.md, data-model.md, checklists/, contracts/)
|
||||
- Core specs folders (00-overview, 01-requirements, etc.) - Remain unchanged
|
||||
|
||||
## Target Structure
|
||||
```
|
||||
specs/
|
||||
├── 00-overview/ (unchanged)
|
||||
├── 01-requirements/ (unchanged)
|
||||
├── 02-architecture/ (unchanged)
|
||||
├── 03-Data-and-Storage/ (unchanged)
|
||||
├── 04-Infrastructure-OPS/ (unchanged)
|
||||
├── 05-Engineering-Guidelines/ (unchanged)
|
||||
├── 06-Decision-Records/ (unchanged)
|
||||
├── 08-Tasks/ (unchanged)
|
||||
├── 88-logs/ (unchanged)
|
||||
├── 99-archives/ (unchanged)
|
||||
├── 100-Infrastructures/ # NEW: Infrastructure-related work
|
||||
│ ├── 102-infra-ops/ # Moved from 002-infra-ops
|
||||
│ └── README.md # NEW: Category guide
|
||||
├── 200-fullstacks/ # NEW: Backend + frontend features
|
||||
│ ├── 201-transmittals-circulation/ # Moved from 001-transmittals-circulation
|
||||
│ ├── 203-unified-workflow-engine/ # Moved from 003-unified-workflow-engine
|
||||
│ └── README.md # NEW: Category guide
|
||||
└── 300-others/ # NEW: Documentation, research, non-code tasks
|
||||
└── README.md # NEW: Category guide
|
||||
```
|
||||
|
||||
## Naming Convention
|
||||
- Prefix: `nXX` where `n` = hundreds digit of category folder
|
||||
- Example: `100-Infrastructures/102-infra-ops` (n=1, so 1xx)
|
||||
- Example: `200-fullstacks/201-transmittals-circulation` (n=2, so 2xx)
|
||||
|
||||
## Steps
|
||||
|
||||
### 1. Create new category folders
|
||||
- Create `specs/100-Infrastructures/`
|
||||
- Create `specs/200-fullstacks/`
|
||||
- Create `specs/300-others/`
|
||||
|
||||
### 2. Move existing folders with new names
|
||||
- Move `specs/001-transmittals-circulation/` → `specs/200-fullstacks/201-transmittals-circulation/`
|
||||
- Move `specs/002-infra-ops/` → `specs/100-Infrastructures/102-infra-ops/`
|
||||
- Move `specs/003-unified-workflow-engine/` → `specs/200-fullstacks/203-unified-workflow-engine/`
|
||||
|
||||
### 3. Create README.md files for each category
|
||||
- `specs/100-Infrastructures/README.md` - Explain infrastructure work scope
|
||||
- `specs/200-fullstacks/README.md` - Explain fullstack feature scope
|
||||
- `specs/300-others/README.md` - Explain documentation/research scope
|
||||
|
||||
### 4. Update AGENTS.md
|
||||
- Add new section: "📁 Specs Folder Organization"
|
||||
- Document the new category structure
|
||||
- Explain naming convention (nXX prefix)
|
||||
- Provide examples of what goes in each category
|
||||
- Add rule: "When creating new feature specs, place in appropriate category folder"
|
||||
|
||||
### 5. Update specs/README.md
|
||||
- Add reference to new category folders
|
||||
- Update directory structure diagram
|
||||
- Note that core specs (00-06, 08, 88, 99) remain unchanged
|
||||
|
||||
### 6. Create workflow (optional - pending user confirmation)
|
||||
- Create `.windsurf/workflows/create-feature-spec.md`
|
||||
- Workflow prompts user for feature type (infra/fullstack/other)
|
||||
- Automatically places spec in correct category with proper naming
|
||||
|
||||
## Verification
|
||||
- Verify all files moved correctly (no data loss)
|
||||
- Verify internal file references still work (check for relative paths)
|
||||
- Verify AGENTS.md documentation is clear
|
||||
- Test that new structure is intuitive for team
|
||||
|
||||
## Files Modified
|
||||
- `specs/100-Infrastructures/` (NEW)
|
||||
- `specs/200-fullstacks/` (NEW)
|
||||
- `specs/300-others/` (NEW)
|
||||
- `specs/100-Infrastructures/README.md` (NEW)
|
||||
- `specs/200-fullstacks/README.md` (NEW)
|
||||
- `specs/300-others/README.md` (NEW)
|
||||
- `AGENTS.md` (UPDATED - add Specs Folder Organization section)
|
||||
- `specs/README.md` (UPDATED - add new categories to directory structure)
|
||||
|
||||
## Files Moved
|
||||
- `specs/001-transmittals-circulation/` → `specs/200-fullstacks/201-transmittals-circulation/`
|
||||
- `specs/002-infra-ops/` → `specs/100-Infrastructures/102-infra-ops/`
|
||||
- `specs/003-unified-workflow-engine/` → `specs/200-fullstacks/203-unified-workflow-engine/`
|
||||
@@ -0,0 +1,37 @@
|
||||
# Deepening
|
||||
|
||||
How to deepen a cluster of shallow modules safely, given its dependencies. Assumes the vocabulary in [LANGUAGE.md](LANGUAGE.md) — **module**, **interface**, **seam**, **adapter**.
|
||||
|
||||
## Dependency categories
|
||||
|
||||
When assessing a candidate for deepening, classify its dependencies. The category determines how the deepened module is tested across its seam.
|
||||
|
||||
### 1. In-process
|
||||
|
||||
Pure computation, in-memory state, no I/O. Always deepenable — merge the modules and test through the new interface directly. No adapter needed.
|
||||
|
||||
### 2. Local-substitutable
|
||||
|
||||
Dependencies that have local test stand-ins (PGLite for Postgres, in-memory filesystem). Deepenable if the stand-in exists. The deepened module is tested with the stand-in running in the test suite. The seam is internal; no port at the module's external interface.
|
||||
|
||||
### 3. Remote but owned (Ports & Adapters)
|
||||
|
||||
Your own services across a network boundary (microservices, internal APIs). Define a **port** (interface) at the seam. The deep module owns the logic; the transport is injected as an **adapter**. Tests use an in-memory adapter. Production uses an HTTP/gRPC/queue adapter.
|
||||
|
||||
Recommendation shape: *"Define a port at the seam, implement an HTTP adapter for production and an in-memory adapter for testing, so the logic sits in one deep module even though it's deployed across a network."*
|
||||
|
||||
### 4. True external (Mock)
|
||||
|
||||
Third-party services (Stripe, Twilio, etc.) you don't control. The deepened module takes the external dependency as an injected port; tests provide a mock adapter.
|
||||
|
||||
## Seam discipline
|
||||
|
||||
- **One adapter means a hypothetical seam. Two adapters means a real one.** Don't introduce a port unless at least two adapters are justified (typically production + test). A single-adapter seam is just indirection.
|
||||
- **Internal seams vs external seams.** A deep module can have internal seams (private to its implementation, used by its own tests) as well as the external seam at its interface. Don't expose internal seams through the interface just because tests use them.
|
||||
|
||||
## Testing strategy: replace, don't layer
|
||||
|
||||
- Old unit tests on shallow modules become waste once tests at the deepened module's interface exist — delete them.
|
||||
- Write new tests at the deepened module's interface. The **interface is the test surface**.
|
||||
- Tests assert on observable outcomes through the interface, not internal state.
|
||||
- Tests should survive internal refactors — they describe behaviour, not implementation. If a test has to change when the implementation changes, it's testing past the interface.
|
||||
@@ -0,0 +1,44 @@
|
||||
# Interface Design
|
||||
|
||||
When the user wants to explore alternative interfaces for a chosen deepening candidate, use this parallel sub-agent pattern. Based on "Design It Twice" (Ousterhout) — your first idea is unlikely to be the best.
|
||||
|
||||
Uses the vocabulary in [LANGUAGE.md](LANGUAGE.md) — **module**, **interface**, **seam**, **adapter**, **leverage**.
|
||||
|
||||
## Process
|
||||
|
||||
### 1. Frame the problem space
|
||||
|
||||
Before spawning sub-agents, write a user-facing explanation of the problem space for the chosen candidate:
|
||||
|
||||
- The constraints any new interface would need to satisfy
|
||||
- The dependencies it would rely on, and which category they fall into (see [DEEPENING.md](DEEPENING.md))
|
||||
- A rough illustrative code sketch to ground the constraints — not a proposal, just a way to make the constraints concrete
|
||||
|
||||
Show this to the user, then immediately proceed to Step 2. The user reads and thinks while the sub-agents work in parallel.
|
||||
|
||||
### 2. Spawn sub-agents
|
||||
|
||||
Spawn 3+ sub-agents in parallel using the Agent tool. Each must produce a **radically different** interface for the deepened module.
|
||||
|
||||
Prompt each sub-agent with a separate technical brief (file paths, coupling details, dependency category from [DEEPENING.md](DEEPENING.md), what sits behind the seam). The brief is independent of the user-facing problem-space explanation in Step 1. Give each agent a different design constraint:
|
||||
|
||||
- Agent 1: "Minimize the interface — aim for 1–3 entry points max. Maximise leverage per entry point."
|
||||
- Agent 2: "Maximise flexibility — support many use cases and extension."
|
||||
- Agent 3: "Optimise for the most common caller — make the default case trivial."
|
||||
- Agent 4 (if applicable): "Design around ports & adapters for cross-seam dependencies."
|
||||
|
||||
Include both [LANGUAGE.md](LANGUAGE.md) vocabulary and CONTEXT.md vocabulary in the brief so each sub-agent names things consistently with the architecture language and the project's domain language.
|
||||
|
||||
Each sub-agent outputs:
|
||||
|
||||
1. Interface (types, methods, params — plus invariants, ordering, error modes)
|
||||
2. Usage example showing how callers use it
|
||||
3. What the implementation hides behind the seam
|
||||
4. Dependency strategy and adapters (see [DEEPENING.md](DEEPENING.md))
|
||||
5. Trade-offs — where leverage is high, where it's thin
|
||||
|
||||
### 3. Present and compare
|
||||
|
||||
Present designs sequentially so the user can absorb each one, then compare them in prose. Contrast by **depth** (leverage at the interface), **locality** (where change concentrates), and **seam placement**.
|
||||
|
||||
After comparing, give your own recommendation: which design you think is strongest and why. If elements from different designs would combine well, propose a hybrid. Be opinionated — the user wants a strong read, not a menu.
|
||||
@@ -0,0 +1,53 @@
|
||||
# Language
|
||||
|
||||
Shared vocabulary for every suggestion this skill makes. Use these terms exactly — don't substitute "component," "service," "API," or "boundary." Consistent language is the whole point.
|
||||
|
||||
## Terms
|
||||
|
||||
**Module**
|
||||
Anything with an interface and an implementation. Deliberately scale-agnostic — applies equally to a function, class, package, or tier-spanning slice.
|
||||
_Avoid_: unit, component, service.
|
||||
|
||||
**Interface**
|
||||
Everything a caller must know to use the module correctly. Includes the type signature, but also invariants, ordering constraints, error modes, required configuration, and performance characteristics.
|
||||
_Avoid_: API, signature (too narrow — those refer only to the type-level surface).
|
||||
|
||||
**Implementation**
|
||||
What's inside a module — its body of code. Distinct from **Adapter**: a thing can be a small adapter with a large implementation (a Postgres repo) or a large adapter with a small implementation (an in-memory fake). Reach for "adapter" when the seam is the topic; "implementation" otherwise.
|
||||
|
||||
**Depth**
|
||||
Leverage at the interface — the amount of behaviour a caller (or test) can exercise per unit of interface they have to learn. A module is **deep** when a large amount of behaviour sits behind a small interface. A module is **shallow** when the interface is nearly as complex as the implementation.
|
||||
|
||||
**Seam** _(from Michael Feathers)_
|
||||
A place where you can alter behaviour without editing in that place. The *location* at which a module's interface lives. Choosing where to put the seam is its own design decision, distinct from what goes behind it.
|
||||
_Avoid_: boundary (overloaded with DDD's bounded context).
|
||||
|
||||
**Adapter**
|
||||
A concrete thing that satisfies an interface at a seam. Describes *role* (what slot it fills), not substance (what's inside).
|
||||
|
||||
**Leverage**
|
||||
What callers get from depth. More capability per unit of interface they have to learn. One implementation pays back across N call sites and M tests.
|
||||
|
||||
**Locality**
|
||||
What maintainers get from depth. Change, bugs, knowledge, and verification concentrate at one place rather than spreading across callers. Fix once, fixed everywhere.
|
||||
|
||||
## Principles
|
||||
|
||||
- **Depth is a property of the interface, not the implementation.** A deep module can be internally composed of small, mockable, swappable parts — they just aren't part of the interface. A module can have **internal seams** (private to its implementation, used by its own tests) as well as the **external seam** at its interface.
|
||||
- **The deletion test.** Imagine deleting the module. If complexity vanishes, the module wasn't hiding anything (it was a pass-through). If complexity reappears across N callers, the module was earning its keep.
|
||||
- **The interface is the test surface.** Callers and tests cross the same seam. If you want to test *past* the interface, the module is probably the wrong shape.
|
||||
- **One adapter means a hypothetical seam. Two adapters means a real one.** Don't introduce a seam unless something actually varies across it.
|
||||
|
||||
## Relationships
|
||||
|
||||
- A **Module** has exactly one **Interface** (the surface it presents to callers and tests).
|
||||
- **Depth** is a property of a **Module**, measured against its **Interface**.
|
||||
- A **Seam** is where a **Module**'s **Interface** lives.
|
||||
- An **Adapter** sits at a **Seam** and satisfies the **Interface**.
|
||||
- **Depth** produces **Leverage** for callers and **Locality** for maintainers.
|
||||
|
||||
## Rejected framings
|
||||
|
||||
- **Depth as ratio of implementation-lines to interface-lines** (Ousterhout): rewards padding the implementation. We use depth-as-leverage instead.
|
||||
- **"Interface" as the TypeScript `interface` keyword or a class's public methods**: too narrow — interface here includes every fact a caller must know.
|
||||
- **"Boundary"**: overloaded with DDD's bounded context. Say **seam** or **interface**.
|
||||
@@ -0,0 +1,71 @@
|
||||
---
|
||||
name: improve-codebase-architecture
|
||||
description: Find deepening opportunities in a codebase, informed by the domain language in CONTEXT.md and the decisions in docs/adr/. Use when the user wants to improve architecture, find refactoring opportunities, consolidate tightly-coupled modules, or make a codebase more testable and AI-navigable.
|
||||
---
|
||||
|
||||
# Improve Codebase Architecture
|
||||
|
||||
Surface architectural friction and propose **deepening opportunities** — refactors that turn shallow modules into deep ones. The aim is testability and AI-navigability.
|
||||
|
||||
## Glossary
|
||||
|
||||
Use these terms exactly in every suggestion. Consistent language is the point — don't drift into "component," "service," "API," or "boundary." Full definitions in [LANGUAGE.md](LANGUAGE.md).
|
||||
|
||||
- **Module** — anything with an interface and an implementation (function, class, package, slice).
|
||||
- **Interface** — everything a caller must know to use the module: types, invariants, error modes, ordering, config. Not just the type signature.
|
||||
- **Implementation** — the code inside.
|
||||
- **Depth** — leverage at the interface: a lot of behaviour behind a small interface. **Deep** = high leverage. **Shallow** = interface nearly as complex as the implementation.
|
||||
- **Seam** — where an interface lives; a place behaviour can be altered without editing in place. (Use this, not "boundary.")
|
||||
- **Adapter** — a concrete thing satisfying an interface at a seam.
|
||||
- **Leverage** — what callers get from depth.
|
||||
- **Locality** — what maintainers get from depth: change, bugs, knowledge concentrated in one place.
|
||||
|
||||
Key principles (see [LANGUAGE.md](LANGUAGE.md) for the full list):
|
||||
|
||||
- **Deletion test**: imagine deleting the module. If complexity vanishes, it was a pass-through. If complexity reappears across N callers, it was earning its keep.
|
||||
- **The interface is the test surface.**
|
||||
- **One adapter = hypothetical seam. Two adapters = real seam.**
|
||||
|
||||
This skill is _informed_ by the project's domain model. The domain language gives names to good seams; ADRs record decisions the skill should not re-litigate.
|
||||
|
||||
## Process
|
||||
|
||||
### 1. Explore
|
||||
|
||||
Read the project's domain glossary and any ADRs in the area you're touching first.
|
||||
|
||||
Then use the Agent tool with `subagent_type=Explore` to walk the codebase. Don't follow rigid heuristics — explore organically and note where you experience friction:
|
||||
|
||||
- Where does understanding one concept require bouncing between many small modules?
|
||||
- Where are modules **shallow** — interface nearly as complex as the implementation?
|
||||
- Where have pure functions been extracted just for testability, but the real bugs hide in how they're called (no **locality**)?
|
||||
- Where do tightly-coupled modules leak across their seams?
|
||||
- Which parts of the codebase are untested, or hard to test through their current interface?
|
||||
|
||||
Apply the **deletion test** to anything you suspect is shallow: would deleting it concentrate complexity, or just move it? A "yes, concentrates" is the signal you want.
|
||||
|
||||
### 2. Present candidates
|
||||
|
||||
Present a numbered list of deepening opportunities. For each candidate:
|
||||
|
||||
- **Files** — which files/modules are involved
|
||||
- **Problem** — why the current architecture is causing friction
|
||||
- **Solution** — plain English description of what would change
|
||||
- **Benefits** — explained in terms of locality and leverage, and also in how tests would improve
|
||||
|
||||
**Use CONTEXT.md vocabulary for the domain, and [LANGUAGE.md](LANGUAGE.md) vocabulary for the architecture.** If `CONTEXT.md` defines "Order," talk about "the Order intake module" — not "the FooBarHandler," and not "the Order service."
|
||||
|
||||
**ADR conflicts**: if a candidate contradicts an existing ADR, only surface it when the friction is real enough to warrant revisiting the ADR. Mark it clearly (e.g. _"contradicts ADR-0007 — but worth reopening because…"_). Don't list every theoretical refactor an ADR forbids.
|
||||
|
||||
Do NOT propose interfaces yet. Ask the user: "Which of these would you like to explore?"
|
||||
|
||||
### 3. Grilling loop
|
||||
|
||||
Once the user picks a candidate, drop into a grilling conversation. Walk the design tree with them — constraints, dependencies, the shape of the deepened module, what sits behind the seam, what tests survive.
|
||||
|
||||
Side effects happen inline as decisions crystallize:
|
||||
|
||||
- **Naming a deepened module after a concept not in `CONTEXT.md`?** Add the term to `CONTEXT.md` — same discipline as `/grill-with-docs` (see [CONTEXT-FORMAT.md](../grill-with-docs/CONTEXT-FORMAT.md)). Create the file lazily if it doesn't exist.
|
||||
- **Sharpening a fuzzy term during the conversation?** Update `CONTEXT.md` right there.
|
||||
- **User rejects the candidate with a load-bearing reason?** Offer an ADR, framed as: _"Want me to record this as an ADR so future architecture reviews don't re-suggest it?"_ Only offer when the reason would actually be needed by a future explorer to avoid re-suggesting the same thing — skip ephemeral reasons ("not worth it right now") and self-evident ones. See [ADR-FORMAT.md](../grill-with-docs/ADR-FORMAT.md).
|
||||
- **Want to explore alternative interfaces for the deepened module?** See [INTERFACE-DESIGN.md](INTERFACE-DESIGN.md).
|
||||
@@ -0,0 +1,206 @@
|
||||
---
|
||||
name: speckit-analyze
|
||||
description: Perform a non-destructive cross-artifact consistency and quality analysis across spec.md, plan.md, and tasks.md after task generation.
|
||||
version: 1.8.9
|
||||
depends-on:
|
||||
- speckit-tasks
|
||||
---
|
||||
|
||||
## User Input
|
||||
|
||||
```text
|
||||
$ARGUMENTS
|
||||
```
|
||||
|
||||
You **MUST** consider the user input before proceeding (if not empty).
|
||||
|
||||
## Role
|
||||
|
||||
You are the **Antigravity Consistency Analyst**. Your role is to identify inconsistencies, duplications, ambiguities, and underspecified items across the three core artifacts (`spec.md`, `plan.md`, `tasks.md`) before implementation. You act with strict adherence to the project constitution.
|
||||
|
||||
## Task
|
||||
|
||||
### Goal
|
||||
|
||||
Identify inconsistencies, duplications, ambiguities, and underspecified items across the three core artifacts (`spec.md`, `plan.md`, `tasks.md`) before implementation. This command MUST run only after `/speckit-tasks` has successfully produced a complete `tasks.md`.
|
||||
|
||||
## Operating Constraints
|
||||
|
||||
**STRICTLY READ-ONLY**: Do **not** modify any files. Output a structured analysis report. Offer an optional remediation plan (user must explicitly approve before any follow-up editing commands would be invoked manually).
|
||||
|
||||
**Constitution Authority**: The project constitution (`AGENTS.md`) is **non-negotiable** within this analysis scope. Constitution conflicts are automatically CRITICAL and require adjustment of the spec, plan, or tasks—not dilution, reinterpretation, or silent ignoring of the principle. If a principle itself needs to change, that must occur in a separate, explicit constitution update outside `/speckit-analyze`.
|
||||
|
||||
### Steps
|
||||
|
||||
### 1. Initialize Analysis Context
|
||||
|
||||
Run `../scripts/bash/check-prerequisites.sh --json --require-tasks --include-tasks` once from repo root and parse JSON for FEATURE_DIR and AVAILABLE_DOCS. Derive absolute paths:
|
||||
|
||||
- SPEC = FEATURE_DIR/spec.md
|
||||
- PLAN = FEATURE_DIR/plan.md
|
||||
- TASKS = FEATURE_DIR/tasks.md
|
||||
|
||||
Abort with an error message if any required file is missing (instruct the user to run the missing prerequisite command).
|
||||
For single quotes in args like "I'm Groot", use escape syntax: e.g. 'I'\''m Groot' (or double-quote if possible: "I'm Groot").
|
||||
|
||||
### 2. Load Artifacts (Progressive Disclosure)
|
||||
|
||||
Load only the minimal necessary context from each artifact:
|
||||
|
||||
**From spec.md:**
|
||||
|
||||
- Overview/Context
|
||||
- Functional Requirements
|
||||
- Non-Functional Requirements
|
||||
- User Stories
|
||||
- Edge Cases (if present)
|
||||
|
||||
**From plan.md:**
|
||||
|
||||
- Architecture/stack choices
|
||||
- Data Model references
|
||||
- Phases
|
||||
- Technical constraints
|
||||
|
||||
**From tasks.md:**
|
||||
|
||||
- Task IDs
|
||||
- Descriptions
|
||||
- Phase grouping
|
||||
- Parallel markers [P]
|
||||
- Referenced file paths
|
||||
|
||||
**From constitution:**
|
||||
|
||||
- Load `AGENTS.md` for principle validation
|
||||
|
||||
### 3. Build Semantic Models
|
||||
|
||||
Create internal representations (do not include raw artifacts in output):
|
||||
|
||||
- **Requirements inventory**: Each functional + non-functional requirement with a stable key (derive slug based on imperative phrase; e.g., "User can upload file" → `user-can-upload-file`)
|
||||
- **User story/action inventory**: Discrete user actions with acceptance criteria
|
||||
- **Task coverage mapping**: Map each task to one or more requirements or stories (inference by keyword / explicit reference patterns like IDs or key phrases)
|
||||
- **Constitution rule set**: Extract principle names and MUST/SHOULD normative statements
|
||||
|
||||
### 4. Detection Passes (Token-Efficient Analysis)
|
||||
|
||||
Focus on high-signal findings. Limit to 50 findings total; aggregate remainder in overflow summary.
|
||||
|
||||
#### A. Duplication Detection
|
||||
|
||||
- Identify near-duplicate requirements
|
||||
- Mark lower-quality phrasing for consolidation
|
||||
|
||||
#### B. Ambiguity Detection
|
||||
|
||||
- Flag vague adjectives (fast, scalable, secure, intuitive, robust) lacking measurable criteria
|
||||
- Flag unresolved placeholders (TODO, TKTK, ???, `<placeholder>`, etc.)
|
||||
|
||||
#### C. Underspecification
|
||||
|
||||
- Requirements with verbs but missing object or measurable outcome
|
||||
- User stories missing acceptance criteria alignment
|
||||
- Tasks referencing files or components not defined in spec/plan
|
||||
|
||||
#### D. Constitution Alignment
|
||||
|
||||
- Any requirement or plan element conflicting with a MUST principle
|
||||
- Missing mandated sections or quality gates from constitution
|
||||
|
||||
#### E. Coverage Gaps
|
||||
|
||||
- Requirements with zero associated tasks
|
||||
- Tasks with no mapped requirement/story
|
||||
- Non-functional requirements not reflected in tasks (e.g., performance, security)
|
||||
|
||||
#### F. Inconsistency
|
||||
|
||||
- Terminology drift (same concept named differently across files)
|
||||
- Data entities referenced in plan but absent in spec (or vice versa)
|
||||
- Task ordering contradictions (e.g., integration tasks before foundational setup tasks without dependency note)
|
||||
- Conflicting requirements (e.g., one requires Next.js while the other specifies Vue)
|
||||
|
||||
### 5. Severity Assignment
|
||||
|
||||
Use this heuristic to prioritize findings:
|
||||
|
||||
- **CRITICAL**: Violates constitution MUST, missing core spec artifact, or requirement with zero coverage that blocks baseline functionality
|
||||
- **HIGH**: Duplicate or conflicting requirement, ambiguous security/performance attribute, untestable acceptance criterion
|
||||
- **MEDIUM**: Terminology drift, missing non-functional task coverage, underspecified edge case
|
||||
- **LOW**: Style/wording improvements, minor redundancy not affecting execution order
|
||||
|
||||
### 6. Produce Compact Analysis Report
|
||||
|
||||
Output a Markdown report (no file writes) with the following structure:
|
||||
|
||||
## Specification Analysis Report
|
||||
|
||||
| ID | Category | Severity | Location(s) | Summary | Recommendation |
|
||||
| --- | ----------- | -------- | ---------------- | ---------------------------- | ------------------------------------ |
|
||||
| A1 | Duplication | HIGH | spec.md:L120-134 | Two similar requirements ... | Merge phrasing; keep clearer version |
|
||||
|
||||
(Add one row per finding; generate stable IDs prefixed by category initial.)
|
||||
|
||||
**Coverage Summary Table:**
|
||||
|
||||
| Requirement Key | Has Task? | Task IDs | Notes |
|
||||
| --------------- | --------- | -------- | ----- |
|
||||
|
||||
**Constitution Alignment Issues:** (if any)
|
||||
|
||||
**Unmapped Tasks:** (if any)
|
||||
|
||||
**Metrics:**
|
||||
|
||||
- Total Requirements
|
||||
- Total Tasks
|
||||
- Coverage % (requirements with >=1 task)
|
||||
- Ambiguity Count
|
||||
- Duplication Count
|
||||
- Critical Issues Count
|
||||
|
||||
### 7. Provide Next Actions
|
||||
|
||||
At end of report, output a concise Next Actions block:
|
||||
|
||||
- If CRITICAL issues exist: Recommend resolving before `/speckit-implement`
|
||||
- If only LOW/MEDIUM: User may proceed, but provide improvement suggestions
|
||||
- Provide explicit command suggestions: e.g., "Run /speckit-specify with refinement", "Run /speckit-plan to adjust architecture", "Manually edit tasks.md to add coverage for 'performance-metrics'"
|
||||
|
||||
### 8. Offer Remediation
|
||||
|
||||
Ask the user: "Would you like me to suggest concrete remediation edits for the top N issues?" (Do NOT apply them automatically.)
|
||||
|
||||
## Operating Principles
|
||||
|
||||
### Context Efficiency
|
||||
|
||||
- **Minimal high-signal tokens**: Focus on actionable findings, not exhaustive documentation
|
||||
- **Progressive disclosure**: Load artifacts incrementally; don't dump all content into analysis
|
||||
- **Token-efficient output**: Limit findings table to 50 rows; summarize overflow
|
||||
- **Deterministic results**: Rerunning without changes should produce consistent IDs and counts
|
||||
|
||||
### Analysis Guidelines
|
||||
|
||||
- **NEVER modify files** (this is read-only analysis)
|
||||
- **NEVER hallucinate missing sections** (if absent, report them accurately)
|
||||
- **Prioritize constitution violations** (these are always CRITICAL)
|
||||
- **Use examples over exhaustive rules** (cite specific instances, not generic patterns)
|
||||
- **Report zero issues gracefully** (emit success report with coverage statistics)
|
||||
|
||||
## Context
|
||||
|
||||
{{args}}
|
||||
|
||||
---
|
||||
|
||||
## LCBP3-DMS Context (MUST LOAD)
|
||||
|
||||
Before executing, load **[../_LCBP3-CONTEXT.md](../_LCBP3-CONTEXT.md)** to get:
|
||||
|
||||
- Canonical rule sources (AGENTS.md, specs/06-Decision-Records/, specs/05-Engineering-Guidelines/)
|
||||
- Tier 1 non-negotiables (ADR-019 UUID, ADR-009 schema, ADR-016 security, ADR-002 numbering, ADR-008 BullMQ, ADR-018/020 AI boundary, ADR-007 errors)
|
||||
- Domain glossary (Correspondence / RFA / Transmittal / Circulation)
|
||||
- Helper script real paths
|
||||
- Commit checklist
|
||||
@@ -0,0 +1,171 @@
|
||||
---
|
||||
name: speckit-checker
|
||||
description: Run static analysis tools and aggregate results.
|
||||
version: 1.8.9
|
||||
depends-on: []
|
||||
---
|
||||
|
||||
## User Input
|
||||
|
||||
```text
|
||||
$ARGUMENTS
|
||||
```
|
||||
|
||||
You **MUST** consider the user input before proceeding (if not empty).
|
||||
|
||||
## Role
|
||||
|
||||
You are the **Antigravity Static Analyzer**. Your role is to run all applicable static analysis tools and provide a unified report of issues.
|
||||
|
||||
## Task
|
||||
|
||||
### Outline
|
||||
|
||||
Auto-detect available tools, run them, and aggregate results into a prioritized report.
|
||||
|
||||
### Execution Steps
|
||||
|
||||
1. **Detect Project Type and Tools**:
|
||||
|
||||
```bash
|
||||
# Check for config files
|
||||
ls -la | grep -E "(package.json|pyproject.toml|go.mod|Cargo.toml|pom.xml)"
|
||||
|
||||
# Check for linter configs
|
||||
ls -la | grep -E "(eslint|prettier|pylint|golangci|rustfmt)"
|
||||
```
|
||||
|
||||
| Config | Tools to Run |
|
||||
| ---------------- | ----------------------------- |
|
||||
| `package.json` | ESLint, TypeScript, npm audit |
|
||||
| `pyproject.toml` | Pylint/Ruff, mypy, bandit |
|
||||
| `go.mod` | golangci-lint, go vet |
|
||||
| `Cargo.toml` | clippy, cargo audit |
|
||||
| `pom.xml` | SpotBugs, PMD |
|
||||
|
||||
2. **Run Linting**:
|
||||
|
||||
| Stack | Command |
|
||||
| ------- | ---------------------------------------------------------------------------------------- |
|
||||
| Node/TS | `npx eslint . --format json 2>/dev/null` |
|
||||
| Python  | `ruff check . --output-format json 2>/dev/null \|\| pylint --output-format=json **/*.py` |
|
||||
| Go | `golangci-lint run --out-format json` |
|
||||
| Rust | `cargo clippy --message-format=json` |
|
||||
|
||||
3. **Run Type Checking**:
|
||||
|
||||
| Stack | Command |
|
||||
| ---------- | ------------------------------------------ |
|
||||
| TypeScript | `npx tsc --noEmit 2>&1` |
|
||||
| Python | `mypy . --no-error-summary 2>&1` |
|
||||
| Go | `go build ./... 2>&1` (types are built-in) |
|
||||
|
||||
4. **Run Security Scanning**:
|
||||
|
||||
| Stack | Command |
|
||||
| ------ | ------------------------------------------------------- |
|
||||
| Node | `npm audit --json` |
|
||||
| Python | `bandit -r . -f json 2>/dev/null \|\| safety check --json` |
|
||||
| Go | `govulncheck ./... 2>&1` |
|
||||
| Rust | `cargo audit --json` |
|
||||
|
||||
5. **Aggregate and Prioritize**:
|
||||
|
||||
| Category | Priority |
|
||||
| ------------------------ | -------- |
|
||||
| Security (Critical/High) | 🔴 P1 |
|
||||
| Type Errors | 🟠 P2 |
|
||||
| Security (Medium/Low) | 🟡 P3 |
|
||||
| Lint Errors | 🟡 P3 |
|
||||
| Lint Warnings | 🟢 P4 |
|
||||
| Style Issues | ⚪ P5 |
|
||||
|
||||
6. **Generate Report**:
|
||||
|
||||
````markdown
|
||||
# Static Analysis Report
|
||||
|
||||
**Date**: [timestamp]
|
||||
**Project**: [name from package.json/pyproject.toml]
|
||||
**Status**: CLEAN | ISSUES FOUND
|
||||
|
||||
## Tools Run
|
||||
|
||||
| Tool | Status | Issues |
|
||||
| ---------- | ------ | ----------------- |
|
||||
| ESLint | ✅ | 12 |
|
||||
| TypeScript | ✅ | 3 |
|
||||
| npm audit | ⚠️ | 2 vulnerabilities |
|
||||
|
||||
## Summary by Priority
|
||||
|
||||
| Priority | Count |
|
||||
| -------------- | ----- |
|
||||
| 🔴 P1 Critical | X |
|
||||
| 🟠 P2 High | X |
|
||||
| 🟡 P3 Medium | X |
|
||||
| 🟢 P4 Low | X |
|
||||
|
||||
## Issues
|
||||
|
||||
### 🔴 P1: Security Vulnerabilities
|
||||
|
||||
| Package | Severity | Issue | Fix |
|
||||
| ------- | -------- | ------------------- | ------------------ |
|
||||
| lodash | HIGH | Prototype Pollution | Upgrade to 4.17.21 |
|
||||
|
||||
### 🟠 P2: Type Errors
|
||||
|
||||
| File | Line | Error |
|
||||
| ---------- | ---- | ------------------------------------------------ |
|
||||
| src/api.ts | 45 | Type 'string' is not assignable to type 'number' |
|
||||
|
||||
### 🟡 P3: Lint Issues
|
||||
|
||||
| File | Line | Rule | Message |
|
||||
| ------------ | ---- | -------------- | ------------------------------- |
|
||||
| src/utils.ts | 12 | no-unused-vars | 'foo' is defined but never used |
|
||||
|
||||
## Quick Fixes
|
||||
|
||||
```bash
|
||||
# Fix security issues
|
||||
npm audit fix
|
||||
|
||||
# Auto-fix lint issues
|
||||
npx eslint . --fix
|
||||
```
|
||||
````
|
||||
|
||||
## Recommendations
|
||||
1. **Immediate**: Fix P1 security issues
|
||||
2. **Before merge**: Fix P2 type errors
|
||||
3. **Tech debt**: Address P3/P4 lint issues
|
||||
|
||||
```
|
||||
|
||||
```
|
||||
|
||||
7. **Output**:
|
||||
- Display report
|
||||
- Exit with non-zero if P1 or P2 issues exist
|
||||
|
||||
## Operating Principles
|
||||
|
||||
- **Run Everything**: Don't skip tools; aggregate all results
|
||||
- **Be Fast**: Run tools in parallel when possible
|
||||
- **Be Actionable**: Every issue should have a clear fix path
|
||||
- **Don't Duplicate**: Dedupe issues found by multiple tools
|
||||
- **Respect Configs**: Honor project's existing linter configs
|
||||
|
||||
---
|
||||
|
||||
## LCBP3-DMS Context (MUST LOAD)
|
||||
|
||||
Before executing, load **[../_LCBP3-CONTEXT.md](../_LCBP3-CONTEXT.md)** to get:
|
||||
|
||||
- Canonical rule sources (AGENTS.md, specs/06-Decision-Records/, specs/05-Engineering-Guidelines/)
|
||||
- Tier 1 non-negotiables (ADR-019 UUID, ADR-009 schema, ADR-016 security, ADR-002 numbering, ADR-008 BullMQ, ADR-018/020 AI boundary, ADR-007 errors)
|
||||
- Domain glossary (Correspondence / RFA / Transmittal / Circulation)
|
||||
- Helper script real paths
|
||||
- Commit checklist
|
||||
@@ -0,0 +1,314 @@
|
||||
---
|
||||
name: speckit-checklist
|
||||
description: Generate a custom checklist for the current feature based on user requirements.
|
||||
version: 1.8.9
|
||||
---
|
||||
|
||||
## Checklist Purpose: "Unit Tests for English"
|
||||
|
||||
**CRITICAL CONCEPT**: Checklists are **UNIT TESTS FOR REQUIREMENTS WRITING** - they validate the quality, clarity, and completeness of requirements in a given domain.
|
||||
|
||||
**NOT for verification/testing**:
|
||||
|
||||
- ❌ NOT "Verify the button clicks correctly"
|
||||
- ❌ NOT "Test error handling works"
|
||||
- ❌ NOT "Confirm the API returns 200"
|
||||
- ❌ NOT checking if code/implementation matches the spec
|
||||
|
||||
**FOR requirements quality validation**:
|
||||
|
||||
- ✅ "Are visual hierarchy requirements defined for all card types?" (completeness)
|
||||
- ✅ "Is 'prominent display' quantified with specific sizing/positioning?" (clarity)
|
||||
- ✅ "Are hover state requirements consistent across all interactive elements?" (consistency)
|
||||
- ✅ "Are accessibility requirements defined for keyboard navigation?" (coverage)
|
||||
- ✅ "Does the spec define what happens when logo image fails to load?" (edge cases)
|
||||
|
||||
**Metaphor**: If your spec is code written in English, the checklist is its unit test suite. You're testing whether the requirements are well-written, complete, unambiguous, and ready for implementation - NOT whether the implementation works.
|
||||
|
||||
## User Input
|
||||
|
||||
```text
|
||||
$ARGUMENTS
|
||||
```
|
||||
|
||||
You **MUST** consider the user input before proceeding (if not empty).
|
||||
|
||||
## Role
|
||||
|
||||
You are the **Antigravity Quality Gatekeeper**. Your role is to validate the quality of requirements by generating "Unit Tests for English"—checklists that ensure specifications are complete, clear, consistent, and measurable. You don't test the code; you test the documentation that defines it.
|
||||
|
||||
## Task
|
||||
|
||||
### Execution Steps
|
||||
|
||||
1. **Setup**: Run `../scripts/bash/check-prerequisites.sh --json` from repo root and parse JSON for FEATURE_DIR and AVAILABLE_DOCS list.
|
||||
- All file paths must be absolute.
|
||||
   - For single quotes in args like "I'm Groot", use escape syntax: e.g. 'I'\''m Groot' (or double-quote if possible: "I'm Groot").
|
||||
|
||||
2. **Clarify intent (dynamic)**: Derive up to THREE initial contextual clarifying questions (no pre-baked catalog). They MUST:
|
||||
- Be generated from the user's phrasing + extracted signals from spec/plan/tasks
|
||||
- Only ask about information that materially changes checklist content
|
||||
- Be skipped individually if already unambiguous in `$ARGUMENTS`
|
||||
- Prefer precision over breadth
|
||||
|
||||
Generation algorithm:
|
||||
1. Extract signals: feature domain keywords (e.g., auth, latency, UX, API), risk indicators ("critical", "must", "compliance"), stakeholder hints ("QA", "review", "security team"), and explicit deliverables ("a11y", "rollback", "contracts").
|
||||
2. Cluster signals into candidate focus areas (max 4) ranked by relevance.
|
||||
3. Identify probable audience & timing (author, reviewer, QA, release) if not explicit.
|
||||
4. Detect missing dimensions: scope breadth, depth/rigor, risk emphasis, exclusion boundaries, measurable acceptance criteria.
|
||||
5. Formulate questions chosen from these archetypes:
|
||||
- Scope refinement (e.g., "Should this include integration touchpoints with X and Y or stay limited to local module correctness?")
|
||||
- Risk prioritization (e.g., "Which of these potential risk areas should receive mandatory gating checks?")
|
||||
- Depth calibration (e.g., "Is this a lightweight pre-commit sanity list or a formal release gate?")
|
||||
- Audience framing (e.g., "Will this be used by the author only or peers during PR review?")
|
||||
- Boundary exclusion (e.g., "Should we explicitly exclude performance tuning items this round?")
|
||||
- Scenario class gap (e.g., "No recovery flows detected—are rollback / partial failure paths in scope?")
|
||||
|
||||
Question formatting rules:
|
||||
- If presenting options, generate a compact table with columns: Option | Candidate | Why It Matters
|
||||
- Limit to A–E options maximum; omit table if a free-form answer is clearer
|
||||
- Never ask the user to restate what they already said
|
||||
- Avoid speculative categories (no hallucination). If uncertain, ask explicitly: "Confirm whether X belongs in scope."
|
||||
|
||||
Defaults when interaction impossible:
|
||||
- Depth: Standard
|
||||
- Audience: Reviewer (PR) if code-related; Author otherwise
|
||||
- Focus: Top 2 relevance clusters
|
||||
|
||||
Output the questions (label Q1/Q2/Q3). After answers: if ≥2 scenario classes (Alternate / Exception / Recovery / Non-Functional domain) remain unclear, you MAY ask up to TWO more targeted follow‑ups (Q4/Q5) with a one-line justification each (e.g., "Unresolved recovery path risk"). Do not exceed five total questions. Skip escalation if user explicitly declines more.
|
||||
|
||||
3. **Understand user request**: Combine `$ARGUMENTS` + clarifying answers:
|
||||
- Derive checklist theme (e.g., security, review, deploy, ux)
|
||||
- Consolidate explicit must-have items mentioned by user
|
||||
- Map focus selections to category scaffolding
|
||||
- Infer any missing context from spec/plan/tasks (do NOT hallucinate)
|
||||
|
||||
4. **Load feature context**: Read from FEATURE_DIR:
|
||||
- spec.md: Feature requirements and scope
|
||||
- plan.md (if exists): Technical details, dependencies
|
||||
- tasks.md (if exists): Implementation tasks
|
||||
|
||||
**Context Loading Strategy**:
|
||||
- Load only necessary portions relevant to active focus areas (avoid full-file dumping)
|
||||
- Prefer summarizing long sections into concise scenario/requirement bullets
|
||||
- Use progressive disclosure: add follow-on retrieval only if gaps detected
|
||||
- If source docs are large, generate interim summary items instead of embedding raw text
|
||||
|
||||
5. **Generate checklist** - Create "Unit Tests for Requirements":
|
||||
- Create `FEATURE_DIR/checklists/` directory if it doesn't exist
|
||||
- Generate unique checklist filename:
|
||||
- Use short, descriptive name based on domain (e.g., `ux.md`, `api.md`, `security.md`)
|
||||
- Format: `[domain].md`
|
||||
- If file exists, append to existing file
|
||||
- Number items sequentially starting from CHK001
|
||||
   - Each `/speckit-checklist` run for a new domain creates a NEW file; existing checklists are never overwritten (same-domain runs append, as noted above)
|
||||
|
||||
**CORE PRINCIPLE - Test the Requirements, Not the Implementation**:
|
||||
Every checklist item MUST evaluate the REQUIREMENTS THEMSELVES for:
|
||||
- **Completeness**: Are all necessary requirements present?
|
||||
- **Clarity**: Are requirements unambiguous and specific?
|
||||
- **Consistency**: Do requirements align with each other?
|
||||
- **Measurability**: Can requirements be objectively verified?
|
||||
- **Coverage**: Are all scenarios/edge cases addressed?
|
||||
|
||||
**Category Structure** - Group items by requirement quality dimensions:
|
||||
- **Requirement Completeness** (Are all necessary requirements documented?)
|
||||
- **Requirement Clarity** (Are requirements specific and unambiguous?)
|
||||
- **Requirement Consistency** (Do requirements align without conflicts?)
|
||||
- **Acceptance Criteria Quality** (Are success criteria measurable?)
|
||||
- **Scenario Coverage** (Are all flows/cases addressed?)
|
||||
- **Edge Case Coverage** (Are boundary conditions defined?)
|
||||
- **Non-Functional Requirements** (Performance, Security, Accessibility, etc. - are they specified?)
|
||||
- **Dependencies & Assumptions** (Are they documented and validated?)
|
||||
- **Ambiguities & Conflicts** (What needs clarification?)
|
||||
|
||||
**HOW TO WRITE CHECKLIST ITEMS - "Unit Tests for English"**:
|
||||
|
||||
❌ **WRONG** (Testing implementation):
|
||||
- "Verify landing page displays 3 episode cards"
|
||||
- "Test hover states work on desktop"
|
||||
- "Confirm logo click navigates home"
|
||||
|
||||
✅ **CORRECT** (Testing requirements quality):
|
||||
- "Are the exact number and layout of featured episodes specified?" [Completeness]
|
||||
- "Is 'prominent display' quantified with specific sizing/positioning?" [Clarity]
|
||||
- "Are hover state requirements consistent across all interactive elements?" [Consistency]
|
||||
- "Are keyboard navigation requirements defined for all interactive UI?" [Coverage]
|
||||
- "Is the fallback behavior specified when logo image fails to load?" [Edge Cases]
|
||||
- "Are loading states defined for asynchronous episode data?" [Completeness]
|
||||
- "Does the spec define visual hierarchy for competing UI elements?" [Clarity]
|
||||
|
||||
**ITEM STRUCTURE**:
|
||||
Each item should follow this pattern:
|
||||
- Question format asking about requirement quality
|
||||
- Focus on what's WRITTEN (or not written) in the spec/plan
|
||||
- Include quality dimension in brackets [Completeness/Clarity/Consistency/etc.]
|
||||
- Reference spec section `[Spec §X.Y]` when checking existing requirements
|
||||
- Use `[Gap]` marker when checking for missing requirements
|
||||
|
||||
**EXAMPLES BY QUALITY DIMENSION**:
|
||||
|
||||
Completeness:
|
||||
- "Are error handling requirements defined for all API failure modes? [Gap]"
|
||||
- "Are accessibility requirements specified for all interactive elements? [Completeness]"
|
||||
- "Are mobile breakpoint requirements defined for responsive layouts? [Gap]"
|
||||
|
||||
Clarity:
|
||||
- "Is 'fast loading' quantified with specific timing thresholds? [Clarity, Spec §NFR-2]"
|
||||
- "Are 'related episodes' selection criteria explicitly defined? [Clarity, Spec §FR-5]"
|
||||
- "Is 'prominent' defined with measurable visual properties? [Ambiguity, Spec §FR-4]"
|
||||
|
||||
Consistency:
|
||||
- "Do navigation requirements align across all pages? [Consistency, Spec §FR-10]"
|
||||
- "Are card component requirements consistent between landing and detail pages? [Consistency]"
|
||||
|
||||
Coverage:
|
||||
- "Are requirements defined for zero-state scenarios (no episodes)? [Coverage, Edge Case]"
|
||||
- "Are concurrent user interaction scenarios addressed? [Coverage, Gap]"
|
||||
- "Are requirements specified for partial data loading failures? [Coverage, Exception Flow]"
|
||||
|
||||
Measurability:
|
||||
- "Are visual hierarchy requirements measurable/testable? [Acceptance Criteria, Spec §FR-1]"
|
||||
- "Can 'balanced visual weight' be objectively verified? [Measurability, Spec §FR-2]"
|
||||
|
||||
**Scenario Classification & Coverage** (Requirements Quality Focus):
|
||||
- Check if requirements exist for: Primary, Alternate, Exception/Error, Recovery, Non-Functional scenarios
|
||||
- For each scenario class, ask: "Are [scenario type] requirements complete, clear, and consistent?"
|
||||
- If scenario class missing: "Are [scenario type] requirements intentionally excluded or missing? [Gap]"
|
||||
- Include resilience/rollback when state mutation occurs: "Are rollback requirements defined for migration failures? [Gap]"
|
||||
|
||||
**Traceability Requirements**:
|
||||
- MINIMUM: ≥80% of items MUST include at least one traceability reference
|
||||
- Each item should reference: spec section `[Spec §X.Y]`, or use markers: `[Gap]`, `[Ambiguity]`, `[Conflict]`, `[Assumption]`
|
||||
- If no ID system exists: "Is a requirement & acceptance criteria ID scheme established? [Traceability]"
|
||||
|
||||
**Surface & Resolve Issues** (Requirements Quality Problems):
|
||||
- Ask questions about the requirements themselves:
|
||||
- Ambiguities: "Is the term 'fast' quantified with specific metrics? [Ambiguity, Spec §NFR-1]"
|
||||
- Conflicts: "Do navigation requirements conflict between §FR-10 and §FR-10a? [Conflict]"
|
||||
- Assumptions: "Is the assumption of 'always available podcast API' validated? [Assumption]"
|
||||
- Dependencies: "Are external podcast API requirements documented? [Dependency, Gap]"
|
||||
- Missing definitions: "Is 'visual hierarchy' defined with measurable criteria? [Gap]"
|
||||
|
||||
**Content Consolidation**:
|
||||
- Soft cap: If raw candidate items > 40, prioritize by risk/impact
|
||||
- Merge near-duplicates checking the same requirement aspect
|
||||
- If >5 low-impact edge cases, create one item: "Are edge cases X, Y, Z addressed in requirements? [Coverage]"
|
||||
|
||||
**🚫 ABSOLUTELY PROHIBITED** - These make it an implementation test, not a requirements test:
|
||||
- ❌ Any item starting with "Verify", "Test", "Confirm", "Check" + implementation behavior
|
||||
- ❌ References to code execution, user actions, system behavior
|
||||
- ❌ "Displays correctly", "works properly", "functions as expected"
|
||||
- ❌ "Click", "navigate", "render", "load", "execute"
|
||||
- ❌ Test cases, test plans, QA procedures
|
||||
- ❌ Implementation details (frameworks, APIs, algorithms)
|
||||
|
||||
**✅ REQUIRED PATTERNS** - These test requirements quality:
|
||||
- ✅ "Are [requirement type] defined/specified/documented for [scenario]?"
|
||||
- ✅ "Is [vague term] quantified/clarified with specific criteria?"
|
||||
- ✅ "Are requirements consistent between [section A] and [section B]?"
|
||||
- ✅ "Can [requirement] be objectively measured/verified?"
|
||||
- ✅ "Are [edge cases/scenarios] addressed in requirements?"
|
||||
- ✅ "Does the spec define [missing aspect]?"
|
||||
|
||||
b. **Structure Reference**: Generate the checklist following the canonical template in `templates/checklist-template.md` for title, meta section, category headings, and ID formatting. If template is unavailable, use: H1 title, purpose/created meta lines, `##` category sections containing `- [ ] CHK### <requirement item>` lines with globally incrementing IDs starting at CHK001.
|
||||
|
||||
6. **Report**: Output full path to created checklist, item count, and remind user that each run creates a new file. Summarize:
|
||||
- Focus areas selected
|
||||
- Depth level
|
||||
- Actor/timing
|
||||
- Any explicit user-specified must-have items incorporated
|
||||
|
||||
**Important**: Each `/speckit-checklist` command invocation creates a new checklist file with a short, descriptive name, unless a file with that name already exists. This allows:
|
||||
|
||||
- Multiple checklists of different types (e.g., `ux.md`, `test.md`, `security.md`)
|
||||
- Simple, memorable filenames that indicate checklist purpose
|
||||
- Easy identification and navigation in the `checklists/` folder
|
||||
|
||||
To avoid clutter, use descriptive types and clean up obsolete checklists when done.
|
||||
|
||||
## Example Checklist Types & Sample Items
|
||||
|
||||
**UX Requirements Quality:** `ux.md`
|
||||
|
||||
Sample items (testing the requirements, NOT the implementation):
|
||||
|
||||
- "Are visual hierarchy requirements defined with measurable criteria? [Clarity, Spec §FR-1]"
|
||||
- "Is the number and positioning of UI elements explicitly specified? [Completeness, Spec §FR-1]"
|
||||
- "Are interaction state requirements (hover, focus, active) consistently defined? [Consistency]"
|
||||
- "Are accessibility requirements specified for all interactive elements? [Coverage, Gap]"
|
||||
- "Is fallback behavior defined when images fail to load? [Edge Case, Gap]"
|
||||
- "Can 'prominent display' be objectively measured? [Measurability, Spec §FR-4]"
|
||||
|
||||
**API Requirements Quality:** `api.md`
|
||||
|
||||
Sample items:
|
||||
|
||||
- "Are error response formats specified for all failure scenarios? [Completeness]"
|
||||
- "Are rate limiting requirements quantified with specific thresholds? [Clarity]"
|
||||
- "Are authentication requirements consistent across all endpoints? [Consistency]"
|
||||
- "Are retry/timeout requirements defined for external dependencies? [Coverage, Gap]"
|
||||
- "Is versioning strategy documented in requirements? [Gap]"
|
||||
|
||||
**Performance Requirements Quality:** `performance.md`
|
||||
|
||||
Sample items:
|
||||
|
||||
- "Are performance requirements quantified with specific metrics? [Clarity]"
|
||||
- "Are performance targets defined for all critical user journeys? [Coverage]"
|
||||
- "Are performance requirements under different load conditions specified? [Completeness]"
|
||||
- "Can performance requirements be objectively measured? [Measurability]"
|
||||
- "Are degradation requirements defined for high-load scenarios? [Edge Case, Gap]"
|
||||
|
||||
**Security Requirements Quality:** `security.md`
|
||||
|
||||
Sample items:
|
||||
|
||||
- "Are authentication requirements specified for all protected resources? [Coverage]"
|
||||
- "Are data protection requirements defined for sensitive information? [Completeness]"
|
||||
- "Is the threat model documented and requirements aligned to it? [Traceability]"
|
||||
- "Are security requirements consistent with compliance obligations? [Consistency]"
|
||||
- "Are security failure/breach response requirements defined? [Gap, Exception Flow]"
|
||||
|
||||
## Anti-Examples: What NOT To Do
|
||||
|
||||
**❌ WRONG - These test implementation, not requirements:**
|
||||
|
||||
```markdown
|
||||
- [ ] CHK001 - Verify landing page displays 3 episode cards [Spec §FR-001]
|
||||
- [ ] CHK002 - Test hover states work correctly on desktop [Spec §FR-003]
|
||||
- [ ] CHK003 - Confirm logo click navigates to home page [Spec §FR-010]
|
||||
- [ ] CHK004 - Check that related episodes section shows 3-5 items [Spec §FR-005]
|
||||
```
|
||||
|
||||
**✅ CORRECT - These test requirements quality:**
|
||||
|
||||
```markdown
|
||||
- [ ] CHK001 - Are the number and layout of featured episodes explicitly specified? [Completeness, Spec §FR-001]
|
||||
- [ ] CHK002 - Are hover state requirements consistently defined for all interactive elements? [Consistency, Spec §FR-003]
|
||||
- [ ] CHK003 - Are navigation requirements clear for all clickable brand elements? [Clarity, Spec §FR-010]
|
||||
- [ ] CHK004 - Is the selection criteria for related episodes documented? [Gap, Spec §FR-005]
|
||||
- [ ] CHK005 - Are loading state requirements defined for asynchronous episode data? [Gap]
|
||||
- [ ] CHK006 - Can "visual hierarchy" requirements be objectively measured? [Measurability, Spec §FR-001]
|
||||
```
|
||||
|
||||
**Key Differences:**
|
||||
|
||||
- Wrong: Tests if the system works correctly
|
||||
- Correct: Tests if the requirements are written correctly
|
||||
- Wrong: Verification of behavior
|
||||
- Correct: Validation of requirement quality
|
||||
- Wrong: "Does it do X?"
|
||||
- Correct: "Is X clearly specified?"
|
||||
|
||||
---
|
||||
|
||||
## LCBP3-DMS Context (MUST LOAD)
|
||||
|
||||
Before executing, load **[../_LCBP3-CONTEXT.md](../_LCBP3-CONTEXT.md)** to get:
|
||||
|
||||
- Canonical rule sources (AGENTS.md, specs/06-Decision-Records/, specs/05-Engineering-Guidelines/)
|
||||
- Tier 1 non-negotiables (ADR-019 UUID, ADR-009 schema, ADR-016 security, ADR-002 numbering, ADR-008 BullMQ, ADR-018/020 AI boundary, ADR-007 errors)
|
||||
- Domain glossary (Correspondence / RFA / Transmittal / Circulation)
|
||||
- Helper script real paths
|
||||
- Commit checklist
|
||||
@@ -0,0 +1,40 @@
|
||||
# [CHECKLIST TYPE] Checklist: [FEATURE NAME]
|
||||
|
||||
**Purpose**: [Brief description of what this checklist covers]
|
||||
**Created**: [DATE]
|
||||
**Feature**: [Link to spec.md or relevant documentation]
|
||||
|
||||
**Note**: This checklist is generated by the `/speckit-checklist` command based on feature context and requirements.
|
||||
|
||||
<!--
|
||||
============================================================================
|
||||
IMPORTANT: The checklist items below are SAMPLE ITEMS for illustration only.
|
||||
|
||||
The /speckit-checklist command MUST replace these with actual items based on:
|
||||
- User's specific checklist request
|
||||
- Feature requirements from spec.md
|
||||
- Technical context from plan.md
|
||||
- Implementation details from tasks.md
|
||||
|
||||
DO NOT keep these sample items in the generated checklist file.
|
||||
============================================================================
|
||||
-->
|
||||
|
||||
## [Category 1]
|
||||
|
||||
- [ ] CHK001 First checklist item with clear action
|
||||
- [ ] CHK002 Second checklist item
|
||||
- [ ] CHK003 Third checklist item
|
||||
|
||||
## [Category 2]
|
||||
|
||||
- [ ] CHK004 Another category item
|
||||
- [ ] CHK005 Item with specific criteria
|
||||
- [ ] CHK006 Final item in this category
|
||||
|
||||
## Notes
|
||||
|
||||
- Check items off as completed: `[x]`
|
||||
- Add comments or findings inline
|
||||
- Link to relevant resources or documentation
|
||||
- Items are numbered sequentially for easy reference
|
||||
@@ -0,0 +1,203 @@
|
||||
---
|
||||
name: speckit-clarify
|
||||
description: Identify underspecified areas in the current feature spec by asking up to 5 highly targeted clarification questions and encoding answers back into the spec.
|
||||
version: 1.8.9
|
||||
depends-on:
|
||||
- speckit-specify
|
||||
handoffs:
|
||||
- label: Build Technical Plan
|
||||
agent: speckit-plan
|
||||
prompt: Create a plan for the spec. I am building with...
|
||||
---
|
||||
|
||||
## User Input
|
||||
|
||||
```text
|
||||
$ARGUMENTS
|
||||
```
|
||||
|
||||
You **MUST** consider the user input before proceeding (if not empty).
|
||||
|
||||
## Role
|
||||
|
||||
You are the **Antigravity Ambiguity Buster**. Your role is to interrogate specifications for logical gaps, missing constraints, or vague requirements. You resolve these via structured questioning to minimize rework risk.
|
||||
|
||||
## Task
|
||||
|
||||
### Outline
|
||||
|
||||
Goal: Detect and reduce ambiguity or missing decision points in the active feature specification and record the clarifications directly in the spec file.
|
||||
|
||||
Note: This clarification workflow is expected to run (and be completed) BEFORE invoking `/speckit-plan`. If the user explicitly states they are skipping clarification (e.g., exploratory spike), you may proceed, but must warn that downstream rework risk increases.
|
||||
|
||||
Execution steps:
|
||||
|
||||
1. Run `../scripts/bash/check-prerequisites.sh --json --paths-only` from repo root **once** (combined `--json --paths-only` mode / `-Json -PathsOnly`). Parse minimal JSON payload fields:
|
||||
- `FEATURE_DIR`
|
||||
- `FEATURE_SPEC`
|
||||
- (Optionally capture `IMPL_PLAN`, `TASKS` for future chained flows.)
|
||||
- If JSON parsing fails, abort and instruct user to re-run `/speckit-specify` or verify feature branch environment.
|
||||
- For single quotes in args like "I'm Groot", use escape syntax: e.g. 'I'\\''m Groot' (or double-quote if possible: "I'm Groot").
|
||||
|
||||
2. Load the current spec file. Perform a structured ambiguity & coverage scan using this taxonomy. For each category, mark status: Clear / Partial / Missing. Produce an internal coverage map used for prioritization (do not output raw map unless no questions will be asked).
|
||||
|
||||
Functional Scope & Behavior:
|
||||
- Core user goals & success criteria
|
||||
- Explicit out-of-scope declarations
|
||||
- User roles / personas differentiation
|
||||
|
||||
Domain & Data Model:
|
||||
- Entities, attributes, relationships
|
||||
- Identity & uniqueness rules
|
||||
- Lifecycle/state transitions
|
||||
- Data volume / scale assumptions
|
||||
|
||||
Interaction & UX Flow:
|
||||
- Critical user journeys / sequences
|
||||
- Error/empty/loading states
|
||||
- Accessibility or localization notes
|
||||
|
||||
Non-Functional Quality Attributes:
|
||||
- Performance (latency, throughput targets)
|
||||
- Scalability (horizontal/vertical, limits)
|
||||
- Reliability & availability (uptime, recovery expectations)
|
||||
- Observability (logging, metrics, tracing signals)
|
||||
- Security & privacy (authN/Z, data protection, threat assumptions)
|
||||
- Compliance / regulatory constraints (if any)
|
||||
|
||||
Integration & External Dependencies:
|
||||
- External services/APIs and failure modes
|
||||
- Data import/export formats
|
||||
- Protocol/versioning assumptions
|
||||
|
||||
Edge Cases & Failure Handling:
|
||||
- Negative scenarios
|
||||
- Rate limiting / throttling
|
||||
- Conflict resolution (e.g., concurrent edits)
|
||||
|
||||
Constraints & Tradeoffs:
|
||||
- Technical constraints (language, storage, hosting)
|
||||
- Explicit tradeoffs or rejected alternatives
|
||||
|
||||
Terminology & Consistency:
|
||||
- Canonical glossary terms
|
||||
- Avoided synonyms / deprecated terms
|
||||
|
||||
Completion Signals:
|
||||
- Acceptance criteria testability
|
||||
- Measurable Definition of Done style indicators
|
||||
|
||||
Misc / Placeholders:
|
||||
- TODO markers / unresolved decisions
|
||||
- Ambiguous adjectives ("robust", "intuitive") lacking quantification
|
||||
|
||||
For each category with Partial or Missing status, add a candidate question opportunity unless:
|
||||
- Clarification would not materially change implementation or validation strategy
|
||||
- Information is better deferred to planning phase (note internally)
|
||||
|
||||
3. Generate (internally) a prioritized queue of candidate clarification questions (maximum 5). Do NOT output them all at once. Apply these constraints:
|
||||
- Maximum of 5 total questions across the whole session (matching the queue cap above; retries for a single question do not count as new questions).
|
||||
- Each question must be answerable with EITHER:
|
||||
- A short multiple‑choice selection (2–5 distinct, mutually exclusive options), OR
|
||||
- A one-word / short‑phrase answer (explicitly constrain: "Answer in <=5 words").
|
||||
- Only include questions whose answers materially impact architecture, data modeling, task decomposition, test design, UX behavior, operational readiness, or compliance validation.
|
||||
- Ensure category coverage balance: attempt to cover the highest impact unresolved categories first; avoid asking two low-impact questions when a single high-impact area (e.g., security posture) is unresolved.
|
||||
- Exclude questions already answered, trivial stylistic preferences, or plan-level execution details (unless blocking correctness).
|
||||
- Favor clarifications that reduce downstream rework risk or prevent misaligned acceptance tests.
|
||||
- If more than 5 categories remain unresolved, select the top 5 by (Impact \* Uncertainty) heuristic.
|
||||
|
||||
4. Sequential questioning loop (interactive):
|
||||
- Present EXACTLY ONE question at a time.
|
||||
- For multiple‑choice questions:
|
||||
- **Analyze all options** and determine the **most suitable option** based on:
|
||||
- Best practices for the project type
|
||||
- Common patterns in similar implementations
|
||||
- Risk reduction (security, performance, maintainability)
|
||||
- Alignment with any explicit project goals or constraints visible in the spec
|
||||
- Present your **recommended option prominently** at the top with clear reasoning (1-2 sentences explaining why this is the best choice).
|
||||
- Format as: `**Recommended:** Option [X] - <reasoning>`
|
||||
- Then render all options as a Markdown table:
|
||||
|
||||
| Option | Description |
|
||||
| ------ | --------------------------------------------------------------------------------------------------- |
|
||||
| A | <Option A description> |
|
||||
| B | <Option B description> |
|
||||
| C | <Option C description> (add D/E as needed up to 5) |
|
||||
| Short | Provide a different short answer (<=5 words) (Include only if free-form alternative is appropriate) |
|
||||
- After the table, add: `You can reply with the option letter (e.g., "A"), accept the recommendation by saying "yes" or "recommended", or provide your own short answer.`
|
||||
|
||||
- For short‑answer style (no meaningful discrete options):
|
||||
- Provide your **suggested answer** based on best practices and context.
|
||||
- Format as: `**Suggested:** <your proposed answer> - <brief reasoning>`
|
||||
- Then output: `Format: Short answer (<=5 words). You can accept the suggestion by saying "yes" or "suggested", or provide your own answer.`
|
||||
- After the user answers:
|
||||
- If the user replies with "yes", "recommended", or "suggested", use your previously stated recommendation/suggestion as the answer.
|
||||
- Otherwise, validate the answer maps to one option or fits the <=5 word constraint.
|
||||
- If ambiguous, ask for a quick disambiguation (the retry still counts as the same question; do not advance).
|
||||
- Once satisfactory, record it in working memory (do not yet write to disk) and move to the next queued question.
|
||||
- Stop asking further questions when:
|
||||
- All critical ambiguities resolved early (remaining queued items become unnecessary), OR
|
||||
- User signals completion ("done", "good", "no more"), OR
|
||||
- You reach 5 asked questions.
|
||||
- Never reveal future queued questions in advance.
|
||||
- If no valid questions exist at start, immediately report no critical ambiguities.
|
||||
|
||||
5. Integration after EACH accepted answer (incremental update approach):
|
||||
- Maintain in-memory representation of the spec (loaded once at start) plus the raw file contents.
|
||||
- For the first integrated answer in this session:
|
||||
- Ensure a `## Clarifications` section exists (create it just after the highest-level contextual/overview section per the spec template if missing).
|
||||
- Under it, create (if not present) a `### Session YYYY-MM-DD` subheading for today.
|
||||
- Append a bullet line immediately after acceptance: `- Q: <question> → A: <final answer>`.
|
||||
- Then immediately apply the clarification to the most appropriate section(s):
|
||||
- Functional ambiguity → Update or add a bullet in Functional Requirements.
|
||||
- User interaction / actor distinction → Update User Stories or Actors subsection (if present) with clarified role, constraint, or scenario.
|
||||
- Data shape / entities → Update Data Model (add fields, types, relationships) preserving ordering; note added constraints succinctly.
|
||||
- Non-functional constraint → Add/modify measurable criteria in Non-Functional / Quality Attributes section (convert vague adjective to metric or explicit target).
|
||||
- Edge case / negative flow → Add a new bullet under Edge Cases / Error Handling (or create such subsection if template provides placeholder for it).
|
||||
- Terminology conflict → Normalize term across spec; retain original only if necessary by adding `(formerly referred to as "X")` once.
|
||||
- If the clarification invalidates an earlier ambiguous statement, replace that statement instead of duplicating; leave no obsolete contradictory text.
|
||||
- Save the spec file AFTER each integration to minimize risk of context loss (atomic overwrite).
|
||||
- Preserve formatting: do not reorder unrelated sections; keep heading hierarchy intact.
|
||||
- Keep each inserted clarification minimal and testable (avoid narrative drift).
|
||||
|
||||
6. Validation (performed after EACH write plus final pass):
|
||||
- Clarifications session contains exactly one bullet per accepted answer (no duplicates).
|
||||
- Total asked (accepted) questions ≤ 5.
|
||||
- Updated sections contain no lingering vague placeholders the new answer was meant to resolve.
|
||||
- No contradictory earlier statement remains (scan for now-invalid alternative choices removed).
|
||||
- Markdown structure valid; only allowed new headings: `## Clarifications`, `### Session YYYY-MM-DD`.
|
||||
- Terminology consistency: same canonical term used across all updated sections.
|
||||
|
||||
7. Write the updated spec back to `FEATURE_SPEC`.
|
||||
|
||||
8. Report completion (after questioning loop ends or early termination):
|
||||
- Number of questions asked & answered.
|
||||
- Path to updated spec.
|
||||
- Sections touched (list names).
|
||||
- Coverage summary table listing each taxonomy category with Status: Resolved (was Partial/Missing and addressed), Deferred (exceeds question quota or better suited for planning), Clear (already sufficient), Outstanding (still Partial/Missing but low impact).
|
||||
- If any Outstanding or Deferred remain, recommend whether to proceed to `/speckit-plan` or run `/speckit-clarify` again later post-plan.
|
||||
- Suggested next command.
|
||||
|
||||
Behavior rules:
|
||||
|
||||
- If no meaningful ambiguities found (or all potential questions would be low-impact), respond: "No critical ambiguities detected worth formal clarification." and suggest proceeding.
|
||||
- If spec file missing, instruct user to run `/speckit-specify` first (do not create a new spec here).
|
||||
- Never exceed 5 total asked questions (clarification retries for a single question do not count as new questions).
|
||||
- Avoid speculative tech stack questions unless the absence blocks functional clarity.
|
||||
- Respect user early termination signals ("stop", "done", "proceed").
|
||||
- If no questions asked due to full coverage, output a compact coverage summary (all categories Clear) then suggest advancing.
|
||||
- If quota reached with unresolved high-impact categories remaining, explicitly flag them under Deferred with rationale.
|
||||
|
||||
Context for prioritization: {{args}}
|
||||
|
||||
---
|
||||
|
||||
## LCBP3-DMS Context (MUST LOAD)
|
||||
|
||||
Before executing, load **[../_LCBP3-CONTEXT.md](../_LCBP3-CONTEXT.md)** to get:
|
||||
|
||||
- Canonical rule sources (AGENTS.md, specs/06-Decision-Records/, specs/05-Engineering-Guidelines/)
|
||||
- Tier 1 non-negotiables (ADR-019 UUID, ADR-009 schema, ADR-016 security, ADR-002 numbering, ADR-008 BullMQ, ADR-018/020 AI boundary, ADR-007 errors)
|
||||
- Domain glossary (Correspondence / RFA / Transmittal / Circulation)
|
||||
- Helper script real paths
|
||||
- Commit checklist
|
||||
@@ -0,0 +1,102 @@
|
||||
---
|
||||
name: speckit-constitution
|
||||
description: Create or update the project constitution from interactive or provided principle inputs, ensuring all dependent templates stay in sync.
|
||||
version: 1.8.9
|
||||
handoffs:
|
||||
- label: Build Specification
|
||||
agent: speckit-specify
|
||||
prompt: Implement the feature specification based on the updated constitution. I want to build...
|
||||
---
|
||||
|
||||
## User Input
|
||||
|
||||
```text
|
||||
$ARGUMENTS
|
||||
```
|
||||
|
||||
You **MUST** consider the user input before proceeding (if not empty).
|
||||
|
||||
## Role
|
||||
|
||||
You are the **Antigravity Governance Architect**. Your role is to establish and maintain the project's "Source of Law"—the constitution. You ensure that all project principles, standards, and non-negotiables are clearly documented and kept in sync across all templates and workflows.
|
||||
|
||||
## Task
|
||||
|
||||
### Outline
|
||||
|
||||
You are updating the project constitution at `AGENTS.md`. This file is a TEMPLATE containing placeholder tokens in square brackets (e.g. `[PROJECT_NAME]`, `[PRINCIPLE_1_NAME]`). Your job is to (a) collect/derive concrete values, (b) fill the template precisely, and (c) propagate any amendments across dependent artifacts.
|
||||
|
||||
Follow this execution flow:
|
||||
|
||||
1. Load the existing constitution template at `AGENTS.md`.
|
||||
- Identify every placeholder token of the form `[ALL_CAPS_IDENTIFIER]`.
|
||||
**IMPORTANT**: The user might require fewer or more principles than the ones used in the template. If a number is specified, respect it and follow the general template. You will update the doc accordingly.
|
||||
|
||||
2. Collect/derive values for placeholders:
|
||||
- If user input (conversation) supplies a value, use it.
|
||||
- Otherwise infer from existing repo context (README, docs, prior constitution versions if embedded).
|
||||
- For governance dates: `RATIFICATION_DATE` is the original adoption date (if unknown ask or mark TODO), `LAST_AMENDED_DATE` is today if changes are made, otherwise keep previous.
|
||||
- `CONSTITUTION_VERSION` must increment according to semantic versioning rules:
|
||||
- MAJOR: Backward incompatible governance/principle removals or redefinitions.
|
||||
- MINOR: New principle/section added or materially expanded guidance.
|
||||
- PATCH: Clarifications, wording, typo fixes, non-semantic refinements.
|
||||
- If version bump type ambiguous, propose reasoning before finalizing.
|
||||
|
||||
3. Draft the updated constitution content:
|
||||
- Replace every placeholder with concrete text (no bracketed tokens left except intentionally retained template slots that the project has chosen not to define yet—explicitly justify any left).
|
||||
- Preserve heading hierarchy; template comments can be removed once their placeholders are replaced, unless they still add clarifying guidance.
|
||||
- Ensure each Principle section: succinct name line, paragraph (or bullet list) capturing non‑negotiable rules, explicit rationale if not obvious.
|
||||
- Ensure Governance section lists amendment procedure, versioning policy, and compliance review expectations.
|
||||
|
||||
4. Consistency propagation checklist (convert prior checklist into active validations):
|
||||
- Read `.agents/skills/speckit-plan/templates/plan-template.md` and ensure any "Constitution Check" or rules align with updated principles.
|
||||
- Read `.agents/skills/speckit-specify/templates/spec-template.md` for scope/requirements alignment—update if constitution adds/removes mandatory sections or constraints.
|
||||
- Read `.agents/skills/speckit-tasks/templates/tasks-template.md` and ensure task categorization reflects new or removed principle-driven task types (e.g., observability, versioning, testing discipline).
|
||||
- Read each command file in `.agents/skills/*.md` (including this one) to verify no outdated references (agent-specific names like CLAUDE only) remain when generic guidance is required.
|
||||
- Read any runtime guidance docs (e.g., `README.md`, `docs/quickstart.md`, or agent-specific guidance files if present). Update references to principles changed.
|
||||
|
||||
5. Produce a Sync Impact Report (prepend as an HTML comment at top of the constitution file after update):
|
||||
- Version change: old → new
|
||||
- List of modified principles (old title → new title if renamed)
|
||||
- Added sections
|
||||
- Removed sections
|
||||
- Templates requiring updates (✅ updated / ⚠ pending) with file paths
|
||||
- Follow-up TODOs if any placeholders intentionally deferred.
|
||||
|
||||
6. Validation before final output:
|
||||
- No remaining unexplained bracket tokens.
|
||||
- Version line matches report.
|
||||
- Dates ISO format YYYY-MM-DD.
|
||||
- Principles are declarative, testable, and free of vague language ("should" → replace with MUST/SHOULD rationale where appropriate).
|
||||
|
||||
7. Write the completed constitution back to `AGENTS.md` (overwrite).
|
||||
|
||||
8. Output a final summary to the user with:
|
||||
- New version and bump rationale.
|
||||
- Any files flagged for manual follow-up.
|
||||
- Suggested commit message (e.g., `docs: amend constitution to vX.Y.Z (principle additions + governance update)`).
|
||||
|
||||
Formatting & Style Requirements:
|
||||
|
||||
- Use Markdown headings exactly as in the template (do not demote/promote levels).
|
||||
- Wrap long rationale lines to keep readability (<100 chars ideally) but do not hard enforce with awkward breaks.
|
||||
- Keep a single blank line between sections.
|
||||
- Avoid trailing whitespace.
|
||||
|
||||
If the user supplies partial updates (e.g., only one principle revision), still perform validation and version decision steps.
|
||||
|
||||
If critical info missing (e.g., ratification date truly unknown), insert `TODO(<FIELD_NAME>): explanation` and include in the Sync Impact Report under deferred items.
|
||||
|
||||
Do not create a new template; always operate on the existing `AGENTS.md` file.
|
||||
|
||||
---
|
||||
|
||||
## LCBP3-DMS Context (MUST LOAD)
|
||||
|
||||
Before executing, load **[../_LCBP3-CONTEXT.md](../_LCBP3-CONTEXT.md)** to get:
|
||||
|
||||
- Canonical rule sources (AGENTS.md, specs/06-Decision-Records/, specs/05-Engineering-Guidelines/)
|
||||
- Tier 1 non-negotiables (ADR-019 UUID, ADR-009 schema, ADR-016 security, ADR-002 numbering, ADR-008 BullMQ, ADR-018/020 AI boundary, ADR-007 errors)
|
||||
- Domain glossary (Correspondence / RFA / Transmittal / Circulation)
|
||||
- Helper script real paths
|
||||
- Commit checklist
|
||||
@@ -0,0 +1,98 @@
|
||||
---
|
||||
name: speckit-diff
|
||||
description: Compare two versions of a spec or plan to highlight changes.
|
||||
version: 1.8.9
|
||||
depends-on: []
|
||||
---
|
||||
|
||||
## User Input
|
||||
|
||||
```text
|
||||
$ARGUMENTS
|
||||
```
|
||||
|
||||
You **MUST** consider the user input before proceeding (if not empty).
|
||||
|
||||
## Role
|
||||
|
||||
You are the **Antigravity Diff Analyst**. Your role is to compare specification/plan versions and produce clear, actionable change summaries.
|
||||
|
||||
## Task
|
||||
|
||||
### Outline
|
||||
|
||||
Compare two versions of a specification artifact and produce a structured diff report.
|
||||
|
||||
### Execution Steps
|
||||
|
||||
1. **Parse Arguments**:
|
||||
- If user provides two file paths: Compare those files directly
|
||||
- If user provides one file path: Compare current version with git HEAD
|
||||
- If no arguments: Use `check-prerequisites.sh` to find current feature's spec.md and compare with HEAD
|
||||
|
||||
2. **Load Files**:
|
||||
|
||||
```bash
|
||||
# For git comparison
|
||||
git show HEAD:<relative-path> > /tmp/old_version.md
|
||||
```
|
||||
|
||||
- Read both versions into memory
|
||||
|
||||
3. **Semantic Diff Analysis**:
|
||||
Analyze changes by section:
|
||||
- **Added**: New sections, requirements, or criteria
|
||||
- **Removed**: Deleted content
|
||||
- **Modified**: Changed wording or values
|
||||
- **Moved**: Reorganized content (same meaning, different location)
|
||||
|
||||
4. **Generate Report**:
|
||||
|
||||
```markdown
|
||||
# Diff Report: [filename]
|
||||
|
||||
**Compared**: [version A] → [version B]
|
||||
**Date**: [timestamp]
|
||||
|
||||
## Summary
|
||||
|
||||
- X additions, Y removals, Z modifications
|
||||
|
||||
## Changes by Section
|
||||
|
||||
### [Section Name]
|
||||
|
||||
| Type | Content | Impact |
|
||||
| ---------- | ------------------ | ----------------- |
|
||||
| + Added | [new text] | [what this means] |
|
||||
| - Removed | [old text] | [what this means] |
|
||||
| ~ Modified | [before] → [after] | [what this means] |
|
||||
|
||||
## Risk Assessment
|
||||
|
||||
- Breaking changes: [list any]
|
||||
- Scope changes: [list any]
|
||||
```
|
||||
|
||||
5. **Output**:
|
||||
- Display report in terminal (do NOT write to file unless requested)
|
||||
- Offer to save report to `FEATURE_DIR/diffs/[timestamp].md`
|
||||
|
||||
## Operating Principles
|
||||
|
||||
- **Be Precise**: Quote exact text changes
|
||||
- **Highlight Impact**: Explain what each change means for implementation
|
||||
- **Flag Breaking Changes**: Any change that invalidates existing work
|
||||
- **Ignore Whitespace**: Focus on semantic changes, not formatting
|
||||
|
||||
---
|
||||
|
||||
## LCBP3-DMS Context (MUST LOAD)
|
||||
|
||||
Before executing, load **[../_LCBP3-CONTEXT.md](../_LCBP3-CONTEXT.md)** to get:
|
||||
|
||||
- Canonical rule sources (AGENTS.md, specs/06-Decision-Records/, specs/05-Engineering-Guidelines/)
|
||||
- Tier 1 non-negotiables (ADR-019 UUID, ADR-009 schema, ADR-016 security, ADR-002 numbering, ADR-008 BullMQ, ADR-018/020 AI boundary, ADR-007 errors)
|
||||
- Domain glossary (Correspondence / RFA / Transmittal / Circulation)
|
||||
- Helper script real paths
|
||||
- Commit checklist
|
||||
@@ -0,0 +1,260 @@
|
||||
---
|
||||
name: speckit-implement
|
||||
description: Execute the implementation plan by processing and executing all tasks defined in tasks.md (with Ironclad Anti-Regression Protocols)
|
||||
version: 1.8.9
|
||||
depends-on:
|
||||
- speckit-tasks
|
||||
---
|
||||
|
||||
## User Input
|
||||
|
||||
```text
|
||||
$ARGUMENTS
|
||||
```
|
||||
|
||||
You **MUST** consider the user input before proceeding (if not empty).
|
||||
|
||||
## Role
|
||||
|
||||
You are the **Antigravity Master Builder**. Your role is to execute the implementation plan with precision, processing tasks from `tasks.md` and ensuring that the final codebase aligns perfectly with the specification, plan, and quality standards.
|
||||
|
||||
**CORE OBJECTIVE:** Fix bugs and implement features with **ZERO REGRESSION**.
|
||||
**YOUR MOTTO:** "Measure twice, cut once. If you can't prove it's broken, don't fix it."
|
||||
|
||||
---
|
||||
|
||||
## 🛡️ IRONCLAD PROTOCOLS (Non-Negotiable)
|
||||
|
||||
These protocols MUST be followed for EVERY task before any production code modification:
|
||||
|
||||
### Protocol 1: Blast Radius Analysis
|
||||
|
||||
**BEFORE** writing a single line of production code modification, you MUST:
|
||||
|
||||
1. **Read**: Read the target file(s) to understand current implementation.
|
||||
2. **Trace**: Use `grep` or search tools to find ALL other files importing or using the function/class you intend to modify.
|
||||
3. **Report**: Output a precise list:
|
||||
```
|
||||
🔍 BLAST RADIUS ANALYSIS
|
||||
─────────────────────────
|
||||
Modifying: `[Function/Class X]` in `[file.ts]`
|
||||
Affected files: [A.ts, B.ts, C.ts]
|
||||
Risk Level: [LOW (<3 files) | MEDIUM (3-5 files) | HIGH (>5 files)]
|
||||
```
|
||||
4. **Decide**: If > 2 files are affected, **DO NOT MODIFY inline**. Trigger **Protocol 2 (Strangler Pattern)**.
|
||||
|
||||
### Protocol 2: Strangler Pattern (Immutable Core)
|
||||
|
||||
If a file is critical, complex, or has high dependencies (>2 affected files):
|
||||
|
||||
1. **DO NOT EDIT** the existing function inside the old file.
|
||||
2. **CREATE** a new file/module (e.g., `feature_v2.ts` or `utils_patch.ts`).
|
||||
3. **IMPLEMENT** the improved logic there.
|
||||
4. **SWITCH** the imports in the consuming files one by one.
|
||||
5. **ANNOUNCE**: "Applying Strangler Pattern to avoid regression."
|
||||
|
||||
_Benefit: If it breaks, we simply revert the import, not the whole logic._
|
||||
|
||||
### Protocol 3: Reproduction Script First (TDD)
|
||||
|
||||
You are **FORBIDDEN** from fixing a bug or implementing a feature without evidence:
|
||||
|
||||
1. Create a temporary script `repro_task_[id].ts` (or .js/.py/.go based on stack).
|
||||
2. This script MUST:
|
||||
- For bugs: **FAIL** when run against the current code (demonstrating the bug).
|
||||
- For features: **FAIL** when run against current code (feature doesn't exist).
|
||||
3. Run it and show the failure output.
|
||||
4. **ONLY THEN**, implement the fix/feature.
|
||||
5. Run the script again to prove it passes.
|
||||
6. Delete the temporary script OR convert it to a permanent test.
|
||||
|
||||
### Protocol 4: Context Anchoring
|
||||
|
||||
At the start of execution and after every 3 modifications:
|
||||
|
||||
1. Run `tree -L 2` (or equivalent) to visualize the file structure.
|
||||
2. Update `ARCHITECTURE.md` if it exists, or create it to reflect the current reality.
|
||||
|
||||
---
|
||||
|
||||
## Task Execution
|
||||
|
||||
### Outline
|
||||
|
||||
1. Run `../scripts/bash/check-prerequisites.sh --json --require-tasks --include-tasks` from repo root and parse FEATURE_DIR and AVAILABLE_DOCS list. All paths must be absolute. For single quotes in args like "I'm Groot", use escape syntax: e.g. 'I'\''m Groot' (or double-quote if possible: "I'm Groot").
|
||||
|
||||
2. **Check checklists status** (if FEATURE_DIR/checklists/ exists):
|
||||
- Scan all checklist files in the checklists/ directory
|
||||
- For each checklist, count:
|
||||
- Total items: All lines matching `- [ ]` or `- [X]` or `- [x]`
|
||||
- Completed items: Lines matching `- [X]` or `- [x]`
|
||||
- Incomplete items: Lines matching `- [ ]`
|
||||
- Create a status table:
|
||||
|
||||
```text
|
||||
| Checklist | Total | Completed | Incomplete | Status |
|
||||
|-----------|-------|-----------|------------|--------|
|
||||
| ux.md | 12 | 12 | 0 | ✓ PASS |
|
||||
| test.md | 8 | 5 | 3 | ✗ FAIL |
|
||||
| security.md | 6 | 6 | 0 | ✓ PASS |
|
||||
```
|
||||
|
||||
- Calculate overall status:
|
||||
- **PASS**: All checklists have 0 incomplete items
|
||||
- **FAIL**: One or more checklists have incomplete items
|
||||
|
||||
- **If any checklist is incomplete**:
|
||||
- Display the table with incomplete item counts
|
||||
- **STOP** and ask: "Some checklists are incomplete. Do you want to proceed with implementation anyway? (yes/no)"
|
||||
- Wait for user response before continuing
|
||||
- If user says "no" or "wait" or "stop", halt execution
|
||||
- If user says "yes" or "proceed" or "continue", proceed to step 3
|
||||
|
||||
- **If all checklists are complete**:
|
||||
- Display the table showing all checklists passed
|
||||
- Automatically proceed to step 3
|
||||
|
||||
3. Load and analyze the implementation context:
|
||||
- **REQUIRED**: Read tasks.md for the complete task list and execution plan
|
||||
- **REQUIRED**: Read plan.md for tech stack, architecture, and file structure
|
||||
- **IF EXISTS**: Read data-model.md for entities and relationships
|
||||
- **IF EXISTS**: Read contracts/ for API specifications and test requirements
|
||||
- **IF EXISTS**: Read research.md for technical decisions and constraints
|
||||
- **IF EXISTS**: Read quickstart.md for integration scenarios
|
||||
|
||||
4. **Context Anchoring (Protocol 4)**:
|
||||
- Run `tree -L 2` to visualize the current file structure
|
||||
- Document the initial state before any modifications
|
||||
|
||||
5. **Project Setup Verification**:
|
||||
- **REQUIRED**: Create/verify ignore files based on actual project setup:
|
||||
|
||||
**Detection & Creation Logic**:
|
||||
- Check if the following command succeeds to determine if the repository is a git repo (create/verify .gitignore if so):
|
||||
|
||||
```sh
|
||||
git rev-parse --git-dir 2>/dev/null
|
||||
```
|
||||
|
||||
- Check if Dockerfile\* exists or Docker in plan.md → create/verify .dockerignore
|
||||
- Check if .eslintrc\* exists → create/verify .eslintignore
|
||||
- Check if eslint.config.\* exists → ensure the config's `ignores` entries cover required patterns
|
||||
- Check if .prettierrc\* exists → create/verify .prettierignore
|
||||
- Check if .npmrc or package.json exists → create/verify .npmignore (if publishing)
|
||||
- Check if terraform files (\*.tf) exist → create/verify .terraformignore
|
||||
- Check if .helmignore needed (helm charts present) → create/verify .helmignore
|
||||
|
||||
**If ignore file already exists**: Verify it contains essential patterns, append missing critical patterns only
|
||||
**If ignore file missing**: Create with full pattern set for detected technology
|
||||
|
||||
**Common Patterns by Technology** (from plan.md tech stack):
|
||||
- **Node.js/JavaScript/TypeScript**: `node_modules/`, `dist/`, `build/`, `*.log`, `.env*`
|
||||
- **Python**: `__pycache__/`, `*.pyc`, `.venv/`, `venv/`, `dist/`, `*.egg-info/`
|
||||
- **Java**: `target/`, `*.class`, `*.jar`, `.gradle/`, `build/`
|
||||
- **C#/.NET**: `bin/`, `obj/`, `*.user`, `*.suo`, `packages/`
|
||||
- **Go**: `*.exe`, `*.test`, `vendor/`, `*.out`
|
||||
- **Ruby**: `.bundle/`, `log/`, `tmp/`, `*.gem`, `vendor/bundle/`
|
||||
- **PHP**: `vendor/`, `*.log`, `*.cache`, `*.env`
|
||||
- **Rust**: `target/`, `debug/`, `release/`, `*.rs.bk`, `*.rlib`, `*.prof*`, `.idea/`, `*.log`, `.env*`
|
||||
- **Kotlin**: `build/`, `out/`, `.gradle/`, `.idea/`, `*.class`, `*.jar`, `*.iml`, `*.log`, `.env*`
|
||||
- **C++**: `build/`, `bin/`, `obj/`, `out/`, `*.o`, `*.so`, `*.a`, `*.exe`, `*.dll`, `.idea/`, `*.log`, `.env*`
|
||||
- **C**: `build/`, `bin/`, `obj/`, `out/`, `*.o`, `*.a`, `*.so`, `*.exe`, `Makefile`, `config.log`, `.idea/`, `*.log`, `.env*`
|
||||
- **Swift**: `.build/`, `DerivedData/`, `*.swiftpm/`, `Packages/`
|
||||
- **R**: `.Rproj.user/`, `.Rhistory`, `.RData`, `.Ruserdata`, `*.Rproj`, `packrat/`, `renv/`
|
||||
- **Universal**: `.DS_Store`, `Thumbs.db`, `*.tmp`, `*.swp`, `.vscode/`, `.idea/`
|
||||
|
||||
**Tool-Specific Patterns**:
|
||||
- **Docker**: `node_modules/`, `.git/`, `Dockerfile*`, `.dockerignore`, `*.log*`, `.env*`, `coverage/`
|
||||
- **ESLint**: `node_modules/`, `dist/`, `build/`, `coverage/`, `*.min.js`
|
||||
- **Prettier**: `node_modules/`, `dist/`, `build/`, `coverage/`, `package-lock.json`, `yarn.lock`, `pnpm-lock.yaml`
|
||||
- **Terraform**: `.terraform/`, `*.tfstate*`, `*.tfvars`, `.terraform.lock.hcl`
|
||||
- **Kubernetes/k8s**: `*.secret.yaml`, `secrets/`, `.kube/`, `kubeconfig*`, `*.key`, `*.crt`
|
||||
|
||||
6. Parse tasks.md structure and extract:
|
||||
- **Task phases**: Setup, Tests, Core, Integration, Polish
|
||||
- **Task dependencies**: Sequential vs parallel execution rules
|
||||
- **Task details**: ID, description, file paths, parallel markers [P]
|
||||
- **Execution flow**: Order and dependency requirements
|
||||
|
||||
7. **Execute implementation following the task plan with Ironclad Protocols**:
|
||||
|
||||
**For EACH task**, follow this sequence:
|
||||
|
||||
a. **Blast Radius Analysis (Protocol 1)**:
|
||||
- Identify all files that will be modified
|
||||
- Run `grep` to find all dependents
|
||||
- Report the blast radius
|
||||
|
||||
b. **Strategy Decision**:
|
||||
- If LOW risk (≤2 affected files): Proceed with inline modification
|
||||
- If MEDIUM/HIGH risk (>2 files): Apply Strangler Pattern (Protocol 2)
|
||||
|
||||
c. **Reproduction Script (Protocol 3)**:
|
||||
- Create `repro_task_[ID].ts` that demonstrates expected behavior
|
||||
- Run it to confirm the current state (it should fail: for a bug it demonstrates the defect, for a new feature it shows the behavior does not yet exist)
|
||||
|
||||
d. **Implementation**:
|
||||
- Execute the task according to plan
|
||||
- **Phase-by-phase execution**: Complete each phase before moving to the next
|
||||
- **Respect dependencies**: Run sequential tasks in order, parallel tasks [P] can run together
|
||||
- **Follow TDD approach**: Execute test tasks before their corresponding implementation tasks
|
||||
- **File-based coordination**: Tasks affecting the same files must run sequentially
|
||||
|
||||
e. **Verification**:
|
||||
- Run the reproduction script again (should now pass)
|
||||
- Run existing tests to ensure no regression
|
||||
- If any test fails: **STOP** and report the regression
|
||||
|
||||
f. **Cleanup**:
|
||||
- Delete temporary repro scripts OR convert to permanent tests
|
||||
- Mark task as complete `[X]` in tasks.md
|
||||
|
||||
8. **Progress tracking and error handling**:
|
||||
- Report progress after each completed task with this format:
|
||||
```
|
||||
✅ TASK [ID] COMPLETE
|
||||
─────────────────────
|
||||
Modified files: [list]
|
||||
Tests passed: [count]
|
||||
Blast radius: [LOW/MEDIUM/HIGH]
|
||||
```
|
||||
- Halt execution if any non-parallel task fails
|
||||
- For parallel tasks [P], continue with successful tasks, report failed ones
|
||||
- Provide clear error messages with context for debugging
|
||||
- Suggest next steps if implementation cannot proceed
|
||||
- **IMPORTANT**: For completed tasks, make sure to mark the task off as `[X]` in the tasks file.
|
||||
|
||||
9. **Context Re-anchoring (every 3 tasks)**:
|
||||
- Run `tree -L 2` to verify file structure
|
||||
- Update ARCHITECTURE.md if structure has changed
|
||||
|
||||
10. **Completion validation**:
|
||||
- Verify all required tasks are completed
|
||||
- Check that implemented features match the original specification
|
||||
- Validate that tests pass and coverage meets requirements
|
||||
- Confirm the implementation follows the technical plan
|
||||
- Report final status with summary of completed work
|
||||
|
||||
---
|
||||
|
||||
## 🚫 Anti-Hallucination Rules
|
||||
|
||||
1. **No Magic Imports:** Never import a library or file without checking `ls` or `package.json` first.
|
||||
2. **Strict Diff-Only:** When modifying existing files, use minimal edits.
|
||||
3. **Stop & Ask:** If you find yourself editing more than 3 files for a "simple fix," **STOP**. You are likely cascading a regression. Ask for strategic guidance.
|
||||
|
||||
---
|
||||
|
||||
Note: This command assumes a complete task breakdown exists in tasks.md. If tasks are incomplete or missing, suggest running `/speckit-tasks` first to regenerate the task list.
|
||||
|
||||
---
|
||||
|
||||
## LCBP3-DMS Context (MUST LOAD)
|
||||
|
||||
Before executing, load **[../_LCBP3-CONTEXT.md](../_LCBP3-CONTEXT.md)** to get:
|
||||
|
||||
- Canonical rule sources (AGENTS.md, specs/06-Decision-Records/, specs/05-Engineering-Guidelines/)
|
||||
- Tier 1 non-negotiables (ADR-019 UUID, ADR-009 schema, ADR-016 security, ADR-002 numbering, ADR-008 BullMQ, ADR-018/020 AI boundary, ADR-007 errors)
|
||||
- Domain glossary (Correspondence / RFA / Transmittal / Circulation)
|
||||
- Helper script real paths
|
||||
- Commit checklist
|
||||
@@ -0,0 +1,130 @@
|
||||
---
|
||||
name: speckit-migrate
|
||||
description: Migrate existing projects into the speckit structure by generating spec.md, plan.md, and tasks.md from existing code.
|
||||
version: 1.8.9
|
||||
depends-on: []
|
||||
---
|
||||
|
||||
## User Input
|
||||
|
||||
```text
|
||||
$ARGUMENTS
|
||||
```
|
||||
|
||||
You **MUST** consider the user input before proceeding (if not empty).
|
||||
|
||||
## Role
|
||||
|
||||
You are the **Antigravity Migration Specialist**. Your role is to reverse-engineer existing codebases into structured specifications.
|
||||
|
||||
## Task
|
||||
|
||||
### Outline
|
||||
|
||||
Analyze an existing codebase and generate speckit artifacts (spec.md, plan.md, tasks.md) that document what currently exists.
|
||||
|
||||
### Execution Steps
|
||||
|
||||
1. **Parse Arguments**:
|
||||
- `--path <dir>`: Directory to analyze (default: current repo root)
|
||||
- `--feature <name>`: Feature name for output directory
|
||||
- `--depth <n>`: Analysis depth (1=overview, 2=detailed, 3=exhaustive)
|
||||
|
||||
2. **Codebase Discovery**:
|
||||
|
||||
```bash
|
||||
# Get project structure
|
||||
tree -L 3 --dirsfirst -I 'node_modules|.git|dist|build' > /tmp/structure.txt
|
||||
|
||||
# Find key files
|
||||
find . -name "*.md" -o -name "package.json" -o -name "*.config.*" | head -50
|
||||
```
|
||||
|
||||
3. **Analyze Architecture**:
|
||||
- Identify framework/stack from config files
|
||||
- Map directory structure to components
|
||||
- Find entry points (main, index, app)
|
||||
- Identify data models/entities
|
||||
- Map API endpoints (if applicable)
|
||||
|
||||
4. **Generate spec.md** (reverse-engineered):
|
||||
|
||||
```markdown
|
||||
# [Feature Name] - Specification (Migrated)
|
||||
|
||||
> This specification was auto-generated from existing code.
|
||||
> Review and refine before using for future development.
|
||||
|
||||
## Overview
|
||||
|
||||
[Inferred from README, comments, and code structure]
|
||||
|
||||
## Functional Requirements
|
||||
|
||||
[Extracted from existing functionality]
|
||||
|
||||
## Key Entities
|
||||
|
||||
[From data models, schemas, types]
|
||||
```
|
||||
|
||||
5. **Generate plan.md** (reverse-engineered):
|
||||
|
||||
```markdown
|
||||
# [Feature Name] - Technical Plan (Migrated)
|
||||
|
||||
## Current Architecture
|
||||
|
||||
[Documented from codebase analysis]
|
||||
|
||||
## Technology Stack
|
||||
|
||||
[From package.json, imports, configs]
|
||||
|
||||
## Component Map
|
||||
|
||||
[Directory → responsibility mapping]
|
||||
```
|
||||
|
||||
6. **Generate tasks.md** (completion status):
|
||||
|
||||
```markdown
|
||||
# [Feature Name] - Tasks (Migrated)
|
||||
|
||||
All tasks marked [x] represent existing implemented functionality.
|
||||
Tasks marked [ ] are inferred gaps or TODOs found in code.
|
||||
|
||||
## Existing Implementation
|
||||
|
||||
- [x] [Component A] - Implemented in `src/componentA/`
|
||||
- [x] [Component B] - Implemented in `src/componentB/`
|
||||
|
||||
## Identified Gaps
|
||||
|
||||
- [ ] [Missing tests for X]
|
||||
- [ ] [TODO comment at Y]
|
||||
```
|
||||
|
||||
7. **Output**:
|
||||
- Create feature directory: `.specify/features/[feature-name]/`
|
||||
- Write all three files
|
||||
- Report summary with confidence scores
|
||||
|
||||
## Operating Principles
|
||||
|
||||
- **Don't Invent**: Only document what exists, mark uncertainties as [INFERRED]
|
||||
- **Preserve Intent**: Use code comments and naming to understand purpose
|
||||
- **Flag TODOs**: Any TODO/FIXME/HACK in code becomes an open task
|
||||
- **Be Conservative**: When unsure, ask rather than assume
|
||||
|
||||
---
|
||||
|
||||
## LCBP3-DMS Context (MUST LOAD)
|
||||
|
||||
Before executing, load **[../_LCBP3-CONTEXT.md](../_LCBP3-CONTEXT.md)** to get:
|
||||
|
||||
- Canonical rule sources (AGENTS.md, specs/06-Decision-Records/, specs/05-Engineering-Guidelines/)
|
||||
- Tier 1 non-negotiables (ADR-019 UUID, ADR-009 schema, ADR-016 security, ADR-002 numbering, ADR-008 BullMQ, ADR-018/020 AI boundary, ADR-007 errors)
|
||||
- Domain glossary (Correspondence / RFA / Transmittal / Circulation)
|
||||
- Helper script real paths
|
||||
- Commit checklist
|
||||
@@ -0,0 +1,111 @@
|
||||
---
|
||||
name: speckit-plan
|
||||
description: Execute the implementation planning workflow using the plan template to generate design artifacts.
|
||||
version: 1.8.9
|
||||
depends-on:
|
||||
- speckit-specify
|
||||
handoffs:
|
||||
- label: Create Tasks
|
||||
agent: speckit-tasks
|
||||
prompt: Break the plan into tasks
|
||||
send: true
|
||||
- label: Create Checklist
|
||||
agent: speckit-checklist
|
||||
prompt: Create a checklist for the following domain...
|
||||
---
|
||||
|
||||
## User Input
|
||||
|
||||
```text
|
||||
$ARGUMENTS
|
||||
```
|
||||
|
||||
You **MUST** consider the user input before proceeding (if not empty).
|
||||
|
||||
## Role
|
||||
|
||||
You are the **Antigravity System Architect**. Your role is to bridge the gap between functional specifications and technical implementation. You design data models, define API contracts, and perform technical research to ensure a robust and scalable architecture.
|
||||
|
||||
## Task
|
||||
|
||||
### Outline
|
||||
|
||||
1. **Setup**: Run `../scripts/bash/setup-plan.sh --json` from repo root and parse JSON for FEATURE_SPEC, IMPL_PLAN, SPECS_DIR, BRANCH. For single quotes in args like "I'm Groot", use escape syntax: e.g. 'I'\''m Groot' (or double-quote if possible: "I'm Groot").
|
||||
|
||||
2. **Load context**: Read FEATURE_SPEC and `AGENTS.md`. Load IMPL_PLAN template from `templates/plan-template.md`.
|
||||
|
||||
3. **Execute plan workflow**: Follow the structure in IMPL_PLAN template to:
|
||||
- Fill Technical Context (mark unknowns as "NEEDS CLARIFICATION")
|
||||
- Fill Constitution Check section from constitution
|
||||
- Evaluate gates (ERROR if violations unjustified)
|
||||
- Phase 0: Generate research.md (resolve all NEEDS CLARIFICATION)
|
||||
- Phase 1: Generate data-model.md, contracts/, quickstart.md
|
||||
- Phase 1: Update agent context by running the agent script
|
||||
- Re-evaluate Constitution Check post-design
|
||||
|
||||
4. **Stop and report**: Command ends after Phase 2 planning. Report branch, IMPL_PLAN path, and generated artifacts.
|
||||
|
||||
## Phases
|
||||
|
||||
### Phase 0: Outline & Research
|
||||
|
||||
1. **Extract unknowns from Technical Context** above:
|
||||
- For each NEEDS CLARIFICATION → research task
|
||||
- For each dependency → best practices task
|
||||
- For each integration → patterns task
|
||||
|
||||
2. **Generate and dispatch research agents**:
|
||||
|
||||
```text
|
||||
For each unknown in Technical Context:
|
||||
Task: "Research {unknown} for {feature context}"
|
||||
For each technology choice:
|
||||
Task: "Find best practices for {tech} in {domain}"
|
||||
```
|
||||
|
||||
3. **Consolidate findings** in `research.md` using format:
|
||||
- Decision: [what was chosen]
|
||||
- Rationale: [why chosen]
|
||||
- Alternatives considered: [what else evaluated]
|
||||
|
||||
**Output**: research.md with all NEEDS CLARIFICATION resolved
|
||||
|
||||
### Phase 1: Design & Contracts
|
||||
|
||||
**Prerequisites:** `research.md` complete
|
||||
|
||||
1. **Extract entities from feature spec** → `data-model.md`:
|
||||
- Entity name, fields, relationships
|
||||
- Validation rules from requirements
|
||||
- State transitions if applicable
|
||||
|
||||
2. **Generate API contracts** from functional requirements:
|
||||
- For each user action → endpoint
|
||||
- Use standard REST/GraphQL patterns
|
||||
- Output OpenAPI/GraphQL schema to `/contracts/`
|
||||
|
||||
3. **Agent context update**:
|
||||
- Run `../scripts/bash/update-agent-context.sh windsurf`
|
||||
- These scripts detect which AI agent is in use
|
||||
- Update the appropriate agent-specific context file
|
||||
- Add only new technology from current plan
|
||||
- Preserve manual additions between markers
|
||||
|
||||
**Output**: data-model.md, /contracts/\*, quickstart.md, agent-specific file
|
||||
|
||||
## Key rules
|
||||
|
||||
- Use absolute paths
|
||||
- ERROR on gate failures or unresolved clarifications
|
||||
|
||||
---
|
||||
|
||||
## LCBP3-DMS Context (MUST LOAD)
|
||||
|
||||
Before executing, load **[../_LCBP3-CONTEXT.md](../_LCBP3-CONTEXT.md)** to get:
|
||||
|
||||
- Canonical rule sources (AGENTS.md, specs/06-Decision-Records/, specs/05-Engineering-Guidelines/)
|
||||
- Tier 1 non-negotiables (ADR-019 UUID, ADR-009 schema, ADR-016 security, ADR-002 numbering, ADR-008 BullMQ, ADR-018/020 AI boundary, ADR-007 errors)
|
||||
- Domain glossary (Correspondence / RFA / Transmittal / Circulation)
|
||||
- Helper script real paths
|
||||
- Commit checklist
|
||||
@@ -0,0 +1,28 @@
|
||||
# [PROJECT NAME] Development Guidelines
|
||||
|
||||
Auto-generated from all feature plans. Last updated: [DATE]
|
||||
|
||||
## Active Technologies
|
||||
|
||||
[EXTRACTED FROM ALL PLAN.MD FILES]
|
||||
|
||||
## Project Structure
|
||||
|
||||
```text
|
||||
[ACTUAL STRUCTURE FROM PLANS]
|
||||
```
|
||||
|
||||
## Commands
|
||||
|
||||
[ONLY COMMANDS FOR ACTIVE TECHNOLOGIES]
|
||||
|
||||
## Code Style
|
||||
|
||||
[LANGUAGE-SPECIFIC, ONLY FOR LANGUAGES IN USE]
|
||||
|
||||
## Recent Changes
|
||||
|
||||
[LAST 3 FEATURES AND WHAT THEY ADDED]
|
||||
|
||||
<!-- MANUAL ADDITIONS START -->
|
||||
<!-- MANUAL ADDITIONS END -->
|
||||
+7
-7
@@ -3,7 +3,7 @@
|
||||
**Branch**: `[###-feature-name]` | **Date**: [DATE] | **Spec**: [link]
|
||||
**Input**: Feature specification from `/specs/[###-feature-name]/spec.md`
|
||||
|
||||
**Note**: This template is filled in by the `/speckit.plan` command. See `.specify/templates/commands/plan.md` for the execution workflow.
|
||||
**Note**: This template is filled in by the `/speckit-plan` command. See `.agents/skills/plan.md` for the execution workflow.
|
||||
|
||||
## Summary
|
||||
|
||||
@@ -39,12 +39,12 @@ _GATE: Must pass before Phase 0 research. Re-check after Phase 1 design._
|
||||
|
||||
```text
|
||||
specs/[###-feature]/
|
||||
├── plan.md # This file (/speckit.plan command output)
|
||||
├── research.md # Phase 0 output (/speckit.plan command)
|
||||
├── data-model.md # Phase 1 output (/speckit.plan command)
|
||||
├── quickstart.md # Phase 1 output (/speckit.plan command)
|
||||
├── contracts/ # Phase 1 output (/speckit.plan command)
|
||||
└── tasks.md # Phase 2 output (/speckit.tasks command - NOT created by /speckit.plan)
|
||||
├── plan.md # This file (/speckit-plan command output)
|
||||
├── research.md # Phase 0 output (/speckit-plan command)
|
||||
├── data-model.md # Phase 1 output (/speckit-plan command)
|
||||
├── quickstart.md # Phase 1 output (/speckit-plan command)
|
||||
├── contracts/ # Phase 1 output (/speckit-plan command)
|
||||
└── tasks.md # Phase 2 output (/speckit-tasks command - NOT created by /speckit-plan)
|
||||
```
|
||||
|
||||
### Source Code (repository root)
|
||||
@@ -0,0 +1,79 @@
|
||||
---
|
||||
name: speckit-quizme
|
||||
description: Challenge the specification with Socratic questioning to identify logical gaps, unhandled edge cases, and robustness issues.
|
||||
version: 1.8.9
|
||||
handoffs:
|
||||
- label: Clarify Spec Requirements
|
||||
agent: speckit-clarify
|
||||
prompt: Clarify specification requirements
|
||||
---
|
||||
|
||||
## User Input
|
||||
|
||||
```text
|
||||
$ARGUMENTS
|
||||
```
|
||||
|
||||
You **MUST** consider the user input before proceeding (if not empty).
|
||||
|
||||
## Role
|
||||
|
||||
You are the **Antigravity Red Teamer**. Your role is to play the "Socratic Teacher" and challenge specifications for logical fallacies, naive assumptions, and happy-path bias. You find the edge cases that others miss and force robustness into the design.
|
||||
|
||||
## Task
|
||||
|
||||
### Outline
|
||||
|
||||
Goal: Act as a "Red Team" or "Socratic Teacher" to challenge the current feature specification. Unlike `speckit-clarify` (which looks for missing definitions), `speckit-quizme` looks for logical fallacies, race conditions, naive assumptions, and "happy path" bias.
|
||||
|
||||
Execution steps:
|
||||
|
||||
1. **Setup**: Run `../scripts/bash/check-prerequisites.sh --json` from repo root and parse FEATURE_DIR.
|
||||
|
||||
2. **Load Spec**: Read `spec.md` and `plan.md` (if exists).
|
||||
|
||||
3. **Analyze for Weaknesses** (Internal Thought Process):
|
||||
- Identify "Happy Path" assumptions (e.g., "User clicks button and saves").
|
||||
- Look for temporal/state gaps (e.g., "What if the user clicks twice?", "What if the network fails mid-save?").
|
||||
- Challenge business logic (e.g., "You allow deleting users, but what happens to their data?").
|
||||
- Challenge security (e.g., "You rely on client-side validation here, but what if I curl the API?").
|
||||
|
||||
4. **The Quiz Loop**:
|
||||
- Present 3-5 challenging scenarios _one by one_.
|
||||
- Format:
|
||||
|
||||
> **Scenario**: [Describe a plausible edge case or failure]
|
||||
> **Current Spec**: [Quote where the spec implies behavior or is silent]
|
||||
> **The Quiz**: What should the system do here?
|
||||
|
||||
- Wait for user answer.
|
||||
- Critique the answer:
|
||||
- If user says "It errors", ask "What error? To whom? Logged where?"
|
||||
- If user says "It shouldn't happen", ask "How do you prevent it?"
|
||||
|
||||
5. **Capture & Refine**:
|
||||
- For each resolved scenario, generate a new requirement or edge case bullet.
|
||||
- Ask user for permission to add it to `spec.md`.
|
||||
- On approval, append to `Edge Cases` or `Requirements` section.
|
||||
|
||||
6. **Completion**:
|
||||
- Report number of scenarios covered.
|
||||
- List new requirements added.
|
||||
|
||||
## Operating Principles
|
||||
|
||||
- **Be a Skeptic**: Don't assume the happy path works.
|
||||
- **Focus on "When" and "If"**: When high load, If network drops, When concurrent edits.
|
||||
- **Don't be annoying**: Focus on _critical_ flaws, not nitpicks.
|
||||
|
||||
---
|
||||
|
||||
## LCBP3-DMS Context (MUST LOAD)
|
||||
|
||||
Before executing, load **[../_LCBP3-CONTEXT.md](../_LCBP3-CONTEXT.md)** to get:
|
||||
|
||||
- Canonical rule sources (AGENTS.md, specs/06-Decision-Records/, specs/05-Engineering-Guidelines/)
|
||||
- Tier 1 non-negotiables (ADR-019 UUID, ADR-009 schema, ADR-016 security, ADR-002 numbering, ADR-008 BullMQ, ADR-018/020 AI boundary, ADR-007 errors)
|
||||
- Domain glossary (Correspondence / RFA / Transmittal / Circulation)
|
||||
- Helper script real paths
|
||||
- Commit checklist
|
||||
@@ -0,0 +1,156 @@
|
||||
---
|
||||
name: speckit-reviewer
|
||||
description: Perform code review with actionable feedback and suggestions.
|
||||
version: 1.8.9
|
||||
depends-on: []
|
||||
---
|
||||
|
||||
## User Input
|
||||
|
||||
```text
|
||||
$ARGUMENTS
|
||||
```
|
||||
|
||||
You **MUST** consider the user input before proceeding (if not empty).
|
||||
|
||||
## Role
|
||||
|
||||
You are the **Antigravity Code Reviewer**. Your role is to perform thorough code reviews, identify issues, and provide constructive, actionable feedback.
|
||||
|
||||
## Task
|
||||
|
||||
### Outline
|
||||
|
||||
Review code changes and provide structured feedback with severity levels.
|
||||
|
||||
### Execution Steps
|
||||
|
||||
1. **Determine Review Scope**:
|
||||
- If user provides file paths: Review those files
|
||||
- If user says "staged" or no args: Review git staged changes
|
||||
- If user says "branch": Compare current branch to main/master
|
||||
|
||||
```bash
|
||||
# Get staged changes
|
||||
git diff --cached --name-only
|
||||
|
||||
# Get branch changes
|
||||
git diff main...HEAD --name-only
|
||||
```
|
||||
|
||||
2. **Load Files for Review**:
|
||||
- Read each file in scope
|
||||
- For diffs, focus on changed lines with context
|
||||
|
||||
3. **Review Categories**:
|
||||
|
||||
| Category | What to Check |
|
||||
| ------------------- | -------------------------------------------- |
|
||||
| **Correctness** | Logic errors, off-by-one, null handling |
|
||||
| **Security** | SQL injection, XSS, secrets in code |
|
||||
| **Performance** | N+1 queries, unnecessary loops, memory leaks |
|
||||
| **Maintainability** | Complexity, duplication, naming |
|
||||
| **Best Practices** | Error handling, logging, typing |
|
||||
| **Style** | Consistency, formatting (if no linter) |
|
||||
|
||||
4. **Analyze Each File**:
|
||||
For each file, check:
|
||||
- Does the code do what it claims?
|
||||
- Are edge cases handled?
|
||||
- Is error handling appropriate?
|
||||
- Are there security concerns?
|
||||
- Is the code testable?
|
||||
- Is the naming clear and consistent?
|
||||
|
||||
5. **Severity Levels**:
|
||||
|
||||
| Level | Meaning | Block Merge? |
|
||||
| ------------- | ------------------------------ | ------------ |
|
||||
| 🔴 CRITICAL | Security issue, data loss risk | Yes |
|
||||
| 🟠 HIGH | Bug, logic error | Yes |
|
||||
| 🟡 MEDIUM | Code smell, maintainability | Maybe |
|
||||
| 🟢 LOW | Style, minor improvement | No |
|
||||
| 💡 SUGGESTION | Nice-to-have, optional | No |
|
||||
|
||||
6. **Generate Review Report**:
|
||||
|
||||
````markdown
|
||||
# Code Review Report
|
||||
|
||||
**Date**: [timestamp]
|
||||
**Scope**: [files reviewed]
|
||||
**Overall**: APPROVE | REQUEST CHANGES | NEEDS DISCUSSION
|
||||
|
||||
## Summary
|
||||
|
||||
| Severity | Count |
|
||||
| -------------- | ----- |
|
||||
| 🔴 Critical | X |
|
||||
| 🟠 High | X |
|
||||
| 🟡 Medium | X |
|
||||
| 🟢 Low | X |
|
||||
| 💡 Suggestions | X |
|
||||
|
||||
## Findings
|
||||
|
||||
### 🔴 CRITICAL: SQL Injection Risk
|
||||
|
||||
**File**: `src/db/queries.ts:45`
|
||||
**Code**:
|
||||
|
||||
```typescript
|
||||
const query = `SELECT * FROM users WHERE id = ${userId}`;
|
||||
```

**Issue**: User input directly concatenated into SQL query
**Fix**: Use parameterized queries:

```typescript
const query = 'SELECT * FROM users WHERE id = $1';
await db.query(query, [userId]);
```

### 🟡 MEDIUM: Complex Function

**File**: `src/auth/handler.ts:120`
**Issue**: Function has cyclomatic complexity of 15
**Suggestion**: Extract into smaller functions

## What's Good

- Clear naming conventions
- Good test coverage
- Proper TypeScript types

## Recommended Actions

1. **Must fix before merge**: [critical/high items]
2. **Should address**: [medium items]
3. **Consider for later**: [low/suggestions]
````
|
||||
|
||||
7. **Output**:
|
||||
- Display report
|
||||
- If CRITICAL or HIGH issues: Recommend blocking merge
|
||||
|
||||
## Operating Principles
|
||||
|
||||
- **Be Constructive**: Every criticism should have a fix suggestion
|
||||
- **Be Specific**: Quote exact code, provide exact line numbers
|
||||
- **Be Balanced**: Mention what's good, not just what's wrong
|
||||
- **Prioritize**: Focus on real issues, not style nitpicks
|
||||
- **Be Educational**: Explain WHY something is an issue
|
||||
|
||||
---
|
||||
|
||||
## LCBP3-DMS Context (MUST LOAD)
|
||||
|
||||
Before executing, load **[../_LCBP3-CONTEXT.md](../_LCBP3-CONTEXT.md)** to get:
|
||||
|
||||
- Canonical rule sources (AGENTS.md, specs/06-Decision-Records/, specs/05-Engineering-Guidelines/)
|
||||
- Tier 1 non-negotiables (ADR-019 UUID, ADR-009 schema, ADR-016 security, ADR-002 numbering, ADR-008 BullMQ, ADR-018/020 AI boundary, ADR-007 errors)
|
||||
- Domain glossary (Correspondence / RFA / Transmittal / Circulation)
|
||||
- Helper script real paths
|
||||
- Commit checklist
|
||||
@@ -0,0 +1,211 @@
|
||||
---
|
||||
name: speckit-security-audit
|
||||
description: Perform a security-focused audit of the codebase against OWASP Top 10, CASL authorization, and LCBP3-DMS security requirements.
|
||||
version: 1.8.9
|
||||
depends-on:
|
||||
- speckit-checker
|
||||
---
|
||||
|
||||
## Role
|
||||
|
||||
You are the **Antigravity Security Sentinel**. Your mission is to identify security vulnerabilities, authorization gaps, and compliance issues specific to the LCBP3-DMS project before they reach production.
|
||||
|
||||
## Task
|
||||
|
||||
Perform a comprehensive security audit covering OWASP Top 10, CASL permission enforcement, file upload safety, and project-specific security rules defined in `specs/06-Decision-Records/ADR-016-security-authentication.md`.
|
||||
|
||||
## Context Loading
|
||||
|
||||
Before auditing, load the security context:
|
||||
|
||||
1. Read `specs/06-Decision-Records/ADR-016-security-authentication.md` for project security decisions
|
||||
2. Read `specs/05-Engineering-Guidelines/05-02-backend-guidelines.md` for backend security patterns
|
||||
3. Read `specs/03-Data-and-Storage/lcbp3-v1.8.0-seed-permissions.sql` for CASL permission definitions
|
||||
4. Read `AGENTS.md` for security rules (Section: Security Rules Non-Negotiable + Security & Integrity Audit Protocol)
|
||||
|
||||
## Execution Steps
|
||||
|
||||
### Phase 1: OWASP Top 10 Scan
|
||||
|
||||
Scan the `backend/src/` directory for each OWASP category:
|
||||
|
||||
| # | OWASP Category | What to Check | Files to Scan |
|
||||
| --- | ------------------------- | ---------------------------------------------------------------------------------------- | ------------------------------------------------- |
|
||||
| A01 | Broken Access Control | Missing `@UseGuards(JwtAuthGuard, CaslAbilityGuard)` on controllers, unprotected routes | `**/*.controller.ts` |
|
||||
| A02 | Cryptographic Failures | Hardcoded secrets, weak hashing, missing HTTPS enforcement | `**/*.ts`, `docker-compose*.yml` |
|
||||
| A03 | Injection | Raw SQL queries, unsanitized user input in TypeORM queries, template literals in queries | `**/*.service.ts`, `**/*.repository.ts` |
|
||||
| A04 | Insecure Design | Missing rate limiting on auth endpoints, no idempotency checks on mutations | `**/*.controller.ts`, `**/*.guard.ts` |
|
||||
| A05 | Security Misconfiguration | Missing Helmet.js, CORS misconfiguration, debug mode in production | `main.ts`, `app.module.ts`, `docker-compose*.yml` |
|
||||
| A06 | Vulnerable Components | Outdated dependencies with known CVEs | `package.json`, `pnpm-lock.yaml` |
|
||||
| A07 | Auth Failures | Missing brute-force protection, weak password policy, JWT misconfiguration | `auth/`, `**/*.strategy.ts` |
|
||||
| A08 | Data Integrity | Missing input validation, unvalidated file types, missing CSRF protection | `**/*.dto.ts`, `**/*.interceptor.ts` |
|
||||
| A09 | Logging Failures | Missing audit logs for security events, sensitive data in logs | `**/*.service.ts`, `**/*.interceptor.ts` |
|
||||
| A10 | SSRF | Unrestricted outbound requests, user-controlled URLs | `**/*.service.ts` |
|
||||
|
||||
### Phase 2: CASL Authorization Audit
|
||||
|
||||
1. **Load permission matrix** from `specs/03-Data-and-Storage/lcbp3-v1.8.0-seed-permissions.sql`
|
||||
2. **Scan all controllers** for `@UseGuards(CaslAbilityGuard)` coverage:
|
||||
|
||||
```bash
|
||||
# Find controllers without CASL guard
|
||||
grep -rL "CaslAbilityGuard" backend/src/modules/*/*.controller.ts
|
||||
```
|
||||
|
||||
3. **Verify 4-Level RBAC enforcement**:
|
||||
- Level 1: System Admin (full access)
|
||||
- Level 2: Project Admin (project-scoped)
|
||||
- Level 3: Department Lead (department-scoped)
|
||||
- Level 4: User (own-records only)
|
||||
|
||||
4. **Check ability definitions** — ensure every endpoint has:
|
||||
- `@CheckPolicies()` or `@Can()` decorator
|
||||
- Correct action (`read`, `create`, `update`, `delete`, `manage`)
|
||||
- Correct subject (entity class, not string)
|
||||
|
||||
5. **Cross-reference with routes** — verify:
|
||||
- No public endpoints that should be protected
|
||||
- No endpoints with broader permissions than required (principle of least privilege)
|
||||
- Query scoping: users can only query their own records (unless admin)
|
||||
|
||||
### Phase 3: File Upload Security (ClamAV)
|
||||
|
||||
Check LCBP3-DMS-specific file handling per ADR-016:
|
||||
|
||||
1. **Two-Phase Storage verification**:
|
||||
- Upload goes to temp directory first → scanned by ClamAV → moved to permanent
|
||||
- Check for direct writes to permanent storage (violation)
|
||||
|
||||
2. **ClamAV integration**:
|
||||
- Verify ClamAV service is configured in `docker-compose*.yml`
|
||||
- Check that file upload endpoints call ClamAV scan before commit
|
||||
- Verify rejection flow for infected files
|
||||
|
||||
3. **File type validation**:
|
||||
- Check allowed MIME types against whitelist
|
||||
- Verify file extension validation exists
|
||||
- Check for double-extension attacks (e.g., `file.pdf.exe`)
|
||||
|
||||
4. **File size limits**:
|
||||
- Verify upload size limits are enforced
|
||||
- Check for path traversal in filenames (`../`, `..\\`)
|
||||
|
||||
### Phase 4: LCBP3-DMS-Specific Checks
|
||||
|
||||
1. **Idempotency** — verify all POST/PUT/PATCH endpoints check `Idempotency-Key` header:
|
||||
|
||||
```bash
|
||||
# Find mutation endpoints without idempotency
|
||||
grep -rn "@Post\|@Put\|@Patch" backend/src/modules/*/*.controller.ts
|
||||
# Cross-reference with idempotency guard usage
|
||||
grep -rn "IdempotencyGuard\|Idempotency-Key" backend/src/
|
||||
```
|
||||
|
||||
2. **Optimistic Locking** — verify document entities use `@VersionColumn()`:
|
||||
|
||||
```bash
|
||||
grep -rn "VersionColumn" backend/src/modules/*/entities/*.entity.ts
|
||||
```
|
||||
|
||||
3. **Redis Redlock** — verify document numbering uses distributed locks:
|
||||
|
||||
```bash
|
||||
grep -rn "Redlock\|redlock\|acquireLock" backend/src/
|
||||
```
|
||||
|
||||
4. **Password Security** — verify bcrypt with 12+ salt rounds:
|
||||
|
||||
```bash
|
||||
grep -rn "bcrypt\|saltRounds\|genSalt" backend/src/
|
||||
```
|
||||
|
||||
5. **Rate Limiting** — verify throttle guard on auth endpoints:
|
||||
|
||||
```bash
|
||||
grep -rn "ThrottlerGuard\|@Throttle" backend/src/modules/auth/
|
||||
```
|
||||
|
||||
6. **Environment Variables** — ensure no `.env` files for production:
|
||||
- Check for `.env` files committed to git
|
||||
- Verify Docker compose uses `environment:` section, not `env_file:`
|
||||
|
||||
## Severity Classification
|
||||
|
||||
| Severity | Description | Response |
|
||||
| --------------- | ----------------------------------------------------- | ----------------------- |
|
||||
| 🔴 **Critical** | Exploitable vulnerability, data exposure, auth bypass | Immediate fix required |
|
||||
| 🟠 **High** | Missing security control, potential escalation path | Fix before next release |
|
||||
| 🟡 **Medium** | Best practice violation, defense-in-depth gap | Plan fix in sprint |
|
||||
| 🟢 **Low** | Informational, minor hardening opportunity | Track in backlog |
|
||||
|
||||
## Report Format
|
||||
|
||||
Generate a structured report:
|
||||
|
||||
```markdown
|
||||
# 🔒 Security Audit Report
|
||||
|
||||
**Date**: <date>
|
||||
**Scope**: <backend/frontend/both>
|
||||
**Auditor**: Antigravity Security Sentinel
|
||||
|
||||
## Summary
|
||||
|
||||
| Severity | Count |
|
||||
| ----------- | ----- |
|
||||
| 🔴 Critical | X |
|
||||
| 🟠 High | X |
|
||||
| 🟡 Medium | X |
|
||||
| 🟢 Low | X |
|
||||
|
||||
## Findings
|
||||
|
||||
### [SEV-001] <Title> — 🔴 Critical
|
||||
|
||||
**Category**: OWASP A01 / CASL / ClamAV / LCBP3-Specific
|
||||
**File**: `<path>:<line>`
|
||||
**Description**: <what is wrong>
|
||||
**Impact**: <what could happen>
|
||||
**Recommendation**: <how to fix>
|
||||
**Code Example**:
|
||||
\`\`\`typescript
|
||||
// Before (vulnerable)
|
||||
...
|
||||
// After (fixed)
|
||||
...
|
||||
\`\`\`
|
||||
|
||||
## CASL Coverage Matrix
|
||||
|
||||
| Module | Controller | Guard? | Policies? | Level |
|
||||
| ------ | --------------- | ------ | --------- | ------------ |
|
||||
| auth | AuthController | ✅ | ✅ | N/A (public) |
|
||||
| users | UsersController | ✅ | ✅ | L1-L4 |
|
||||
| ... | ... | ... | ... | ... |
|
||||
|
||||
## Recommendations Priority
|
||||
|
||||
1. <Critical fix 1>
|
||||
2. <Critical fix 2>
|
||||
...
|
||||
```
|
||||
|
||||
## Operating Principles
|
||||
|
||||
- **Read-Only**: This skill only reads and reports. Never modify code.
|
||||
- **Evidence-Based**: Every finding must include the exact file path and line number.
|
||||
- **No False Confidence**: If a check is inconclusive, mark it as "⚠️ Needs Manual Review" rather than passing.
|
||||
- **LCBP3-Specific**: Prioritize project-specific rules (idempotency, ClamAV, Redlock) over generic checks.
|
||||
- **Frontend Too**: If scope includes frontend, also check for XSS in React components, unescaped user data, and exposed API keys.
|
||||
|
||||
---
|
||||
|
||||
## LCBP3-DMS Context (MUST LOAD)
|
||||
|
||||
Before executing, load **[../_LCBP3-CONTEXT.md](../_LCBP3-CONTEXT.md)** to get:
|
||||
|
||||
- Canonical rule sources (AGENTS.md, specs/06-Decision-Records/, specs/05-Engineering-Guidelines/)
|
||||
- Tier 1 non-negotiables (ADR-019 UUID, ADR-009 schema, ADR-016 security, ADR-002 numbering, ADR-008 BullMQ, ADR-018/020 AI boundary, ADR-007 errors)
|
||||
- Domain glossary (Correspondence / RFA / Transmittal / Circulation)
|
||||
- Helper script real paths
|
||||
- Commit checklist
|
||||
@@ -0,0 +1,276 @@
|
||||
---
|
||||
name: speckit-specify
|
||||
description: Create or update the feature specification from a natural language feature description.
|
||||
version: 1.8.9
|
||||
handoffs:
|
||||
- label: Build Technical Plan
|
||||
agent: speckit-plan
|
||||
prompt: Create a plan for the spec. I am building with...
|
||||
- label: Clarify Spec Requirements
|
||||
agent: speckit-clarify
|
||||
prompt: Clarify specification requirements
|
||||
send: true
|
||||
---
|
||||
|
||||
## User Input
|
||||
|
||||
```text
|
||||
$ARGUMENTS
|
||||
```
|
||||
|
||||
You **MUST** consider the user input before proceeding (if not empty).
|
||||
|
||||
## Role
|
||||
|
||||
You are the **Antigravity Domain Scribe**. Your role is to translate natural language feature descriptions into highly structured, high-quality feature specifications (`spec.md`). You ensure clarity, testability, and alignment with the project's success criteria.
|
||||
|
||||
## Task
|
||||
|
||||
### Outline
|
||||
|
||||
The text the user typed after `/speckit-specify` in the triggering message **is** the feature description. Assume you always have it available in this conversation even if `{{args}}` appears literally below. Do not ask the user to repeat it unless they provided an empty command.
|
||||
|
||||
Given that feature description, do this:
|
||||
|
||||
1. **Generate a concise short name** (2-4 words) for the branch:
|
||||
- Analyze the feature description and extract the most meaningful keywords
|
||||
- Create a 2-4 word short name that captures the essence of the feature
|
||||
- Use action-noun format when possible (e.g., "add-user-auth", "fix-payment-bug")
|
||||
- Preserve technical terms and acronyms (OAuth2, API, JWT, etc.)
|
||||
- Keep it concise but descriptive enough to understand the feature at a glance
|
||||
- Examples:
|
||||
- "I want to add user authentication" → "user-auth"
|
||||
- "Implement OAuth2 integration for the API" → "oauth2-api-integration"
|
||||
- "Create a dashboard for analytics" → "analytics-dashboard"
|
||||
- "Fix payment processing timeout bug" → "fix-payment-timeout"
|
||||
|
||||
2. **Check for existing branches before creating new one**:
|
||||
|
||||
a. First, fetch all remote branches to ensure we have the latest information:
|
||||
|
||||
```bash
|
||||
git fetch --all --prune
|
||||
```
|
||||
|
||||
b. Find the highest feature number across all sources for the short-name:
|
||||
- Remote branches: `git ls-remote --heads origin | grep -E 'refs/heads/[0-9]+-<short-name>$'`
|
||||
- Local branches: `git branch | grep -E '^[* ]*[0-9]+-<short-name>$'`
|
||||
- Specs directories: Check for directories matching `specs/[0-9]+-<short-name>`
|
||||
|
||||
c. Determine the next available number:
|
||||
- Extract all numbers from all three sources
|
||||
- Find the highest number N
|
||||
- Use N+1 for the new branch number
|
||||
|
||||
d. Run the script `../scripts/bash/create-new-feature.sh --json "{{args}}"` with the calculated number and short-name:
|
||||
- Pass `--number N+1` and `--short-name "your-short-name"` along with the feature description
|
||||
- Bash example: `.agents/scripts/bash/create-new-feature.sh --json "{{args}}" --number 5 --short-name "user-auth" "Add user authentication"`
|
||||
- PowerShell example: `.agents/scripts/powershell/create-new-feature.ps1 -Json -Args '{{args}}' -Number 5 -ShortName "user-auth" "Add user authentication"`
|
||||
|
||||
**IMPORTANT**:
|
||||
- Check all three sources (remote branches, local branches, specs directories) to find the highest number
|
||||
- Only match branches/directories with the exact short-name pattern
|
||||
- If no existing branches/directories found with this short-name, start with number 1
|
||||
- You must only ever run this script once per feature
|
||||
- The JSON is provided in the terminal as output - always refer to it to get the actual content you're looking for
|
||||
- The JSON output will contain BRANCH_NAME and SPEC_FILE paths
|
||||
- For single quotes in args like "I'm Groot", use escape syntax: e.g. 'I'\''m Groot' (or double-quote if possible: "I'm Groot")
|
||||
|
||||
3. Load `templates/spec-template.md` to understand required sections.
|
||||
|
||||
4. Follow this execution flow:
|
||||
1. Parse user description from Input
|
||||
If empty: ERROR "No feature description provided"
|
||||
2. Extract key concepts from description
|
||||
Identify: actors, actions, data, constraints
|
||||
3. For unclear aspects:
|
||||
- Make informed guesses based on context and industry standards
|
||||
- Only mark with [NEEDS CLARIFICATION: specific question] if:
|
||||
- The choice significantly impacts feature scope or user experience
|
||||
- Multiple reasonable interpretations exist with different implications
|
||||
- No reasonable default exists
|
||||
- **LIMIT: Maximum 3 [NEEDS CLARIFICATION] markers total**
|
||||
- Prioritize clarifications by impact: scope > security/privacy > user experience > technical details
|
||||
4. Fill User Scenarios & Testing section
|
||||
If no clear user flow: ERROR "Cannot determine user scenarios"
|
||||
5. Generate Functional Requirements
|
||||
Each requirement must be testable
|
||||
Use reasonable defaults for unspecified details (document assumptions in Assumptions section)
|
||||
6. Define Success Criteria
|
||||
Create measurable, technology-agnostic outcomes
|
||||
Include both quantitative metrics (time, performance, volume) and qualitative measures (user satisfaction, task completion)
|
||||
Each criterion must be verifiable without implementation details
|
||||
7. Identify Key Entities (if data involved)
|
||||
8. Return: SUCCESS (spec ready for planning)
|
||||
|
||||
5. Write the specification to SPEC_FILE using the template structure, replacing placeholders with concrete details derived from the feature description (arguments) while preserving section order and headings.
|
||||
|
||||
6. **Specification Quality Validation**: After writing the initial spec, validate it against quality criteria:
|
||||
|
||||
a. **Create Spec Quality Checklist**: Generate a checklist file at `FEATURE_DIR/checklists/requirements.md` using the checklist template structure with these validation items:
|
||||
|
||||
```markdown
|
||||
# Specification Quality Checklist: [FEATURE NAME]
|
||||
|
||||
**Purpose**: Validate specification completeness and quality before proceeding to planning
|
||||
**Created**: [DATE]
|
||||
**Feature**: [Link to spec.md]
|
||||
|
||||
## Content Quality
|
||||
|
||||
- [ ] No implementation details (languages, frameworks, APIs)
|
||||
- [ ] Focused on user value and business needs
|
||||
- [ ] Written for non-technical stakeholders
|
||||
- [ ] All mandatory sections completed
|
||||
|
||||
## Requirement Completeness
|
||||
|
||||
- [ ] No [NEEDS CLARIFICATION] markers remain
|
||||
- [ ] Requirements are testable and unambiguous
|
||||
- [ ] Success criteria are measurable
|
||||
- [ ] Success criteria are technology-agnostic (no implementation details)
|
||||
- [ ] All acceptance scenarios are defined
|
||||
- [ ] Edge cases are identified
|
||||
- [ ] Scope is clearly bounded
|
||||
- [ ] Dependencies and assumptions identified
|
||||
|
||||
## Feature Readiness
|
||||
|
||||
- [ ] All functional requirements have clear acceptance criteria
|
||||
- [ ] User scenarios cover primary flows
|
||||
- [ ] Feature meets measurable outcomes defined in Success Criteria
|
||||
- [ ] No implementation details leak into specification
|
||||
|
||||
## Notes
|
||||
|
||||
- Items marked incomplete require spec updates before `/speckit-clarify` or `/speckit-plan`
|
||||
```
|
||||
|
||||
b. **Run Validation Check**: Review the spec against each checklist item:
|
||||
- For each item, determine if it passes or fails
|
||||
- Document specific issues found (quote relevant spec sections)
|
||||
|
||||
c. **Handle Validation Results**:
|
||||
- **If all items pass**: Mark checklist complete and proceed to step 7
|
||||
|
||||
- **If items fail (excluding [NEEDS CLARIFICATION])**:
|
||||
1. List the failing items and specific issues
|
||||
2. Update the spec to address each issue
|
||||
3. Re-run validation until all items pass (max 3 iterations)
|
||||
4. If still failing after 3 iterations, document remaining issues in checklist notes and warn user
|
||||
|
||||
- **If [NEEDS CLARIFICATION] markers remain**:
|
||||
1. Extract all [NEEDS CLARIFICATION: ...] markers from the spec
|
||||
2. **LIMIT CHECK**: If more than 3 markers exist, keep only the 3 most critical (by scope/security/UX impact) and make informed guesses for the rest
|
||||
3. For each clarification needed (max 3), present options to user in this format:
|
||||
|
||||
```markdown
|
||||
## Question [N]: [Topic]
|
||||
|
||||
**Context**: [Quote relevant spec section]
|
||||
|
||||
**What we need to know**: [Specific question from NEEDS CLARIFICATION marker]
|
||||
|
||||
**Suggested Answers**:
|
||||
|
||||
| Option | Answer | Implications |
|
||||
| ------ | ------------------------- | ------------------------------------- |
|
||||
| A | [First suggested answer] | [What this means for the feature] |
|
||||
| B | [Second suggested answer] | [What this means for the feature] |
|
||||
| C | [Third suggested answer] | [What this means for the feature] |
|
||||
| Custom | Provide your own answer | [Explain how to provide custom input] |
|
||||
|
||||
**Your choice**: _[Wait for user response]_
|
||||
```
|
||||
|
||||
4. **CRITICAL - Table Formatting**: Ensure markdown tables are properly formatted:
|
||||
- Use consistent spacing with pipes aligned
|
||||
- Each cell should have spaces around content: `| Content |` not `|Content|`
|
||||
- Header separator must have at least 3 dashes: `|--------|`
|
||||
- Test that the table renders correctly in markdown preview
|
||||
5. Number questions sequentially (Q1, Q2, Q3 - max 3 total)
|
||||
6. Present all questions together before waiting for responses
|
||||
7. Wait for user to respond with their choices for all questions (e.g., "Q1: A, Q2: Custom - [details], Q3: B")
|
||||
8. Update the spec by replacing each [NEEDS CLARIFICATION] marker with the user's selected or provided answer
|
||||
9. Re-run validation after all clarifications are resolved
|
||||
|
||||
d. **Update Checklist**: After each validation iteration, update the checklist file with current pass/fail status
|
||||
|
||||
7. Report completion with branch name, spec file path, checklist results, and readiness for the next phase (`/speckit-clarify` or `/speckit-plan`).
|
||||
|
||||
**NOTE:** The script creates and checks out the new branch and initializes the spec file before writing.
|
||||
|
||||
## Quick Guidelines
|
||||
|
||||
- Focus on **WHAT** users need and **WHY**.
|
||||
- Avoid HOW to implement (no tech stack, APIs, code structure).
|
||||
- Written for business stakeholders, not developers.
|
||||
- DO NOT create any checklists that are embedded in the spec. That will be a separate command.
|
||||
|
||||
### Section Requirements
|
||||
|
||||
- **Mandatory sections**: Must be completed for every feature
|
||||
- **Optional sections**: Include only when relevant to the feature
|
||||
- When a section doesn't apply, remove it entirely (don't leave as "N/A")
|
||||
|
||||
### For AI Generation
|
||||
|
||||
When creating this spec from a user prompt:
|
||||
|
||||
1. **Make informed guesses**: Use context, industry standards, and common patterns to fill gaps
|
||||
2. **Document assumptions**: Record reasonable defaults in the Assumptions section
|
||||
3. **Limit clarifications**: Maximum 3 [NEEDS CLARIFICATION] markers - use only for critical decisions that:
|
||||
- Significantly impact feature scope or user experience
|
||||
- Have multiple reasonable interpretations with different implications
|
||||
- Lack any reasonable default
|
||||
4. **Prioritize clarifications**: scope > security/privacy > user experience > technical details
|
||||
5. **Think like a tester**: Every vague requirement should fail the "testable and unambiguous" checklist item
|
||||
6. **Common areas needing clarification** (only if no reasonable default exists):
|
||||
- Feature scope and boundaries (include/exclude specific use cases)
|
||||
- User types and permissions (if multiple conflicting interpretations possible)
|
||||
- Security/compliance requirements (when legally/financially significant)
|
||||
|
||||
**Examples of reasonable defaults** (don't ask about these):
|
||||
|
||||
- Data retention: Industry-standard practices for the domain
|
||||
- Performance targets: Standard web/mobile app expectations unless specified
|
||||
- Error handling: User-friendly messages with appropriate fallbacks
|
||||
- Authentication method: Standard session-based or OAuth2 for web apps
|
||||
- Integration patterns: RESTful APIs unless specified otherwise
|
||||
|
||||
### Success Criteria Guidelines
|
||||
|
||||
Success criteria must be:
|
||||
|
||||
1. **Measurable**: Include specific metrics (time, percentage, count, rate)
|
||||
2. **Technology-agnostic**: No mention of frameworks, languages, databases, or tools
|
||||
3. **User-focused**: Describe outcomes from user/business perspective, not system internals
|
||||
4. **Verifiable**: Can be tested/validated without knowing implementation details
|
||||
|
||||
**Good examples**:
|
||||
|
||||
- "Users can complete checkout in under 3 minutes"
|
||||
- "System supports 10,000 concurrent users"
|
||||
- "95% of searches return results in under 1 second"
|
||||
- "Task completion rate improves by 40%"
|
||||
|
||||
**Bad examples** (implementation-focused):
|
||||
|
||||
- "API response time is under 200ms" (too technical, use "Users see results instantly")
|
||||
- "Database can handle 1000 TPS" (implementation detail, use user-facing metric)
|
||||
- "React components render efficiently" (framework-specific)
|
||||
- "Redis cache hit rate above 80%" (technology-specific)
|
||||
|
||||
---
|
||||
|
||||
## LCBP3-DMS Context (MUST LOAD)
|
||||
|
||||
Before executing, load **[../_LCBP3-CONTEXT.md](../_LCBP3-CONTEXT.md)** to get:
|
||||
|
||||
- Canonical rule sources (AGENTS.md, specs/06-Decision-Records/, specs/05-Engineering-Guidelines/)
|
||||
- Tier 1 non-negotiables (ADR-019 UUID, ADR-009 schema, ADR-016 security, ADR-002 numbering, ADR-008 BullMQ, ADR-018/020 AI boundary, ADR-007 errors)
|
||||
- Domain glossary (Correspondence / RFA / Transmittal / Circulation)
|
||||
- Helper script real paths
|
||||
- Commit checklist
|
||||
@@ -0,0 +1,115 @@
|
||||
# Feature Specification: [FEATURE NAME]
|
||||
|
||||
**Feature Branch**: `[###-feature-name]`
|
||||
**Created**: [DATE]
|
||||
**Status**: Draft
|
||||
**Input**: User description: "$ARGUMENTS"
|
||||
|
||||
## User Scenarios & Testing _(mandatory)_
|
||||
|
||||
<!--
|
||||
IMPORTANT: User stories should be PRIORITIZED as user journeys ordered by importance.
|
||||
Each user story/journey must be INDEPENDENTLY TESTABLE - meaning if you implement just ONE of them,
|
||||
you should still have a viable MVP (Minimum Viable Product) that delivers value.
|
||||
|
||||
Assign priorities (P1, P2, P3, etc.) to each story, where P1 is the most critical.
|
||||
Think of each story as a standalone slice of functionality that can be:
|
||||
- Developed independently
|
||||
- Tested independently
|
||||
- Deployed independently
|
||||
- Demonstrated to users independently
|
||||
-->
|
||||
|
||||
### User Story 1 - [Brief Title] (Priority: P1)
|
||||
|
||||
[Describe this user journey in plain language]
|
||||
|
||||
**Why this priority**: [Explain the value and why it has this priority level]
|
||||
|
||||
**Independent Test**: [Describe how this can be tested independently - e.g., "Can be fully tested by [specific action] and delivers [specific value]"]
|
||||
|
||||
**Acceptance Scenarios**:
|
||||
|
||||
1. **Given** [initial state], **When** [action], **Then** [expected outcome]
|
||||
2. **Given** [initial state], **When** [action], **Then** [expected outcome]
|
||||
|
||||
---
|
||||
|
||||
### User Story 2 - [Brief Title] (Priority: P2)
|
||||
|
||||
[Describe this user journey in plain language]
|
||||
|
||||
**Why this priority**: [Explain the value and why it has this priority level]
|
||||
|
||||
**Independent Test**: [Describe how this can be tested independently]
|
||||
|
||||
**Acceptance Scenarios**:
|
||||
|
||||
1. **Given** [initial state], **When** [action], **Then** [expected outcome]
|
||||
|
||||
---
|
||||
|
||||
### User Story 3 - [Brief Title] (Priority: P3)
|
||||
|
||||
[Describe this user journey in plain language]
|
||||
|
||||
**Why this priority**: [Explain the value and why it has this priority level]
|
||||
|
||||
**Independent Test**: [Describe how this can be tested independently]
|
||||
|
||||
**Acceptance Scenarios**:
|
||||
|
||||
1. **Given** [initial state], **When** [action], **Then** [expected outcome]
|
||||
|
||||
---
|
||||
|
||||
[Add more user stories as needed, each with an assigned priority]
|
||||
|
||||
### Edge Cases
|
||||
|
||||
<!--
|
||||
ACTION REQUIRED: The content in this section represents placeholders.
|
||||
Fill them out with the right edge cases.
|
||||
-->
|
||||
|
||||
- What happens when [boundary condition]?
|
||||
- How does system handle [error scenario]?
|
||||
|
||||
## Requirements _(mandatory)_
|
||||
|
||||
<!--
|
||||
ACTION REQUIRED: The content in this section represents placeholders.
|
||||
Fill them out with the right functional requirements.
|
||||
-->
|
||||
|
||||
### Functional Requirements
|
||||
|
||||
- **FR-001**: System MUST [specific capability, e.g., "allow users to create accounts"]
|
||||
- **FR-002**: System MUST [specific capability, e.g., "validate email addresses"]
|
||||
- **FR-003**: Users MUST be able to [key interaction, e.g., "reset their password"]
|
||||
- **FR-004**: System MUST [data requirement, e.g., "persist user preferences"]
|
||||
- **FR-005**: System MUST [behavior, e.g., "log all security events"]
|
||||
|
||||
_Example of marking unclear requirements:_
|
||||
|
||||
- **FR-006**: System MUST authenticate users via [NEEDS CLARIFICATION: auth method not specified - email/password, SSO, OAuth?]
|
||||
- **FR-007**: System MUST retain user data for [NEEDS CLARIFICATION: retention period not specified]
|
||||
|
||||
### Key Entities _(include if feature involves data)_
|
||||
|
||||
- **[Entity 1]**: [What it represents, key attributes without implementation]
|
||||
- **[Entity 2]**: [What it represents, relationships to other entities]
|
||||
|
||||
## Success Criteria _(mandatory)_
|
||||
|
||||
<!--
|
||||
ACTION REQUIRED: Define measurable success criteria.
|
||||
These must be technology-agnostic and measurable.
|
||||
-->
|
||||
|
||||
### Measurable Outcomes
|
||||
|
||||
- **SC-001**: [Measurable metric, e.g., "Users can complete account creation in under 2 minutes"]
|
||||
- **SC-002**: [Measurable metric, e.g., "System handles 1000 concurrent users without degradation"]
|
||||
- **SC-003**: [User satisfaction metric, e.g., "90% of users successfully complete primary task on first attempt"]
|
||||
- **SC-004**: [Business metric, e.g., "Reduce support tickets related to [X] by 50%"]
|
||||
@@ -0,0 +1,123 @@
|
||||
---
|
||||
name: speckit-status
|
||||
description: Display a dashboard showing feature status, completion percentage, and blockers.
|
||||
version: 1.8.9
|
||||
depends-on: []
|
||||
---
|
||||
|
||||
## User Input
|
||||
|
||||
```text
|
||||
$ARGUMENTS
|
||||
```
|
||||
|
||||
You **MUST** consider the user input before proceeding (if not empty).
|
||||
|
||||
## Role
|
||||
|
||||
You are the **Antigravity Status Reporter**. Your role is to provide clear, actionable status updates on project progress.
|
||||
|
||||
## Task
|
||||
|
||||
### Outline
|
||||
|
||||
Generate a dashboard view of all features and their completion status.
|
||||
|
||||
### Execution Steps
|
||||
|
||||
1. **Discover Features**:
|
||||
|
||||
```bash
|
||||
# Find all feature directories
|
||||
find .specify/features -maxdepth 1 -type d 2>/dev/null || echo "No features found"
|
||||
```
|
||||
|
||||
2. **For Each Feature, Gather Metrics**:
|
||||
|
||||
| Artifact | Check | Metric |
|
||||
| ---------------- | ------------------ | -------------------------- |
|
||||
| spec.md | Exists? | Has [NEEDS CLARIFICATION]? |
|
||||
| plan.md | Exists? | All sections complete? |
|
||||
| tasks.md | Exists? | Count [x] vs [ ] vs [/] |
|
||||
| checklists/\*.md | All items checked? | Checklist completion % |
|
||||
|
||||
3. **Calculate Completion**:
|
||||
|
||||
```
|
||||
Phase 1 (Specify): spec.md exists & no clarifications needed
|
||||
Phase 2 (Plan): plan.md exists & complete
|
||||
Phase 3 (Tasks): tasks.md exists
|
||||
Phase 4 (Implement): tasks.md completion %
|
||||
Phase 5 (Validate): validation-report.md exists with PASS
|
||||
```
|
||||
|
||||
4. **Identify Blockers**:
|
||||
- [NEEDS CLARIFICATION] markers
|
||||
- [ ] tasks with no progress
|
||||
- Failed checklist items
|
||||
- Missing dependencies
|
||||
|
||||
5. **Generate Dashboard**:
|
||||
|
||||
```markdown
|
||||
# Speckit Status Dashboard
|
||||
|
||||
**Generated**: [timestamp]
|
||||
**Total Features**: X
|
||||
|
||||
## Overview
|
||||
|
||||
| Feature | Phase | Progress | Blockers | Next Action |
|
||||
| ------------ | --------- | -------- | -------- | ------------------------ |
|
||||
| auth-system | Implement | 75% | 0 | Complete remaining tasks |
|
||||
| payment-flow | Plan | 40% | 2 | Resolve clarifications |
|
||||
|
||||
## Feature Details
|
||||
|
||||
### [Feature Name]
|
||||
```
|
||||
|
||||
Spec: ████████░░ 80%
|
||||
Plan: ██████████ 100%
|
||||
Tasks: ██████░░░░ 60%
|
||||
|
||||
```
|
||||
|
||||
**Blockers**:
|
||||
- [ ] Clarification needed: "What payment providers?"
|
||||
|
||||
**Recent Activity**:
|
||||
- Last modified: [date]
|
||||
- Files changed: [list]
|
||||
|
||||
---
|
||||
|
||||
## Summary
|
||||
|
||||
- Features Ready for Implementation: X
|
||||
- Features Blocked: Y
|
||||
- Overall Project Completion: Z%
|
||||
```
|
||||
|
||||
6. **Output**:
|
||||
- Display in terminal
|
||||
- Optionally write to `.specify/STATUS.md`
|
||||
|
||||
## Operating Principles
|
||||
|
||||
- **Be Current**: Always read latest file state
|
||||
- **Be Visual**: Use progress bars and tables
|
||||
- **Be Actionable**: Every status should have a "next action"
|
||||
- **Be Fast**: Cache nothing, always recalculate
|
||||
|
||||
---
|
||||
|
||||
## LCBP3-DMS Context (MUST LOAD)
|
||||
|
||||
Before executing, load **[../_LCBP3-CONTEXT.md](../_LCBP3-CONTEXT.md)** to get:
|
||||
|
||||
- Canonical rule sources (AGENTS.md, specs/06-Decision-Records/, specs/05-Engineering-Guidelines/)
|
||||
- Tier 1 non-negotiables (ADR-019 UUID, ADR-009 schema, ADR-016 security, ADR-002 numbering, ADR-008 BullMQ, ADR-018/020 AI boundary, ADR-007 errors)
|
||||
- Domain glossary (Correspondence / RFA / Transmittal / Circulation)
|
||||
- Helper script real paths
|
||||
- Commit checklist
|
||||
@@ -0,0 +1,159 @@
|
||||
---
|
||||
name: speckit-tasks
|
||||
description: Generate an actionable, dependency-ordered tasks.md for the feature based on available design artifacts.
|
||||
version: 1.8.9
|
||||
depends-on:
|
||||
- speckit-plan
|
||||
handoffs:
|
||||
- label: Analyze For Consistency
|
||||
agent: speckit-analyze
|
||||
prompt: Run a project analysis for consistency
|
||||
send: true
|
||||
- label: Implement Project
|
||||
agent: speckit-implement
|
||||
prompt: Start the implementation in phases
|
||||
send: true
|
||||
---
|
||||
|
||||
## User Input
|
||||
|
||||
```text
|
||||
$ARGUMENTS
|
||||
```
|
||||
|
||||
You **MUST** consider the user input before proceeding (if not empty).
|
||||
|
||||
## Role
|
||||
|
||||
You are the **Antigravity Execution Strategist**. Your role is to deconstruct complex technical plans into atomic, dependency-ordered tasks. You organize work into user-story-driven phases to ensure incremental delivery and high observability.
|
||||
|
||||
## Task
|
||||
|
||||
### Outline
|
||||
|
||||
1. **Setup**: Run `../scripts/bash/check-prerequisites.sh --json` from repo root and parse FEATURE_DIR and AVAILABLE_DOCS list. All paths must be absolute. For single quotes in args like "I'm Groot", use escape syntax, e.g., 'I'\\''m Groot' (or double-quote if possible: "I'm Groot").
|
||||
|
||||
2. **Load design documents**: Read from FEATURE_DIR:
|
||||
- **Required**: plan.md (tech stack, libraries, structure), spec.md (user stories with priorities)
|
||||
- **Optional**: data-model.md (entities), contracts/ (API endpoints), research.md (decisions), quickstart.md (test scenarios)
|
||||
- Note: Not all projects have all documents. Generate tasks based on what's available.
|
||||
|
||||
3. **Execute task generation workflow**:
|
||||
- Load plan.md and extract tech stack, libraries, project structure
|
||||
- Load spec.md and extract user stories with their priorities (P1, P2, P3, etc.)
|
||||
- If data-model.md exists: Extract entities and map to user stories
|
||||
- If contracts/ exists: Map endpoints to user stories
|
||||
- If research.md exists: Extract decisions for setup tasks
|
||||
- Generate tasks organized by user story (see Task Generation Rules below)
|
||||
- Generate dependency graph showing user story completion order
|
||||
- Create parallel execution examples per user story
|
||||
- Validate task completeness (each user story has all needed tasks, independently testable)
|
||||
|
||||
4. **Generate tasks.md**: Use `templates/tasks-template.md` as structure, fill with:
|
||||
- Correct feature name from plan.md
|
||||
- Phase 1: Setup tasks (project initialization)
|
||||
- Phase 2: Foundational tasks (blocking prerequisites for all user stories)
|
||||
- Phase 3+: One phase per user story (in priority order from spec.md)
|
||||
- Each phase includes: story goal, independent test criteria, tests (if requested), implementation tasks
|
||||
- Final Phase: Polish & cross-cutting concerns
|
||||
- All tasks must follow the strict checklist format (see Task Generation Rules below)
|
||||
- Clear file paths for each task
|
||||
- Dependencies section showing story completion order
|
||||
- Parallel execution examples per story
|
||||
- Implementation strategy section (MVP first, incremental delivery)
|
||||
|
||||
5. **Report**: Output path to generated tasks.md and summary:
|
||||
- Total task count
|
||||
- Task count per user story
|
||||
- Parallel opportunities identified
|
||||
- Independent test criteria for each story
|
||||
- Suggested MVP scope (typically just User Story 1)
|
||||
- Format validation: Confirm ALL tasks follow the checklist format (checkbox, ID, labels, file paths)
|
||||
|
||||
Context for task generation: {{args}}
|
||||
|
||||
The tasks.md should be immediately executable - each task must be specific enough that an LLM can complete it without additional context.
|
||||
|
||||
## Task Generation Rules
|
||||
|
||||
**CRITICAL**: Tasks MUST be organized by user story to enable independent implementation and testing.
|
||||
|
||||
**Tests are OPTIONAL**: Only generate test tasks if explicitly requested in the feature specification or if user requests TDD approach.
|
||||
|
||||
### Checklist Format (REQUIRED)
|
||||
|
||||
Every task MUST strictly follow this format:
|
||||
|
||||
```text
|
||||
- [ ] [TaskID] [P?] [Story?] Description with file path
|
||||
```
|
||||
|
||||
**Format Components**:
|
||||
|
||||
1. **Checkbox**: ALWAYS start with `- [ ]` (markdown checkbox)
|
||||
2. **Task ID**: Sequential number (T001, T002, T003...) in execution order
|
||||
3. **[P] marker**: Include ONLY if task is parallelizable (different files, no dependencies on incomplete tasks)
|
||||
4. **[Story] label**: REQUIRED for user story phase tasks only
|
||||
- Format: [US1], [US2], [US3], etc. (maps to user stories from spec.md)
|
||||
- Setup phase: NO story label
|
||||
- Foundational phase: NO story label
|
||||
- User Story phases: MUST have story label
|
||||
- Polish phase: NO story label
|
||||
5. **Description**: Clear action with exact file path
|
||||
|
||||
**Examples**:
|
||||
|
||||
- ✅ CORRECT: `- [ ] T001 Create project structure per implementation plan`
|
||||
- ✅ CORRECT: `- [ ] T005 [P] Implement authentication middleware in src/middleware/auth.py`
|
||||
- ✅ CORRECT: `- [ ] T012 [P] [US1] Create User model in src/models/user.py`
|
||||
- ✅ CORRECT: `- [ ] T014 [US1] Implement UserService in src/services/user_service.py`
|
||||
- ❌ WRONG: `- [ ] Create User model` (missing ID and Story label)
|
||||
- ❌ WRONG: `T001 [US1] Create model` (missing checkbox)
|
||||
- ❌ WRONG: `- [ ] [US1] Create User model` (missing Task ID)
|
||||
- ❌ WRONG: `- [ ] T001 [US1] Create model` (missing file path)
|
||||
|
||||
### Task Organization
|
||||
|
||||
1. **From User Stories (spec.md)** - PRIMARY ORGANIZATION:
|
||||
- Each user story (P1, P2, P3...) gets its own phase
|
||||
- Map all related components to their story:
|
||||
- Models needed for that story
|
||||
- Services needed for that story
|
||||
- Endpoints/UI needed for that story
|
||||
- If tests requested: Tests specific to that story
|
||||
- Mark story dependencies (most stories should be independent)
|
||||
|
||||
2. **From Contracts**:
|
||||
- Map each contract/endpoint → to the user story it serves
|
||||
- If tests requested: Each contract → contract test task [P] before implementation in that story's phase
|
||||
|
||||
3. **From Data Model**:
|
||||
- Map each entity to the user story(ies) that need it
|
||||
- If entity serves multiple stories: Put in earliest story or Setup phase
|
||||
- Relationships → service layer tasks in appropriate story phase
|
||||
|
||||
4. **From Setup/Infrastructure**:
|
||||
- Shared infrastructure → Setup phase (Phase 1)
|
||||
- Foundational/blocking tasks → Foundational phase (Phase 2)
|
||||
- Story-specific setup → within that story's phase
|
||||
|
||||
### Phase Structure
|
||||
|
||||
- **Phase 1**: Setup (project initialization)
|
||||
- **Phase 2**: Foundational (blocking prerequisites - MUST complete before user stories)
|
||||
- **Phase 3+**: User Stories in priority order (P1, P2, P3...)
|
||||
- Within each story: Tests (if requested) → Models → Services → Endpoints → Integration
|
||||
- Each phase should be a complete, independently testable increment
|
||||
- **Final Phase**: Polish & Cross-Cutting Concerns
|
||||
|
||||
---
|
||||
|
||||
## LCBP3-DMS Context (MUST LOAD)
|
||||
|
||||
Before executing, load **[../_LCBP3-CONTEXT.md](../_LCBP3-CONTEXT.md)** to get:
|
||||
|
||||
- Canonical rule sources (AGENTS.md, specs/06-Decision-Records/, specs/05-Engineering-Guidelines/)
|
||||
- Tier 1 non-negotiables (ADR-019 UUID, ADR-009 schema, ADR-016 security, ADR-002 numbering, ADR-008 BullMQ, ADR-018/020 AI boundary, ADR-007 errors)
|
||||
- Domain glossary (Correspondence / RFA / Transmittal / Circulation)
|
||||
- Helper script real paths
|
||||
- Commit checklist
|
||||
@@ -0,0 +1,250 @@
|
||||
---
|
||||
description: 'Task list template for feature implementation'
|
||||
---
|
||||
|
||||
# Tasks: [FEATURE NAME]
|
||||
|
||||
**Input**: Design documents from `/specs/[###-feature-name]/`
|
||||
**Prerequisites**: plan.md (required), spec.md (required for user stories), research.md, data-model.md, contracts/
|
||||
|
||||
**Tests**: The examples below include test tasks. Tests are OPTIONAL - only include them if explicitly requested in the feature specification.
|
||||
|
||||
**Organization**: Tasks are grouped by user story to enable independent implementation and testing of each story.
|
||||
|
||||
## Format: `[ID] [P?] [Story] Description`
|
||||
|
||||
- **[P]**: Can run in parallel (different files, no dependencies)
|
||||
- **[Story]**: Which user story this task belongs to (e.g., US1, US2, US3)
|
||||
- Include exact file paths in descriptions
|
||||
|
||||
## Path Conventions
|
||||
|
||||
- **Single project**: `src/`, `tests/` at repository root
|
||||
- **Web app**: `backend/src/`, `frontend/src/`
|
||||
- **Mobile**: `api/src/`, `ios/src/` or `android/src/`
|
||||
- Paths shown below assume single project - adjust based on plan.md structure
|
||||
|
||||
<!--
|
||||
============================================================================
|
||||
IMPORTANT: The tasks below are SAMPLE TASKS for illustration purposes only.
|
||||
|
||||
The /speckit-tasks command MUST replace these with actual tasks based on:
|
||||
- User stories from spec.md (with their priorities P1, P2, P3...)
|
||||
- Feature requirements from plan.md
|
||||
- Entities from data-model.md
|
||||
- Endpoints from contracts/
|
||||
|
||||
Tasks MUST be organized by user story so each story can be:
|
||||
- Implemented independently
|
||||
- Tested independently
|
||||
- Delivered as an MVP increment
|
||||
|
||||
DO NOT keep these sample tasks in the generated tasks.md file.
|
||||
============================================================================
|
||||
-->
|
||||
|
||||
## Phase 1: Setup (Shared Infrastructure)
|
||||
|
||||
**Purpose**: Project initialization and basic structure
|
||||
|
||||
- [ ] T001 Create project structure per implementation plan
|
||||
- [ ] T002 Initialize [language] project with [framework] dependencies
|
||||
- [ ] T003 [P] Configure linting and formatting tools
|
||||
|
||||
---
|
||||
|
||||
## Phase 2: Foundational (Blocking Prerequisites)
|
||||
|
||||
**Purpose**: Core infrastructure that MUST be complete before ANY user story can be implemented
|
||||
|
||||
**⚠️ CRITICAL**: No user story work can begin until this phase is complete
|
||||
|
||||
Examples of foundational tasks (adjust based on your project):
|
||||
|
||||
- [ ] T004 Setup database schema and migrations framework
|
||||
- [ ] T005 [P] Implement authentication/authorization framework
|
||||
- [ ] T006 [P] Setup API routing and middleware structure
|
||||
- [ ] T007 Create base models/entities that all stories depend on
|
||||
- [ ] T008 Configure error handling and logging infrastructure
|
||||
- [ ] T009 Setup environment configuration management
|
||||
|
||||
**Checkpoint**: Foundation ready - user story implementation can now begin in parallel
|
||||
|
||||
---
|
||||
|
||||
## Phase 3: User Story 1 - [Title] (Priority: P1) 🎯 MVP
|
||||
|
||||
**Goal**: [Brief description of what this story delivers]
|
||||
|
||||
**Independent Test**: [How to verify this story works on its own]
|
||||
|
||||
### Tests for User Story 1 (OPTIONAL - only if tests requested) ⚠️
|
||||
|
||||
> **NOTE: Write these tests FIRST, ensure they FAIL before implementation**
|
||||
|
||||
- [ ] T010 [P] [US1] Contract test for [endpoint] in tests/contract/test\_[name].py
|
||||
- [ ] T011 [P] [US1] Integration test for [user journey] in tests/integration/test\_[name].py
|
||||
|
||||
### Implementation for User Story 1
|
||||
|
||||
- [ ] T012 [P] [US1] Create [Entity1] model in src/models/[entity1].py
|
||||
- [ ] T013 [P] [US1] Create [Entity2] model in src/models/[entity2].py
|
||||
- [ ] T014 [US1] Implement [Service] in src/services/[service].py (depends on T012, T013)
|
||||
- [ ] T015 [US1] Implement [endpoint/feature] in src/[location]/[file].py
|
||||
- [ ] T016 [US1] Add validation and error handling
|
||||
- [ ] T017 [US1] Add logging for user story 1 operations
|
||||
|
||||
**Checkpoint**: At this point, User Story 1 should be fully functional and testable independently
|
||||
|
||||
---
|
||||
|
||||
## Phase 4: User Story 2 - [Title] (Priority: P2)
|
||||
|
||||
**Goal**: [Brief description of what this story delivers]
|
||||
|
||||
**Independent Test**: [How to verify this story works on its own]
|
||||
|
||||
### Tests for User Story 2 (OPTIONAL - only if tests requested) ⚠️
|
||||
|
||||
- [ ] T018 [P] [US2] Contract test for [endpoint] in tests/contract/test\_[name].py
|
||||
- [ ] T019 [P] [US2] Integration test for [user journey] in tests/integration/test\_[name].py
|
||||
|
||||
### Implementation for User Story 2
|
||||
|
||||
- [ ] T020 [P] [US2] Create [Entity] model in src/models/[entity].py
|
||||
- [ ] T021 [US2] Implement [Service] in src/services/[service].py
|
||||
- [ ] T022 [US2] Implement [endpoint/feature] in src/[location]/[file].py
|
||||
- [ ] T023 [US2] Integrate with User Story 1 components (if needed)
|
||||
|
||||
**Checkpoint**: At this point, User Stories 1 AND 2 should both work independently
|
||||
|
||||
---
|
||||
|
||||
## Phase 5: User Story 3 - [Title] (Priority: P3)
|
||||
|
||||
**Goal**: [Brief description of what this story delivers]
|
||||
|
||||
**Independent Test**: [How to verify this story works on its own]
|
||||
|
||||
### Tests for User Story 3 (OPTIONAL - only if tests requested) ⚠️
|
||||
|
||||
- [ ] T024 [P] [US3] Contract test for [endpoint] in tests/contract/test\_[name].py
|
||||
- [ ] T025 [P] [US3] Integration test for [user journey] in tests/integration/test\_[name].py
|
||||
|
||||
### Implementation for User Story 3
|
||||
|
||||
- [ ] T026 [P] [US3] Create [Entity] model in src/models/[entity].py
|
||||
- [ ] T027 [US3] Implement [Service] in src/services/[service].py
|
||||
- [ ] T028 [US3] Implement [endpoint/feature] in src/[location]/[file].py
|
||||
|
||||
**Checkpoint**: All user stories should now be independently functional
|
||||
|
||||
---
|
||||
|
||||
[Add more user story phases as needed, following the same pattern]
|
||||
|
||||
---
|
||||
|
||||
## Phase N: Polish & Cross-Cutting Concerns
|
||||
|
||||
**Purpose**: Improvements that affect multiple user stories
|
||||
|
||||
- [ ] TXXX [P] Documentation updates in docs/
|
||||
- [ ] TXXX Code cleanup and refactoring
|
||||
- [ ] TXXX Performance optimization across all stories
|
||||
- [ ] TXXX [P] Additional unit tests (if requested) in tests/unit/
|
||||
- [ ] TXXX Security hardening
|
||||
- [ ] TXXX Run quickstart.md validation
|
||||
|
||||
---
|
||||
|
||||
## Dependencies & Execution Order
|
||||
|
||||
### Phase Dependencies
|
||||
|
||||
- **Setup (Phase 1)**: No dependencies - can start immediately
|
||||
- **Foundational (Phase 2)**: Depends on Setup completion - BLOCKS all user stories
|
||||
- **User Stories (Phase 3+)**: All depend on Foundational phase completion
|
||||
- User stories can then proceed in parallel (if staffed)
|
||||
- Or sequentially in priority order (P1 → P2 → P3)
|
||||
- **Polish (Final Phase)**: Depends on all desired user stories being complete
|
||||
|
||||
### User Story Dependencies
|
||||
|
||||
- **User Story 1 (P1)**: Can start after Foundational (Phase 2) - No dependencies on other stories
|
||||
- **User Story 2 (P2)**: Can start after Foundational (Phase 2) - May integrate with US1 but should be independently testable
|
||||
- **User Story 3 (P3)**: Can start after Foundational (Phase 2) - May integrate with US1/US2 but should be independently testable
|
||||
|
||||
### Within Each User Story
|
||||
|
||||
- Tests (if included) MUST be written and FAIL before implementation
|
||||
- Models before services
|
||||
- Services before endpoints
|
||||
- Core implementation before integration
|
||||
- Story complete before moving to next priority
|
||||
|
||||
### Parallel Opportunities
|
||||
|
||||
- All Setup tasks marked [P] can run in parallel
|
||||
- All Foundational tasks marked [P] can run in parallel (within Phase 2)
|
||||
- Once Foundational phase completes, all user stories can start in parallel (if team capacity allows)
|
||||
- All tests for a user story marked [P] can run in parallel
|
||||
- Models within a story marked [P] can run in parallel
|
||||
- Different user stories can be worked on in parallel by different team members
|
||||
|
||||
---
|
||||
|
||||
## Parallel Example: User Story 1
|
||||
|
||||
```bash
|
||||
# Launch all tests for User Story 1 together (if tests requested):
|
||||
Task: "Contract test for [endpoint] in tests/contract/test_[name].py"
|
||||
Task: "Integration test for [user journey] in tests/integration/test_[name].py"
|
||||
|
||||
# Launch all models for User Story 1 together:
|
||||
Task: "Create [Entity1] model in src/models/[entity1].py"
|
||||
Task: "Create [Entity2] model in src/models/[entity2].py"
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
## Implementation Strategy
|
||||
|
||||
### MVP First (User Story 1 Only)
|
||||
|
||||
1. Complete Phase 1: Setup
|
||||
2. Complete Phase 2: Foundational (CRITICAL - blocks all stories)
|
||||
3. Complete Phase 3: User Story 1
|
||||
4. **STOP and VALIDATE**: Test User Story 1 independently
|
||||
5. Deploy/demo if ready
|
||||
|
||||
### Incremental Delivery
|
||||
|
||||
1. Complete Setup + Foundational → Foundation ready
|
||||
2. Add User Story 1 → Test independently → Deploy/Demo (MVP!)
|
||||
3. Add User Story 2 → Test independently → Deploy/Demo
|
||||
4. Add User Story 3 → Test independently → Deploy/Demo
|
||||
5. Each story adds value without breaking previous stories
|
||||
|
||||
### Parallel Team Strategy
|
||||
|
||||
With multiple developers:
|
||||
|
||||
1. Team completes Setup + Foundational together
|
||||
2. Once Foundational is done:
|
||||
- Developer A: User Story 1
|
||||
- Developer B: User Story 2
|
||||
- Developer C: User Story 3
|
||||
3. Stories complete and integrate independently
|
||||
|
||||
---
|
||||
|
||||
## Notes
|
||||
|
||||
- [P] tasks = different files, no dependencies
|
||||
- [Story] label maps task to specific user story for traceability
|
||||
- Each user story should be independently completable and testable
|
||||
- Verify tests fail before implementing
|
||||
- Commit after each task or logical group
|
||||
- Stop at any checkpoint to validate story independently
|
||||
- Avoid: vague tasks, same file conflicts, cross-story dependencies that break independence
|
||||
@@ -0,0 +1,218 @@
|
||||
---
|
||||
name: speckit-taskstoissues
|
||||
description: Convert existing tasks into actionable, dependency-ordered issues for the feature based on available design artifacts.
|
||||
version: 1.8.9
|
||||
depends-on:
|
||||
- speckit-tasks
|
||||
tools: ['github/github-mcp-server/issue_write']
|
||||
---
|
||||
|
||||
## User Input
|
||||
|
||||
```text
|
||||
$ARGUMENTS
|
||||
```
|
||||
|
||||
You **MUST** consider the user input before proceeding (if not empty).
|
||||
|
||||
## Role
|
||||
|
||||
You are the **Antigravity Tracker Integrator**. Your role is to synchronize technical tasks with external project management systems (GitHub Issues or Gitea Issues). You ensure that every piece of work has a clear, tracked identity for collaborative execution.
|
||||
|
||||
## Task
|
||||
|
||||
### Outline
|
||||
|
||||
Convert all tasks from `tasks.md` into well-structured issues on the appropriate platform (GitHub or Gitea), preserving dependency order, phase grouping, and labels.
|
||||
|
||||
### Execution Steps
|
||||
|
||||
1. **Load Task Data**:
|
||||
Run `../scripts/bash/check-prerequisites.sh --json --require-tasks --include-tasks` from repo root and parse FEATURE_DIR and AVAILABLE_DOCS list. All paths must be absolute.
|
||||
   For single quotes in args like "I'm Groot", use escape syntax, e.g., 'I'\\''m Groot' (or double-quote if possible: "I'm Groot").
|
||||
|
||||
2. **Extract tasks path** from the executed script output.
|
||||
|
||||
3. **Detect Platform** — Get the Git remote and determine the platform:
|
||||
|
||||
```bash
|
||||
git config --get remote.origin.url
|
||||
```
|
||||
|
||||
| Remote URL Pattern | Platform | API |
|
||||
| ---------------------------------------- | ----------- | --------------------------- |
|
||||
| `github.com` | GitHub | GitHub MCP or REST API |
|
||||
| `gitea.*`, custom domain with `/api/v1/` | Gitea | Gitea REST API |
|
||||
| Other | Unsupported | **STOP** with error message |
|
||||
|
||||
**Platform Detection Rules**:
|
||||
- If URL contains `github.com` → GitHub
|
||||
- If URL contains a known Gitea domain (check `$ARGUMENTS` for hints, or try `<host>/api/v1/version`) → Gitea
|
||||
- If `$ARGUMENTS` explicitly specifies platform (e.g., `--platform gitea`) → use that
|
||||
- If uncertain → **ASK** the user which platform to use
|
||||
|
||||
> **UNDER NO CIRCUMSTANCES EVER CREATE ISSUES IN REPOSITORIES THAT DO NOT MATCH THE REMOTE URL**
|
||||
|
||||
4. **Parse `tasks.md`** — Extract structured data for each task:
|
||||
|
||||
| Field | Source | Example |
|
||||
| --------------- | ---------------------------- | -------------------------- |
|
||||
| Task ID | `T001`, `T002`, etc. | `T001` |
|
||||
| Phase | Phase heading | `Phase 1: Setup` |
|
||||
| Description | Task text after ID | `Create project structure` |
|
||||
| File paths | Paths in description | `src/models/user.py` |
|
||||
| Parallel marker | `[P]` flag | `true`/`false` |
|
||||
| User Story | `[US1]`, `[US2]`, etc. | `US1` |
|
||||
| Dependencies | Sequential ordering in phase | `T001 → T002` |
|
||||
|
||||
5. **Load Feature Context** (for issue body enrichment):
|
||||
- Read `spec.md` for requirement references
|
||||
- Read `plan.md` for architecture context (if exists)
|
||||
- Map tasks to requirements where possible
|
||||
|
||||
6. **Generate Issue Data** — For each task, create an issue with:
|
||||
|
||||
### Issue Title Format
|
||||
|
||||
```
|
||||
[<TaskID>] <Description>
|
||||
```
|
||||
|
||||
Example: `[T001] Create project structure per implementation plan`
|
||||
|
||||
### Issue Body Template
|
||||
|
||||
```markdown
|
||||
## Task Details
|
||||
|
||||
**Task ID**: <TaskID>
|
||||
**Phase**: <Phase Name>
|
||||
**Parallel**: <Yes/No>
|
||||
**User Story**: <Story reference, if any>
|
||||
|
||||
## Description
|
||||
|
||||
<Full task description from tasks.md>
|
||||
|
||||
## File Paths
|
||||
|
||||
- `<file path 1>`
|
||||
- `<file path 2>`
|
||||
|
||||
## Acceptance Criteria
|
||||
|
||||
- [ ] Implementation complete per task description
|
||||
- [ ] Relevant tests pass (if applicable)
|
||||
- [ ] No regressions introduced
|
||||
|
||||
## Context
|
||||
|
||||
**Feature**: <Feature name from spec.md>
|
||||
**Spec Reference**: <Requirement ID if mapped>
|
||||
|
||||
---
|
||||
|
||||
_Auto-generated by speckit-taskstoissues from `tasks.md`_
|
||||
```
|
||||
|
||||
7. **Apply Labels** — Assign labels based on task metadata:
|
||||
|
||||
| Condition | Label |
|
||||
| ---------------------------------- | ------------------ |
|
||||
| Phase 1 (Setup) | `phase:setup` |
|
||||
| Phase 2 (Foundation) | `phase:foundation` |
|
||||
| Phase 3+ (User Stories) | `phase:story` |
|
||||
| Final Phase (Polish) | `phase:polish` |
|
||||
| Has `[P]` marker | `parallel` |
|
||||
| Has `[US1]` marker | `story:US1` |
|
||||
| Task creates test files | `type:test` |
|
||||
| Task creates models/entities | `type:model` |
|
||||
| Task creates services | `type:service` |
|
||||
| Task creates controllers/endpoints | `type:api` |
|
||||
| Task creates UI components | `type:ui` |
|
||||
|
||||
**Label Creation**: If labels don't exist on the repo, create them first before assigning.
|
||||
|
||||
8. **Set Milestone** (optional):
|
||||
- If `$ARGUMENTS` includes `--milestone "<name>"`, assign all issues to that milestone
|
||||
- If milestone doesn't exist, create it with the feature name as the title
|
||||
|
||||
9. **Create Issues** — Execute in dependency order:
|
||||
|
||||
**For GitHub**: Use the GitHub MCP server tool `issue_write` to create issues.
|
||||
|
||||
**For Gitea**: Use the Gitea REST API:
|
||||
|
||||
```bash
|
||||
# Create issue
|
||||
curl -s -X POST "https://<gitea-host>/api/v1/repos/<owner>/<repo>/issues" \
|
||||
-H "Authorization: token <GITEA_TOKEN>" \
|
||||
-H "Content-Type: application/json" \
|
||||
-d '{
|
||||
"title": "[T001] Create project structure",
|
||||
"body": "<issue body>",
|
||||
"labels": [<label_ids>]
|
||||
}'
|
||||
```
|
||||
|
||||
**Authentication**:
|
||||
- GitHub: Uses MCP server (pre-authenticated)
|
||||
- Gitea: Requires `GITEA_TOKEN` environment variable. If not set, **STOP** and ask user to provide it.
|
||||
|
||||
**Rate Limiting**:
|
||||
- Create issues sequentially with a 500ms delay between requests
|
||||
- If rate limited (HTTP 429), wait and retry with exponential backoff
|
||||
|
||||
10. **Track Created Issues** — Maintain a mapping of `TaskID → IssueNumber`:
|
||||
|
||||
```markdown
|
||||
| Task ID | Issue # | Title | URL |
|
||||
| ------- | ------- | ----------------------------- | ----- |
|
||||
| T001 | #42 | Create project structure | <url> |
|
||||
| T002 | #43 | Configure database connection | <url> |
|
||||
```
|
||||
|
||||
11. **Update `tasks.md`** (optional — ask user first):
|
||||
- Append issue references to each task line:
|
||||
```
|
||||
- [ ] T001 Create project structure (#42)
|
||||
```
|
||||
|
||||
12. **Report Completion**:
|
||||
- Total issues created
|
||||
- Issues by phase
|
||||
- Issues by label
|
||||
- Any failures (with retry suggestions)
|
||||
- Link to issue board/project
|
||||
- Mapping table (Task ID → Issue #)
|
||||
|
||||
## Arguments
|
||||
|
||||
| Argument | Description | Default |
|
||||
| ---------------------------- | --------------------------------------- | ------------- |
|
||||
| `--platform <github\|gitea>` | Force platform detection | Auto-detect |
|
||||
| `--milestone "<name>"` | Assign issues to milestone | None |
|
||||
| `--dry-run` | Preview issues without creating | `false` |
|
||||
| `--labels-only` | Only create labels, don't create issues | `false` |
|
||||
| `--update-tasks` | Auto-update tasks.md with issue refs | `false` (ask) |
|
||||
|
||||
## Operating Principles
|
||||
|
||||
- **Idempotency**: Check if an issue with the same title already exists before creating duplicates
|
||||
- **Dependency Order**: Create issues in task execution order so dependencies are naturally numbered
|
||||
- **Rich Context**: Include enough context in each issue body that it can be understood standalone
|
||||
- **Label Consistency**: Use a consistent label taxonomy across all issues
|
||||
- **Platform Safety**: Never create issues on repos that don't match the git remote
|
||||
- **Dry Run Support**: Always support `--dry-run` to preview before creating
|
||||
|
||||
---
|
||||
|
||||
## LCBP3-DMS Context (MUST LOAD)
|
||||
|
||||
Before executing, load **[../_LCBP3-CONTEXT.md](../_LCBP3-CONTEXT.md)** to get:
|
||||
|
||||
- Canonical rule sources (AGENTS.md, specs/06-Decision-Records/, specs/05-Engineering-Guidelines/)
|
||||
- Tier 1 non-negotiables (ADR-019 UUID, ADR-009 schema, ADR-016 security, ADR-002 numbering, ADR-008 BullMQ, ADR-018/020 AI boundary, ADR-007 errors)
|
||||
- Domain glossary (Correspondence / RFA / Transmittal / Circulation)
|
||||
- Helper script real paths
|
||||
- Commit checklist
|
||||
@@ -0,0 +1,134 @@
|
||||
---
|
||||
name: speckit-tester
|
||||
description: Execute tests, measure coverage, and report results.
|
||||
version: 1.8.9
|
||||
depends-on: []
|
||||
---
|
||||
|
||||
## User Input
|
||||
|
||||
```text
|
||||
$ARGUMENTS
|
||||
```
|
||||
|
||||
You **MUST** consider the user input before proceeding (if not empty).
|
||||
|
||||
## Role
|
||||
|
||||
You are the **Antigravity Test Runner**. Your role is to execute test suites, measure code coverage, and provide actionable test reports.
|
||||
|
||||
## Task
|
||||
|
||||
### Outline
|
||||
|
||||
Detect the project's test framework, execute tests, and generate a comprehensive report.
|
||||
|
||||
### Execution Steps
|
||||
|
||||
1. **Detect Test Framework**:
|
||||
|
||||
```bash
|
||||
# Check package.json for test frameworks
|
||||
cat package.json 2>/dev/null | grep -E "(jest|vitest|mocha|ava|tap)"
|
||||
|
||||
# Check for Python test frameworks
|
||||
ls pytest.ini setup.cfg pyproject.toml 2>/dev/null
|
||||
|
||||
# Check for Go tests
|
||||
find . -name "*_test.go" -maxdepth 3 2>/dev/null | head -1
|
||||
```
|
||||
|
||||
| Indicator | Framework |
|
||||
| ------------------------------- | ---------- |
|
||||
| `jest` in package.json | Jest |
|
||||
| `vitest` in package.json | Vitest |
|
||||
| `pytest.ini` or `[tool.pytest.ini_options]` | Pytest     |
|
||||
| `*_test.go` files | Go test |
|
||||
| `Cargo.toml` + `#[test]` | Cargo test |
|
||||
|
||||
2. **Run Tests with Coverage**:
|
||||
|
||||
| Framework | Command |
|
||||
| --------- | -------------------------------------------------------------------- |
|
||||
| Jest | `npx jest --coverage --json --outputFile=coverage/test-results.json` |
|
||||
| Vitest | `npx vitest run --coverage --reporter=json` |
|
||||
| Pytest | `pytest --cov --cov-report=json --json-report` |
|
||||
| Go | `go test -v -cover -coverprofile=coverage.out ./...` |
|
||||
| Cargo | `cargo test -- --test-threads=1` |
|
||||
|
||||
3. **Parse Test Results**:
|
||||
Extract from test output:
|
||||
- Total tests
|
||||
- Passed / Failed / Skipped
|
||||
- Execution time
|
||||
- Coverage percentage (if available)
|
||||
|
||||
4. **Identify Failures**:
|
||||
For each failing test:
|
||||
- Test name and file location
|
||||
- Error message
|
||||
- Stack trace (truncated to relevant lines)
|
||||
- Suggested fix (if pattern is recognizable)
|
||||
|
||||
5. **Generate Report**:
|
||||
|
||||
```markdown
|
||||
# Test Report
|
||||
|
||||
**Date**: [timestamp]
|
||||
**Framework**: [detected]
|
||||
**Status**: PASS | FAIL
|
||||
|
||||
## Summary
|
||||
|
||||
| Metric | Value |
|
||||
| ----------- | ----- |
|
||||
| Total Tests | X |
|
||||
| Passed | X |
|
||||
| Failed | X |
|
||||
| Skipped | X |
|
||||
| Duration | X.Xs |
|
||||
| Coverage | X% |
|
||||
|
||||
## Failed Tests
|
||||
|
||||
### [test name]
|
||||
|
||||
**File**: `path/to/test.ts:42`
|
||||
**Error**: Expected X but received Y
|
||||
**Suggestion**: Check mock setup for...
|
||||
|
||||
## Coverage by File
|
||||
|
||||
| File | Lines | Branches | Functions |
|
||||
| ----------- | ----- | -------- | --------- |
|
||||
| src/auth.ts | 85% | 70% | 90% |
|
||||
|
||||
## Next Actions
|
||||
|
||||
1. Fix failing test: [name]
|
||||
2. Increase coverage in: [low coverage files]
|
||||
```
|
||||
|
||||
6. **Output**:
|
||||
- Display report in terminal
|
||||
- Optionally save to `FEATURE_DIR/test-report.md`
|
||||
|
||||
## Operating Principles
|
||||
|
||||
- **Run All Tests**: Don't skip tests unless explicitly requested
|
||||
- **Preserve Output**: Keep full test output for debugging
|
||||
- **Be Helpful**: Suggest fixes for common failure patterns
|
||||
- **Respect Timeouts**: Set reasonable timeout (5 min default)
|
||||
|
||||
---
|
||||
|
||||
## LCBP3-DMS Context (MUST LOAD)
|
||||
|
||||
Before executing, load **[../_LCBP3-CONTEXT.md](../_LCBP3-CONTEXT.md)** to get:
|
||||
|
||||
- Canonical rule sources (AGENTS.md, specs/06-Decision-Records/, specs/05-Engineering-Guidelines/)
|
||||
- Tier 1 non-negotiables (ADR-019 UUID, ADR-009 schema, ADR-016 security, ADR-002 numbering, ADR-008 BullMQ, ADR-018/020 AI boundary, ADR-007 errors)
|
||||
- Domain glossary (Correspondence / RFA / Transmittal / Circulation)
|
||||
- Helper script real paths
|
||||
- Commit checklist
|
||||
@@ -0,0 +1,106 @@
|
||||
---
|
||||
name: speckit-validate
|
||||
description: Validate that implementation matches specification requirements.
|
||||
version: 1.8.9
|
||||
depends-on:
|
||||
- speckit-implement
|
||||
---
|
||||
|
||||
## User Input
|
||||
|
||||
```text
|
||||
$ARGUMENTS
|
||||
```
|
||||
|
||||
You **MUST** consider the user input before proceeding (if not empty).
|
||||
|
||||
## Role
|
||||
|
||||
You are the **Antigravity Validator**. Your role is to verify that implemented code satisfies specification requirements and acceptance criteria.
|
||||
|
||||
## Task
|
||||
|
||||
### Outline
|
||||
|
||||
Post-implementation validation that compares code against spec requirements.
|
||||
|
||||
### Execution Steps
|
||||
|
||||
1. **Setup**:
|
||||
- Run `../scripts/bash/check-prerequisites.sh --json --require-tasks`
|
||||
- Parse FEATURE_DIR from output
|
||||
- Load: `spec.md`, `plan.md`, `tasks.md`
|
||||
|
||||
2. **Build Requirements Matrix**:
|
||||
Extract from spec.md:
|
||||
- All functional requirements
|
||||
- All acceptance criteria
|
||||
- All success criteria
|
||||
- Edge cases listed
|
||||
|
||||
3. **Scan Implementation**:
|
||||
From tasks.md, identify all files created/modified:
|
||||
- Read each file
|
||||
- Extract functions, classes, endpoints
|
||||
- Map to requirements (by name matching, comments, or explicit references)
|
||||
|
||||
4. **Validation Checks**:
|
||||
|
||||
| Check | Method |
|
||||
| -------------------- | ------------------------------------------------ |
|
||||
| Requirement Coverage | Each requirement has ≥1 implementation reference |
|
||||
| Acceptance Criteria | Each criterion is testable in code |
|
||||
| Edge Case Handling | Each edge case has explicit handling code |
|
||||
| Test Coverage | Each requirement has ≥1 test |
|
||||
|
||||
5. **Generate Validation Report**:
|
||||
|
||||
```markdown
|
||||
# Validation Report: [Feature Name]
|
||||
|
||||
**Date**: [timestamp]
|
||||
**Status**: PASS | PARTIAL | FAIL
|
||||
|
||||
## Coverage Summary
|
||||
|
||||
| Metric | Count | Percentage |
|
||||
| ----------------------- | ----- | ---------- |
|
||||
| Requirements Covered | X/Y | Z% |
|
||||
| Acceptance Criteria Met | X/Y | Z% |
|
||||
| Edge Cases Handled | X/Y | Z% |
|
||||
| Tests Present | X/Y | Z% |
|
||||
|
||||
## Uncovered Requirements
|
||||
|
||||
| Requirement | Status | Notes |
|
||||
| ----------- | ------- | ----------------------- |
|
||||
| [REQ-001] | Missing | No implementation found |
|
||||
|
||||
## Recommendations
|
||||
|
||||
1. [Action item for gaps]
|
||||
```
|
||||
|
||||
6. **Output**:
|
||||
- Display report
|
||||
- Write to `FEATURE_DIR/validation-report.md`
|
||||
- Set exit status based on coverage threshold (default: 80%)
|
||||
|
||||
## Operating Principles
|
||||
|
||||
- **Be Thorough**: Check every requirement, not just obvious ones
|
||||
- **Be Fair**: Semantic matching, not just keyword matching
|
||||
- **Be Actionable**: Every gap should have a clear fix recommendation
|
||||
- **Don't Block on Style**: Focus on functional coverage, not code style
|
||||
|
||||
---
|
||||
|
||||
## LCBP3-DMS Context (MUST LOAD)
|
||||
|
||||
Before executing, load **[../_LCBP3-CONTEXT.md](../_LCBP3-CONTEXT.md)** to get:
|
||||
|
||||
- Canonical rule sources (AGENTS.md, specs/06-Decision-Records/, specs/05-Engineering-Guidelines/)
|
||||
- Tier 1 non-negotiables (ADR-019 UUID, ADR-009 schema, ADR-016 security, ADR-002 numbering, ADR-008 BullMQ, ADR-018/020 AI boundary, ADR-007 errors)
|
||||
- Domain glossary (Correspondence / RFA / Transmittal / Circulation)
|
||||
- Helper script real paths
|
||||
- Commit checklist
|
||||
@@ -0,0 +1,109 @@
|
||||
---
|
||||
name: tdd
|
||||
description: Test-driven development with red-green-refactor loop. Use when user wants to build features or fix bugs using TDD, mentions "red-green-refactor", wants integration tests, or asks for test-first development.
|
||||
---
|
||||
|
||||
# Test-Driven Development
|
||||
|
||||
## Philosophy
|
||||
|
||||
**Core principle**: Tests should verify behavior through public interfaces, not implementation details. Code can change entirely; tests shouldn't.
|
||||
|
||||
**Good tests** are integration-style: they exercise real code paths through public APIs. They describe _what_ the system does, not _how_ it does it. A good test reads like a specification - "user can checkout with valid cart" tells you exactly what capability exists. These tests survive refactors because they don't care about internal structure.
|
||||
|
||||
**Bad tests** are coupled to implementation. They mock internal collaborators, test private methods, or verify through external means (like querying a database directly instead of using the interface). The warning sign: your test breaks when you refactor, but behavior hasn't changed. If you rename an internal function and tests fail, those tests were testing implementation, not behavior.
|
||||
|
||||
See [tests.md](tests.md) for examples and [mocking.md](mocking.md) for mocking guidelines.
|
||||
|
||||
## Anti-Pattern: Horizontal Slices
|
||||
|
||||
**DO NOT write all tests first, then all implementation.** This is "horizontal slicing" - treating RED as "write all tests" and GREEN as "write all code."
|
||||
|
||||
This produces **crap tests**:
|
||||
|
||||
- Tests written in bulk test _imagined_ behavior, not _actual_ behavior
|
||||
- You end up testing the _shape_ of things (data structures, function signatures) rather than user-facing behavior
|
||||
- Tests become insensitive to real changes - they pass when behavior breaks, fail when behavior is fine
|
||||
- You outrun your headlights, committing to test structure before understanding the implementation
|
||||
|
||||
**Correct approach**: Vertical slices via tracer bullets. One test → one implementation → repeat. Each test responds to what you learned from the previous cycle. Because you just wrote the code, you know exactly what behavior matters and how to verify it.
|
||||
|
||||
```
|
||||
WRONG (horizontal):
|
||||
RED: test1, test2, test3, test4, test5
|
||||
GREEN: impl1, impl2, impl3, impl4, impl5
|
||||
|
||||
RIGHT (vertical):
|
||||
RED→GREEN: test1→impl1
|
||||
RED→GREEN: test2→impl2
|
||||
RED→GREEN: test3→impl3
|
||||
...
|
||||
```
|
||||
|
||||
## Workflow
|
||||
|
||||
### 1. Planning
|
||||
|
||||
When exploring the codebase, use the project's domain glossary so that test names and interface vocabulary match the project's language, and respect ADRs in the area you're touching.
|
||||
|
||||
Before writing any code:
|
||||
|
||||
- [ ] Confirm with user what interface changes are needed
|
||||
- [ ] Confirm with user which behaviors to test (prioritize)
|
||||
- [ ] Identify opportunities for [deep modules](deep-modules.md) (small interface, deep implementation)
|
||||
- [ ] Design interfaces for [testability](interface-design.md)
|
||||
- [ ] List the behaviors to test (not implementation steps)
|
||||
- [ ] Get user approval on the plan
|
||||
|
||||
Ask: "What should the public interface look like? Which behaviors are most important to test?"
|
||||
|
||||
**You can't test everything.** Confirm with the user exactly which behaviors matter most. Focus testing effort on critical paths and complex logic, not every possible edge case.
|
||||
|
||||
### 2. Tracer Bullet
|
||||
|
||||
Write ONE test that confirms ONE thing about the system:
|
||||
|
||||
```
|
||||
RED: Write test for first behavior → test fails
|
||||
GREEN: Write minimal code to pass → test passes
|
||||
```
|
||||
|
||||
This is your tracer bullet - proves the path works end-to-end.
|
||||
|
||||
### 3. Incremental Loop
|
||||
|
||||
For each remaining behavior:
|
||||
|
||||
```
|
||||
RED: Write next test → fails
|
||||
GREEN: Minimal code to pass → passes
|
||||
```
|
||||
|
||||
Rules:
|
||||
|
||||
- One test at a time
|
||||
- Only enough code to pass current test
|
||||
- Don't anticipate future tests
|
||||
- Keep tests focused on observable behavior
|
||||
|
||||
### 4. Refactor
|
||||
|
||||
After all tests pass, look for [refactor candidates](refactoring.md):
|
||||
|
||||
- [ ] Extract duplication
|
||||
- [ ] Deepen modules (move complexity behind simple interfaces)
|
||||
- [ ] Apply SOLID principles where natural
|
||||
- [ ] Consider what new code reveals about existing code
|
||||
- [ ] Run tests after each refactor step
|
||||
|
||||
**Never refactor while RED.** Get to GREEN first.
|
||||
|
||||
## Checklist Per Cycle
|
||||
|
||||
```
|
||||
[ ] Test describes behavior, not implementation
|
||||
[ ] Test uses public interface only
|
||||
[ ] Test would survive internal refactor
|
||||
[ ] Code is minimal for this test
|
||||
[ ] No speculative features added
|
||||
```
|
||||
@@ -0,0 +1,33 @@
|
||||
# Deep Modules
|
||||
|
||||
From "A Philosophy of Software Design":
|
||||
|
||||
**Deep module** = small interface + lots of implementation
|
||||
|
||||
```
|
||||
┌─────────────────────┐
|
||||
│ Small Interface │ ← Few methods, simple params
|
||||
├─────────────────────┤
|
||||
│ │
|
||||
│ │
|
||||
│ Deep Implementation│ ← Complex logic hidden
|
||||
│ │
|
||||
│ │
|
||||
└─────────────────────┘
|
||||
```
|
||||
|
||||
**Shallow module** = large interface + little implementation (avoid)
|
||||
|
||||
```
|
||||
┌─────────────────────────────────┐
|
||||
│ Large Interface │ ← Many methods, complex params
|
||||
├─────────────────────────────────┤
|
||||
│ Thin Implementation │ ← Just passes through
|
||||
└─────────────────────────────────┘
|
||||
```
|
||||
|
||||
When designing interfaces, ask:
|
||||
|
||||
- Can I reduce the number of methods?
|
||||
- Can I simplify the parameters?
|
||||
- Can I hide more complexity inside?
|
||||
@@ -0,0 +1,31 @@
|
||||
# Interface Design for Testability
|
||||
|
||||
Good interfaces make testing natural:
|
||||
|
||||
1. **Accept dependencies, don't create them**
|
||||
|
||||
```typescript
|
||||
// Testable
|
||||
function processOrder(order, paymentGateway) {}
|
||||
|
||||
// Hard to test
|
||||
function processOrder(order) {
|
||||
const gateway = new StripeGateway();
|
||||
}
|
||||
```
|
||||
|
||||
2. **Return results, don't produce side effects**
|
||||
|
||||
```typescript
|
||||
// Testable
|
||||
function calculateDiscount(cart): Discount {}
|
||||
|
||||
// Hard to test
|
||||
function applyDiscount(cart): void {
|
||||
cart.total -= discount;
|
||||
}
|
||||
```
|
||||
|
||||
3. **Small surface area**
|
||||
- Fewer methods = fewer tests needed
|
||||
- Fewer params = simpler test setup
|
||||
@@ -0,0 +1,59 @@
|
||||
# When to Mock
|
||||
|
||||
Mock at **system boundaries** only:
|
||||
|
||||
- External APIs (payment, email, etc.)
|
||||
- Databases (sometimes - prefer test DB)
|
||||
- Time/randomness
|
||||
- File system (sometimes)
|
||||
|
||||
Don't mock:
|
||||
|
||||
- Your own classes/modules
|
||||
- Internal collaborators
|
||||
- Anything you control
|
||||
|
||||
## Designing for Mockability
|
||||
|
||||
At system boundaries, design interfaces that are easy to mock:
|
||||
|
||||
**1. Use dependency injection**
|
||||
|
||||
Pass external dependencies in rather than creating them internally:
|
||||
|
||||
```typescript
|
||||
// Easy to mock
|
||||
function processPayment(order, paymentClient) {
|
||||
return paymentClient.charge(order.total);
|
||||
}
|
||||
|
||||
// Hard to mock
|
||||
function processPayment(order) {
|
||||
const client = new StripeClient(process.env.STRIPE_KEY);
|
||||
return client.charge(order.total);
|
||||
}
|
||||
```
|
||||
|
||||
**2. Prefer SDK-style interfaces over generic fetchers**
|
||||
|
||||
Create specific functions for each external operation instead of one generic function with conditional logic:
|
||||
|
||||
```typescript
|
||||
// GOOD: Each function is independently mockable
|
||||
const api = {
|
||||
getUser: (id) => fetch(`/users/${id}`),
|
||||
getOrders: (userId) => fetch(`/users/${userId}/orders`),
|
||||
createOrder: (data) => fetch('/orders', { method: 'POST', body: data }),
|
||||
};
|
||||
|
||||
// BAD: Mocking requires conditional logic inside the mock
|
||||
const api = {
|
||||
fetch: (endpoint, options) => fetch(endpoint, options),
|
||||
};
|
||||
```
|
||||
|
||||
The SDK approach means:
|
||||
- Each mock returns one specific shape
|
||||
- No conditional logic in test setup
|
||||
- Easier to see which endpoints a test exercises
|
||||
- Type safety per endpoint
|
||||
@@ -0,0 +1,10 @@
|
||||
# Refactor Candidates
|
||||
|
||||
After TDD cycle, look for:
|
||||
|
||||
- **Duplication** → Extract function/class
|
||||
- **Long methods** → Break into private helpers (keep tests on public interface)
|
||||
- **Shallow modules** → Combine or deepen
|
||||
- **Feature envy** → Move logic to where data lives
|
||||
- **Primitive obsession** → Introduce value objects
|
||||
- **Existing code** the new code reveals as problematic
|
||||
@@ -0,0 +1,61 @@
|
||||
# Good and Bad Tests
|
||||
|
||||
## Good Tests
|
||||
|
||||
**Integration-style**: Test through real interfaces, not mocks of internal parts.
|
||||
|
||||
```typescript
|
||||
// GOOD: Tests observable behavior
|
||||
test("user can checkout with valid cart", async () => {
|
||||
const cart = createCart();
|
||||
cart.add(product);
|
||||
const result = await checkout(cart, paymentMethod);
|
||||
expect(result.status).toBe("confirmed");
|
||||
});
|
||||
```
|
||||
|
||||
Characteristics:
|
||||
|
||||
- Tests behavior users/callers care about
|
||||
- Uses public API only
|
||||
- Survives internal refactors
|
||||
- Describes WHAT, not HOW
|
||||
- One logical assertion per test
|
||||
|
||||
## Bad Tests
|
||||
|
||||
**Implementation-detail tests**: Coupled to internal structure.
|
||||
|
||||
```typescript
|
||||
// BAD: Tests implementation details
|
||||
test("checkout calls paymentService.process", async () => {
|
||||
const mockPayment = jest.mock(paymentService);
|
||||
await checkout(cart, payment);
|
||||
expect(mockPayment.process).toHaveBeenCalledWith(cart.total);
|
||||
});
|
||||
```
|
||||
|
||||
Red flags:
|
||||
|
||||
- Mocking internal collaborators
|
||||
- Testing private methods
|
||||
- Asserting on call counts/order
|
||||
- Test breaks when refactoring without behavior change
|
||||
- Test name describes HOW not WHAT
|
||||
- Verifying through external means instead of interface
|
||||
|
||||
```typescript
|
||||
// BAD: Bypasses interface to verify
|
||||
test("createUser saves to database", async () => {
|
||||
await createUser({ name: "Alice" });
|
||||
const row = await db.query("SELECT * FROM users WHERE name = ?", ["Alice"]);
|
||||
expect(row).toBeDefined();
|
||||
});
|
||||
|
||||
// GOOD: Verifies through interface
|
||||
test("createUser makes user retrievable", async () => {
|
||||
const user = await createUser({ name: "Alice" });
|
||||
const retrieved = await getUser(user.id);
|
||||
expect(retrieved.name).toBe("Alice");
|
||||
});
|
||||
```
|
||||
@@ -0,0 +1,81 @@
|
||||
---
|
||||
name: to-issues
|
||||
description: Break a plan, spec, or PRD into independently-grabbable issues on the project issue tracker using tracer-bullet vertical slices. Use when user wants to convert a plan into issues, create implementation tickets, or break down work into issues.
|
||||
---
|
||||
|
||||
# To Issues
|
||||
|
||||
Break a plan into independently-grabbable issues using vertical slices (tracer bullets).
|
||||
|
||||
The issue tracker and triage label vocabulary should have been provided to you — run `/setup-matt-pocock-skills` if not.
|
||||
|
||||
## Process
|
||||
|
||||
### 1. Gather context
|
||||
|
||||
Work from whatever is already in the conversation context. If the user passes an issue reference (issue number, URL, or path) as an argument, fetch it from the issue tracker and read its full body and comments.
|
||||
|
||||
### 2. Explore the codebase (optional)
|
||||
|
||||
If you have not already explored the codebase, do so to understand the current state of the code. Issue titles and descriptions should use the project's domain glossary vocabulary, and respect ADRs in the area you're touching.
|
||||
|
||||
### 3. Draft vertical slices
|
||||
|
||||
Break the plan into **tracer bullet** issues. Each issue is a thin vertical slice that cuts through ALL integration layers end-to-end, NOT a horizontal slice of one layer.
|
||||
|
||||
Slices may be 'HITL' (human-in-the-loop) or 'AFK' (away-from-keyboard). HITL slices require human interaction, such as an architectural decision or a design review. AFK slices can be implemented and merged without human interaction. Prefer AFK over HITL where possible.
|
||||
|
||||
<vertical-slice-rules>
|
||||
- Each slice delivers a narrow but COMPLETE path through every layer (schema, API, UI, tests)
|
||||
- A completed slice is demoable or verifiable on its own
|
||||
- Prefer many thin slices over few thick ones
|
||||
</vertical-slice-rules>
|
||||
|
||||
### 4. Quiz the user
|
||||
|
||||
Present the proposed breakdown as a numbered list. For each slice, show:
|
||||
|
||||
- **Title**: short descriptive name
|
||||
- **Type**: HITL / AFK
|
||||
- **Blocked by**: which other slices (if any) must complete first
|
||||
- **User stories covered**: which user stories this addresses (if the source material has them)
|
||||
|
||||
Ask the user:
|
||||
|
||||
- Does the granularity feel right? (too coarse / too fine)
|
||||
- Are the dependency relationships correct?
|
||||
- Should any slices be merged or split further?
|
||||
- Are the correct slices marked as HITL and AFK?
|
||||
|
||||
Iterate until the user approves the breakdown.
|
||||
|
||||
### 5. Publish the issues to the issue tracker
|
||||
|
||||
For each approved slice, publish a new issue to the issue tracker. Use the issue body template below. Apply the `needs-triage` triage label so each issue enters the normal triage flow.
|
||||
|
||||
Publish issues in dependency order (blockers first) so you can reference real issue identifiers in the "Blocked by" field.
|
||||
|
||||
<issue-template>
|
||||
## Parent
|
||||
|
||||
A reference to the parent issue on the issue tracker (if the source was an existing issue, otherwise omit this section).
|
||||
|
||||
## What to build
|
||||
|
||||
A concise description of this vertical slice. Describe the end-to-end behavior, not layer-by-layer implementation.
|
||||
|
||||
## Acceptance criteria
|
||||
|
||||
- [ ] Criterion 1
|
||||
- [ ] Criterion 2
|
||||
- [ ] Criterion 3
|
||||
|
||||
## Blocked by
|
||||
|
||||
- A reference to the blocking ticket (if any)
|
||||
|
||||
Or "None - can start immediately" if no blockers.
|
||||
|
||||
</issue-template>
|
||||
|
||||
Do NOT close or modify any parent issue.
|
||||
@@ -0,0 +1,74 @@
|
||||
---
|
||||
name: to-prd
|
||||
description: Turn the current conversation context into a PRD and publish it to the project issue tracker. Use when user wants to create a PRD from the current context.
|
||||
---
|
||||
|
||||
This skill takes the current conversation context and codebase understanding and produces a PRD. Do NOT interview the user — just synthesize what you already know.
|
||||
|
||||
The issue tracker and triage label vocabulary should have been provided to you — run `/setup-matt-pocock-skills` if not.
|
||||
|
||||
## Process
|
||||
|
||||
1. Explore the repo to understand the current state of the codebase, if you haven't already. Use the project's domain glossary vocabulary throughout the PRD, and respect any ADRs in the area you're touching.
|
||||
|
||||
2. Sketch out the major modules you will need to build or modify to complete the implementation. Actively look for opportunities to extract deep modules that can be tested in isolation.
|
||||
|
||||
A deep module (as opposed to a shallow module) is one which encapsulates a lot of functionality in a simple, testable interface which rarely changes.
|
||||
|
||||
Check with the user that these modules match their expectations. Check with the user which modules they want tests written for.
|
||||
|
||||
3. Write the PRD using the template below, then publish it to the project issue tracker. Apply the `needs-triage` triage label so it enters the normal triage flow.
|
||||
|
||||
<prd-template>
|
||||
|
||||
## Problem Statement
|
||||
|
||||
The problem that the user is facing, from the user's perspective.
|
||||
|
||||
## Solution
|
||||
|
||||
The solution to the problem, from the user's perspective.
|
||||
|
||||
## User Stories
|
||||
|
||||
A LONG, numbered list of user stories. Each user story should be in the format of:
|
||||
|
||||
1. As an <actor>, I want a <feature>, so that <benefit>
|
||||
|
||||
<user-story-example>
|
||||
1. As a mobile bank customer, I want to see balance on my accounts, so that I can make better informed decisions about my spending
|
||||
</user-story-example>
|
||||
|
||||
This list of user stories should be extremely extensive and cover all aspects of the feature.
|
||||
|
||||
## Implementation Decisions
|
||||
|
||||
A list of implementation decisions that were made. This can include:
|
||||
|
||||
- The modules that will be built/modified
|
||||
- The interfaces of those modules that will be modified
|
||||
- Technical clarifications from the developer
|
||||
- Architectural decisions
|
||||
- Schema changes
|
||||
- API contracts
|
||||
- Specific interactions
|
||||
|
||||
Do NOT include specific file paths or code snippets. They may end up being outdated very quickly.
|
||||
|
||||
## Testing Decisions
|
||||
|
||||
A list of testing decisions that were made. Include:
|
||||
|
||||
- A description of what makes a good test (only test external behavior, not implementation details)
|
||||
- Which modules will be tested
|
||||
- Prior art for the tests (i.e. similar types of tests in the codebase)
|
||||
|
||||
## Out of Scope
|
||||
|
||||
A description of the things that are out of scope for this PRD.
|
||||
|
||||
## Further Notes
|
||||
|
||||
Any further notes about the feature.
|
||||
|
||||
</prd-template>
|
||||
@@ -0,0 +1,168 @@
|
||||
# Writing Agent Briefs
|
||||
|
||||
An agent brief is a structured comment posted on a GitHub issue when it moves to `ready-for-agent`. It is the authoritative specification that an AFK agent will work from. The original issue body and discussion are context — the agent brief is the contract.
|
||||
|
||||
## Principles
|
||||
|
||||
### Durability over precision
|
||||
|
||||
The issue may sit in `ready-for-agent` for days or weeks. The codebase will change in the meantime. Write the brief so it stays useful even as files are renamed, moved, or refactored.
|
||||
|
||||
- **Do** describe interfaces, types, and behavioral contracts
|
||||
- **Do** name specific types, function signatures, or config shapes that the agent should look for or modify
|
||||
- **Don't** reference file paths — they go stale
|
||||
- **Don't** reference line numbers
|
||||
- **Don't** assume the current implementation structure will remain the same
|
||||
|
||||
### Behavioral, not procedural
|
||||
|
||||
Describe **what** the system should do, not **how** to implement it. The agent will explore the codebase fresh and make its own implementation decisions.
|
||||
|
||||
- **Good:** "The `SkillConfig` type should accept an optional `schedule` field of type `CronExpression`"
|
||||
- **Bad:** "Open src/types/skill.ts and add a schedule field on line 42"
|
||||
- **Good:** "When a user runs `/triage` with no arguments, they should see a summary of issues needing attention"
|
||||
- **Bad:** "Add a switch statement in the main handler function"
|
||||
|
||||
### Complete acceptance criteria
|
||||
|
||||
The agent needs to know when it's done. Every agent brief must have concrete, testable acceptance criteria. Each criterion should be independently verifiable.
|
||||
|
||||
- **Good:** "Running `gh issue list --label needs-triage` returns only issues that have not yet been through initial classification"
|
||||
- **Bad:** "Triage should work correctly"
|
||||
|
||||
### Explicit scope boundaries
|
||||
|
||||
State what is out of scope. This prevents the agent from gold-plating or making assumptions about adjacent features.
|
||||
|
||||
## Template
|
||||
|
||||
```markdown
|
||||
## Agent Brief
|
||||
|
||||
**Category:** bug / enhancement
|
||||
**Summary:** one-line description of what needs to happen
|
||||
|
||||
**Current behavior:**
|
||||
Describe what happens now. For bugs, this is the broken behavior.
|
||||
For enhancements, this is the status quo the feature builds on.
|
||||
|
||||
**Desired behavior:**
|
||||
Describe what should happen after the agent's work is complete.
|
||||
Be specific about edge cases and error conditions.
|
||||
|
||||
**Key interfaces:**
|
||||
- `TypeName` — what needs to change and why
|
||||
- `functionName()` return type — what it currently returns vs what it should return
|
||||
- Config shape — any new configuration options needed
|
||||
|
||||
**Acceptance criteria:**
|
||||
- [ ] Specific, testable criterion 1
|
||||
- [ ] Specific, testable criterion 2
|
||||
- [ ] Specific, testable criterion 3
|
||||
|
||||
**Out of scope:**
|
||||
- Thing that should NOT be changed or addressed in this issue
|
||||
- Adjacent feature that might seem related but is separate
|
||||
```
|
||||
|
||||
## Examples
|
||||
|
||||
### Good agent brief (bug)
|
||||
|
||||
```markdown
|
||||
## Agent Brief
|
||||
|
||||
**Category:** bug
|
||||
**Summary:** Skill description truncation drops mid-word, producing broken output
|
||||
|
||||
**Current behavior:**
|
||||
When a skill description exceeds 1024 characters, it is truncated at exactly
|
||||
1024 characters regardless of word boundaries. This produces descriptions
|
||||
that end mid-word (e.g. "Use when the user wants to confi").
|
||||
|
||||
**Desired behavior:**
|
||||
Truncation should break at the last word boundary before 1024 characters
|
||||
and append "..." to indicate truncation.
|
||||
|
||||
**Key interfaces:**
|
||||
- The `SkillMetadata` type's `description` field — no type change needed,
|
||||
but the validation/processing logic that populates it needs to respect
|
||||
word boundaries
|
||||
- Any function that reads SKILL.md frontmatter and extracts the description
|
||||
|
||||
**Acceptance criteria:**
|
||||
- [ ] Descriptions under 1024 chars are unchanged
|
||||
- [ ] Descriptions over 1024 chars are truncated at the last word boundary
|
||||
before 1024 chars
|
||||
- [ ] Truncated descriptions end with "..."
|
||||
- [ ] The total length including "..." does not exceed 1024 chars
|
||||
|
||||
**Out of scope:**
|
||||
- Changing the 1024 char limit itself
|
||||
- Multi-line description support
|
||||
```
|
||||
|
||||
### Good agent brief (enhancement)
|
||||
|
||||
```markdown
|
||||
## Agent Brief
|
||||
|
||||
**Category:** enhancement
|
||||
**Summary:** Add `.out-of-scope/` directory support for tracking rejected feature requests
|
||||
|
||||
**Current behavior:**
|
||||
When a feature request is rejected, the issue is closed with a `wontfix` label
|
||||
and a comment. There is no persistent record of the decision or reasoning.
|
||||
Future similar requests require the maintainer to recall or search for the
|
||||
prior discussion.
|
||||
|
||||
**Desired behavior:**
|
||||
Rejected feature requests should be documented in `.out-of-scope/<concept>.md`
|
||||
files that capture the decision, reasoning, and links to all issues that
|
||||
requested the feature. When triaging new issues, these files should be
|
||||
checked for matches.
|
||||
|
||||
**Key interfaces:**
|
||||
- Markdown file format in `.out-of-scope/` — each file should have a
|
||||
`# Concept Name` heading, a `**Decision:**` line, a `**Reason:**` line,
|
||||
and a `**Prior requests:**` list with issue links
|
||||
- The triage workflow should read all `.out-of-scope/*.md` files early
|
||||
and match incoming issues against them by concept similarity
|
||||
|
||||
**Acceptance criteria:**
|
||||
- [ ] Closing a feature as wontfix creates/updates a file in `.out-of-scope/`
|
||||
- [ ] The file includes the decision, reasoning, and link to the closed issue
|
||||
- [ ] If a matching `.out-of-scope/` file already exists, the new issue is
|
||||
appended to its "Prior requests" list rather than creating a duplicate
|
||||
- [ ] During triage, existing `.out-of-scope/` files are checked and surfaced
|
||||
when a new issue matches a prior rejection
|
||||
|
||||
**Out of scope:**
|
||||
- Automated matching (human confirms the match)
|
||||
- Reopening previously rejected features
|
||||
- Bug reports (only enhancement rejections go to `.out-of-scope/`)
|
||||
```
|
||||
|
||||
### Bad agent brief
|
||||
|
||||
```markdown
|
||||
## Agent Brief
|
||||
|
||||
**Summary:** Fix the triage bug
|
||||
|
||||
**What to do:**
|
||||
The triage thing is broken. Look at the main file and fix it.
|
||||
The function around line 150 has the issue.
|
||||
|
||||
**Files to change:**
|
||||
- src/triage/handler.ts (line 150)
|
||||
- src/types.ts (line 42)
|
||||
```
|
||||
|
||||
This is bad because:
|
||||
- No category
|
||||
- Vague description ("the triage thing is broken")
|
||||
- References file paths and line numbers that will go stale
|
||||
- No acceptance criteria
|
||||
- No scope boundaries
|
||||
- No description of current vs desired behavior
|
||||
@@ -0,0 +1,101 @@
|
||||
# Out-of-Scope Knowledge Base
|
||||
|
||||
The `.out-of-scope/` directory in a repo stores persistent records of rejected feature requests. It serves two purposes:
|
||||
|
||||
1. **Institutional memory** — why a feature was rejected, so the reasoning isn't lost when the issue is closed
|
||||
2. **Deduplication** — when a new issue comes in that matches a prior rejection, the skill can surface the previous decision instead of re-litigating it
|
||||
|
||||
## Directory structure
|
||||
|
||||
```
|
||||
.out-of-scope/
|
||||
├── dark-mode.md
|
||||
├── plugin-system.md
|
||||
└── graphql-api.md
|
||||
```
|
||||
|
||||
One file per **concept**, not per issue. Multiple issues requesting the same thing are grouped under one file.
|
||||
|
||||
## File format
|
||||
|
||||
The file should be written in a relaxed, readable style — more like a short design document than a database entry. Use paragraphs, code samples, and examples to make the reasoning clear and useful to someone encountering it for the first time.
|
||||
|
||||
```markdown
|
||||
# Dark Mode
|
||||
|
||||
This project does not support dark mode or user-facing theming.
|
||||
|
||||
## Why this is out of scope
|
||||
|
||||
The rendering pipeline assumes a single color palette defined in
|
||||
`ThemeConfig`. Supporting multiple themes would require:
|
||||
|
||||
- A theme context provider wrapping the entire component tree
|
||||
- Per-component theme-aware style resolution
|
||||
- A persistence layer for user theme preferences
|
||||
|
||||
This is a significant architectural change that doesn't align with the
|
||||
project's focus on content authoring. Theming is a concern for downstream
|
||||
consumers who embed or redistribute the output.
|
||||
|
||||
```ts
|
||||
// The current ThemeConfig interface is not designed for runtime switching:
|
||||
interface ThemeConfig {
|
||||
colors: ColorPalette; // single palette, resolved at build time
|
||||
fonts: FontStack;
|
||||
}
|
||||
```
|
||||
|
||||
## Prior requests
|
||||
|
||||
- #42 — "Add dark mode support"
|
||||
- #87 — "Night theme for accessibility"
|
||||
- #134 — "Dark theme option"
|
||||
```
|
||||
|
||||
### Naming the file
|
||||
|
||||
Use a short, descriptive kebab-case name for the concept: `dark-mode.md`, `plugin-system.md`, `graphql-api.md`. The name should be recognizable enough that someone browsing the directory understands what was rejected without opening the file.
|
||||
|
||||
### Writing the reason
|
||||
|
||||
The reason should be substantive — not "we don't want this" but why. Good reasons reference:
|
||||
|
||||
- Project scope or philosophy ("This project focuses on X; theming is a downstream concern")
|
||||
- Technical constraints ("Supporting this would require Y, which conflicts with our Z architecture")
|
||||
- Strategic decisions ("We chose to use A instead of B because...")
|
||||
|
||||
The reason should be durable. Avoid referencing temporary circumstances ("we're too busy right now") — those aren't real rejections, they're deferrals.
|
||||
|
||||
## When to check `.out-of-scope/`
|
||||
|
||||
During triage (Step 1: Gather context), read all files in `.out-of-scope/`. When evaluating a new issue:
|
||||
|
||||
- Check if the request matches an existing out-of-scope concept
|
||||
- Matching is by concept similarity, not keyword — "night theme" matches `dark-mode.md`
|
||||
- If there's a match, surface it to the maintainer: "This is similar to `.out-of-scope/dark-mode.md` — we rejected this before because [reason]. Do you still feel the same way?"
|
||||
|
||||
The maintainer may:
|
||||
|
||||
- **Confirm** — the new issue gets added to the existing file's "Prior requests" list, then closed
|
||||
- **Reconsider** — the out-of-scope file gets deleted or updated, and the issue proceeds through normal triage
|
||||
- **Disagree** — the issues are related but distinct, proceed with normal triage
|
||||
|
||||
## When to write to `.out-of-scope/`
|
||||
|
||||
Only when an **enhancement** (not a bug) is rejected as `wontfix`. The flow:
|
||||
|
||||
1. Maintainer decides a feature request is out of scope
|
||||
2. Check if a matching `.out-of-scope/` file already exists
|
||||
3. If yes: append the new issue to the "Prior requests" list
|
||||
4. If no: create a new file with the concept name, decision, reason, and first prior request
|
||||
5. Post a comment on the issue explaining the decision and mentioning the `.out-of-scope/` file
|
||||
6. Close the issue with the `wontfix` label
|
||||
|
||||
## Updating or removing out-of-scope files
|
||||
|
||||
If the maintainer changes their mind about a previously rejected concept:
|
||||
|
||||
- Delete the `.out-of-scope/` file
|
||||
- The skill does not need to reopen old issues — they're historical records
|
||||
- The new issue that triggered the reconsideration proceeds through normal triage
|
||||
@@ -0,0 +1,103 @@
|
||||
---
|
||||
name: triage
|
||||
description: Triage issues through a state machine driven by triage roles. Use when user wants to create an issue, triage issues, review incoming bugs or feature requests, prepare issues for an AFK agent, or manage issue workflow.
|
||||
---
|
||||
|
||||
# Triage
|
||||
|
||||
Move issues on the project issue tracker through a small state machine of triage roles.
|
||||
|
||||
Every comment or issue posted to the issue tracker during triage **must** start with this disclaimer:
|
||||
|
||||
```
|
||||
> *This was generated by AI during triage.*
|
||||
```
|
||||
|
||||
## Reference docs
|
||||
|
||||
- [AGENT-BRIEF.md](AGENT-BRIEF.md) — how to write durable agent briefs
|
||||
- [OUT-OF-SCOPE.md](OUT-OF-SCOPE.md) — how the `.out-of-scope/` knowledge base works
|
||||
|
||||
## Roles
|
||||
|
||||
Two **category** roles:
|
||||
|
||||
- `bug` — something is broken
|
||||
- `enhancement` — new feature or improvement
|
||||
|
||||
Five **state** roles:
|
||||
|
||||
- `needs-triage` — maintainer needs to evaluate
|
||||
- `needs-info` — waiting on reporter for more information
|
||||
- `ready-for-agent` — fully specified, ready for an AFK agent
|
||||
- `ready-for-human` — needs human implementation
|
||||
- `wontfix` — will not be actioned
|
||||
|
||||
Every triaged issue should carry exactly one category role and one state role. If state roles conflict, flag it and ask the maintainer before doing anything else.
|
||||
|
||||
These are canonical role names — the actual label strings used in the issue tracker may differ. The mapping should have been provided to you — run `/setup-matt-pocock-skills` if it was not.
|
||||
|
||||
State transitions: an unlabeled issue normally goes to `needs-triage` first; from there it moves to `needs-info`, `ready-for-agent`, `ready-for-human`, or `wontfix`. `needs-info` returns to `needs-triage` once the reporter replies. The maintainer can override at any time — flag transitions that look unusual and ask before proceeding.
|
||||
|
||||
## Invocation
|
||||
|
||||
The maintainer invokes `/triage` and describes what they want in natural language. Interpret the request and act. Examples:
|
||||
|
||||
- "Show me anything that needs my attention"
|
||||
- "Let's look at #42"
|
||||
- "Move #42 to ready-for-agent"
|
||||
- "What's ready for agents to pick up?"
|
||||
|
||||
## Show what needs attention
|
||||
|
||||
Query the issue tracker and present three buckets, oldest first:
|
||||
|
||||
1. **Unlabeled** — never triaged.
|
||||
2. **`needs-triage`** — evaluation in progress.
|
||||
3. **`needs-info` with reporter activity since the last triage notes** — needs re-evaluation.
|
||||
|
||||
Show counts and a one-line summary per issue. Let the maintainer pick.
|
||||
|
||||
## Triage a specific issue
|
||||
|
||||
1. **Gather context.** Read the full issue (body, comments, labels, reporter, dates). Parse any prior triage notes so you don't re-ask resolved questions. Explore the codebase using the project's domain glossary, respecting ADRs in the area. Read `.out-of-scope/*.md` and surface any prior rejection that resembles this issue.
|
||||
|
||||
2. **Recommend.** Tell the maintainer your category and state recommendation with reasoning, plus a brief codebase summary relevant to the issue. Wait for direction.
|
||||
|
||||
3. **Reproduce (bugs only).** Before any grilling, attempt reproduction: read the reporter's steps, trace the relevant code, run tests or commands. Report what happened — successful repro with code path, failed repro, or insufficient detail (a strong `needs-info` signal). A confirmed repro makes a much stronger agent brief.
|
||||
|
||||
4. **Grill (if needed).** If the issue needs fleshing out, run a `/grill-with-docs` session.
|
||||
|
||||
5. **Apply the outcome:**
|
||||
- `ready-for-agent` — post an agent brief comment ([AGENT-BRIEF.md](AGENT-BRIEF.md)).
|
||||
- `ready-for-human` — same structure as an agent brief, but note why it can't be delegated (judgment calls, external access, design decisions, manual testing).
|
||||
- `needs-info` — post triage notes (template below).
|
||||
- `wontfix` (bug) — polite explanation, then close.
|
||||
- `wontfix` (enhancement) — write to `.out-of-scope/`, link to it from a comment, then close ([OUT-OF-SCOPE.md](OUT-OF-SCOPE.md)).
|
||||
- `needs-triage` — apply the role. Optional comment if there's partial progress.
|
||||
|
||||
## Quick state override
|
||||
|
||||
If the maintainer says "move #42 to ready-for-agent", trust them and apply the role directly. Confirm what you're about to do (role changes, comment, close), then act. Skip grilling. If moving to `ready-for-agent` without a grilling session, ask whether they want to write an agent brief.
|
||||
|
||||
## Needs-info template
|
||||
|
||||
```markdown
|
||||
## Triage Notes
|
||||
|
||||
**What we've established so far:**
|
||||
|
||||
- point 1
|
||||
- point 2
|
||||
|
||||
**What we still need from you (@reporter):**
|
||||
|
||||
- question 1
|
||||
- question 2
|
||||
```
|
||||
|
||||
Capture everything resolved during grilling under "established so far" so the work isn't lost. Questions must be specific and actionable, not "please provide more info".
|
||||
|
||||
## Resuming a previous session
|
||||
|
||||
If prior triage notes exist on the issue, read them, check whether the reporter has answered any outstanding questions, and present an updated picture before continuing. Don't re-ask resolved questions.
|
||||
@@ -0,0 +1,7 @@
|
||||
---
|
||||
name: zoom-out
|
||||
description: Tell the agent to zoom out and give broader context or a higher-level perspective. Use when you're unfamiliar with a section of code or need to understand how it fits into the bigger picture.
|
||||
disable-model-invocation: true
|
||||
---
|
||||
|
||||
I don't know this area of code well. Go up a layer of abstraction. Give me a map of all the relevant modules and callers, using the project's domain glossary vocabulary.
|
||||
@@ -10,27 +10,27 @@ This meta-workflow orchestrates the **complete development lifecycle**, from spe
|
||||
## Preparation Phase (Steps 1-5)
|
||||
|
||||
1. **Specify** (`/speckit.specify`):
|
||||
- Use the `view_file` tool to read: `.agents/skills/speckit-specify/SKILL.md`
|
||||
- Use the `view_file` tool to read: `.windsurf/skills/speckit-specify/SKILL.md`
|
||||
- Execute with user's feature description
|
||||
- Creates: `spec.md`
|
||||
|
||||
2. **Clarify** (`/speckit.clarify`):
|
||||
- Use the `view_file` tool to read: `.agents/skills/speckit-clarify/SKILL.md`
|
||||
- Use the `view_file` tool to read: `.windsurf/skills/speckit-clarify/SKILL.md`
|
||||
- Execute to resolve ambiguities
|
||||
- Updates: `spec.md`
|
||||
|
||||
3. **Plan** (`/speckit.plan`):
|
||||
- Use the `view_file` tool to read: `.agents/skills/speckit-plan/SKILL.md`
|
||||
- Use the `view_file` tool to read: `.windsurf/skills/speckit-plan/SKILL.md`
|
||||
- Execute to create technical design
|
||||
- Creates: `plan.md`
|
||||
|
||||
4. **Tasks** (`/speckit.tasks`):
|
||||
- Use the `view_file` tool to read: `.agents/skills/speckit-tasks/SKILL.md`
|
||||
- Use the `view_file` tool to read: `.windsurf/skills/speckit-tasks/SKILL.md`
|
||||
- Execute to generate task breakdown
|
||||
- Creates: `tasks.md`
|
||||
|
||||
5. **Analyze** (`/speckit.analyze`):
|
||||
- Use the `view_file` tool to read: `.agents/skills/speckit-analyze/SKILL.md`
|
||||
- Use the `view_file` tool to read: `.windsurf/skills/speckit-analyze/SKILL.md`
|
||||
- Execute to validate consistency across spec, plan, and tasks
|
||||
- Output: Analysis report
|
||||
- **Gate**: If critical issues found, stop and fix before proceeding
|
||||
@@ -38,29 +38,29 @@ This meta-workflow orchestrates the **complete development lifecycle**, from spe
|
||||
## Implementation Phase (Steps 6-7)
|
||||
|
||||
6. **Implement** (`/speckit.implement`):
|
||||
- Use the `view_file` tool to read: `.agents/skills/speckit-implement/SKILL.md`
|
||||
- Use the `view_file` tool to read: `.windsurf/skills/speckit-implement/SKILL.md`
|
||||
- Execute all tasks from `tasks.md` with anti-regression protocols
|
||||
- Output: Working implementation
|
||||
|
||||
7. **Check** (`/speckit.checker`):
|
||||
- Use the `view_file` tool to read: `.agents/skills/speckit-checker/SKILL.md`
|
||||
- Use the `view_file` tool to read: `.windsurf/skills/speckit-checker/SKILL.md`
|
||||
- Run static analysis (linters, type checkers, security scanners)
|
||||
- Output: Checker report
|
||||
|
||||
## Verification Phase (Steps 8-10)
|
||||
|
||||
8. **Test** (`/speckit.tester`):
|
||||
- Use the `view_file` tool to read: `.agents/skills/speckit-tester/SKILL.md`
|
||||
- Use the `view_file` tool to read: `.windsurf/skills/speckit-tester/SKILL.md`
|
||||
- Run tests with coverage
|
||||
- Output: Test + coverage report
|
||||
|
||||
9. **Review** (`/speckit.reviewer`):
|
||||
- Use the `view_file` tool to read: `.agents/skills/speckit-reviewer/SKILL.md`
|
||||
- Use the `view_file` tool to read: `.windsurf/skills/speckit-reviewer/SKILL.md`
|
||||
- Perform code review
|
||||
- Output: Review report with findings
|
||||
|
||||
10. **Validate** (`/speckit.validate`):
|
||||
- Use the `view_file` tool to read: `.agents/skills/speckit-validate/SKILL.md`
|
||||
- Use the `view_file` tool to read: `.windsurf/skills/speckit-validate/SKILL.md`
|
||||
- Verify implementation matches spec requirements
|
||||
- Output: Validation report (pass/fail)
|
||||
|
||||
|
||||
@@ -9,20 +9,20 @@ This workflow orchestrates the sequential execution of the Speckit preparation p
|
||||
|
||||
1. **Step 1: Specify (Skill 02)**
|
||||
- Goal: Create or update the `spec.md` based on user input.
|
||||
- Action: Read and execute `.agents/skills/speckit-specify/SKILL.md`.
|
||||
- Action: Read and execute `.windsurf/skills/speckit-specify/SKILL.md`.
|
||||
|
||||
2. **Step 2: Clarify (Skill 03)**
|
||||
- Goal: Refine the `spec.md` by identifying and resolving ambiguities.
|
||||
- Action: Read and execute `.agents/skills/speckit-clarify/SKILL.md`.
|
||||
- Action: Read and execute `.windsurf/skills/speckit-clarify/SKILL.md`.
|
||||
|
||||
3. **Step 3: Plan (Skill 04)**
|
||||
- Goal: Generate `plan.md` from the finalized spec.
|
||||
- Action: Read and execute `.agents/skills/speckit-plan/SKILL.md`.
|
||||
- Action: Read and execute `.windsurf/skills/speckit-plan/SKILL.md`.
|
||||
|
||||
4. **Step 4: Tasks (Skill 05)**
|
||||
- Goal: Generate actionable `tasks.md` from the plan.
|
||||
- Action: Read and execute `.agents/skills/speckit-tasks/SKILL.md`.
|
||||
- Action: Read and execute `.windsurf/skills/speckit-tasks/SKILL.md`.
|
||||
|
||||
5. **Step 5: Analyze (Skill 06)**
|
||||
- Goal: Validate consistency across all design artifacts (spec, plan, tasks).
|
||||
- Action: Read and execute `.agents/skills/speckit-analyze/SKILL.md`.
|
||||
- Action: Read and execute `.windsurf/skills/speckit-analyze/SKILL.md`.
|
||||
+1
-1
@@ -9,7 +9,7 @@ description: Create or update the project constitution from interactive or provi
|
||||
- The user has provided an input prompt. Treat this as the primary input for the skill.
|
||||
|
||||
2. **Load Skill**:
|
||||
- Use the `view_file` tool to read the skill file at: `.agents/skills/speckit-constitution/SKILL.md`
|
||||
- Use the `view_file` tool to read the skill file at: `.windsurf/skills/speckit-constitution/SKILL.md`
|
||||
|
||||
3. **Execute**:
|
||||
- Follow the instructions in the `SKILL.md` exactly.
|
||||
+1
-1
@@ -10,7 +10,7 @@ description: Create or update the feature specification from a natural language
|
||||
- This is typically the starting point of a new feature.
|
||||
|
||||
2. **Load Skill**:
|
||||
- Use the `view_file` tool to read the skill file at: `.agents/skills/speckit-specify/SKILL.md`
|
||||
- Use the `view_file` tool to read the skill file at: `.windsurf/skills/speckit-specify/SKILL.md`
|
||||
|
||||
3. **Execute**:
|
||||
- Follow the instructions in the `SKILL.md` exactly.
|
||||
+1
-1
@@ -9,7 +9,7 @@ description: Identify underspecified areas in the current feature spec by asking
|
||||
- The user has provided an input prompt. Treat this as the primary input for the skill.
|
||||
|
||||
2. **Load Skill**:
|
||||
- Use the `view_file` tool to read the skill file at: `.agents/skills/speckit-clarify/SKILL.md`
|
||||
- Use the `view_file` tool to read the skill file at: `.windsurf/skills/speckit-clarify/SKILL.md`
|
||||
|
||||
3. **Execute**:
|
||||
- Follow the instructions in the `SKILL.md` exactly.
|
||||
@@ -9,7 +9,7 @@ description: Execute the implementation planning workflow using the plan templat
|
||||
- The user has provided an input prompt. Treat this as the primary input for the skill.
|
||||
|
||||
2. **Load Skill**:
|
||||
- Use the `view_file` tool to read the skill file at: `.agents/skills/speckit-plan/SKILL.md`
|
||||
- Use the `view_file` tool to read the skill file at: `.windsurf/skills/speckit-plan/SKILL.md`
|
||||
|
||||
3. **Execute**:
|
||||
- Follow the instructions in the `SKILL.md` exactly.
|
||||
@@ -9,7 +9,7 @@ description: Generate an actionable, dependency-ordered tasks.md for the feature
|
||||
- The user has provided an input prompt. Treat this as the primary input for the skill.
|
||||
|
||||
2. **Load Skill**:
|
||||
- Use the `view_file` tool to read the skill file at: `.agents/skills/speckit-tasks/SKILL.md`
|
||||
- Use the `view_file` tool to read the skill file at: `.windsurf/skills/speckit-tasks/SKILL.md`
|
||||
|
||||
3. **Execute**:
|
||||
- Follow the instructions in the `SKILL.md` exactly.
|
||||
+1
-1
@@ -11,7 +11,7 @@ description: Perform a non-destructive cross-artifact consistency and quality an
|
||||
- The user has provided an input prompt. Treat this as the primary input for the skill.
|
||||
|
||||
2. **Load Skill**:
|
||||
- Use the `view_file` tool to read the skill file at: `.agents/skills/speckit-analyze/SKILL.md`
|
||||
- Use the `view_file` tool to read the skill file at: `.windsurf/skills/speckit-analyze/SKILL.md`
|
||||
|
||||
3. **Execute**:
|
||||
- Follow the instructions in the `SKILL.md` exactly.
|
||||
+1
-1
@@ -9,7 +9,7 @@ description: Execute the implementation plan by processing and executing all tas
|
||||
- The user has provided an input prompt. Treat this as the primary input for the skill.
|
||||
|
||||
2. **Load Skill**:
|
||||
- Use the `view_file` tool to read the skill file at: `.agents/skills/speckit-implement/SKILL.md`
|
||||
- Use the `view_file` tool to read the skill file at: `.windsurf/skills/speckit-implement/SKILL.md`
|
||||
|
||||
3. **Execute**:
|
||||
- Follow the instructions in the `SKILL.md` exactly.
|
||||
+1
-1
@@ -11,7 +11,7 @@ description: Run static analysis tools and aggregate results.
|
||||
- The user may specify paths to check or run on entire project.
|
||||
|
||||
2. **Load Skill**:
|
||||
- Use the `view_file` tool to read the skill file at: `.agents/skills/speckit-checker/SKILL.md`
|
||||
- Use the `view_file` tool to read the skill file at: `.windsurf/skills/speckit-checker/SKILL.md`
|
||||
|
||||
3. **Execute**:
|
||||
- Follow the instructions in the `SKILL.md` exactly.
|
||||
+1
-1
@@ -11,7 +11,7 @@ description: Execute tests, measure coverage, and report results.
|
||||
- The user may specify test paths, options, or just run all tests.
|
||||
|
||||
2. **Load Skill**:
|
||||
- Use the `view_file` tool to read the skill file at: `.agents/skills/speckit-tester/SKILL.md`
|
||||
- Use the `view_file` tool to read the skill file at: `.windsurf/skills/speckit-tester/SKILL.md`
|
||||
|
||||
3. **Execute**:
|
||||
- Follow the instructions in the `SKILL.md` exactly.
|
||||
+1
-1
@@ -9,7 +9,7 @@ description: Perform code review with actionable feedback and suggestions.
|
||||
- The user may specify files to review, "staged" for git staged changes, or "branch" for branch diff.
|
||||
|
||||
2. **Load Skill**:
|
||||
- Use the `view_file` tool to read the skill file at: `.agents/skills/speckit-reviewer/SKILL.md`
|
||||
- Use the `view_file` tool to read the skill file at: `.windsurf/skills/speckit-reviewer/SKILL.md`
|
||||
|
||||
3. **Execute**:
|
||||
- Follow the instructions in the `SKILL.md` exactly.
|
||||
+1
-1
@@ -9,7 +9,7 @@ description: Validate that implementation matches specification requirements.
|
||||
- The user has provided an input prompt. Treat this as the primary input for the skill.
|
||||
|
||||
2. **Load Skill**:
|
||||
- Use the `view_file` tool to read the skill file at: `.agents/skills/speckit-validate/SKILL.md`
|
||||
- Use the `view_file` tool to read the skill file at: `.windsurf/skills/speckit-validate/SKILL.md`
|
||||
|
||||
3. **Execute**:
|
||||
- Follow the instructions in the `SKILL.md` exactly.
|
||||
+1
-1
@@ -9,7 +9,7 @@ description: Perform a security-focused audit of the codebase against OWASP Top
|
||||
- The user may pass a scope hint: `backend`, `frontend`, `both`, or specific module paths (defaults to `both`).
|
||||
|
||||
2. **Load Skill**:
|
||||
- Use the `view_file` tool to read the skill file at: `.agents/skills/speckit-security-audit/SKILL.md`
|
||||
- Use the `view_file` tool to read the skill file at: `.windsurf/skills/speckit-security-audit/SKILL.md`
|
||||
- Also load `.agents/skills/_LCBP3-CONTEXT.md` for project-specific rules.
|
||||
|
||||
3. **Execute**:
|
||||
@@ -0,0 +1,20 @@
|
||||
---
|
||||
auto_execution_mode: 0
|
||||
description: Disciplined diagnosis loop for hard bugs and performance regressions. Reproduce → minimise → hypothesise → instrument → fix → regression-test.
|
||||
---
|
||||
|
||||
# Workflow: diagnose
|
||||
|
||||
1. **Context Analysis**:
|
||||
- The user has provided a bug report or performance regression. Treat this as the primary input for the skill.
|
||||
|
||||
2. **Load Skill**:
|
||||
- Use the `view_file` tool to read the skill file at: `.windsurf/skills/diagnose/SKILL.md`
|
||||
|
||||
3. **Execute**:
|
||||
- Follow the instructions in the `SKILL.md` exactly.
|
||||
- Execute phases: Build feedback loop → Reproduce → Hypothesise → Instrument → Fix → Regression test → Cleanup
|
||||
|
||||
4. **On Error**:
|
||||
- If cannot build a feedback loop: Ask user for environment access, captured artifacts, or production instrumentation
|
||||
- If no correct seam for regression test: Flag architectural issues to improve-codebase-architecture
|
||||
@@ -0,0 +1,20 @@
|
||||
---
|
||||
auto_execution_mode: 0
|
||||
description: Grilling session that challenges your plan against the existing domain model, sharpens terminology, and updates documentation (CONTEXT.md, ADRs) inline as decisions crystallise.
|
||||
---
|
||||
|
||||
# Workflow: grill-with-docs
|
||||
|
||||
1. **Context Analysis**:
|
||||
- The user has provided a plan to stress-test. Treat this as the primary input for the skill.
|
||||
|
||||
2. **Load Skill**:
|
||||
- Use the `view_file` tool to read the skill file at: `.windsurf/skills/grill-with-docs/SKILL.md`
|
||||
|
||||
3. **Execute**:
|
||||
- Follow the instructions in the `SKILL.md` exactly.
|
||||
- Ask questions one at a time, waiting for feedback before continuing.
|
||||
- Update CONTEXT.md and ADRs inline as terms are resolved.
|
||||
|
||||
4. **On Error**:
|
||||
- If CONTEXT.md or docs/adr/ don't exist: Create them lazily when first term is resolved
|
||||
@@ -0,0 +1,21 @@
|
||||
---
|
||||
auto_execution_mode: 0
|
||||
description: Test-driven development with red-green-refactor loop. Use when user wants to build features or fix bugs using TDD, mentions red-green-refactor, wants integration tests, or asks for test-first development.
|
||||
---
|
||||
|
||||
# Workflow: tdd
|
||||
|
||||
1. **Context Analysis**:
|
||||
- The user wants to build features or fix bugs using TDD. Treat this as the primary input for the skill.
|
||||
|
||||
2. **Load Skill**:
|
||||
- Use the `view_file` tool to read the skill file at: `.windsurf/skills/tdd/SKILL.md`
|
||||
|
||||
3. **Execute**:
|
||||
- Follow the instructions in the `SKILL.md` exactly.
|
||||
- Plan → Tracer Bullet → Incremental Loop → Refactor
|
||||
- One test at a time, only enough code to pass current test.
|
||||
|
||||
4. **On Error**:
|
||||
- If interface changes are needed: Confirm with user first
|
||||
- If unsure which behaviors to test: Ask user to prioritize
|
||||
@@ -0,0 +1,21 @@
|
||||
---
|
||||
auto_execution_mode: 0
|
||||
description: Break a plan, spec, or PRD into independently-grabbable issues on the project issue tracker using tracer-bullet vertical slices.
|
||||
---
|
||||
|
||||
# Workflow: to-issues
|
||||
|
||||
1. **Context Analysis**:
|
||||
- The user wants to convert a plan into issues. Treat this as the primary input for the skill.
|
||||
|
||||
2. **Load Skill**:
|
||||
- Use the `view_file` tool to read the skill file at: `.windsurf/skills/to-issues/SKILL.md`
|
||||
|
||||
3. **Execute**:
|
||||
- Follow the instructions in the `SKILL.md` exactly.
|
||||
- Draft vertical slices (HITL or AFK) → Quiz user → Publish to issue tracker.
|
||||
- Each slice delivers a narrow but complete path through every layer.
|
||||
|
||||
4. **On Error**:
|
||||
- If issue tracker not configured: Run `/setup-matt-pocock-skills` first
|
||||
- If user wants different granularity: Iterate on slice sizes
|
||||
@@ -0,0 +1,21 @@
|
||||
---
|
||||
auto_execution_mode: 0
|
||||
description: Turn the current conversation context into a PRD and publish it to the project issue tracker.
|
||||
---
|
||||
|
||||
# Workflow: to-prd
|
||||
|
||||
1. **Context Analysis**:
|
||||
- The user wants to create a PRD from the current context. Treat this as the primary input for the skill.
|
||||
|
||||
2. **Load Skill**:
|
||||
- Use the `view_file` tool to read the skill file at: `.windsurf/skills/to-prd/SKILL.md`
|
||||
|
||||
3. **Execute**:
|
||||
- Follow the instructions in the `SKILL.md` exactly.
|
||||
- Synthesize current conversation and codebase understanding into a PRD.
|
||||
- Do NOT interview the user — rely on existing knowledge from the conversation and codebase.
|
||||
|
||||
4. **On Error**:
|
||||
- If issue tracker not configured: Run `/setup-matt-pocock-skills` first
|
||||
- If user expectations unclear: Confirm major modules with user
|
||||
@@ -0,0 +1,22 @@
|
||||
---
|
||||
auto_execution_mode: 0
|
||||
description: Triage issues through a state machine driven by triage roles. Use when user wants to create an issue, triage issues, review incoming bugs or feature requests, prepare issues for an AFK agent, or manage issue workflow.
|
||||
---
|
||||
|
||||
# Workflow: triage
|
||||
|
||||
1. **Context Analysis**:
|
||||
- The user wants to triage issues. Treat this as the primary input for the skill.
|
||||
|
||||
2. **Load Skill**:
|
||||
- Use the `view_file` tool to read the skill file at: `.windsurf/skills/triage/SKILL.md`
|
||||
|
||||
3. **Execute**:
|
||||
- Follow the instructions in the `SKILL.md` exactly.
|
||||
- Show what needs attention OR Triage specific issue OR Quick state override.
|
||||
- Apply triage roles: bug/enhancement + needs-triage/needs-info/ready-for-agent/ready-for-human/wontfix.
|
||||
|
||||
4. **On Error**:
|
||||
- If issue tracker not configured: Run `/setup-matt-pocock-skills` first
|
||||
- If state roles conflict: Flag and ask maintainer before proceeding
|
||||
- If no reproduction possible: Mark as `needs-info`
|
||||
@@ -9,7 +9,7 @@ description: Generate a custom checklist for the current feature based on user r
|
||||
- The user has provided an input prompt. Treat this as the primary input for the skill.
|
||||
|
||||
2. **Load Skill**:
|
||||
- Use the `view_file` tool to read the skill file at: `.agents/skills/speckit-checklist/SKILL.md`
|
||||
- Use the `view_file` tool to read the skill file at: `.windsurf/skills/speckit-checklist/SKILL.md`
|
||||
|
||||
3. **Execute**:
|
||||
- Follow the instructions in the `SKILL.md` exactly.
|
||||
|
||||
@@ -9,7 +9,7 @@ description: Compare two versions of a spec or plan to highlight changes.
|
||||
- The user has provided an input prompt (optional file paths or version references).
|
||||
|
||||
2. **Load Skill**:
|
||||
- Use the `view_file` tool to read the skill file at: `.agents/skills/speckit-diff/SKILL.md`
|
||||
- Use the `view_file` tool to read the skill file at: `.windsurf/skills/speckit-diff/SKILL.md`
|
||||
|
||||
3. **Execute**:
|
||||
- Follow the instructions in the `SKILL.md` exactly.
|
||||
|
||||
@@ -9,7 +9,7 @@ description: Migrate existing projects into the speckit structure by generating
|
||||
- The user has provided an input prompt (path to analyze, feature name).
|
||||
|
||||
2. **Load Skill**:
|
||||
- Use the `view_file` tool to read the skill file at: `.agents/skills/speckit-migrate/SKILL.md`
|
||||
- Use the `view_file` tool to read the skill file at: `.windsurf/skills/speckit-migrate/SKILL.md`
|
||||
|
||||
3. **Execute**:
|
||||
- Follow the instructions in the `SKILL.md` exactly.
|
||||
|
||||
@@ -11,7 +11,7 @@ description: Challenge the specification with Socratic questioning to identify l
|
||||
- The user has provided an input prompt. Treat this as the primary input for the skill.
|
||||
|
||||
2. **Load Skill**:
|
||||
- Use the `view_file` tool to read the skill file at: `.agents/skills/speckit-quizme/SKILL.md`
|
||||
- Use the `view_file` tool to read the skill file at: `.windsurf/skills/speckit-quizme/SKILL.md`
|
||||
|
||||
3. **Execute**:
|
||||
- Follow the instructions in the `SKILL.md` exactly.
|
||||
|
||||
@@ -11,7 +11,7 @@ description: Display a dashboard showing feature status, completion percentage,
|
||||
- The user may optionally specify a feature to focus on.
|
||||
|
||||
2. **Load Skill**:
|
||||
- Use the `view_file` tool to read the skill file at: `.agents/skills/speckit-status/SKILL.md`
|
||||
- Use the `view_file` tool to read the skill file at: `.windsurf/skills/speckit-status/SKILL.md`
|
||||
|
||||
3. **Execute**:
|
||||
- Follow the instructions in the `SKILL.md` exactly.
|
||||
|
||||
@@ -9,7 +9,7 @@ description: Convert existing tasks into actionable, dependency-ordered issues o
|
||||
- The user may pass filters (e.g., phase, priority). Default: convert all pending tasks.
|
||||
|
||||
2. **Load Skill**:
|
||||
- Use the `view_file` tool to read the skill file at: `.agents/skills/speckit-taskstoissues/SKILL.md`
|
||||
- Use the `view_file` tool to read the skill file at: `.windsurf/skills/speckit-taskstoissues/SKILL.md`
|
||||
- Also load `.agents/skills/_LCBP3-CONTEXT.md` for project conventions (labels, commit format).
|
||||
|
||||
3. **Execute**:
|
||||
|
||||
@@ -0,0 +1,20 @@
|
||||
---
|
||||
auto_execution_mode: 0
|
||||
description: Tell the agent to zoom out and give broader context or a higher-level perspective. Use when you're unfamiliar with a section of code or need to understand how it fits into the bigger picture.
|
||||
---
|
||||
|
||||
# Workflow: zoom-out
|
||||
|
||||
1. **Context Analysis**:
|
||||
- The user is unfamiliar with a section of code or needs broader context. Treat this as the primary input.
|
||||
|
||||
2. **Load Skill**:
|
||||
- Use the `view_file` tool to read the skill file at: `.windsurf/skills/zoom-out/SKILL.md`
|
||||
|
||||
3. **Execute**:
|
||||
- Follow the instructions in the `SKILL.md` exactly.
|
||||
- Go up a layer of abstraction and give a map of all relevant modules and callers.
|
||||
- Use the project's domain glossary vocabulary.
|
||||
|
||||
4. **On Error**:
|
||||
- No specific error handling - this is an exploratory skill.
|
||||
+12
@@ -0,0 +1,12 @@
|
||||
{
|
||||
"mcpServers": {
|
||||
"playwright": {
|
||||
"command": "npx",
|
||||
"args": ["-y", "@playwright/mcp@latest"]
|
||||
},
|
||||
"memory": {
|
||||
"command": "npx",
|
||||
"args": ["-y", "@modelcontextprotocol/server-memory"]
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -109,13 +109,13 @@ Best practice — follow when possible:
|
||||
|
||||
Spec priority: **`06-Decision-Records`** > **`05-Engineering-Guidelines`** > others
|
||||
|
||||
| Document | Path | Status | Use When |
|
||||
| ---------------------------- | -------------------------------------------------------------------- | -------- | -------------------------------------- |
|
||||
| **Glossary** | `specs/00-overview/00-02-glossary.md` | — | Verify domain terminology |
|
||||
| **Schema Tables** | `specs/03-Data-and-Storage/lcbp3-v1.8.0-schema-02-tables.sql` | — | Before writing any query |
|
||||
| **Data Dictionary** | `specs/03-Data-and-Storage/03-01-data-dictionary.md` | — | Field meanings + business rules |
|
||||
| **RBAC Matrix** | `specs/01-requirements/01-02-business-rules/01-02-01-rbac-matrix.md` | — | Permission levels + roles |
|
||||
| **Edge Cases** | `specs/01-Requirements/01-06-edge-cases-and-rules.md` | — | Prevent bugs in flows |
|
||||
| Document | Path | Status | Use When |
|
||||
| ---------------------------- | -------------------------------------------------------------------- | --------- | -------------------------------------- |
|
||||
| **Glossary** | `specs/00-overview/00-02-glossary.md` | — | Verify domain terminology |
|
||||
| **Schema Tables** | `specs/03-Data-and-Storage/lcbp3-v1.8.0-schema-02-tables.sql` | — | Before writing any query |
|
||||
| **Data Dictionary** | `specs/03-Data-and-Storage/03-01-data-dictionary.md` | — | Field meanings + business rules |
|
||||
| **RBAC Matrix** | `specs/01-requirements/01-02-business-rules/01-02-01-rbac-matrix.md` | — | Permission levels + roles |
|
||||
| **Edge Cases** | `specs/01-Requirements/01-06-edge-cases-and-rules.md` | — | Prevent bugs in flows |
|
||||
| **ADR-001 Workflow Engine** | `specs/06-Decision-Records/ADR-001-unified-workflow-engine.md` | ✅ Active | DSL-based workflow implementation |
|
||||
| **ADR-002 Doc Numbering** | `specs/06-Decision-Records/ADR-002-document-numbering-strategy.md` | ✅ Active | Document number generation + locking |
|
||||
| **ADR-007 Error Handling** | `specs/06-Decision-Records/ADR-007-error-handling-strategy.md` | ✅ Active | Error patterns & recovery |
|
||||
@@ -126,14 +126,69 @@ Spec priority: **`06-Decision-Records`** > **`05-Engineering-Guidelines`** > oth
|
||||
| **ADR-019 UUID** | `specs/06-Decision-Records/ADR-019-hybrid-identifier-strategy.md` | ✅ Active | UUID-related work |
|
||||
| **ADR-020 AI Integration** | `specs/06-Decision-Records/ADR-020-ai-intelligence-integration.md` | ✅ Active | AI architecture patterns |
|
||||
| **ADR-021 Workflow Context** | `specs/06-Decision-Records/ADR-021-workflow-context.md` | ✅ Active | Integrated workflow & step attachments |
|
||||
| **Backend Guidelines** | `specs/05-Engineering-Guidelines/05-02-backend-guidelines.md` | — | NestJS patterns |
|
||||
| **Frontend Guidelines** | `specs/05-Engineering-Guidelines/05-03-frontend-guidelines.md` | — | Next.js patterns |
|
||||
| **Testing Strategy** | `specs/05-Engineering-Guidelines/05-04-testing-strategy.md` | — | Coverage goals |
|
||||
| **Git Conventions** | `specs/05-Engineering-Guidelines/05-05-git-conventions.md` | — | Commit/branch naming |
|
||||
| **Code Snippets** | `specs/05-Engineering-Guidelines/05-06-code-snippets.md` | — | Reusable patterns |
|
||||
| **i18n Guidelines** | `specs/05-Engineering-Guidelines/05-08-i18n-guidelines.md` | — | Localization rules |
|
||||
| **Release Policy** | `specs/04-Infrastructure-OPS/04-08-release-management-policy.md` | — | Before deploy/hotfix |
|
||||
| **UAT Criteria** | `specs/01-Requirements/01-05-acceptance-criteria.md` | — | Feature completeness |
|
||||
| **Backend Guidelines** | `specs/05-Engineering-Guidelines/05-02-backend-guidelines.md` | — | NestJS patterns |
|
||||
| **Frontend Guidelines** | `specs/05-Engineering-Guidelines/05-03-frontend-guidelines.md` | — | Next.js patterns |
|
||||
| **Testing Strategy** | `specs/05-Engineering-Guidelines/05-04-testing-strategy.md` | — | Coverage goals |
|
||||
| **Git Conventions** | `specs/05-Engineering-Guidelines/05-05-git-conventions.md` | — | Commit/branch naming |
|
||||
| **Code Snippets** | `specs/05-Engineering-Guidelines/05-06-code-snippets.md` | — | Reusable patterns |
|
||||
| **i18n Guidelines** | `specs/05-Engineering-Guidelines/05-08-i18n-guidelines.md` | — | Localization rules |
|
||||
| **Release Policy** | `specs/04-Infrastructure-OPS/04-08-release-management-policy.md` | — | Before deploy/hotfix |
|
||||
| **UAT Criteria** | `specs/01-Requirements/01-05-acceptance-criteria.md` | — | Feature completeness |
|
||||
|
||||
---
|
||||
|
||||
## 📁 Specs Folder Organization
|
||||
|
||||
โครงสร้างโฟลเดอร์ `specs/` แบ่งเป็น 2 ส่วนหลัก:
|
||||
|
||||
### 1. Core Specs (Permanent - ไม่เปลี่ยนชื่อ)
|
||||
|
||||
โฟลเดอร์เหล่านี้เป็น Source of Truth ถาวรของระบบ:
|
||||
|
||||
- `00-overview/` - ภาพรวมระบบ + Product Vision + KPI + Training
|
||||
- `01-requirements/` - Business Requirements & Modularity
|
||||
- `02-architecture/` - สถาปัตยกรรมระบบ (System & Network)
|
||||
- `03-Data-and-Storage/` - โครงสร้างฐานข้อมูลและการจัดการไฟล์
|
||||
- `04-Infrastructure-OPS/` - โครงสร้างพื้นฐานและการปฏิบัติการ
|
||||
- `05-Engineering-Guidelines/` - มาตรฐานการพัฒนาและการเขียนโค้ด
|
||||
- `06-Decision-Records/` - Architecture Decision Records (ADRs)
|
||||
- `08-Tasks/` - Task documents
|
||||
- `88-logs/` - Logs
|
||||
- `99-archives/` - ประวัติการทำงานและ Tasks เก่า
|
||||
|
||||
### 2. Feature Work (Categorized - ใช้สำหรับงาน Implement)
|
||||
|
||||
โฟลเดอร์เหล่านี้ใช้เก็บ plan.md, spec.md, tasks.md สำหรับงานที่กำลังดำเนินการ:
|
||||
|
||||
- `100-Infrastructures/` - งานที่เกี่ยวกับ Infrastructure (Deployment, Monitoring, Docker Compose, Network)
|
||||
- `200-fullstacks/` - งาน Fullstack Development (Backend + Frontend features, Workflow Engine, API)
|
||||
- `300-others/` - งานอื่นๆ (Documentation, Research, Non-code tasks)
|
||||
|
||||
### การตั้งชื่อโฟลเดอร์ Feature Work
|
||||
|
||||
ใช้รูปแบบ: `nXX-feature-name`
|
||||
|
||||
- **n** = หลักร้อยของหมวดหมู่ (1, 2, 3)
|
||||
- **XX** = เลขลำดับงาน (01, 02, 03, ...)
|
||||
- **feature-name** = ชื่องาน (kebab-case)
|
||||
|
||||
ตัวอย่าง:
|
||||
|
||||
- `100-Infrastructures/102-infra-ops` - Infrastructure Operations
|
||||
- `200-fullstacks/201-transmittals-circulation` - Transmittals + Circulation Integration
|
||||
- `200-fullstacks/203-unified-workflow-engine` - Unified Workflow Engine
|
||||
|
||||
### กฎสำคัญ
|
||||
|
||||
- **เมื่อสร้าง feature spec ใหม่** → วางไว้ในหมวดหมู่ที่เหมาะสม (100/200/300)
|
||||
- **ใช้เลขลำดับต่อจากงานล่าสุด** ในหมวดหมู่เดียวกัน
|
||||
- **อ่าน README.md** ในแต่ละหมวดหมู่ก่อนเริ่มงาน
|
||||
|
||||
ดูรายละเอียดเพิ่มเติมใน:
|
||||
|
||||
- `specs/100-Infrastructures/README.md`
|
||||
- `specs/200-fullstacks/README.md`
|
||||
- `specs/300-others/README.md`
|
||||
|
||||
---
|
||||
|
||||
|
||||
@@ -1,125 +0,0 @@
|
||||
# **Workflow DSL Specification v1.0**
|
||||
|
||||
เอกสารนี้ระบุโครงสร้างภาษา (Domain-Specific Language) สำหรับกำหนด Business Logic ของการเดินเอกสารในระบบ LCBP3-DMS
|
||||
|
||||
## **1\. โครงสร้างหลัก (Root Structure)**
|
||||
|
||||
ไฟล์ Definition ต้องอยู่ในรูปแบบ YAML หรือ JSON โดยมีโครงสร้างดังนี้:
|
||||
|
||||
```yaml
|
||||
workflow: "RFA_FLOW" # รหัส Workflow (Unique)
|
||||
version: 1 # เวอร์ชันของ Logic
|
||||
description: "RFA Approval Process" # คำอธิบาย
|
||||
|
||||
# รายการสถานะทั้งหมดที่เป็นไปได้
|
||||
states:
|
||||
- name: "DRAFT" # ชื่อสถานะ (Case-sensitive)
|
||||
initial: true # เป็นสถานะเริ่มต้น (ต้องมี 1 สถานะ)
|
||||
on: # รายการ Action ที่ทำได้จากสถานะนี้
|
||||
SUBMIT: # ชื่อ Action (ปุ่มที่ User กด)
|
||||
to: "IN_REVIEW" # สถานะปลายทาง
|
||||
require: # (Optional) เงื่อนไขสิทธิ์
|
||||
role: "EDITOR"
|
||||
events: # (Optional) เหตุการณ์ที่จะเกิดขึ้นเมื่อเปลี่ยนสถานะ
|
||||
- type: "notify"
|
||||
target: "reviewer"
|
||||
|
||||
- name: "IN_REVIEW"
|
||||
on:
|
||||
APPROVE:
|
||||
to: "APPROVED"
|
||||
condition: "context.amount < 1000000" # (Optional) JS Expression
|
||||
REJECT:
|
||||
to: "DRAFT"
|
||||
events:
|
||||
- type: "notify"
|
||||
target: "creator"
|
||||
|
||||
- name: "APPROVED"
|
||||
terminal: true # เป็นสถานะจบ (ไม่สามารถไปต่อได้)
|
||||
```
|
||||
|
||||
## **2\. รายละเอียด Field (Field Definitions)**
|
||||
|
||||
### **2.1 State Object**
|
||||
|
||||
| Field | Type | Required | Description |
|
||||
| :------- | :------ | :------- | :--------------------------------------------- |
|
||||
| name | string | Yes | ชื่อสถานะ (Unique Key) |
|
||||
| initial | boolean | No | ระบุว่าเป็นจุดเริ่มต้น (ต้องมี 1 state ในระบบ) |
|
||||
| terminal | boolean | No | ระบุว่าเป็นจุดสิ้นสุด |
|
||||
| on | object | No | Map ของ Action -> Transition Rule |
|
||||
|
||||
### **2.2 Transition Rule Object**
|
||||
|
||||
| Field | Type | Required | Description |
|
||||
| :-------- | :----- | :------- | :-------------------------------------- |
|
||||
| to | string | Yes | ชื่อสถานะปลายทาง |
|
||||
| require | object | No | เงื่อนไข Role/User |
|
||||
| condition | string | No | JavaScript Expression (return boolean) |
|
||||
| events | array | No | Side-effects ที่จะทำงานหลังเปลี่ยนสถานะ |
|
||||
|
||||
### **2.3 Requirements Object**
|
||||
|
||||
| Field | Type | Description |
|
||||
| :---- | :----- | :------------------------------------------ |
|
||||
| role | string | User ต้องมี Role นี้ (เช่น PROJECT_MANAGER) |
|
||||
| user | string | User ต้องมี ID นี้ (Hard-code) |
|
||||
|
||||
### **2.4 Event Object**
|
||||
|
||||
| Field | Type | Description |
|
||||
| :------- | :----- | :----------------------------------------- |
|
||||
| type | string | notify, webhook, update_status |
|
||||
| target | string | ผู้รับ (เช่น creator, assignee, หรือ Role) |
|
||||
| template | string | รหัส Template ข้อความ |
|
||||
|
||||
## **3\. ตัวอย่างการใช้งานจริง (Real-world Examples)**
|
||||
|
||||
### **ตัวอย่าง: RFA Approval Flow**
|
||||
|
||||
```json
|
||||
{
|
||||
"workflow": "RFA_STD",
|
||||
"version": 1,
|
||||
"states": [
|
||||
{
|
||||
"name": "DRAFT",
|
||||
"initial": true,
|
||||
"on": {
|
||||
"SUBMIT": {
|
||||
"to": "CONSULTANT_REVIEW",
|
||||
"require": { "role": "CONTRACTOR" }
|
||||
}
|
||||
}
|
||||
},
|
||||
{
|
||||
"name": "CONSULTANT_REVIEW",
|
||||
"on": {
|
||||
"APPROVE_1": {
|
||||
"to": "OWNER_REVIEW",
|
||||
"condition": "context.priority === 'HIGH'"
|
||||
},
|
||||
"APPROVE_2": {
|
||||
"to": "APPROVED",
|
||||
"condition": "context.priority === 'NORMAL'"
|
||||
},
|
||||
"REJECT": {
|
||||
"to": "DRAFT"
|
||||
}
|
||||
}
|
||||
},
|
||||
{
|
||||
"name": "OWNER_REVIEW",
|
||||
"on": {
|
||||
"APPROVE": { "to": "APPROVED" },
|
||||
"REJECT": { "to": "CONSULTANT_REVIEW" }
|
||||
}
|
||||
},
|
||||
{
|
||||
"name": "APPROVED",
|
||||
"terminal": true
|
||||
}
|
||||
]
|
||||
}
|
||||
```
|
||||
@@ -0,0 +1,113 @@
|
||||
## การใช้งาน Playwright ใน LCBP3-DMS
|
||||
|
||||
จากการตรวจสอบ spec มี test strategy ที่รองรับ Playwright:
|
||||
|
||||
### 1. **Test Strategy** (จาก [05-04-testing-strategy.md](cci:7://file:///e:/np-dms/lcbp3/specs/05-Engineering-Guidelines/05-04-testing-strategy.md:0:0-0:0))
|
||||
|
||||
**Stack**:
|
||||
- **Backend**: Jest (Unit + Integration + E2E)
|
||||
- **Frontend**: Vitest (Unit) + **Playwright** (E2E)
|
||||
|
||||
### 2. **Setup Playwright**
|
||||
|
||||
```bash
|
||||
# ติดตั้ง Playwright browsers
|
||||
npx playwright install chromium
|
||||
|
||||
# หรือติดตั้งทั้งหมด
|
||||
npx playwright install
|
||||
```
|
||||
|
||||
### 3. **MCP Server สำหรับ Windsurf**
|
||||
|
||||
เพิ่มใน [.windsurfrc](cci:7://file:///e:/np-dms/lcbp3/.windsurfrc:0:0-0:0):
|
||||
|
||||
```json
|
||||
{
|
||||
"mcpServers": {
|
||||
"playwright": {
|
||||
"command": "npx",
|
||||
"args": ["-y", "@playwright/mcp@latest"]
|
||||
}
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
**Restart Windsurf** แล้วจะเห็น Playwright MCP panel
|
||||
|
||||
### 4. **การใช้งานผ่าน Windsurf Cascade**
|
||||
|
||||
เมื่อ MCP พร้อมแล้ว สามารถใช้คำสั่ง:
|
||||
|
||||
- **Open browser**: `Navigate to http://localhost:3000`
|
||||
- **Take screenshot**: `Take a screenshot of the current page`
|
||||
- **Click element**: `Click the "Submit" button`
|
||||
- **Fill form**: `Type "test@example.com" into the email field`
|
||||
- **Verify**: `Check if "Success" message is visible`
|
||||
|
||||
### 5. **E2E Test Script Example**
|
||||
|
||||
สร้างไฟล์ `e2e/workflow-adr021.spec.ts`:
|
||||
|
||||
```typescript
|
||||
import { test, expect } from '@playwright/test';
|
||||
|
||||
test('ADR-021 workflow transition with attachment', async ({ page }) => {
|
||||
// 1. Login
|
||||
await page.goto('http://localhost:3000/login');
|
||||
await page.fill('[name="email"]', 'test@example.com');
|
||||
await page.fill('[name="password"]', 'password');
|
||||
await page.click('button[type="submit"]');
|
||||
|
||||
// 2. Navigate to RFA detail
|
||||
await page.goto('http://localhost:3000/rfas/test-uuid');
|
||||
|
||||
// 3. Verify IntegratedBanner
|
||||
await expect(page.locator('[data-testid="integrated-banner"]')).toBeVisible();
|
||||
await expect(page.locator('[data-testid="priority-badge"]')).toHaveText('HIGH');
|
||||
|
||||
// 4. Upload attachment
|
||||
await page.setInputFiles('[data-testid="file-dropzone"]', 'test.pdf');
|
||||
|
||||
// 5. Submit approval
|
||||
await page.click('[data-testid="approve-button"]');
|
||||
|
||||
// 6. Verify success
|
||||
await expect(page.locator('[data-testid="success-toast"]')).toBeVisible();
|
||||
});
|
||||
```
|
||||
|
||||
### 6. **Run E2E Tests**
|
||||
|
||||
```bash
|
||||
cd frontend
|
||||
|
||||
# Run all E2E tests
|
||||
npx playwright test
|
||||
|
||||
# Run specific test
|
||||
npx playwright test e2e/workflow-adr021.spec.ts
|
||||
|
||||
# Run with UI mode (debug)
|
||||
npx playwright test --ui
|
||||
|
||||
# Run headed (see browser)
|
||||
npx playwright test --headed
|
||||
```
|
||||
|
||||
### 7. **Generate Test Report**
|
||||
|
||||
```bash
|
||||
npx playwright show-report
|
||||
```
|
||||
|
||||
### 8. **ถ้าใช้ MCP ผ่าน Windsurf**
|
||||
|
||||
Cascade จะมี tool ให้ใช้:
|
||||
- `browser_navigate` - เปิด URL
|
||||
- `browser_click` - คลิก element
|
||||
- `browser_type` - พิมพ์ข้อความ
|
||||
- `browser_take_screenshot` - ถ่าย screenshot
|
||||
- `browser_evaluate` - รัน JavaScript
|
||||
|
||||
ต้องการให้ช่วย setup หรือสร้าง E2E test สำหรับ feature ใด feature หนึ่งไหมครับ?
|
||||
@@ -82,6 +82,7 @@ const eslintConfig = [
|
||||
'*.config.js',
|
||||
'*.config.mjs',
|
||||
'*.config.ts',
|
||||
'public/monaco-vs/**',
|
||||
],
|
||||
},
|
||||
];
|
||||
|
||||
@@ -77,7 +77,7 @@
|
||||
"editor.defaultFormatter": "esbenp.prettier-vscode",
|
||||
},
|
||||
"[markdown]": {
|
||||
"editor.defaultFormatter": "yzhang.markdown-all-in-one",
|
||||
"editor.defaultFormatter": "esbenp.prettier-vscode",
|
||||
"editor.wordWrap": "on",
|
||||
},
|
||||
"[yaml]": {
|
||||
|
||||
@@ -0,0 +1,55 @@
|
||||
# 100-Infrastructures
|
||||
|
||||
โฟลเดอร์นี้ใช้เก็บงานที่เกี่ยวกับ **Infrastructure** ของระบบ LCBP3-DMS
|
||||
|
||||
## ขอบเขตงาน (Scope)
|
||||
|
||||
งานที่ควรอยู่ในโฟลเดอร์นี้ ได้แก่:
|
||||
|
||||
- **Infrastructure Operations** - การดำเนินงานพื้นฐาน (Deployment, Monitoring, Backup/Recovery)
|
||||
- **Docker Compose** - การจัดการ Container stacks บน QNAP/ASUSTOR
|
||||
- **Network Design** - การออกแบบ Network Segmentation, VLAN
|
||||
- **Security Hardening** - การเสริมความปลอดภัยของ Infrastructure
|
||||
- **CI/CD** - การตั้งค่า Gitea Actions, Deployment pipelines
|
||||
- **Maintenance Procedures** - ขั้นตอนการดูแลรักษาระบบเบื้องต้น
|
||||
|
||||
## ตัวอย่างงานที่อยู่ในโฟลเดอร์นี้
|
||||
|
||||
- `102-infra-ops` - Infrastructure Operations & Deployment Automation
|
||||
|
||||
## การตั้งชื่อโฟลเดอร์
|
||||
|
||||
ใช้รูปแบบ: `1XX-feature-name`
|
||||
|
||||
- **1** = หลักร้อยของหมวดหมู่ (100-Infrastructures)
|
||||
- **XX** = เลขลำดับงาน (01, 02, 03, ...)
|
||||
- **feature-name** = ชื่องาน (kebab-case)
|
||||
|
||||
ตัวอย่าง:
|
||||
- `101-network-segmentation`
|
||||
- `102-infra-ops`
|
||||
- `103-security-hardening`
|
||||
|
||||
## โครงสร้างไฟล์ในแต่ละงาน
|
||||
|
||||
แต่ละโฟลเดอร์งานควรมีไฟล์ต่อไปนี้ (ถ้าเกี่ยวข้อง):
|
||||
|
||||
```
|
||||
1XX-feature-name/
|
||||
├── spec.md # คำอธิบายงานโดยละเอียด
|
||||
├── plan.md # แผนการดำเนินงาน
|
||||
├── tasks.md # รายการงานย่อย
|
||||
├── quickstart.md # คู่มือเริ่มต้น (ถ้ามี)
|
||||
├── research.md # การวิจัย/ศึกษา (ถ้ามี)
|
||||
├── data-model.md # โครงสร้างข้อมูล (ถ้ามี)
|
||||
├── checklists/ # Checklist ตรวจสอบ (ถ้ามี)
|
||||
└── contracts/ # สัญญา/ข้อตกลง (ถ้ามี)
|
||||
```
|
||||
|
||||
## การเชื่อมโยงกับ Core Specs
|
||||
|
||||
งานในโฟลเดอร์นี้ควรอ้างอิง Core Specs ที่เกี่ยวข้อง:
|
||||
|
||||
- `04-Infrastructure-OPS/` - Infrastructure & Operations Guide
|
||||
- `02-Architecture/` - System & Network Architecture
|
||||
- `06-Decision-Records/` - ADRs ที่เกี่ยวข้อง (ADR-010, ADR-015, ADR-016)
|
||||
@@ -0,0 +1,192 @@
|
||||
# Implementation Plan: ADR-021 Integrated Workflow Context & Step-specific Attachments
|
||||
|
||||
**Branch**: `200-fullstacks/202-adr-021-integrated-workflow-context` | **Date**: 2026-04-12 | **Spec**: [spec.md](./spec.md)
|
||||
**Location**: `specs/200-fullstacks/202-adr-021-integrated-workflow-context/`
|
||||
**Input**: Feature specification from ADR-021
|
||||
|
||||
---
|
||||
|
||||
## Summary
|
||||
|
||||
ปรับปรุง Workflow Engine ให้รองรับ (1) **Integrated Banner** ที่ยุบรวม Metadata + Status + Actions ไว้ด้วยกัน (2) **Vertical Timeline Lifecycle** พร้อม Active Step Highlighting และ (3) **Step-specific Attachments** ที่เชื่อมโยงไฟล์แนบกับ `workflow_history` ของแต่ละขั้นตอนโดยตรง
|
||||
|
||||
แนวทางเทคนิค: ขยาย `workflow_histories` ด้วย FK ใน `attachments` (Nullable) + ขยาย `WorkflowTransitionDto` รับ `attachmentPublicIds` (pre-uploaded UUIDv7 list) + สร้าง Frontend components ใหม่ 4 ชิ้น
|
||||
|
||||
---
|
||||
|
||||
## Technical Context
|
||||
|
||||
**Language/Version**: TypeScript 5.x (strict mode), Node.js 20+
|
||||
**Primary Dependencies**:
|
||||
- Backend: NestJS 10, TypeORM 0.3, MariaDB 10.6+, Redis (Redlock), BullMQ
|
||||
- Frontend: Next.js 14 (App Router), TailwindCSS 3.4, shadcn/ui, TanStack Query v5, React Hook Form + Zod
|
||||
**Storage**: MariaDB (schema via SQL delta — ADR-009), MinIO / Local FS via `StorageService`
|
||||
**Testing**: Jest (backend unit + e2e), Vitest (frontend)
|
||||
**Target Platform**: QNAP Container Station (Docker), Browser (Chrome/Edge latest)
|
||||
**Project Type**: Web application (backend/ + frontend/ monorepo)
|
||||
**Performance Goals**: (1) Workflow history + attachment join query < 200ms p95 (mitigated by Redis Cache TTL 1h); (2) `POST /instances/:id/transition` (พร้อม file) P95 ≤ 5 วินาที สำหรับ file ≤ 10MB (รวม ClamAV + Redlock + DB transaction)
|
||||
**Constraints**: No TypeORM migrations (ADR-009); UUID via `publicId` only (ADR-019); ClamAV scan mandatory (ADR-016); BullMQ for all async jobs (ADR-008)
|
||||
**Scale/Scope**: ~50 concurrent users, documents in hundreds per project
|
||||
|
||||
---
|
||||
|
||||
## Constitution Check
|
||||
|
||||
_GATE: Checked against `.windsurfrules` before Phase 0. Re-verified after Phase 1._
|
||||
|
||||
| Gate | Status | Notes |
|
||||
|------|--------|-------|
|
||||
| **🔴 UUID Pattern (ADR-019)** | ✅ PASS | All attachment references via `publicId` (UUIDv7 string). `workflow_history_id` FK value is CHAR(36) UUID from `workflow_histories.id`. No `parseInt` usage. |
|
||||
| **🔴 Schema via SQL Delta (ADR-009)** | ✅ PASS | Delta file `04-add-workflow-history-id-to-attachments.sql` — no TypeORM migration |
|
||||
| **🔴 Two-Phase Upload (ADR-016)** | ✅ PASS | Files uploaded via existing Two-Phase endpoint first; `publicId`s referenced in transition DTO |
|
||||
| **🔴 ClamAV Scan (ADR-016)** | ✅ PASS | ClamAV scan runs during Phase 1 of file upload (before transition) |
|
||||
| **🔴 CASL Guard (ADR-016)** | ✅ PASS | New `WorkflowTransitionGuard` implements 4-Level RBAC |
|
||||
| **🔴 Idempotency-Key (Security Rule #1)** | ✅ PASS | `POST /instances/:id/transition` validates `Idempotency-Key` header |
|
||||
| **🔴 BullMQ Async (ADR-008)** | ✅ PASS | Notifications dispatched via `WorkflowEventService` (existing BullMQ pattern) |
|
||||
| **🔴 No `any` types** | ✅ PASS | All new types fully typed |
|
||||
| **🟡 Thin Controller** | ✅ PASS | Controller delegates to Service; Guard handles RBAC |
|
||||
| **🟡 Test Coverage 80% business logic** | ⚠️ REQUIRED | See testing plan in Phase 3 |
|
||||
| **🔴 Redis Redlock (ADR-002)** | ✅ PASS | Redlock applied to `instanceId` during `processTransition()` — Fail-closed: Retry 3x (500ms exponential backoff) → HTTP 503 if Redis unavailable |
|
||||
| **🔴 Upload State Restriction** | ✅ PASS | Step-attachment upload permitted only in `PENDING_REVIEW`/`PENDING_APPROVAL`; Terminal states (`APPROVED`,`REJECTED`,`CLOSED`) → HTTP 409 |
|
||||
|
||||
---
|
||||
|
||||
## Project Structure
|
||||
|
||||
### Documentation (this feature)
|
||||
|
||||
```text
|
||||
specs/200-fullstacks/202-adr-021-integrated-workflow-context/
|
||||
├── spec.md # Feature specification
|
||||
├── plan.md # This file
|
||||
├── tasks.md # Generated by speckit-tasks
|
||||
└── checklists/ # Quality checklists
|
||||
```
|
||||
|
||||
### Source Code (impacted files)
|
||||
|
||||
```text
|
||||
# 🔴 Backend — DB & Entities
|
||||
specs/03-Data-and-Storage/deltas/
|
||||
└── 04-add-workflow-history-id-to-attachments.sql [NEW]
|
||||
|
||||
backend/src/common/file-storage/entities/
|
||||
└── attachment.entity.ts [MODIFY — add workflowHistoryId + relation]
|
||||
|
||||
backend/src/modules/workflow-engine/entities/
|
||||
└── workflow-history.entity.ts [MODIFY — add OneToMany attachments]
|
||||
|
||||
# 🔴 Backend — API & Guards
|
||||
backend/src/modules/workflow-engine/dto/
|
||||
└── workflow-transition.dto.ts [MODIFY — add attachmentPublicIds]
|
||||
|
||||
backend/src/modules/workflow-engine/guards/
|
||||
└── workflow-transition.guard.ts [NEW — 4-Level RBAC]
|
||||
|
||||
backend/src/modules/workflow-engine/
|
||||
├── workflow-engine.service.ts [MODIFY — extend processTransition()]
|
||||
├── workflow-engine.controller.ts [MODIFY — add idempotency header, guard]
|
||||
└── workflow-engine.module.ts [MODIFY — register guard]
|
||||
|
||||
# 🟡 Frontend — Types
|
||||
frontend/types/
|
||||
└── workflow.ts [MODIFY — add attachments to WorkflowHistoryStep]
|
||||
|
||||
frontend/types/dto/workflow-engine/
|
||||
└── workflow-engine.dto.ts [MODIFY — add WorkflowTransitionWithAttachmentsDto]
|
||||
|
||||
# 🟡 Frontend — New Components
|
||||
frontend/components/workflow/
|
||||
├── integrated-banner.tsx [NEW — Status + Metadata + Action bar]
|
||||
└── workflow-lifecycle.tsx [NEW — Vertical timeline with Indigo active step]
|
||||
|
||||
frontend/components/common/
|
||||
└── file-preview-modal.tsx [NEW — PDF/Image inline preview]
|
||||
|
||||
# 🟡 Frontend — New Hook
|
||||
frontend/hooks/
|
||||
└── use-workflow-action.ts [NEW — upload + transition orchestration]
|
||||
|
||||
# 🟡 Frontend — Page Refactors (use new components)
|
||||
frontend/app/(dashboard)/rfas/[uuid]/page.tsx [MODIFY — integrate IntegratedBanner + WorkflowLifecycle]
|
||||
frontend/app/(dashboard)/transmittals/[uuid]/page.tsx [MODIFY — same as RFA]
|
||||
frontend/app/(dashboard)/circulation/[uuid]/page.tsx [MODIFY — same as RFA]
|
||||
frontend/app/(dashboard)/correspondences/[uuid]/page.tsx [MODIFY — same as RFA]
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
## Complexity Tracking
|
||||
|
||||
_No constitution violations. Architecture is additive (Nullable FK, extended DTO, new components)._
|
||||
|
||||
---
|
||||
|
||||
## Design Decisions
|
||||
|
||||
### Data Model
|
||||
|
||||
- `attachments.workflow_history_id` = `CHAR(36) NULL` FK → `workflow_histories.id`
|
||||
- `ON DELETE SET NULL` (preserve attachment records if history row deleted)
|
||||
- Composite index: `INDEX idx_att_wfhist_created (workflow_history_id, created_at)`
|
||||
- `WorkflowHistory` gains `@OneToMany(() => Attachment, a => a.workflowHistory)` — **lazy-loaded only**
|
||||
|
||||
### API Contract
|
||||
|
||||
**Extended `POST /workflow-engine/instances/:instanceId/transition`:**
|
||||
```
|
||||
Header: Idempotency-Key: <UUIDv7>
|
||||
Body: {
|
||||
action: string // existing
|
||||
comment?: string // existing
|
||||
  payload?: Record<string, unknown>   // existing
|
||||
attachmentPublicIds?: string[] // NEW — UUIDv7 list of pre-uploaded attachments
|
||||
}
|
||||
```
|
||||
|
||||
**New `GET /workflow-engine/instances/:instanceId/history`:**
|
||||
```
|
||||
Response: WorkflowHistoryItem[] with nested attachments[] per step
|
||||
```
|
||||
|
||||
### Frontend Architecture
|
||||
|
||||
3 new components follow **compound pattern**:
|
||||
- `<IntegratedBanner>` — Status + Metadata + Action bar
|
||||
- `<WorkflowLifecycle>` — Vertical timeline, Indigo active step (pulse animation)
|
||||
- `<FilePreviewModal>` — PDF iframe / Image viewer
|
||||
|
||||
**`use-workflow-action` hook responsibilities:**
|
||||
1. Validate `Idempotency-Key` (generate UUIDv7 once per action intent)
|
||||
2. Guard: Check `currentState ∈ {PENDING_REVIEW, PENDING_APPROVAL}` before transition
|
||||
3. Ensure all `attachmentPublicIds` are committed (not temp) before transition
|
||||
4. Call `POST /instances/:id/transition` with `Idempotency-Key` header
|
||||
5. Handle HTTP 503 → toast "ระบบยุ่ง กรุณาลองใหม่"
|
||||
6. Invalidate TanStack Query cache for the document + workflow instance
|
||||
|
||||
**Modules in scope (v1.8.6):** RFA, Transmittal, Circulation, Correspondence (4 modules)
|
||||
|
||||
---
|
||||
|
||||
## Risk Register
|
||||
|
||||
| Risk | Likelihood | Impact | Mitigation |
|
||||
|------|-----------|--------|------------|
|
||||
| N+1 query on history + attachments join | Medium | High | Eager-load only when explicitly querying history; Redis cache TTL 1h |
|
||||
| Race condition: 2 users upload to same step simultaneously | Low | High | Redis Redlock on `instanceId` — only 1 transition allowed at a time |
|
||||
| Attachment linked to wrong history record | Low | High | `processTransition()` creates history row first, then links attachments in same transaction |
|
||||
| ClamAV timeout during upload | Low | Medium | Upload endpoint has its own timeout; transition is decoupled |
|
||||
| Frontend: stale workflow state after transition | Medium | Medium | `use-workflow-action` hook invalidates TanStack Query cache on success |
|
||||
|
||||
---
|
||||
|
||||
## Dependencies Map
|
||||
|
||||
```
|
||||
ADR-021
|
||||
├── ADR-001 (Workflow Engine DSL) — extends processTransition()
|
||||
├── ADR-002 (Redis Redlock) — existing lock pattern applied to transition
|
||||
├── ADR-016 (Security) — Two-Phase upload, ClamAV, CASL Guard
|
||||
├── ADR-019 (UUID) — publicId for all attachment references
|
||||
└── ADR-008 (BullMQ) — notification dispatch (unchanged, existing pattern)
|
||||
```
|
||||
@@ -0,0 +1,162 @@
|
||||
# Feature Specification: Integrated Workflow Context & Step-specific Attachments
|
||||
|
||||
**Feature Branch**: `200-fullstacks/202-adr-021-integrated-workflow-context`
|
||||
**Created**: 2026-05-03
|
||||
**Status**: Draft
|
||||
**Input**: ADR-021 Integrated Workflow Context & Step-specific Attachments
|
||||
**Location**: `specs/200-fullstacks/202-adr-021-integrated-workflow-context/`
|
||||
|
||||
---
|
||||
|
||||
## Clarifications
|
||||
|
||||
### Session 2026-04-12 (from ADR-021)
|
||||
|
||||
- **Q:** What are the file size and attachment count limits per workflow step? → **A:** No explicit limit (controlled by infrastructure only)
|
||||
- **Q:** What are the specific values and storage format for the "Priority" field in the Integrated Banner? → **A:** Enum "URGENT", "HIGH", "MEDIUM", "LOW" — 4-tier system with visual indicators
|
||||
- **Q:** How should the system handle virus/malware detection during step-specific file upload? → **A:** Block upload immediately, delete temp file, show error "File rejected" to user
|
||||
- **Q:** What is the cache TTL for Workflow History data to reduce join query overhead? → **A:** 1 hour — balanced cache duration for workflow history data
|
||||
- **Q:** Who is authorized to upload step-specific attachments during a workflow transition? → **A:** Only assigned handler can upload; superadmin and organization admin can upload on behalf (impersonation)
|
||||
|
||||
### Session 2026-04-19 (from ADR-021)
|
||||
|
||||
- **Q:** Which workflow states allow step-specific attachment upload? → **A:** Only Active-decision states (`PENDING_REVIEW`, `PENDING_APPROVAL`) — Terminal states (`APPROVED`, `REJECTED`, `CLOSED`) are not allowed
|
||||
- **Q:** What happens if Redis Redlock fails during transition? → **A:** Fail-closed — Retry 3 times (500ms exponential backoff) then throw HTTP 503 "Service temporarily unavailable" to preserve data integrity
|
||||
- **Q:** Which modules must support step-specific attachments in v1.8.6? → **A:** **All 4 modules** — RFA, Transmittal, Circulation, and Correspondence
|
||||
- **Q:** Performance target for Upload + Transition API? → **A:** P95 ≤ 5 seconds for files ≤10MB (ClamAV scan + Redlock + DB transaction included)
|
||||
|
||||
---
|
||||
|
||||
## User Scenarios & Testing
|
||||
|
||||
### User Story 1 - Integrated Banner (Priority: P1) 🎯 MVP
|
||||
|
||||
As a Reviewer/Approver, I want to see all critical document information (Doc No, Subject, Status, Priority) and available actions in a single header bar without scrolling or switching screens, so I can make approval decisions quickly with full context.
|
||||
|
||||
**Why this priority**: This is the core UX improvement of ADR-021. Without the Integrated Banner, users waste time scrolling and switching between document content and workflow controls.
|
||||
|
||||
**Independent Test**: The IntegratedBanner component can be rendered with mock RFA/Transmittal/Circulation/Correspondence data, verifying Priority badge colors, Status display, and Action button visibility. Buttons must be disabled when workflow is in terminal states.
|
||||
|
||||
**Acceptance Scenarios**:
|
||||
|
||||
1. **Given** an RFA in `PENDING_APPROVAL` state with `URGENT` priority, **When** I open the detail page, **Then** I see the Doc No, Subject, red URGENT badge, status badge, and Approve/Reject/Return buttons in a sticky header
|
||||
2. **Given** a Transmittal in `APPROVED` state, **When** I view the detail page, **Then** action buttons are disabled and the status shows as completed
|
||||
3. **Given** a Correspondence in `PENDING_REVIEW` state with `MEDIUM` priority, **When** I open the detail page, **Then** the priority badge shows yellow and all workflow actions are available
|
||||
|
||||
---
|
||||
|
||||
### User Story 2 - Workflow Lifecycle Visualization (Priority: P1) 🎯 MVP
|
||||
|
||||
As a document participant, I want to see a vertical timeline showing all workflow steps with the current step highlighted, so I understand where the document is in the approval process and what steps remain.
|
||||
|
||||
**Why this priority**: Users currently lack visibility into workflow progress. The vertical timeline provides immediate orientation and reduces confusion about approval status.
|
||||
|
||||
**Independent Test**: The WorkflowLifecycle component can be rendered with mock workflow history data containing completed, current, and pending steps. Verify current step has Indigo (#6366f1) color with pulse animation.
|
||||
|
||||
**Acceptance Scenarios**:
|
||||
|
||||
1. **Given** a 4-step RFA workflow where step 2 is current, **When** I view the Workflow tab, **Then** step 1 shows as completed (with actor/date), step 2 shows Indigo with pulse, steps 3-4 are muted/pending
|
||||
2. **Given** a Circulation workflow with comments on completed steps, **When** I view the timeline, **Then** each completed step shows the handler name, action date, and any comments
|
||||
3. **Given** a Transmittal in terminal state, **When** I view the timeline, **Then** the final step is marked complete and no pulse animation is shown
|
||||
|
||||
---
|
||||
|
||||
### User Story 3 - Step-specific Attachments (Priority: P2)
|
||||
|
||||
As a Reviewer, I want to upload supporting documents (images, PDFs) that are specifically linked to the current workflow step, so that anyone auditing the workflow can see exactly what evidence was provided for each approval decision.
|
||||
|
||||
**Why this priority**: Currently all attachments are mixed at the document level. Step-specific attachments provide audit trail clarity and improve compliance tracking.
|
||||
|
||||
**Independent Test**: Upload files during a workflow transition in `PENDING_REVIEW` state, then verify via API that `attachments.workflow_history_id` is set correctly. Files uploaded in terminal states must be rejected with HTTP 409.
|
||||
|
||||
**Acceptance Scenarios**:
|
||||
|
||||
1. **Given** an RFA in `PENDING_REVIEW` state, **When** I drag-drop 2 PDF files and click Approve, **Then** the files are linked to that workflow history step and visible in the timeline
|
||||
2. **Given** a Transmittal in `APPROVED` state, **When** I attempt to upload a file, **Then** the system rejects with "Upload not allowed in terminal state" error
|
||||
3. **Given** a Circulation in `PENDING_APPROVAL` state, **When** I upload a file and the approver rejects, **Then** the attachment remains linked to that rejection step for audit purposes
|
||||
|
||||
---
|
||||
|
||||
### User Story 4 - Internal File Preview (Priority: P2)
|
||||
|
||||
As a Reviewer, I want to click on any attachment and preview it in a modal without leaving the document page, so I can review evidence while maintaining workflow context.
|
||||
|
||||
**Why this priority**: Current workflow requires downloading or opening files in new tabs, breaking user flow and reducing productivity.
|
||||
|
||||
**Independent Test**: Click on PDF and Image attachments in the workflow timeline, verify FilePreviewModal opens with correct content type rendering (iframe for PDF, img for images).
|
||||
|
||||
**Acceptance Scenarios**:
|
||||
|
||||
1. **Given** a step with 3 attachments (2 PDFs, 1 PNG), **When** I click the first PDF, **Then** a modal opens showing the PDF in an inline viewer
|
||||
2. **Given** the preview modal is open, **When** I press Escape or click the X button, **Then** the modal closes and I remain on the document page
|
||||
3. **Given** a large PDF attachment, **When** I open the preview, **Then** the modal loads within 2 seconds with proper scroll controls
|
||||
|
||||
---
|
||||
|
||||
### User Story 5 - i18n Support (Priority: P3)
|
||||
|
||||
As a Thai- or English-speaking user, I want all workflow UI text to display in my selected language, so I can use the system effectively regardless of my preferred language.
|
||||
|
||||
**Why this priority**: LCBP3-DMS must support bilingual operations. All new UI components must follow i18n standards from project inception.
|
||||
|
||||
**Independent Test**: Switch language between TH and EN, verify all IntegratedBanner labels, WorkflowLifecycle step labels, and FilePreviewModal controls display correctly in each language.
|
||||
|
||||
**Acceptance Scenarios**:
|
||||
|
||||
1. **Given** my language is set to Thai, **When** I view an RFA detail page, **Then** all workflow action buttons show Thai text (อนุมัติ, ปฏิเสธ, ส่งกลับ)
|
||||
2. **Given** my language is set to English, **When** I view the Workflow tab, **Then** step labels show English text (Review, Approval, etc.)
|
||||
3. **Given** I am viewing a document, **When** I switch the interface language, **Then** all ADR-021 components immediately display the newly selected language without requiring a full page reload
|
||||
|
||||
---
|
||||
|
||||
### Edge Cases
|
||||
|
||||
- What happens when a user attempts transition with concurrent upload from another user? (Redis Redlock handles serialization)
|
||||
- How does system handle ClamAV detecting malware during step upload? (Block immediately, delete temp file, show "File rejected")
|
||||
- What happens when Redis is unavailable during transition? (Retry 3x with exponential backoff, then HTTP 503 fail-closed)
|
||||
- How does system handle duplicate Idempotency-Key? (Return cached response, no re-processing)
|
||||
- What happens when attachment file is deleted from storage after linking? (Show "File unavailable" in UI, preserve metadata)
|
||||
- How does system handle unauthorized upload attempt? (CASL Guard blocks with 403 Forbidden)
|
||||
|
||||
---
|
||||
|
||||
## Requirements
|
||||
|
||||
### Functional Requirements
|
||||
|
||||
- **FR-001**: System MUST display Integrated Banner on RFA, Transmittal, Circulation, and Correspondence detail pages showing Doc No, Subject, Status, Priority, and available actions
|
||||
- **FR-002**: Priority badge MUST support 4 levels: URGENT (red), HIGH (orange), MEDIUM (yellow), LOW (green) with visual indicators
|
||||
- **FR-003**: Action buttons (Approve/Reject/Return) MUST be disabled when workflow is in terminal states (APPROVED, REJECTED, CLOSED)
|
||||
- **FR-004**: System MUST display Workflow Lifecycle as vertical timeline with current step highlighted in Indigo (#6366f1) with pulse animation
|
||||
- **FR-005**: System MUST support drag-drop file upload linked to workflow history steps, only allowed in PENDING_REVIEW or PENDING_APPROVAL states
|
||||
- **FR-006**: Upload attempts in terminal states MUST be rejected with HTTP 409 Conflict
|
||||
- **FR-007**: System MUST enforce 4-Level RBAC for workflow transitions: Superadmin > Org Admin > Assigned Handler > Read-only
|
||||
- **FR-008**: System MUST validate Idempotency-Key header on all transition requests to prevent duplicate processing
|
||||
- **FR-009**: File uploads MUST use Two-Phase pattern (Temp → ClamAV scan → Permanent)
|
||||
- **FR-010**: System MUST provide internal File Preview Modal for PDF and Image attachments without page navigation
|
||||
- **FR-011**: All UI text MUST use i18n keys supporting Thai and English languages
|
||||
- **FR-012**: Workflow transitions MUST use optimistic locking (version_no) to prevent race conditions
|
||||
- **FR-013**: Redis Redlock MUST serialize concurrent transitions on the same workflow instance
|
||||
- **FR-014**: Attachment linking to workflow history MUST occur in same database transaction as state transition
|
||||
|
||||
### Key Entities
|
||||
|
||||
- **WorkflowInstance**: Represents a running workflow tied to a document (RFA/Transmittal/Circulation/Correspondence). Tracks current state, definition reference, and context data.
|
||||
- **WorkflowHistory**: Audit record of each workflow transition. Contains from_state, to_state, action, actor, timestamp, and (ADR-021) linked attachments.
|
||||
- **Attachment** (Extended): File entity with new `workflow_history_id` FK linking to specific workflow step. NULL value indicates main document attachment (pre-ADR-021 behavior).
|
||||
- **IntegratedBanner**: UI component combining document metadata, workflow status, priority indicator, and action controls.
|
||||
- **WorkflowLifecycle**: UI component displaying vertical timeline of all workflow steps with visual highlighting.
|
||||
|
||||
---
|
||||
|
||||
## Success Criteria
|
||||
|
||||
### Measurable Outcomes
|
||||
|
||||
- **SC-001**: Users can approve/reject documents 40% faster due to Integrated Banner reducing screen navigation
|
||||
- **SC-002**: 100% of workflow attachments are traceable to specific approval steps via `workflow_history_id` linkage
|
||||
- **SC-003**: File preview modal loads and displays PDF/Image files within 2 seconds (P95)
|
||||
- **SC-004**: Zero duplicate workflow transitions occur due to Idempotency-Key enforcement (verified via audit logs)
|
||||
- **SC-005**: System handles 50 concurrent workflow transitions per minute without data inconsistency (optimistic lock + Redlock)
|
||||
- **SC-006**: 100% of UI text in ADR-021 components is translatable (verified by language switch testing)
|
||||
- **SC-007**: Users can complete workflow transition with file upload within 5 seconds for files ≤10MB (P95, including ClamAV scan)
|
||||
@@ -0,0 +1,129 @@
|
||||
# Tasks: ADR-021 Integrated Workflow Context & Step-specific Attachments
|
||||
|
||||
**Branch**: `200-fullstacks/202-adr-021-integrated-workflow-context` | **Spec**: [spec.md](./spec.md) | **Plan**: [plan.md](./plan.md)
|
||||
**Location**: `specs/200-fullstacks/202-adr-021-integrated-workflow-context/`
|
||||
**Input**: Comprehensive task breakdown from `specs/08-Tasks/ADR-021-workflow-context/tasks.md`
|
||||
**Version**: 1.8.6 | **Date**: 2026-05-03
|
||||
|
||||
---
|
||||
|
||||
## Summary
|
||||
|
||||
This file provides a high-level task overview. For the **full detailed tasks** (with implementation notes, verification commands, and acceptance criteria), see:
|
||||
👉 **`specs/200-fullstacks/202-adr-021-integrated-workflow-context/tasks.md`**
|
||||
|
||||
---
|
||||
|
||||
## Phase Overview
|
||||
|
||||
| Phase | Focus | Key Deliverables | Status |
|
||||
|-------|-------|------------------|--------|
|
||||
| **Phase 1** | Setup | Branch creation, dev environment verification | ⏳ Pending |
|
||||
| **Phase 2** | Backend Foundation | SQL delta, Entity relations, Guards, Service extension | ⏳ Pending |
|
||||
| **Phase 3** | Integrated Banner (US1) | `IntegratedBanner` component, 4 module integrations | ⏳ Pending |
|
||||
| **Phase 4** | Workflow Lifecycle (US2) | `WorkflowLifecycle` component, vertical timeline | ⏳ Pending |
|
||||
| **Phase 5** | Step Attachments (US3) | `use-workflow-action` hook, DTO extension, linking logic | ⏳ Pending |
|
||||
| **Phase 6** | File Preview (US4) | `FilePreviewModal` component | ⏳ Pending |
|
||||
| **Phase 7** | i18n & Testing (US5) | i18n keys, unit tests, component tests, E2E | ⏳ Pending |
|
||||
|
||||
---
|
||||
|
||||
## Critical Path Tasks (Phase 2)
|
||||
|
||||
| # | Task | File(s) | Dependencies |
|
||||
|---|------|---------|--------------|
|
||||
| T1 | Create SQL delta — add `workflow_history_id` to `attachments` | `deltas/04-*.sql` | None |
|
||||
| T2 | Update `attachment.entity.ts` — add column + relation | `attachment.entity.ts` | T1 |
|
||||
| T3 | Update `workflow-history.entity.ts` — add `@OneToMany` | `workflow-history.entity.ts` | T1 |
|
||||
| T4 | Extend `WorkflowTransitionDto` — add `attachmentPublicIds` | `workflow-transition.dto.ts` | None |
|
||||
| T5 | Create `WorkflowTransitionGuard` (CASL 4-Level) | `guards/workflow-transition.guard.ts` | None |
|
||||
| T6 | Extend `processTransition()` — link attachments | `workflow-engine.service.ts` | T2, T3, T4 |
|
||||
| T7 | Update Controller — idempotency + guard | `workflow-engine.controller.ts` | T5, T6 |
|
||||
| T8 | Register guard in Module | `workflow-engine.module.ts` | T5 |
|
||||
|
||||
---
|
||||
|
||||
## Frontend Tasks Overview (Phases 3-6)
|
||||
|
||||
| # | Task | Component/Page | Dependencies |
|
||||
|---|------|----------------|--------------|
|
||||
| F1 | Add types — `WorkflowHistoryItem` | `types/workflow.ts` | T7 |
|
||||
| F2 | Add DTO — `WorkflowTransitionWithAttachmentsDto` | `types/dto/workflow-engine/` | T4 |
|
||||
| F3 | Create hook — `use-workflow-action.ts` | `hooks/` | F2 |
|
||||
| F4 | Create component — `IntegratedBanner` | `components/workflow/` | F1 |
|
||||
| F5 | Create component — `WorkflowLifecycle` | `components/workflow/` | F1 |
|
||||
| F6 | Create component — `FilePreviewModal` | `components/common/` | F1 |
|
||||
| F7-F10 | Integrate into 4 module pages | `rfas/`, `transmittals/`, `circulation/`, `correspondences/` | F3-F6 |
|
||||
|
||||
---
|
||||
|
||||
## Testing Tasks (Phase 7)
|
||||
|
||||
| # | Task | Target | Type |
|
||||
|---|------|--------|------|
|
||||
| G1 | Unit tests — `processTransition()` extended | `workflow-engine.service.spec.ts` | Backend |
|
||||
| G2 | Unit tests — `WorkflowTransitionGuard` | `workflow-transition.guard.spec.ts` | Backend |
|
||||
| G3 | Component tests — `IntegratedBanner` | `integrated-banner.test.tsx` | Frontend |
|
||||
| G4 | Component tests — `WorkflowLifecycle` | `workflow-lifecycle.test.tsx` | Frontend |
|
||||
| G5 | Component tests — `FilePreviewModal` | `file-preview-modal.test.tsx` | Frontend |
|
||||
| G6 | E2E test — workflow with attachment | `test/workflow-with-attachment.e2e-spec.ts` | Integration |
|
||||
|
||||
---
|
||||
|
||||
## Verification Checkpoints
|
||||
|
||||
### Backend
|
||||
```bash
|
||||
# Schema check
|
||||
grep -n "workflow_history_id" specs/03-Data-and-Storage/lcbp3-v1.8.0-schema-02-tables.sql
|
||||
|
||||
# Type check
|
||||
cd backend && pnpm tsc --noEmit
|
||||
|
||||
# Unit tests
|
||||
cd backend && pnpm test --testPathPattern=workflow-engine.service
|
||||
cd backend && pnpm test --testPathPattern=workflow-transition.guard
|
||||
|
||||
# Integration test
|
||||
curl -X POST http://localhost:3001/api/workflow-engine/instances/:id/transition \
|
||||
-H "Authorization: Bearer $TOKEN" \
|
||||
-H "Idempotency-Key: $(uuidgen)" \
|
||||
-H "Content-Type: application/json" \
|
||||
-d '{"action":"APPROVE","comment":"OK","attachmentPublicIds":["<uuid>"]}'
|
||||
```
|
||||
|
||||
### Frontend
|
||||
```bash
|
||||
# Type check
|
||||
cd frontend && pnpm tsc --noEmit
|
||||
|
||||
# Component tests
|
||||
cd frontend && pnpm test --run components/workflow/integrated-banner
|
||||
cd frontend && pnpm test --run components/workflow/workflow-lifecycle
|
||||
cd frontend && pnpm test --run components/common/file-preview-modal
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
## References
|
||||
|
||||
| Document | Path | Purpose |
|
||||
|----------|------|---------|
|
||||
| **Full Tasks** | `specs/08-Tasks/ADR-021-workflow-context/tasks.md` | Complete task breakdown with 360 lines of detail |
|
||||
| **Data Model** | `specs/08-Tasks/ADR-021-workflow-context/data-model.md` | Entity definitions, SQL delta, DTO specs |
|
||||
| **Quick Start** | `specs/08-Tasks/ADR-021-workflow-context/quickstart.md` | Developer onboarding guide |
|
||||
| **Research** | `specs/08-Tasks/ADR-021-workflow-context/research.md` | Phase 0 findings and decisions |
|
||||
| **Contracts** | `specs/08-Tasks/ADR-021-workflow-context/contracts/*.yaml` | API contracts |
|
||||
|
||||
---
|
||||
|
||||
## Definition of Done (Observable Outcomes)
|
||||
|
||||
| REQ | Done When |
|
||||
|-----|-----------|
|
||||
| **REQ-01** | Banner shows Doc No, Status, Priority badge, Approve/Reject buttons on all 4 module detail pages |
|
||||
| **REQ-02** | Workflow tab displays Role + Handler + Description for every step without reload |
|
||||
| **REQ-03** | Current step shows Indigo (#6366f1) with pulse animation; other steps distinct |
|
||||
| **REQ-04** | Drag-drop works only in `PENDING_REVIEW`/`PENDING_APPROVAL`; disabled in terminal states |
|
||||
| **REQ-05** | Clicking PDF/Image opens preview modal without page navigation |
|
||||
| **REQ-06** | All UI text changes when switching EN/TH; no hardcoded strings |
|
||||
Some files were not shown because too many files have changed in this diff Show More
Reference in New Issue
Block a user