fix: move browser-automation and spec-driven-workflow scripts to scripts/ directory
Validator expects scripts in scripts/ subdirectory, not at skill root. Moved 6 scripts to match repo convention. Co-Authored-By: Claude Opus 4.6 (1M context) <noreply@anthropic.com>
This commit is contained in:
338
engineering/spec-driven-workflow/scripts/spec_generator.py
Normal file
338
engineering/spec-driven-workflow/scripts/spec_generator.py
Normal file
@@ -0,0 +1,338 @@
|
||||
#!/usr/bin/env python3
|
||||
"""
|
||||
Spec Generator - Generates a feature specification template from a name and description.
|
||||
|
||||
Produces a complete spec document with all required sections pre-filled with
|
||||
guidance prompts. Output can be markdown or structured JSON.
|
||||
|
||||
No external dependencies - uses only Python standard library.
|
||||
"""
|
||||
|
||||
import argparse
|
||||
import json
|
||||
import sys
|
||||
import textwrap
|
||||
from datetime import date
|
||||
from pathlib import Path
|
||||
from typing import Dict, Any, Optional
|
||||
|
||||
|
||||
SPEC_TEMPLATE = """\
|
||||
# Spec: {name}
|
||||
|
||||
**Author:** [your name]
|
||||
**Date:** {date}
|
||||
**Status:** Draft
|
||||
**Reviewers:** [list reviewers]
|
||||
**Related specs:** [links to related specs, or "None"]
|
||||
|
||||
---
|
||||
|
||||
## Context
|
||||
|
||||
{context_prompt}
|
||||
|
||||
---
|
||||
|
||||
## Functional Requirements
|
||||
|
||||
_Use RFC 2119 keywords: MUST, MUST NOT, SHOULD, SHOULD NOT, MAY._
|
||||
_Each requirement is a single, testable statement. Number sequentially._
|
||||
|
||||
- FR-1: The system MUST [describe required behavior].
|
||||
- FR-2: The system MUST [describe another required behavior].
|
||||
- FR-3: The system SHOULD [describe recommended behavior].
|
||||
- FR-4: The system MAY [describe optional behavior].
|
||||
- FR-5: The system MUST NOT [describe prohibited behavior].
|
||||
|
||||
---
|
||||
|
||||
## Non-Functional Requirements
|
||||
|
||||
### Performance
|
||||
- NFR-P1: [Operation] MUST complete in < [threshold] (p95) under [conditions].
|
||||
- NFR-P2: [Operation] SHOULD handle [throughput] requests per second.
|
||||
|
||||
### Security
|
||||
- NFR-S1: All data in transit MUST be encrypted via TLS 1.2+.
|
||||
- NFR-S2: The system MUST rate-limit [operation] to [limit] per [period] per [scope].
|
||||
|
||||
### Accessibility
|
||||
- NFR-A1: [UI component] MUST meet WCAG 2.1 AA standards.
|
||||
- NFR-A2: Error messages MUST be announced to screen readers.
|
||||
|
||||
### Scalability
|
||||
- NFR-SC1: The system SHOULD handle [number] concurrent [entities].
|
||||
|
||||
### Reliability
|
||||
- NFR-R1: The [service] MUST maintain [percentage]% uptime.
|
||||
|
||||
---
|
||||
|
||||
## Acceptance Criteria
|
||||
|
||||
_Write in Given/When/Then (Gherkin) format._
|
||||
_Each criterion MUST reference at least one FR-* or NFR-*._
|
||||
|
||||
### AC-1: [Descriptive name] (FR-1)
|
||||
Given [precondition]
|
||||
When [action]
|
||||
Then [expected result]
|
||||
And [additional assertion]
|
||||
|
||||
### AC-2: [Descriptive name] (FR-2)
|
||||
Given [precondition]
|
||||
When [action]
|
||||
Then [expected result]
|
||||
|
||||
### AC-3: [Descriptive name] (NFR-S2)
|
||||
Given [precondition]
|
||||
When [action]
|
||||
Then [expected result]
|
||||
And [additional assertion]
|
||||
|
||||
---
|
||||
|
||||
## Edge Cases
|
||||
|
||||
_For every external dependency (API, database, file system, user input), specify at least one failure scenario._
|
||||
|
||||
- EC-1: [Input/condition] -> [expected behavior].
|
||||
- EC-2: [Input/condition] -> [expected behavior].
|
||||
- EC-3: [External service] is unavailable -> [expected behavior].
|
||||
- EC-4: [Concurrent/race condition] -> [expected behavior].
|
||||
- EC-5: [Boundary value] -> [expected behavior].
|
||||
|
||||
---
|
||||
|
||||
## API Contracts
|
||||
|
||||
_Define request/response shapes using TypeScript-style notation._
|
||||
_Cover all endpoints referenced in functional requirements._
|
||||
|
||||
### [METHOD] [endpoint]
|
||||
|
||||
Request:
|
||||
```typescript
|
||||
interface [Name]Request {{
|
||||
field: string; // Description, constraints
|
||||
optional?: number; // Default: [value]
|
||||
}}
|
||||
```
|
||||
|
||||
Success Response ([status code]):
|
||||
```typescript
|
||||
interface [Name]Response {{
|
||||
id: string;
|
||||
field: string;
|
||||
createdAt: string; // ISO 8601
|
||||
}}
|
||||
```
|
||||
|
||||
Error Response ([status code]):
|
||||
```typescript
|
||||
interface [Name]Error {{
|
||||
error: "[ERROR_CODE]";
|
||||
message: string;
|
||||
}}
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
## Data Models
|
||||
|
||||
_Define all entities referenced in requirements._
|
||||
|
||||
### [Entity Name]
|
||||
| Field | Type | Constraints |
|
||||
|-------|------|-------------|
|
||||
| id | UUID | Primary key, auto-generated |
|
||||
| [field] | [type] | [constraints] |
|
||||
| createdAt | timestamp | UTC, immutable |
|
||||
| updatedAt | timestamp | UTC, auto-updated |
|
||||
|
||||
---
|
||||
|
||||
## Out of Scope
|
||||
|
||||
_Explicit exclusions prevent scope creep. If someone asks for these during implementation, point them here._
|
||||
|
||||
- OS-1: [Feature/capability] — [reason for exclusion or link to future spec].
|
||||
- OS-2: [Feature/capability] — [reason for exclusion].
|
||||
- OS-3: [Feature/capability] — deferred to [version/sprint].
|
||||
|
||||
---
|
||||
|
||||
## Open Questions
|
||||
|
||||
_Track unresolved questions here. Each must be resolved before status moves to "Approved"._
|
||||
|
||||
- [ ] Q1: [Question] — Owner: [name], Due: [date]
|
||||
- [ ] Q2: [Question] — Owner: [name], Due: [date]
|
||||
"""
|
||||
|
||||
|
||||
def generate_context_prompt(description: str) -> str:
    """Build the Context section text for a generated spec.

    When *description* is non-empty it becomes the opening paragraph,
    followed by italicized guidance on expanding it; otherwise a generic
    guidance prompt is returned.
    """
    if not description:
        # No seed text supplied: return the generic guidance prompt.
        return textwrap.dedent("""\
            _Why does this feature exist? What problem does it solve? What is the business
            motivation? Include links to user research, support tickets, or metrics that
            justify this work. 2-4 paragraphs maximum._""")

    # Seed with the caller's description, then append expansion guidance.
    return textwrap.dedent(f"""\
        {description}

        _Expand this context section to include:_
        _- Why does this feature exist? What problem does it solve?_
        _- What is the business motivation? (link to user research, support tickets, metrics)_
        _- What is the current state? (what exists today, what pain points exist)_
        _- 2-4 paragraphs maximum._""")
|
||||
|
||||
|
||||
def generate_spec(name: str, description: str) -> str:
    """Render the markdown spec template for *name*.

    *description* seeds the Context section (via generate_context_prompt);
    the Date field is stamped with today's ISO date.
    """
    return SPEC_TEMPLATE.format(
        name=name,
        date=date.today().isoformat(),
        context_prompt=generate_context_prompt(description),
    )
|
||||
|
||||
|
||||
def generate_spec_json(name: str, description: str) -> Dict[str, Any]:
    """Generate structured JSON representation of the spec template.

    Mirrors the markdown template's sections with the same placeholder
    text; the result is a plain dict serializable with json.dumps.
    """
    return {
        "spec": {
            "title": f"Spec: {name}",
            "metadata": {
                "author": "[your name]",
                "date": date.today().isoformat(),
                "status": "Draft",
                "reviewers": [],
                "related_specs": [],
            },
            # Falls back to a placeholder when no description was supplied.
            "context": description or "[Describe why this feature exists]",
            "functional_requirements": [
                {"id": "FR-1", "keyword": "MUST", "description": "[describe required behavior]"},
                {"id": "FR-2", "keyword": "MUST", "description": "[describe another required behavior]"},
                {"id": "FR-3", "keyword": "SHOULD", "description": "[describe recommended behavior]"},
                {"id": "FR-4", "keyword": "MAY", "description": "[describe optional behavior]"},
                {"id": "FR-5", "keyword": "MUST NOT", "description": "[describe prohibited behavior]"},
            ],
            "non_functional_requirements": {
                "performance": [
                    {"id": "NFR-P1", "description": "[operation] MUST complete in < [threshold]"},
                ],
                "security": [
                    {"id": "NFR-S1", "description": "All data in transit MUST be encrypted via TLS 1.2+"},
                ],
                "accessibility": [
                    {"id": "NFR-A1", "description": "[UI component] MUST meet WCAG 2.1 AA"},
                ],
                "scalability": [
                    {"id": "NFR-SC1", "description": "[system] SHOULD handle [N] concurrent [entities]"},
                ],
                "reliability": [
                    {"id": "NFR-R1", "description": "[service] MUST maintain [N]% uptime"},
                ],
            },
            "acceptance_criteria": [
                {
                    "id": "AC-1",
                    "name": "[descriptive name]",
                    "references": ["FR-1"],
                    "given": "[precondition]",
                    "when": "[action]",
                    "then": "[expected result]",
                },
            ],
            "edge_cases": [
                {"id": "EC-1", "condition": "[input/condition]", "behavior": "[expected behavior]"},
            ],
            "api_contracts": [
                {
                    "method": "[METHOD]",
                    "endpoint": "[/api/path]",
                    "request_fields": [{"name": "field", "type": "string", "constraints": "[description]"}],
                    "success_response": {"status": 200, "fields": []},
                    "error_response": {"status": 400, "fields": []},
                },
            ],
            "data_models": [
                {
                    "name": "[Entity]",
                    "fields": [
                        {"name": "id", "type": "UUID", "constraints": "Primary key, auto-generated"},
                    ],
                },
            ],
            "out_of_scope": [
                {"id": "OS-1", "description": "[feature/capability]", "reason": "[reason]"},
            ],
            "open_questions": [],
        },
        # Provenance block: records the generator and the raw CLI inputs.
        "metadata": {
            "generated_by": "spec_generator.py",
            "feature_name": name,
            "feature_description": description,
        },
    }
|
||||
|
||||
|
||||
def main():
    """CLI entry point: parse arguments, render the spec, emit it."""
    parser = argparse.ArgumentParser(
        description="Generate a feature specification template from a name and description.",
        epilog="Example: python spec_generator.py --name 'User Auth' --description 'OAuth 2.0 login flow'",
    )
    parser.add_argument("--name", required=True, help="Feature name (used as spec title)")
    parser.add_argument(
        "--description",
        default="",
        help="Brief feature description (used to seed the context section)",
    )
    parser.add_argument("--output", "-o", default=None, help="Output file path (default: stdout)")
    parser.add_argument(
        "--format",
        choices=["md", "json"],
        default="md",
        help="Output format: md (markdown) or json (default: md)",
    )
    parser.add_argument("--json", action="store_true", dest="json_flag", help="Shorthand for --format json")
    args = parser.parse_args()

    # --json wins over --format when both are supplied.
    fmt = "json" if args.json_flag else args.format

    if fmt == "json":
        rendered = json.dumps(generate_spec_json(args.name, args.description), indent=2)
    else:
        rendered = generate_spec(args.name, args.description)

    if args.output:
        destination = Path(args.output)
        destination.parent.mkdir(parents=True, exist_ok=True)
        destination.write_text(rendered, encoding="utf-8")
        # Status note goes to stderr so stdout stays clean for piping.
        print(f"Spec template written to {destination}", file=sys.stderr)
    else:
        print(rendered)

    sys.exit(0)
|
||||
|
||||
|
||||
if __name__ == "__main__":
    # Allow use both as a script and as an importable module.
    main()
|
||||
461
engineering/spec-driven-workflow/scripts/spec_validator.py
Normal file
461
engineering/spec-driven-workflow/scripts/spec_validator.py
Normal file
@@ -0,0 +1,461 @@
|
||||
#!/usr/bin/env python3
|
||||
"""
|
||||
Spec Validator - Validates a feature specification for completeness and quality.
|
||||
|
||||
Checks that a spec document contains all required sections, uses RFC 2119 keywords
|
||||
correctly, has acceptance criteria in Given/When/Then format, and scores overall
|
||||
completeness from 0-100.
|
||||
|
||||
Sections checked:
|
||||
- Context, Functional Requirements, Non-Functional Requirements
|
||||
- Acceptance Criteria, Edge Cases, API Contracts, Data Models, Out of Scope
|
||||
|
||||
Exit codes: 0 = pass, 1 = warnings, 2 = critical (or --strict with score < 80)
|
||||
|
||||
No external dependencies - uses only Python standard library.
|
||||
"""
|
||||
|
||||
import argparse
|
||||
import json
|
||||
import re
|
||||
import sys
|
||||
from pathlib import Path
|
||||
from typing import Dict, List, Any, Tuple
|
||||
|
||||
|
||||
# Section definitions: (key, display_name, required_header_patterns, weight)
# Weights sum to 100.  A section counts as present if ANY one of its patterns
# matches any line (see SpecValidator._check_sections_present).
SECTIONS = [
    ("context", "Context", [r"^##\s+Context"], 10),
    ("functional_requirements", "Functional Requirements", [r"^##\s+Functional\s+Requirements"], 15),
    ("non_functional_requirements", "Non-Functional Requirements", [r"^##\s+Non-Functional\s+Requirements"], 10),
    ("acceptance_criteria", "Acceptance Criteria", [r"^##\s+Acceptance\s+Criteria"], 20),
    ("edge_cases", "Edge Cases", [r"^##\s+Edge\s+Cases"], 10),
    ("api_contracts", "API Contracts", [r"^##\s+API\s+Contracts"], 10),
    ("data_models", "Data Models", [r"^##\s+Data\s+Models"], 10),
    ("out_of_scope", "Out of Scope", [r"^##\s+Out\s+of\s+Scope"], 10),
    ("metadata", "Metadata (Author/Date/Status)", [r"\*\*Author:\*\*", r"\*\*Date:\*\*", r"\*\*Status:\*\*"], 5),
]

# RFC 2119 requirement keywords; presence of any one satisfies the
# keyword check in _check_functional_requirements.
RFC_KEYWORDS = ["MUST", "MUST NOT", "SHOULD", "SHOULD NOT", "MAY"]

# Patterns that indicate placeholder/unfilled content
# (matched case-insensitively against the whole document).
PLACEHOLDER_PATTERNS = [
    r"\[your\s+name\]",
    r"\[list\s+reviewers\]",
    r"\[describe\s+",
    r"\[input/condition\]",
    r"\[precondition\]",
    r"\[action\]",
    r"\[expected\s+result\]",
    r"\[feature/capability\]",
    r"\[operation\]",
    r"\[threshold\]",
    r"\[UI\s+component\]",
    r"\[service\]",
    r"\[percentage\]",
    r"\[number\]",
    r"\[METHOD\]",
    r"\[endpoint\]",
    r"\[Name\]",
    r"\[Entity\s+Name\]",
    r"\[type\]",
    r"\[constraints\]",
    r"\[field\]",
    r"\[reason\]",
]
|
||||
|
||||
|
||||
class SpecValidator:
    """Validates a spec document for completeness and quality.

    Typical use: ``SpecValidator(content, path).validate()`` returns a dict
    with "score" (0-100), "grade", per-section "sections", "findings", and
    a human-readable "summary".
    """

    def __init__(self, content: str, file_path: str = ""):
        self.content = content                    # full document text
        self.file_path = file_path                # used for reporting only
        self.lines = content.split("\n")          # line-wise view for header scans
        self.findings: List[Dict[str, Any]] = []  # accumulated {severity, section, message}
        self.section_scores: Dict[str, Dict[str, Any]] = {}  # key -> {name, present, score, max}

    def validate(self) -> Dict[str, Any]:
        """Run all validation checks and return results."""
        # _check_sections_present must run first: it seeds section_scores,
        # which several later checks deduct points from.
        self._check_sections_present()
        self._check_functional_requirements()
        self._check_acceptance_criteria()
        self._check_edge_cases()
        self._check_rfc_keywords()
        self._check_api_contracts()
        self._check_data_models()
        self._check_out_of_scope()
        self._check_placeholders()
        self._check_traceability()

        total_score = self._calculate_score()

        return {
            "file": self.file_path,
            "score": total_score,
            "grade": self._score_to_grade(total_score),
            "sections": self.section_scores,
            "findings": self.findings,
            "summary": self._build_summary(total_score),
        }

    def _add_finding(self, severity: str, section: str, message: str):
        """Record a validation finding."""
        self.findings.append({
            "severity": severity,  # "error", "warning", "info"
            "section": section,
            "message": message,
        })

    def _find_section_content(self, header_pattern: str) -> str:
        """Extract content between a section header and the next ## header."""
        in_section = False
        section_lines = []
        for line in self.lines:
            if re.match(header_pattern, line, re.IGNORECASE):
                in_section = True
                continue
            if in_section and re.match(r"^##\s+", line):
                break  # reached the next section header
            if in_section:
                section_lines.append(line)
        return "\n".join(section_lines)

    def _check_sections_present(self):
        """Check that all required sections exist."""
        for key, name, patterns, weight in SECTIONS:
            # A section counts as present if any one pattern matches any line.
            found = False
            for pattern in patterns:
                for line in self.lines:
                    if re.search(pattern, line, re.IGNORECASE):
                        found = True
                        break
                if found:
                    break

            if found:
                self.section_scores[key] = {"name": name, "present": True, "score": weight, "max": weight}
            else:
                self.section_scores[key] = {"name": name, "present": False, "score": 0, "max": weight}
                self._add_finding("error", key, f"Missing section: {name}")

    def _check_functional_requirements(self):
        """Validate functional requirements format and content."""
        content = self._find_section_content(r"^##\s+Functional\s+Requirements")
        if not content.strip():
            return  # absence already reported by _check_sections_present

        fr_pattern = re.compile(r"-\s+FR-(\d+):")
        matches = fr_pattern.findall(content)

        if not matches:
            self._add_finding("error", "functional_requirements", "No numbered requirements found (expected FR-N: format)")
            if "functional_requirements" in self.section_scores:
                self.section_scores["functional_requirements"]["score"] = max(
                    0, self.section_scores["functional_requirements"]["score"] - 10
                )
            return

        fr_count = len(matches)
        if fr_count < 3:
            self._add_finding("warning", "functional_requirements", f"Only {fr_count} requirements found. Most features need 3+.")

        # Check for RFC keywords (any single keyword anywhere satisfies this)
        has_keyword = False
        for kw in RFC_KEYWORDS:
            if kw in content:
                has_keyword = True
                break
        if not has_keyword:
            self._add_finding("warning", "functional_requirements", "No RFC 2119 keywords (MUST/SHOULD/MAY) found.")

    def _check_acceptance_criteria(self):
        """Validate acceptance criteria use Given/When/Then format."""
        content = self._find_section_content(r"^##\s+Acceptance\s+Criteria")
        if not content.strip():
            return

        ac_pattern = re.compile(r"###\s+AC-(\d+):")
        matches = ac_pattern.findall(content)

        if not matches:
            self._add_finding("error", "acceptance_criteria", "No numbered acceptance criteria found (expected ### AC-N: format)")
            if "acceptance_criteria" in self.section_scores:
                self.section_scores["acceptance_criteria"]["score"] = max(
                    0, self.section_scores["acceptance_criteria"]["score"] - 15
                )
            return

        ac_count = len(matches)

        # Check Given/When/Then
        # NOTE(review): these count the words anywhere in the section, so
        # instruction text mentioning "Given/When/Then" inflates the counts
        # and can mask a missing clause — confirm if stricter matching is wanted.
        given_count = len(re.findall(r"(?i)\bgiven\b", content))
        when_count = len(re.findall(r"(?i)\bwhen\b", content))
        then_count = len(re.findall(r"(?i)\bthen\b", content))

        if given_count < ac_count:
            self._add_finding("warning", "acceptance_criteria",
                f"Found {ac_count} criteria but only {given_count} 'Given' clauses. Each AC needs Given/When/Then.")
        if when_count < ac_count:
            self._add_finding("warning", "acceptance_criteria",
                f"Found {ac_count} criteria but only {when_count} 'When' clauses.")
        if then_count < ac_count:
            self._add_finding("warning", "acceptance_criteria",
                f"Found {ac_count} criteria but only {then_count} 'Then' clauses.")

        # Check for FR references such as "(FR-1)" in AC titles
        fr_refs = re.findall(r"\(FR-\d+", content)
        if not fr_refs:
            self._add_finding("warning", "acceptance_criteria",
                "No acceptance criteria reference functional requirements (expected (FR-N) in title).")

    def _check_edge_cases(self):
        """Validate edge cases section."""
        content = self._find_section_content(r"^##\s+Edge\s+Cases")
        if not content.strip():
            return

        ec_pattern = re.compile(r"-\s+EC-(\d+):")
        matches = ec_pattern.findall(content)

        if not matches:
            self._add_finding("warning", "edge_cases", "No numbered edge cases found (expected EC-N: format)")
        elif len(matches) < 3:
            self._add_finding("warning", "edge_cases", f"Only {len(matches)} edge cases. Consider failure modes for each external dependency.")

    def _check_rfc_keywords(self):
        """Check RFC 2119 keywords are used consistently (capitalized)."""
        # Look for lowercase must/should/may that might be intended as RFC keywords
        context_content = self._find_section_content(r"^##\s+Functional\s+Requirements")
        context_content += self._find_section_content(r"^##\s+Non-Functional\s+Requirements")

        for kw in ["must", "should", "may"]:
            # Find lowercase usage in requirement-like sentences; the search
            # is case-sensitive, so properly uppercased keywords don't match.
            pattern = rf"(?:system|service|API|endpoint)\s+{kw}\s+"
            if re.search(pattern, context_content):
                self._add_finding("warning", "rfc_keywords",
                    f"Found lowercase '{kw}' in requirements. RFC 2119 keywords should be UPPERCASE: {kw.upper()}")

    def _check_api_contracts(self):
        """Validate API contracts section."""
        content = self._find_section_content(r"^##\s+API\s+Contracts")
        if not content.strip():
            return

        # Check for at least one endpoint definition (HTTP verb + path)
        has_endpoint = bool(re.search(r"(GET|POST|PUT|PATCH|DELETE)\s+/", content))
        if not has_endpoint:
            self._add_finding("warning", "api_contracts", "No HTTP method + path found (expected e.g., POST /api/endpoint)")

        # Check for request/response definitions
        has_interface = bool(re.search(r"interface\s+\w+", content))
        if not has_interface:
            self._add_finding("info", "api_contracts", "No TypeScript interfaces found. Consider defining request/response shapes.")

    def _check_data_models(self):
        """Validate data models section."""
        content = self._find_section_content(r"^##\s+Data\s+Models")
        if not content.strip():
            return

        # Check for table format: any line with at least three pipes
        has_table = bool(re.search(r"\|.*\|.*\|", content))
        if not has_table:
            self._add_finding("warning", "data_models", "No table-formatted data models found. Use | Field | Type | Constraints | format.")

    def _check_out_of_scope(self):
        """Validate out of scope section."""
        content = self._find_section_content(r"^##\s+Out\s+of\s+Scope")
        if not content.strip():
            return

        os_pattern = re.compile(r"-\s+OS-(\d+):")
        matches = os_pattern.findall(content)

        if not matches:
            self._add_finding("warning", "out_of_scope", "No numbered exclusions found (expected OS-N: format)")
        elif len(matches) < 2:
            self._add_finding("info", "out_of_scope", "Only 1 exclusion listed. Consider what was deliberately left out.")

    def _check_placeholders(self):
        """Check for unfilled placeholder text."""
        placeholder_count = 0
        for pattern in PLACEHOLDER_PATTERNS:
            matches = re.findall(pattern, self.content, re.IGNORECASE)
            placeholder_count += len(matches)

        if placeholder_count > 0:
            self._add_finding("warning", "placeholders",
                f"Found {placeholder_count} placeholder(s) that need to be filled in (e.g., [your name], [describe ...]).")
            # Deduct from overall score proportionally
            # (up to 3 points from every section that is present).
            for key in self.section_scores:
                if self.section_scores[key]["present"]:
                    deduction = min(3, self.section_scores[key]["score"])
                    self.section_scores[key]["score"] = max(0, self.section_scores[key]["score"] - deduction)

    def _check_traceability(self):
        """Check that acceptance criteria reference functional requirements."""
        ac_content = self._find_section_content(r"^##\s+Acceptance\s+Criteria")
        fr_content = self._find_section_content(r"^##\s+Functional\s+Requirements")

        if not ac_content.strip() or not fr_content.strip():
            return

        # Extract FR IDs defined in the FR section
        fr_ids = set(re.findall(r"FR-(\d+)", fr_content))
        # Extract FR references from AC
        ac_fr_refs = set(re.findall(r"FR-(\d+)", ac_content))

        unreferenced = fr_ids - ac_fr_refs
        if unreferenced:
            unreferenced_list = ", ".join(f"FR-{i}" for i in sorted(unreferenced))
            self._add_finding("warning", "traceability",
                f"Functional requirements without acceptance criteria: {unreferenced_list}")

    def _calculate_score(self) -> int:
        """Calculate the total completeness score."""
        total = sum(s["score"] for s in self.section_scores.values())
        maximum = sum(s["max"] for s in self.section_scores.values())

        if maximum == 0:
            return 0  # no sections scored; avoid division by zero

        # Apply finding-based deductions: 5 points per error, 2 per warning
        error_count = sum(1 for f in self.findings if f["severity"] == "error")
        warning_count = sum(1 for f in self.findings if f["severity"] == "warning")

        base_score = round((total / maximum) * 100)
        deduction = (error_count * 5) + (warning_count * 2)

        # Clamp to the 0-100 range
        return max(0, min(100, base_score - deduction))

    @staticmethod
    def _score_to_grade(score: int) -> str:
        """Convert score to letter grade."""
        if score >= 90:
            return "A"
        if score >= 80:
            return "B"
        if score >= 70:
            return "C"
        if score >= 60:
            return "D"
        return "F"

    def _build_summary(self, score: int) -> str:
        """Build human-readable summary."""
        errors = [f for f in self.findings if f["severity"] == "error"]
        warnings = [f for f in self.findings if f["severity"] == "warning"]
        infos = [f for f in self.findings if f["severity"] == "info"]

        lines = [
            f"Spec Completeness Score: {score}/100 (Grade: {self._score_to_grade(score)})",
            f"Errors: {len(errors)}, Warnings: {len(warnings)}, Info: {len(infos)}",
            "",
        ]

        if errors:
            lines.append("ERRORS (must fix):")
            for e in errors:
                lines.append(f" [{e['section']}] {e['message']}")
            lines.append("")

        if warnings:
            lines.append("WARNINGS (should fix):")
            for w in warnings:
                lines.append(f" [{w['section']}] {w['message']}")
            lines.append("")

        if infos:
            lines.append("INFO:")
            for i in infos:
                lines.append(f" [{i['section']}] {i['message']}")
            lines.append("")

        # Section breakdown
        lines.append("Section Breakdown:")
        for key, data in self.section_scores.items():
            status = "PRESENT" if data["present"] else "MISSING"
            lines.append(f" {data['name']}: {data['score']}/{data['max']} ({status})")

        return "\n".join(lines)
|
||||
|
||||
|
||||
def format_human(result: Dict[str, Any]) -> str:
    """Render a validation result dict as a human-readable report."""
    banner = "=" * 60
    report = [banner, "SPEC VALIDATION REPORT", banner, ""]

    # File name is optional; omit the line entirely when it is empty.
    if result["file"]:
        report.extend([f"File: {result['file']}", ""])

    report.append(result["summary"])
    return "\n".join(report)
|
||||
|
||||
|
||||
def main():
    """CLI entry point: validate a spec file and exit with a status code."""
    parser = argparse.ArgumentParser(
        description="Validate a feature specification for completeness and quality.",
        epilog="Example: python spec_validator.py --file spec.md --strict",
    )
    parser.add_argument("--file", "-f", required=True, help="Path to the spec markdown file")
    parser.add_argument("--strict", action="store_true", help="Exit with code 2 if score is below 80")
    parser.add_argument("--json", action="store_true", dest="json_flag", help="Output results as JSON")
    args = parser.parse_args()

    spec_path = Path(args.file)
    if not spec_path.exists():
        print(f"Error: File not found: {spec_path}", file=sys.stderr)
        sys.exit(2)

    text = spec_path.read_text(encoding="utf-8")
    if not text.strip():
        print(f"Error: File is empty: {spec_path}", file=sys.stderr)
        sys.exit(2)

    result = SpecValidator(text, str(spec_path)).validate()

    if args.json_flag:
        print(json.dumps(result, indent=2))
    else:
        print(format_human(result))

    # Exit code policy: 2 = strict-mode score failure or any error finding,
    # 1 = warnings only, 0 = clean.
    severities = {f["severity"] for f in result["findings"]}
    if args.strict and result["score"] < 80:
        sys.exit(2)
    elif "error" in severities:
        sys.exit(2)
    elif "warning" in severities:
        sys.exit(1)
    else:
        sys.exit(0)
|
||||
|
||||
|
||||
if __name__ == "__main__":
    # Allow use both as a script and as an importable module.
    main()
|
||||
431
engineering/spec-driven-workflow/scripts/test_extractor.py
Normal file
431
engineering/spec-driven-workflow/scripts/test_extractor.py
Normal file
@@ -0,0 +1,431 @@
|
||||
#!/usr/bin/env python3
|
||||
"""
|
||||
Test Extractor - Extracts test case stubs from a feature specification.
|
||||
|
||||
Parses acceptance criteria (Given/When/Then) and edge cases from a spec
|
||||
document, then generates test stubs for the specified framework.
|
||||
|
||||
Supported frameworks: pytest, jest, go-test
|
||||
|
||||
Exit codes: 0 = success, 1 = warnings (some criteria unparseable), 2 = critical error
|
||||
|
||||
No external dependencies - uses only Python standard library.
|
||||
"""
|
||||
|
||||
import argparse
|
||||
import json
|
||||
import re
|
||||
import sys
|
||||
import textwrap
|
||||
from pathlib import Path
|
||||
from typing import Dict, List, Any, Optional, Tuple
|
||||
|
||||
|
||||
class SpecParser:
    """Parses spec documents to extract testable criteria."""

    def __init__(self, content: str):
        # Keep both the raw text and a pre-split line list; the extractor
        # methods iterate self.lines.
        self.content = content
        self.lines = content.split("\n")
|
||||
|
||||
    def extract_acceptance_criteria(self) -> List[Dict[str, Any]]:
        """Extract AC-N blocks with Given/When/Then clauses.

        Returns one dict per "### AC-N: Name (refs)" heading with keys
        id, name, references, given, when, then, body.  Given/When/Then
        fields are filled by _parse_gwt from the collected body lines.
        """
        criteria = []
        # Heading shape: "### AC-1: Name (FR-1, FR-2)" — the parenthesized
        # reference list is optional.
        ac_pattern = re.compile(r"###\s+AC-(\d+):\s*(.+?)(?:\s*\(([^)]+)\))?\s*$")

        in_ac = False
        current_ac: Optional[Dict[str, Any]] = None
        body_lines: List[str] = []

        for line in self.lines:
            match = ac_pattern.match(line)
            if match:
                # Save previous AC before starting a new one
                if current_ac is not None:
                    current_ac["body"] = "\n".join(body_lines).strip()
                    self._parse_gwt(current_ac)
                    criteria.append(current_ac)

                ac_id = int(match.group(1))
                name = match.group(2).strip()
                refs = match.group(3).strip() if match.group(3) else ""

                current_ac = {
                    "id": f"AC-{ac_id}",
                    "name": name,
                    # Comma-separated refs, e.g. "FR-1, FR-2" -> ["FR-1", "FR-2"]
                    "references": [r.strip() for r in refs.split(",") if r.strip()] if refs else [],
                    "given": "",
                    "when": "",
                    "then": [],
                    "body": "",
                }
                body_lines = []
                in_ac = True
            elif in_ac:
                # Check if we hit another ## section (the "not ###" test is a
                # defensive guard; "^##\s+" cannot match a ### heading anyway)
                if re.match(r"^##\s+", line) and not re.match(r"^###\s+", line):
                    in_ac = False
                    if current_ac is not None:
                        current_ac["body"] = "\n".join(body_lines).strip()
                        self._parse_gwt(current_ac)
                        criteria.append(current_ac)
                        current_ac = None
                else:
                    body_lines.append(line)

        # Don't forget the last one (no trailing ## header to flush it)
        if current_ac is not None:
            current_ac["body"] = "\n".join(body_lines).strip()
            self._parse_gwt(current_ac)
            criteria.append(current_ac)

        return criteria
|
||||
|
||||
def extract_edge_cases(self) -> List[Dict[str, Any]]:
|
||||
"""Extract EC-N edge case items."""
|
||||
edge_cases = []
|
||||
ec_pattern = re.compile(r"-\s+EC-(\d+):\s*(.+?)(?:\s*->\s*|\s*->\s*|\s*→\s*)(.+)")
|
||||
|
||||
in_section = False
|
||||
for line in self.lines:
|
||||
if re.match(r"^##\s+Edge\s+Cases", line, re.IGNORECASE):
|
||||
in_section = True
|
||||
continue
|
||||
if in_section and re.match(r"^##\s+", line):
|
||||
break
|
||||
if in_section:
|
||||
match = ec_pattern.match(line.strip())
|
||||
if match:
|
||||
edge_cases.append({
|
||||
"id": f"EC-{match.group(1)}",
|
||||
"condition": match.group(2).strip().rstrip("."),
|
||||
"behavior": match.group(3).strip().rstrip("."),
|
||||
})
|
||||
|
||||
return edge_cases
|
||||
|
||||
def extract_spec_title(self) -> str:
|
||||
"""Extract the spec title from the first H1."""
|
||||
for line in self.lines:
|
||||
match = re.match(r"^#\s+(?:Spec:\s*)?(.+)", line)
|
||||
if match:
|
||||
return match.group(1).strip()
|
||||
return "UnknownFeature"
|
||||
|
||||
@staticmethod
|
||||
def _parse_gwt(ac: Dict[str, Any]):
|
||||
"""Parse Given/When/Then from the AC body text."""
|
||||
body = ac["body"]
|
||||
lines = body.split("\n")
|
||||
|
||||
current_section = None
|
||||
for line in lines:
|
||||
stripped = line.strip()
|
||||
if not stripped:
|
||||
continue
|
||||
|
||||
lower = stripped.lower()
|
||||
if lower.startswith("given "):
|
||||
current_section = "given"
|
||||
ac["given"] = stripped[6:].strip()
|
||||
elif lower.startswith("when "):
|
||||
current_section = "when"
|
||||
ac["when"] = stripped[5:].strip()
|
||||
elif lower.startswith("then "):
|
||||
current_section = "then"
|
||||
ac["then"].append(stripped[5:].strip())
|
||||
elif lower.startswith("and "):
|
||||
if current_section == "then":
|
||||
ac["then"].append(stripped[4:].strip())
|
||||
elif current_section == "given":
|
||||
ac["given"] += " AND " + stripped[4:].strip()
|
||||
elif current_section == "when":
|
||||
ac["when"] += " AND " + stripped[4:].strip()
|
||||
|
||||
|
||||
def _sanitize_name(name: str) -> str:
|
||||
"""Convert a human-readable name to a valid function/method name."""
|
||||
# Remove parenthetical references like (FR-1)
|
||||
name = re.sub(r"\([^)]*\)", "", name)
|
||||
# Replace non-alphanumeric with underscore
|
||||
name = re.sub(r"[^a-zA-Z0-9]+", "_", name)
|
||||
# Remove leading/trailing underscores
|
||||
name = name.strip("_").lower()
|
||||
return name or "unnamed"
|
||||
|
||||
|
||||
def _to_pascal_case(name: str) -> str:
    """Convert to PascalCase for Go test names."""
    words = [w for w in _sanitize_name(name).split("_") if w]
    return "".join(word.capitalize() for word in words)
|
||||
|
||||
class PytestGenerator:
    """Generates pytest test stubs."""

    def generate(self, title: str, criteria: List[Dict], edge_cases: List[Dict]) -> str:
        """Render a pytest module as a string: one test-class with one stub
        method per acceptance criterion and edge case. Every stub raises
        NotImplementedError until filled in."""
        class_name = "Test" + _to_pascal_case(title)
        out: List[str] = [
            '"""',
            f"Test suite for: {title}",
            f"Auto-generated from spec. {len(criteria)} acceptance criteria, {len(edge_cases)} edge cases.",
            "",
            "All tests are stubs — implement the test body to make them pass.",
            '"""',
            "",
            "import pytest",
            "",
            "",
            f"class {class_name}:",
            f'    """Tests for {title}."""',
            "",
        ]

        for ac in criteria:
            stub_name = f"test_{ac['id'].lower().replace('-', '')}_{_sanitize_name(ac['name'])}"
            ref_str = f" [{', '.join(ac['references'])}]" if ac["references"] else ""

            out.append(f"    def {stub_name}(self):")
            out.append(f'        """{ac["id"]}: {ac["name"]}{ref_str}"""')
            if ac["given"]:
                out.append(f"        # Given {ac['given']}")
            if ac["when"]:
                out.append(f"        # When {ac['when']}")
            out.extend(f"        # Then {clause}" for clause in ac["then"])
            out.append('        raise NotImplementedError("Implement this test")')
            out.append("")

        if edge_cases:
            out.extend(["    # --- Edge Cases ---", ""])

        for ec in edge_cases:
            stub_name = f"test_{ec['id'].lower().replace('-', '')}_{_sanitize_name(ec['condition'])}"
            out.extend([
                f"    def {stub_name}(self):",
                f'        """{ec["id"]}: {ec["condition"]} -> {ec["behavior"]}"""',
                f"        # Condition: {ec['condition']}",
                f"        # Expected: {ec['behavior']}",
                '        raise NotImplementedError("Implement this test")',
                "",
            ])

        return "\n".join(out)
|
||||
|
||||
class JestGenerator:
    """Generates Jest/Vitest test stubs (TypeScript)."""

    def generate(self, title: str, criteria: List[Dict], edge_cases: List[Dict]) -> str:
        """Render a TypeScript test file as a string: one describe() for the
        spec title with one it() stub per acceptance criterion and edge case.

        Each stub carries the Given/When/Then (or condition/expected) text as
        comments and throws until implemented.
        """
        # Header doc-comment + describe() opener. Constant lines are plain
        # strings (no pointless f-prefix); only lines with placeholders
        # are f-strings.
        lines = [
            "/**",
            f" * Test suite for: {title}",
            f" * Auto-generated from spec. {len(criteria)} acceptance criteria, {len(edge_cases)} edge cases.",
            " *",
            " * All tests are stubs — implement the test body to make them pass.",
            " */",
            "",
            f'describe("{title}", () => {{',
        ]

        for ac in criteria:
            ref_str = f" [{', '.join(ac['references'])}]" if ac["references"] else ""
            test_name = f"{ac['id']}: {ac['name']}{ref_str}"

            lines.append(f'  it("{test_name}", () => {{')
            if ac["given"]:
                lines.append(f"    // Given {ac['given']}")
            if ac["when"]:
                lines.append(f"    // When {ac['when']}")
            for t in ac["then"]:
                lines.append(f"    // Then {t}")
            lines.append("")
            lines.append('    throw new Error("Not implemented");')
            lines.append("  });")
            lines.append("")

        if edge_cases:
            lines.append("  // --- Edge Cases ---")
            lines.append("")

        for ec in edge_cases:
            test_name = f"{ec['id']}: {ec['condition']}"
            lines.append(f'  it("{test_name}", () => {{')
            lines.append(f"    // Condition: {ec['condition']}")
            lines.append(f"    // Expected: {ec['behavior']}")
            lines.append("")
            lines.append('    throw new Error("Not implemented");')
            lines.append("  });")
            lines.append("")

        lines.append("});")
        lines.append("")

        return "\n".join(lines)
|
||||
|
||||
class GoTestGenerator:
    """Generates Go test stubs."""

    def generate(self, title: str, criteria: List[Dict], edge_cases: List[Dict]) -> str:
        """Render a Go test file as a string: one Test* function per
        acceptance criterion and edge case, in package "<word>_test" where
        <word> is the first word of the sanitized title.

        Each stub carries the Given/When/Then (or condition/expected) text as
        comments and calls t.Fatal until implemented.
        """
        # First word of the sanitized title, with a fallback for empty names.
        package_name = _sanitize_name(title).split("_")[0] or "feature"

        lines = [
            f"package {package_name}_test",
            "",
            "import (",
            '\t"testing"',
            ")",
            "",
            f"// Test suite for: {title}",
            f"// Auto-generated from spec. {len(criteria)} acceptance criteria, {len(edge_cases)} edge cases.",
            # Constant line — plain string, no f-prefix needed.
            "// All tests are stubs — implement the test body to make them pass.",
            "",
        ]

        for ac in criteria:
            func_name = "Test" + _to_pascal_case(ac["id"] + " " + ac["name"])
            ref_str = f" [{', '.join(ac['references'])}]" if ac["references"] else ""

            lines.append(f"// {ac['id']}: {ac['name']}{ref_str}")
            lines.append(f"func {func_name}(t *testing.T) {{")

            if ac["given"]:
                lines.append(f"\t// Given {ac['given']}")
            if ac["when"]:
                lines.append(f"\t// When {ac['when']}")
            for then_clause in ac["then"]:
                lines.append(f"\t// Then {then_clause}")

            lines.append("")
            lines.append('\tt.Fatal("Not implemented")')
            lines.append("}")
            lines.append("")

        if edge_cases:
            lines.append("// --- Edge Cases ---")
            lines.append("")

        for ec in edge_cases:
            func_name = "Test" + _to_pascal_case(ec["id"] + " " + ec["condition"])
            lines.append(f"// {ec['id']}: {ec['condition']} -> {ec['behavior']}")
            lines.append(f"func {func_name}(t *testing.T) {{")
            lines.append(f"\t// Condition: {ec['condition']}")
            lines.append(f"\t// Expected: {ec['behavior']}")
            lines.append("")
            lines.append('\tt.Fatal("Not implemented")')
            lines.append("}")
            lines.append("")

        return "\n".join(lines)
|
||||
|
||||
# Registry mapping the CLI --framework choice to its generator class.
GENERATORS = {
    "pytest": PytestGenerator,
    "jest": JestGenerator,
    "go-test": GoTestGenerator,
}

# Conventional output-file suffix per framework.
# NOTE(review): not referenced by the code visible here — presumably for
# callers that derive a default output path; confirm intended use.
FILE_EXTENSIONS = {
    "pytest": ".py",
    "jest": ".test.ts",
    "go-test": "_test.go",
}
|
||||
|
||||
def main():
    """CLI entry point: read a spec file, extract criteria, emit test stubs.

    Exit codes: 0 on success, 1 when output was produced but some ACs could
    not be fully parsed (warnings), 2 on input errors.
    """
    arg_parser = argparse.ArgumentParser(
        description="Extract test case stubs from a feature specification.",
        epilog="Example: python test_extractor.py --file spec.md --framework pytest --output tests/test_feature.py",
    )
    arg_parser.add_argument("--file", "-f", required=True, help="Path to the spec markdown file")
    arg_parser.add_argument(
        "--framework",
        choices=list(GENERATORS.keys()),
        default="pytest",
        help="Target test framework (default: pytest)",
    )
    arg_parser.add_argument("--output", "-o", default=None, help="Output file path (default: stdout)")
    arg_parser.add_argument(
        "--json",
        action="store_true",
        dest="json_flag",
        help="Output extracted criteria as JSON instead of test code",
    )
    args = arg_parser.parse_args()

    spec_path = Path(args.file)
    if not spec_path.exists():
        print(f"Error: File not found: {spec_path}", file=sys.stderr)
        sys.exit(2)

    content = spec_path.read_text(encoding="utf-8")
    if not content.strip():
        print(f"Error: File is empty: {spec_path}", file=sys.stderr)
        sys.exit(2)

    spec = SpecParser(content)
    title = spec.extract_spec_title()
    criteria = spec.extract_acceptance_criteria()
    edge_cases = spec.extract_edge_cases()

    if not criteria and not edge_cases:
        print("Error: No acceptance criteria or edge cases found in spec.", file=sys.stderr)
        sys.exit(2)

    # Flag ACs whose Given/When/Then could not be parsed at all.
    warnings = [
        f"{ac['id']}: Could not parse Given/When/Then — check format."
        for ac in criteria
        if not ac["given"] and not ac["when"]
    ]

    if args.json_flag:
        # Structured dump of everything extracted, plus summary counts.
        output = json.dumps(
            {
                "spec_title": title,
                "framework": args.framework,
                "acceptance_criteria": criteria,
                "edge_cases": edge_cases,
                "warnings": warnings,
                "counts": {
                    "acceptance_criteria": len(criteria),
                    "edge_cases": len(edge_cases),
                    "total_test_cases": len(criteria) + len(edge_cases),
                },
            },
            indent=2,
        )
    else:
        output = GENERATORS[args.framework]().generate(title, criteria, edge_cases)

    if args.output:
        out_path = Path(args.output)
        out_path.parent.mkdir(parents=True, exist_ok=True)
        out_path.write_text(output, encoding="utf-8")
        total = len(criteria) + len(edge_cases)
        print(f"Generated {total} test stubs -> {out_path}", file=sys.stderr)
    else:
        print(output)

    # Warnings are non-fatal but surface as a nonzero exit for CI.
    if warnings:
        for w in warnings:
            print(f"Warning: {w}", file=sys.stderr)
        sys.exit(1)

    sys.exit(0)
|
||||
|
||||
# Allow importing this file as a module without running the CLI.
if __name__ == "__main__":
    main()
|
||||
Reference in New Issue
Block a user