Complete rewrite of the senior-qa skill addressing all feedback from Issue #51: SKILL.md (444 lines): - Added proper YAML frontmatter with trigger phrases - Added Table of Contents - Focused on React/Next.js testing (Jest, RTL, Playwright) - 3 actionable workflows with numbered steps - Removed marketing language References (3 files, 2,625+ lines total): - testing_strategies.md: Test pyramid, coverage targets, CI/CD patterns - test_automation_patterns.md: Page Object Model, fixtures, mocking, async testing - qa_best_practices.md: Naming conventions, isolation, debugging strategies Scripts (3 files, 2,261+ lines total): - test_suite_generator.py: Scans React components, generates Jest+RTL tests - coverage_analyzer.py: Parses Istanbul/LCOV, identifies critical gaps - e2e_test_scaffolder.py: Scans Next.js routes, generates Playwright tests Documentation: - Updated engineering-team/README.md senior-qa section - Added README.md in senior-qa subfolder Resolves #51 Co-authored-by: Claude Opus 4.5 <noreply@anthropic.com>
This commit is contained in:
@@ -1,81 +1,799 @@
|
||||
#!/usr/bin/env python3
|
||||
"""
|
||||
Coverage Analyzer
|
||||
Parses coverage reports and surfaces actionable gaps for the senior-qa skill.
|
||||
|
||||
Parses Jest/Istanbul coverage reports and identifies gaps, uncovered branches,
|
||||
and provides actionable recommendations for improving test coverage.
|
||||
|
||||
Usage:
|
||||
python coverage_analyzer.py coverage/coverage-final.json --threshold 80
|
||||
python coverage_analyzer.py coverage/ --format html --output report.html
|
||||
python coverage_analyzer.py coverage/ --critical-paths
|
||||
"""
|
||||
|
||||
import argparse
import json
import os
import re
import sys
from collections import defaultdict
from dataclasses import asdict, dataclass, field
from datetime import datetime
from pathlib import Path
from typing import Any, Dict, List, Optional, Tuple
|
||||
|
||||
|
||||
@dataclass
class FileCoverage:
    """Coverage metrics for a single source file.

    Every metric is a (covered, total) count pair; the ``*_pct`` properties
    express those pairs as percentages, with an empty metric (total == 0)
    reported as fully covered (100%).
    """
    path: str
    statements: Tuple[int, int]  # (covered, total)
    branches: Tuple[int, int]
    functions: Tuple[int, int]
    lines: Tuple[int, int]
    uncovered_lines: List[int] = field(default_factory=list)
    uncovered_branches: List[str] = field(default_factory=list)

    @staticmethod
    def _as_pct(pair: Tuple[int, int]) -> float:
        """Convert a (covered, total) pair to a percentage; empty -> 100."""
        covered, total = pair
        if total > 0:
            return covered / total * 100
        return 100

    @property
    def statement_pct(self) -> float:
        return self._as_pct(self.statements)

    @property
    def branch_pct(self) -> float:
        return self._as_pct(self.branches)

    @property
    def function_pct(self) -> float:
        return self._as_pct(self.functions)

    @property
    def line_pct(self) -> float:
        return self._as_pct(self.lines)
|
||||
|
||||
|
||||
@dataclass
class CoverageGap:
    """A single identified coverage gap for one file and one metric.

    Produced by CoverageAnalyzer._analyze_file and sorted by severity
    (then by affected-line count) before being reported.
    """
    file: str             # path of the source file the gap was found in
    gap_type: str         # one of: 'statements', 'branches', 'functions', 'lines'
    lines: List[int]      # affected line numbers (empty for branch/function gaps)
    severity: str         # one of: 'critical', 'high', 'medium', 'low'
    description: str      # human-readable summary of the gap
    recommendation: str   # suggested action to close the gap
|
||||
|
||||
|
||||
@dataclass
class CoverageSummary:
    """Aggregate coverage totals across all analyzed files.

    Each metric is a (covered, total) pair summed over every file
    (node_modules excluded by the parsers).
    """
    statements: Tuple[int, int]
    branches: Tuple[int, int]
    functions: Tuple[int, int]
    lines: Tuple[int, int]
    files_analyzed: int
    # NOTE(review): never updated after construction — CoverageAnalyzer
    # recomputes this figure into its stats dict instead.
    files_below_threshold: int = 0
|
||||
|
||||
|
||||
class CoverageParser:
    """Parses coverage reports (Istanbul/Jest JSON or LCOV) into FileCoverage
    records plus a CoverageSummary.

    Files under node_modules are excluded from results and totals.
    """

    def __init__(self, verbose: bool = False):
        # verbose: reserved for diagnostic output; not used by the parsers yet.
        self.verbose = verbose

    def parse(self, path: Path) -> Tuple[Dict[str, FileCoverage], CoverageSummary]:
        """Parse coverage data from file or directory.

        A file is dispatched on its name (.json -> Istanbul JSON,
        .info / *lcov* -> LCOV); a directory is probed for well-known
        coverage file names and recursed into self.parse.

        Raises:
            ValueError: if no parseable coverage data is found at `path`.
        """
        if path.is_file():
            if path.suffix == '.json':
                return self._parse_istanbul_json(path)
            elif path.suffix == '.info' or 'lcov' in path.name:
                return self._parse_lcov(path)
        elif path.is_dir():
            # Look for common coverage files
            for filename in ['coverage-final.json', 'coverage-summary.json', 'lcov.info']:
                candidate = path / filename
                if candidate.exists():
                    return self.parse(candidate)

            # Check for coverage-final.json in coverage directory
            # NOTE(review): redundant — the loop above already probed this name.
            coverage_json = path / 'coverage-final.json'
            if coverage_json.exists():
                return self._parse_istanbul_json(coverage_json)

        raise ValueError(f"Could not find or parse coverage data at: {path}")

    def _parse_istanbul_json(self, path: Path) -> Tuple[Dict[str, FileCoverage], CoverageSummary]:
        """Parse Istanbul/Jest JSON coverage format.

        Istanbul keys per file: 'statementMap'/'s' (statement locations/hits),
        'branchMap'/'b' (branch locations / per-location hit lists),
        'fnMap'/'f' (function declarations/hits).
        """
        with open(path, 'r') as f:
            data = json.load(f)

        files = {}
        # Running [covered, total] accumulators across all files.
        total_statements = [0, 0]
        total_branches = [0, 0]
        total_functions = [0, 0]
        total_lines = [0, 0]

        for file_path, file_data in data.items():
            # Skip node_modules
            if 'node_modules' in file_path:
                continue

            # Parse statement coverage: 's' maps statement id -> hit count.
            s_map = file_data.get('statementMap', {})
            s_hits = file_data.get('s', {})
            covered_statements = sum(1 for h in s_hits.values() if h > 0)
            total_statements[0] += covered_statements
            total_statements[1] += len(s_map)

            # Parse branch coverage: 'b' maps branch id -> list of per-location hits.
            b_map = file_data.get('branchMap', {})
            b_hits = file_data.get('b', {})
            covered_branches = sum(
                sum(1 for h in hits if h > 0)
                for hits in b_hits.values()
            )
            total_branch_count = sum(len(b['locations']) for b in b_map.values())
            total_branches[0] += covered_branches
            total_branches[1] += total_branch_count

            # Parse function coverage: 'f' maps function id -> hit count.
            fn_map = file_data.get('fnMap', {})
            fn_hits = file_data.get('f', {})
            covered_functions = sum(1 for h in fn_hits.values() if h > 0)
            total_functions[0] += covered_functions
            total_functions[1] += len(fn_map)

            # Determine uncovered lines: the start line of each unexecuted statement.
            uncovered_lines = []
            for stmt_id, hits in s_hits.items():
                if hits == 0 and stmt_id in s_map:
                    stmt = s_map[stmt_id]
                    start_line = stmt.get('start', {}).get('line', 0)
                    if start_line not in uncovered_lines:
                        uncovered_lines.append(start_line)

            # Count lines covered/total from statement spans.
            line_coverage = self._calculate_line_coverage(s_map, s_hits)
            total_lines[0] += line_coverage[0]
            total_lines[1] += line_coverage[1]

            # Identify uncovered branches as "branchId:locationIndex" strings.
            uncovered_branches = []
            for branch_id, hits in b_hits.items():
                for idx, hit in enumerate(hits):
                    if hit == 0:
                        uncovered_branches.append(f"{branch_id}:{idx}")

            files[file_path] = FileCoverage(
                path=file_path,
                statements=(covered_statements, len(s_map)),
                branches=(covered_branches, total_branch_count),
                functions=(covered_functions, len(fn_map)),
                lines=line_coverage,
                uncovered_lines=sorted(uncovered_lines)[:50],  # Limit report size
                uncovered_branches=uncovered_branches[:20]
            )

        summary = CoverageSummary(
            statements=tuple(total_statements),
            branches=tuple(total_branches),
            functions=tuple(total_functions),
            lines=tuple(total_lines),
            files_analyzed=len(files)
        )

        return files, summary

    def _calculate_line_coverage(self, s_map: Dict, s_hits: Dict) -> Tuple[int, int]:
        """Derive a (covered, total) line count from statement locations.

        A line counts as covered if any statement spanning it was executed.
        """
        lines = set()
        covered_lines = set()

        for stmt_id, stmt in s_map.items():
            start_line = stmt.get('start', {}).get('line', 0)
            end_line = stmt.get('end', {}).get('line', start_line)
            for line in range(start_line, end_line + 1):
                lines.add(line)
                if s_hits.get(stmt_id, 0) > 0:
                    covered_lines.add(line)

        return (len(covered_lines), len(lines))

    def _parse_lcov(self, path: Path) -> Tuple[Dict[str, FileCoverage], CoverageSummary]:
        """Parse LCOV format coverage data.

        Record types handled: SF (source file), DA (line hits), FN/FNDA
        (function declarations / hits), BRDA (branch hits), end_of_record.
        LCOV has no statement granularity, so line counts double as
        statement counts.
        """
        with open(path, 'r') as f:
            content = f.read()

        files = {}
        current_file = None
        current_data = {}

        # Running [covered, total] accumulators across all files.
        total = {
            'statements': [0, 0],
            'branches': [0, 0],
            'functions': [0, 0],
            'lines': [0, 0]
        }

        for line in content.split('\n'):
            line = line.strip()

            if line.startswith('SF:'):
                # Start of a new source-file record.
                current_file = line[3:]
                current_data = {
                    'lines_hit': 0, 'lines_total': 0,
                    'functions_hit': 0, 'functions_total': 0,
                    'branches_hit': 0, 'branches_total': 0,
                    'uncovered_lines': []
                }
            elif line.startswith('DA:'):
                # DA:<line>,<hits>
                parts = line[3:].split(',')
                if len(parts) >= 2:
                    line_num = int(parts[0])
                    hits = int(parts[1])
                    current_data['lines_total'] += 1
                    if hits > 0:
                        current_data['lines_hit'] += 1
                    else:
                        current_data['uncovered_lines'].append(line_num)
            elif line.startswith('FN:'):
                current_data['functions_total'] += 1
            elif line.startswith('FNDA:'):
                # FNDA:<hits>,<function name>
                parts = line[5:].split(',')
                if len(parts) >= 1 and int(parts[0]) > 0:
                    current_data['functions_hit'] += 1
            elif line.startswith('BRDA:'):
                # BRDA:<line>,<block>,<branch>,<hits or '-'> ('-' means never taken)
                parts = line[5:].split(',')
                current_data['branches_total'] += 1
                if len(parts) >= 4 and parts[3] != '-' and int(parts[3]) > 0:
                    current_data['branches_hit'] += 1
            elif line == 'end_of_record' and current_file:
                # Skip node_modules (both in per-file results and in totals,
                # matching the Istanbul parser).
                if 'node_modules' not in current_file:
                    files[current_file] = FileCoverage(
                        path=current_file,
                        # LCOV reports no statement data; reuse line counts.
                        statements=(current_data['lines_hit'], current_data['lines_total']),
                        branches=(current_data['branches_hit'], current_data['branches_total']),
                        functions=(current_data['functions_hit'], current_data['functions_total']),
                        lines=(current_data['lines_hit'], current_data['lines_total']),
                        uncovered_lines=current_data['uncovered_lines'][:50]
                    )

                    for key in total:
                        if key == 'statements' or key == 'lines':
                            total[key][0] += current_data['lines_hit']
                            total[key][1] += current_data['lines_total']
                        elif key == 'branches':
                            total[key][0] += current_data['branches_hit']
                            total[key][1] += current_data['branches_total']
                        elif key == 'functions':
                            total[key][0] += current_data['functions_hit']
                            total[key][1] += current_data['functions_total']

                current_file = None

        summary = CoverageSummary(
            statements=tuple(total['statements']),
            branches=tuple(total['branches']),
            functions=tuple(total['functions']),
            lines=tuple(total['lines']),
            files_analyzed=len(files)
        )

        return files, summary
|
||||
|
||||
|
||||
class CoverageAnalyzer:
    """Analyzes parsed coverage data and produces prioritized gap findings.

    Fix over the previous revision: a conflicting scaffold
    ``__init__(target_path, verbose)`` and a broken ``run()`` (which
    referenced ``self.target_path``, ``validate_target()`` and
    ``generate_report()`` — none of which exist on this class, and called
    ``analyze()`` with the wrong arity) were removed. The effective
    interface — ``__init__(threshold, critical_paths, verbose)`` and
    ``analyze(files, summary)`` — is unchanged.
    """

    # Files whose lowercased path matches any of these regexes are treated
    # as business-critical and held to a 95% coverage target.
    CRITICAL_PATTERNS = [
        r'auth', r'payment', r'security', r'login', r'register',
        r'checkout', r'order', r'transaction', r'billing'
    ]

    # Service-layer files get an intermediate 85% target.
    SERVICE_PATTERNS = [
        r'service', r'api', r'handler', r'controller', r'middleware'
    ]

    def __init__(
        self,
        threshold: int = 80,
        critical_paths: bool = False,
        verbose: bool = False
    ):
        """
        Args:
            threshold: default line-coverage target (percent) for ordinary files.
            critical_paths: flag accepted for CLI parity; classification is
                currently always driven by CRITICAL_PATTERNS.
            verbose: reserved for diagnostic output.
        """
        self.threshold = threshold
        self.critical_paths = critical_paths
        self.verbose = verbose
        self.results = {}

    def analyze(
        self,
        files: Dict[str, FileCoverage],
        summary: CoverageSummary
    ) -> Tuple[List[CoverageGap], Dict[str, Any]]:
        """Analyze coverage; return (gaps, {'recommendations', 'stats'}).

        Gaps are sorted by severity (critical first), then by number of
        affected lines (descending).
        """
        gaps = []
        recommendations = {
            'critical': [],
            'high': [],
            'medium': [],
            'low': []
        }

        # Analyze each file independently.
        for file_path, coverage in files.items():
            file_gaps = self._analyze_file(file_path, coverage)
            gaps.extend(file_gaps)

        # Sort gaps by severity, then by affected-line count (descending).
        severity_order = {'critical': 0, 'high': 1, 'medium': 2, 'low': 3}
        gaps.sort(key=lambda g: (severity_order[g.severity], -len(g.lines)))

        # Bucket a trimmed view of each gap under its severity.
        for gap in gaps:
            recommendations[gap.severity].append({
                'file': gap.file,
                'type': gap.gap_type,
                'lines': gap.lines[:10],  # limit report size
                'description': gap.description,
                'recommendation': gap.recommendation
            })

        # Overall stats; empty metrics count as fully covered.
        stats = {
            'overall_statement_pct': (summary.statements[0] / summary.statements[1] * 100) if summary.statements[1] > 0 else 100,
            'overall_branch_pct': (summary.branches[0] / summary.branches[1] * 100) if summary.branches[1] > 0 else 100,
            'overall_function_pct': (summary.functions[0] / summary.functions[1] * 100) if summary.functions[1] > 0 else 100,
            'overall_line_pct': (summary.lines[0] / summary.lines[1] * 100) if summary.lines[1] > 0 else 100,
            'files_analyzed': summary.files_analyzed,
            'files_below_threshold': sum(
                1 for f in files.values()
                if f.line_pct < self.threshold
            ),
            'total_gaps': len(gaps),
            'critical_gaps': len(recommendations['critical']),
            'threshold': self.threshold,
            'meets_threshold': (summary.lines[0] / summary.lines[1] * 100) >= self.threshold if summary.lines[1] > 0 else True
        }

        return gaps, {
            'recommendations': recommendations,
            'stats': stats
        }

    def _analyze_file(self, file_path: str, coverage: FileCoverage) -> List[CoverageGap]:
        """Analyze a single file; return zero or more CoverageGap findings."""
        gaps = []

        # Classify the file to pick a target threshold and base severity.
        is_critical = any(
            re.search(pattern, file_path.lower())
            for pattern in self.CRITICAL_PATTERNS
        )
        is_service = any(
            re.search(pattern, file_path.lower())
            for pattern in self.SERVICE_PATTERNS
        )

        if is_critical:
            base_severity = 'critical'
            target_threshold = 95
        elif is_service:
            base_severity = 'high'
            target_threshold = 85
        else:
            base_severity = 'medium'
            target_threshold = self.threshold

        # Line coverage: severity is demoted one level unless coverage is very low.
        if coverage.line_pct < target_threshold:
            severity = base_severity if coverage.line_pct < 50 else self._lower_severity(base_severity)

            gaps.append(CoverageGap(
                file=file_path,
                gap_type='lines',
                lines=coverage.uncovered_lines[:20],
                severity=severity,
                description=f"Line coverage at {coverage.line_pct:.1f}% (target: {target_threshold}%)",
                recommendation=self._get_line_recommendation(coverage)
            ))

        # Branch coverage: allow 5% slack below the line target.
        if coverage.branch_pct < target_threshold - 5:
            severity = base_severity if coverage.branch_pct < 40 else self._lower_severity(base_severity)

            gaps.append(CoverageGap(
                file=file_path,
                gap_type='branches',
                lines=[],
                severity=severity,
                description=f"Branch coverage at {coverage.branch_pct:.1f}%",
                recommendation=f"Add tests for conditional logic. {len(coverage.uncovered_branches)} uncovered branches."
            ))

        # Function coverage: always one notch below the base severity.
        if coverage.function_pct < target_threshold:
            severity = self._lower_severity(base_severity)

            gaps.append(CoverageGap(
                file=file_path,
                gap_type='functions',
                lines=[],
                severity=severity,
                description=f"Function coverage at {coverage.function_pct:.1f}%",
                recommendation="Add tests for uncovered functions/methods."
            ))

        return gaps

    def _lower_severity(self, severity: str) -> str:
        """Demote a severity by one level ('low' stays 'low')."""
        mapping = {
            'critical': 'high',
            'high': 'medium',
            'medium': 'low',
            'low': 'low'
        }
        return mapping[severity]

    def _get_line_recommendation(self, coverage: FileCoverage) -> str:
        """Pick a recommendation string based on how low line coverage is."""
        if coverage.line_pct < 30:
            return "This file has very low coverage. Consider adding basic render/unit tests first."
        elif coverage.line_pct < 60:
            return "Add tests covering the main functionality and happy paths."
        else:
            return "Focus on edge cases and error handling paths."
|
||||
|
||||
|
||||
class ReportGenerator:
    """Generates coverage reports in text or HTML format.

    Both generators read their numbers from the ``analysis`` dict produced
    by CoverageAnalyzer.analyze() and the per-file FileCoverage records.
    """

    def __init__(self, verbose: bool = False):
        # verbose: reserved for diagnostic output; not used yet.
        self.verbose = verbose

    def generate_text_report(
        self,
        files: Dict[str, FileCoverage],
        summary: CoverageSummary,
        analysis: Dict[str, Any],
        threshold: int
    ) -> str:
        """Generate a plain-text report.

        Args:
            files: per-file coverage keyed by path.
            summary: overall totals (currently unused; figures come from
                ``analysis['stats']``).
            analysis: dict with 'stats' and 'recommendations' keys.
            threshold: line-coverage threshold used for pass/fail display.
        """
        lines = []

        # Header
        lines.append("=" * 60)
        lines.append("COVERAGE ANALYSIS REPORT")
        lines.append(f"Generated: {datetime.now().strftime('%Y-%m-%d %H:%M:%S')}")
        lines.append("=" * 60)
        lines.append("")

        # Overall summary
        stats = analysis['stats']
        lines.append("OVERALL COVERAGE:")
        lines.append(f" Statements: {stats['overall_statement_pct']:.1f}%")
        lines.append(f" Branches: {stats['overall_branch_pct']:.1f}%")
        lines.append(f" Functions: {stats['overall_function_pct']:.1f}%")
        lines.append(f" Lines: {stats['overall_line_pct']:.1f}%")
        lines.append("")

        # Threshold check
        threshold_status = "PASS" if stats['meets_threshold'] else "FAIL"
        lines.append(f"Threshold ({threshold}%): {threshold_status}")
        lines.append(f"Files analyzed: {stats['files_analyzed']}")
        lines.append(f"Files below threshold: {stats['files_below_threshold']}")
        lines.append("")

        # Critical gaps (top 5 only)
        recs = analysis['recommendations']
        if recs['critical']:
            lines.append("-" * 60)
            lines.append("CRITICAL GAPS (requires immediate attention):")
            for rec in recs['critical'][:5]:
                lines.append(f" - {rec['file']}")
                lines.append(f" {rec['description']}")
                if rec['lines']:
                    lines.append(f" Uncovered lines: {', '.join(map(str, rec['lines'][:5]))}")
            lines.append("")

        # High priority gaps (top 5 only)
        if recs['high']:
            lines.append("-" * 60)
            lines.append("HIGH PRIORITY GAPS:")
            for rec in recs['high'][:5]:
                lines.append(f" - {rec['file']}")
                lines.append(f" {rec['description']}")
            lines.append("")

        # Files below threshold, worst first
        below_threshold = [
            (path, cov) for path, cov in files.items()
            if cov.line_pct < threshold
        ]
        below_threshold.sort(key=lambda x: x[1].line_pct)

        if below_threshold:
            lines.append("-" * 60)
            lines.append(f"FILES BELOW {threshold}% THRESHOLD:")
            for path, cov in below_threshold[:10]:
                # Show only the basename to keep the listing compact.
                short_path = path.split('/')[-1] if '/' in path else path
                lines.append(f" {cov.line_pct:5.1f}% {short_path}")
            if len(below_threshold) > 10:
                lines.append(f" ... and {len(below_threshold) - 10} more files")
            lines.append("")

        # Top recommendations: a mix of the worst critical/high/medium items.
        lines.append("-" * 60)
        lines.append("RECOMMENDATIONS:")
        all_recs = (
            recs['critical'][:2] + recs['high'][:2] + recs['medium'][:2]
        )
        for i, rec in enumerate(all_recs[:5], 1):
            lines.append(f" {i}. {rec['recommendation']}")
            lines.append(f" File: {rec['file']}")
        lines.append("")

        lines.append("=" * 60)
        return '\n'.join(lines)

    def generate_html_report(
        self,
        files: Dict[str, FileCoverage],
        summary: CoverageSummary,
        analysis: Dict[str, Any],
        threshold: int
    ) -> str:
        """Generate a self-contained HTML report.

        Args:
            files: per-file coverage keyed by path.
            summary: overall totals (currently unused; figures come from
                ``analysis['stats']``).
            analysis: dict with 'stats' and 'recommendations' keys.
            threshold: coverage threshold used for pass/fail coloring.

        NOTE(review): gap descriptions/file names are interpolated into the
        HTML without escaping; fine for trusted coverage data, but worth
        confirming if paths can contain markup characters.
        """
        stats = analysis['stats']
        recs = analysis['recommendations']

        # Static page skeleton with the four headline percentages.
        # (CSS braces are doubled to survive the f-string.)
        html = f"""<!DOCTYPE html>
<html lang="en">
<head>
<meta charset="UTF-8">
<meta name="viewport" content="width=device-width, initial-scale=1.0">
<title>Coverage Analysis Report</title>
<style>
body {{ font-family: -apple-system, BlinkMacSystemFont, 'Segoe UI', Roboto, sans-serif; margin: 40px; }}
h1 {{ color: #333; }}
.summary {{ display: grid; grid-template-columns: repeat(4, 1fr); gap: 20px; margin: 20px 0; }}
.stat {{ background: #f5f5f5; padding: 20px; border-radius: 8px; text-align: center; }}
.stat-value {{ font-size: 2em; font-weight: bold; }}
.pass {{ color: #22c55e; }}
.fail {{ color: #ef4444; }}
.warn {{ color: #f59e0b; }}
table {{ width: 100%; border-collapse: collapse; margin: 20px 0; }}
th, td {{ padding: 12px; text-align: left; border-bottom: 1px solid #ddd; }}
th {{ background: #f5f5f5; }}
.gap-critical {{ background: #fef2f2; }}
.gap-high {{ background: #fffbeb; }}
.progress {{ background: #e5e7eb; border-radius: 4px; height: 8px; }}
.progress-bar {{ height: 100%; border-radius: 4px; }}
</style>
</head>
<body>
<h1>Coverage Analysis Report</h1>
<p>Generated: {datetime.now().strftime('%Y-%m-%d %H:%M:%S')}</p>

<div class="summary">
<div class="stat">
<div class="stat-value {'pass' if stats['overall_statement_pct'] >= threshold else 'fail'}">{stats['overall_statement_pct']:.1f}%</div>
<div>Statements</div>
</div>
<div class="stat">
<div class="stat-value {'pass' if stats['overall_branch_pct'] >= threshold - 5 else 'fail'}">{stats['overall_branch_pct']:.1f}%</div>
<div>Branches</div>
</div>
<div class="stat">
<div class="stat-value {'pass' if stats['overall_function_pct'] >= threshold else 'fail'}">{stats['overall_function_pct']:.1f}%</div>
<div>Functions</div>
</div>
<div class="stat">
<div class="stat-value {'pass' if stats['overall_line_pct'] >= threshold else 'fail'}">{stats['overall_line_pct']:.1f}%</div>
<div>Lines</div>
</div>
</div>

<h2>Threshold Status: <span class="{'pass' if stats['meets_threshold'] else 'fail'}">{'PASS' if stats['meets_threshold'] else 'FAIL'}</span></h2>
<p>Target: {threshold}% | Files Analyzed: {stats['files_analyzed']} | Below Threshold: {stats['files_below_threshold']}</p>

<h2>Coverage Gaps</h2>
<table>
<thead>
<tr>
<th>Severity</th>
<th>File</th>
<th>Issue</th>
<th>Recommendation</th>
</tr>
</thead>
<tbody>
"""

        # Gap table: all critical and high items, plus up to 5 medium; capped at 15 rows.
        all_gaps = (
            [(g, 'critical') for g in recs['critical']] +
            [(g, 'high') for g in recs['high']] +
            [(g, 'medium') for g in recs['medium'][:5]]
        )

        for gap, severity in all_gaps[:15]:
            row_class = f"gap-{severity}" if severity in ['critical', 'high'] else ""
            html += f""" <tr class="{row_class}">
<td>{severity.upper()}</td>
<td>{gap['file'].split('/')[-1]}</td>
<td>{gap['description']}</td>
<td>{gap['recommendation']}</td>
</tr>
"""

        html += """ </tbody>
</table>

<h2>File Coverage Details</h2>
<table>
<thead>
<tr>
<th>File</th>
<th>Statements</th>
<th>Branches</th>
<th>Functions</th>
<th>Lines</th>
</tr>
</thead>
<tbody>
"""

        # Sort files by line coverage (worst first); show at most 20.
        sorted_files = sorted(files.items(), key=lambda x: x[1].line_pct)

        for path, cov in sorted_files[:20]:
            short_path = path.split('/')[-1] if '/' in path else path
            html += f""" <tr>
<td>{short_path}</td>
<td>{cov.statement_pct:.1f}%</td>
<td>{cov.branch_pct:.1f}%</td>
<td>{cov.function_pct:.1f}%</td>
<td>{cov.line_pct:.1f}%</td>
</tr>
"""

        html += """ </tbody>
</table>
</body>
</html>
"""
        return html
|
||||
|
||||
|
||||
class CoverageAnalyzerTool:
    """End-to-end driver: parse coverage, analyze gaps, emit a report.

    Fixes over the previous revision:
    - ``run()`` now returns its ``results`` dict; it previously fell off the
      end and returned None, which broke the ``--json`` output in main().
    - Removed leftover scaffold methods (validate_target / analyze /
      generate_report) that referenced ``self.target_path`` and
      ``self.results`` — attributes this class never sets — and contained
      an unbound ``return results``.
    """

    def __init__(
        self,
        coverage_path: str,
        threshold: int = 80,
        critical_paths: bool = False,
        strict: bool = False,
        output_format: str = 'text',
        output_path: Optional[str] = None,
        verbose: bool = False
    ):
        """
        Args:
            coverage_path: coverage file or directory to analyze.
            threshold: line-coverage target percentage.
            critical_paths: forwarded to CoverageAnalyzer.
            strict: exit(1) when overall line coverage misses the threshold.
            output_format: 'html' for the HTML report; anything else -> text.
            output_path: write the report here instead of printing it.
            verbose: forwarded to the parser/analyzer/reporter.
        """
        self.coverage_path = Path(coverage_path)
        self.threshold = threshold
        self.critical_paths = critical_paths
        self.strict = strict
        self.output_format = output_format
        self.output_path = output_path
        self.verbose = verbose

    def run(self) -> Dict[str, Any]:
        """Run the full analysis pipeline and return a summary results dict.

        In strict mode, exits the process with status 1 when overall line
        coverage is below the threshold (after printing/writing the report).
        """
        print(f"Analyzing coverage from: {self.coverage_path}")

        # Parse coverage data
        parser = CoverageParser(self.verbose)
        files, summary = parser.parse(self.coverage_path)

        print(f"Found coverage data for {len(files)} files")

        # Analyze coverage
        analyzer = CoverageAnalyzer(
            threshold=self.threshold,
            critical_paths=self.critical_paths,
            verbose=self.verbose
        )
        gaps, analysis = analyzer.analyze(files, summary)

        # Generate report
        reporter = ReportGenerator(self.verbose)

        if self.output_format == 'html':
            report = reporter.generate_html_report(files, summary, analysis, self.threshold)
        else:
            report = reporter.generate_text_report(files, summary, analysis, self.threshold)

        # Output report to file or stdout
        if self.output_path:
            with open(self.output_path, 'w') as f:
                f.write(report)
            print(f"Report written to: {self.output_path}")
        else:
            print(report)

        # Machine-readable summary (consumed by main()'s --json flag)
        results = {
            'status': 'pass' if analysis['stats']['meets_threshold'] else 'fail',
            'threshold': self.threshold,
            'coverage': {
                'statements': analysis['stats']['overall_statement_pct'],
                'branches': analysis['stats']['overall_branch_pct'],
                'functions': analysis['stats']['overall_function_pct'],
                'lines': analysis['stats']['overall_line_pct']
            },
            'files_analyzed': summary.files_analyzed,
            'files_below_threshold': analysis['stats']['files_below_threshold'],
            'total_gaps': analysis['stats']['total_gaps'],
            'critical_gaps': analysis['stats']['critical_gaps']
        }

        # Exit with error if strict mode and below threshold
        if self.strict and not analysis['stats']['meets_threshold']:
            print(f"\nFailed: Coverage {analysis['stats']['overall_line_pct']:.1f}% below threshold {self.threshold}%")
            sys.exit(1)

        return results
|
||||
|
||||
|
||||
def main():
    """CLI entry point.

    Fixes over the previous revision: duplicated ``description=`` kwargs
    (a SyntaxError), a duplicated positional argument, ``--output`` defined
    twice (argparse conflict), a truncated ``--verbose`` definition, and a
    stray second tool construction that called CoverageAnalyzer with the
    wrong signature.
    """
    parser = argparse.ArgumentParser(
        description="Analyze Jest/Istanbul coverage reports and identify gaps",
        formatter_class=argparse.RawDescriptionHelpFormatter,
        epilog="""
Examples:
  # Basic analysis
  python coverage_analyzer.py coverage/coverage-final.json

  # With threshold enforcement
  python coverage_analyzer.py coverage/ --threshold 80 --strict

  # Generate HTML report
  python coverage_analyzer.py coverage/ --format html --output report.html

  # Focus on critical paths
  python coverage_analyzer.py coverage/ --critical-paths
"""
    )
    parser.add_argument(
        'coverage',
        help='Path to coverage file or directory'
    )
    parser.add_argument(
        '--threshold', '-t',
        type=int,
        default=80,
        help='Coverage threshold percentage (default: 80)'
    )
    parser.add_argument(
        '--strict',
        action='store_true',
        help='Exit with error if coverage is below threshold'
    )
    parser.add_argument(
        '--critical-paths',
        action='store_true',
        help='Focus analysis on critical business paths'
    )
    parser.add_argument(
        '--format', '-f',
        choices=['text', 'html', 'json'],
        default='text',
        help='Output format (default: text)'
    )
    parser.add_argument(
        '--output', '-o',
        help='Output file path'
    )
    parser.add_argument(
        '--verbose', '-v',
        action='store_true',
        help='Enable verbose output'
    )
    parser.add_argument(
        '--json',
        action='store_true',
        help='Output results as JSON (summary only)'
    )

    args = parser.parse_args()

    try:
        tool = CoverageAnalyzerTool(
            coverage_path=args.coverage,
            threshold=args.threshold,
            critical_paths=args.critical_paths,
            strict=args.strict,
            output_format=args.format,
            output_path=args.output,
            verbose=args.verbose
        )

        results = tool.run()

        if args.json:
            print(json.dumps(results, indent=2))

    except Exception as e:
        # Top-level boundary: report the failure and exit non-zero.
        print(f"Error: {e}")
        if args.verbose:
            import traceback
            traceback.print_exc()
        sys.exit(1)


if __name__ == '__main__':
    main()
|
||||
|
||||
@@ -1,81 +1,788 @@
|
||||
#!/usr/bin/env python3
|
||||
"""
|
||||
E2E Test Scaffolder
|
||||
Automated tool for senior qa tasks
|
||||
|
||||
Scans Next.js pages/app directory and generates Playwright test files
|
||||
with common interactions, Page Object Model classes, and configuration.
|
||||
|
||||
Usage:
|
||||
python e2e_test_scaffolder.py src/app/ --output e2e/
|
||||
python e2e_test_scaffolder.py pages/ --include-pom --routes "/login,/dashboard"
|
||||
"""
|
||||
|
||||
import os
|
||||
import sys
|
||||
import json
|
||||
import argparse
|
||||
import re
|
||||
from pathlib import Path
|
||||
from typing import Dict, List, Optional
|
||||
from typing import Dict, List, Optional, Tuple, Set
|
||||
from dataclasses import dataclass, field, asdict
|
||||
from datetime import datetime
|
||||
|
||||
|
||||
@dataclass
|
||||
class RouteInfo:
|
||||
"""Information about a detected route"""
|
||||
path: str # URL path e.g., /dashboard
|
||||
file_path: str # File system path
|
||||
route_type: str # 'page', 'layout', 'api', 'dynamic'
|
||||
has_params: bool
|
||||
params: List[str]
|
||||
has_form: bool
|
||||
has_auth: bool
|
||||
interactions: List[str]
|
||||
|
||||
|
||||
@dataclass
|
||||
class TestSpec:
|
||||
"""A Playwright test specification"""
|
||||
route: RouteInfo
|
||||
test_cases: List[str]
|
||||
imports: Set[str] = field(default_factory=set)
|
||||
|
||||
|
||||
@dataclass
|
||||
class PageObject:
|
||||
"""Page Object Model class definition"""
|
||||
name: str
|
||||
route: str
|
||||
locators: List[Tuple[str, str, str]] # (name, selector, description)
|
||||
methods: List[Tuple[str, str]] # (name, code)
|
||||
|
||||
|
||||
class RouteScanner:
|
||||
"""Scans Next.js directories for routes"""
|
||||
|
||||
# Pattern to detect page files
|
||||
PAGE_PATTERNS = {
|
||||
'page.tsx', 'page.ts', 'page.jsx', 'page.js', # App Router
|
||||
'index.tsx', 'index.ts', 'index.jsx', 'index.js' # Pages Router
|
||||
}
|
||||
|
||||
# Patterns indicating specific features
|
||||
FORM_PATTERNS = [
|
||||
r'<form', r'handleSubmit', r'onSubmit', r'useForm',
|
||||
r'<input', r'<textarea', r'<select'
|
||||
]
|
||||
|
||||
AUTH_PATTERNS = [
|
||||
r'auth', r'login', r'signin', r'signup', r'register',
|
||||
r'useAuth', r'useSession', r'getServerSession', r'withAuth'
|
||||
]
|
||||
|
||||
INTERACTION_PATTERNS = {
|
||||
'click': r'onClick|button|Button|<a\s|Link',
|
||||
'type': r'<input|<textarea|onChange',
|
||||
'select': r'<select|Dropdown|Select',
|
||||
'navigation': r'useRouter|router\.push|Link',
|
||||
'modal': r'Modal|Dialog|isOpen|onClose',
|
||||
'toggle': r'toggle|Switch|Checkbox',
|
||||
'upload': r'<input.*type=["\']file|upload|dropzone'
|
||||
}
|
||||
|
||||
def __init__(self, source_path: Path, verbose: bool = False):
|
||||
self.source_path = source_path
|
||||
self.verbose = verbose
|
||||
self.routes: List[RouteInfo] = []
|
||||
self.is_app_router = self._detect_router_type()
|
||||
|
||||
def _detect_router_type(self) -> bool:
|
||||
"""Detect if using App Router or Pages Router"""
|
||||
# App Router: has 'app' directory with page.tsx files
|
||||
# Pages Router: has 'pages' directory with index.tsx files
|
||||
app_dir = self.source_path / 'app'
|
||||
if app_dir.exists() and list(app_dir.rglob('page.*')):
|
||||
return True
|
||||
|
||||
return 'app' in str(self.source_path).lower()
|
||||
|
||||
def scan(self, filter_routes: Optional[List[str]] = None) -> List[RouteInfo]:
|
||||
"""Scan for all routes"""
|
||||
self._scan_directory(self.source_path)
|
||||
|
||||
# Filter if specific routes requested
|
||||
if filter_routes:
|
||||
self.routes = [
|
||||
r for r in self.routes
|
||||
if any(fr in r.path for fr in filter_routes)
|
||||
]
|
||||
|
||||
return self.routes
|
||||
|
||||
def _scan_directory(self, directory: Path, url_path: str = ''):
|
||||
"""Recursively scan directory for routes"""
|
||||
if not directory.exists():
|
||||
return
|
||||
|
||||
for item in directory.iterdir():
|
||||
if item.name.startswith('.') or item.name == 'node_modules':
|
||||
continue
|
||||
|
||||
if item.is_dir():
|
||||
# Handle route groups (parentheses) and dynamic routes
|
||||
dir_name = item.name
|
||||
|
||||
if dir_name.startswith('(') and dir_name.endswith(')'):
|
||||
# Route group - doesn't add to URL path
|
||||
self._scan_directory(item, url_path)
|
||||
elif dir_name.startswith('[') and dir_name.endswith(']'):
|
||||
# Dynamic route
|
||||
param_name = dir_name[1:-1]
|
||||
if param_name.startswith('...'):
|
||||
# Catch-all route
|
||||
new_path = f"{url_path}/[...{param_name[3:]}]"
|
||||
else:
|
||||
new_path = f"{url_path}/[{param_name}]"
|
||||
self._scan_directory(item, new_path)
|
||||
elif dir_name == 'api':
|
||||
# API routes - scan but mark differently
|
||||
self._scan_api_directory(item, '/api')
|
||||
else:
|
||||
new_path = f"{url_path}/{dir_name}"
|
||||
self._scan_directory(item, new_path)
|
||||
|
||||
elif item.is_file():
|
||||
self._process_file(item, url_path)
|
||||
|
||||
def _process_file(self, file_path: Path, url_path: str):
|
||||
"""Process a potential page file"""
|
||||
if file_path.name not in self.PAGE_PATTERNS:
|
||||
return
|
||||
|
||||
# Skip if it's a layout or other special file
|
||||
if any(x in file_path.name for x in ['layout', 'loading', 'error', 'template']):
|
||||
return
|
||||
|
||||
try:
|
||||
content = file_path.read_text(encoding='utf-8')
|
||||
except Exception:
|
||||
return
|
||||
|
||||
# Determine route path
|
||||
if url_path == '':
|
||||
route_path = '/'
|
||||
else:
|
||||
route_path = url_path
|
||||
|
||||
# Detect dynamic parameters
|
||||
params = re.findall(r'\[([^\]]+)\]', route_path)
|
||||
has_params = len(params) > 0
|
||||
|
||||
# Detect features
|
||||
has_form = any(re.search(p, content) for p in self.FORM_PATTERNS)
|
||||
has_auth = any(re.search(p, content, re.IGNORECASE) for p in self.AUTH_PATTERNS)
|
||||
|
||||
# Detect interactions
|
||||
interactions = []
|
||||
for interaction, pattern in self.INTERACTION_PATTERNS.items():
|
||||
if re.search(pattern, content):
|
||||
interactions.append(interaction)
|
||||
|
||||
route = RouteInfo(
|
||||
path=route_path,
|
||||
file_path=str(file_path),
|
||||
route_type='dynamic' if has_params else 'page',
|
||||
has_params=has_params,
|
||||
params=params,
|
||||
has_form=has_form,
|
||||
has_auth=has_auth,
|
||||
interactions=interactions
|
||||
)
|
||||
|
||||
self.routes.append(route)
|
||||
|
||||
if self.verbose:
|
||||
print(f" Found route: {route_path}")
|
||||
|
||||
def _scan_api_directory(self, directory: Path, url_path: str):
|
||||
"""Scan API routes (mark them differently)"""
|
||||
for item in directory.iterdir():
|
||||
if item.is_dir():
|
||||
new_path = f"{url_path}/{item.name}"
|
||||
self._scan_api_directory(item, new_path)
|
||||
elif item.is_file() and item.suffix in {'.ts', '.tsx', '.js', '.jsx'}:
|
||||
# API routes don't get E2E tests typically
|
||||
pass
|
||||
|
||||
|
||||
class TestGenerator:
|
||||
"""Generates Playwright test files"""
|
||||
|
||||
def __init__(self, include_pom: bool = False, verbose: bool = False):
|
||||
self.include_pom = include_pom
|
||||
self.verbose = verbose
|
||||
|
||||
def generate(self, route: RouteInfo) -> str:
|
||||
"""Generate a test file for a route"""
|
||||
lines = []
|
||||
|
||||
# Imports
|
||||
lines.append("import { test, expect } from '@playwright/test';")
|
||||
|
||||
if self.include_pom:
|
||||
page_class = self._get_page_class_name(route.path)
|
||||
lines.append(f"import {{ {page_class} }} from './pages/{page_class}';")
|
||||
|
||||
lines.append('')
|
||||
|
||||
# Test describe block
|
||||
route_name = route.path if route.path != '/' else 'Home'
|
||||
lines.append(f"test.describe('{route_name}', () => {{")
|
||||
|
||||
# Generate test cases based on route features
|
||||
test_cases = self._generate_test_cases(route)
|
||||
|
||||
for test_case in test_cases:
|
||||
lines.append('')
|
||||
lines.append(test_case)
|
||||
|
||||
lines.append('});')
|
||||
lines.append('')
|
||||
|
||||
return '\n'.join(lines)
|
||||
|
||||
def _generate_test_cases(self, route: RouteInfo) -> List[str]:
|
||||
"""Generate test cases based on route features"""
|
||||
cases = []
|
||||
url = self._get_test_url(route)
|
||||
|
||||
# Basic navigation test
|
||||
cases.append(f''' test('loads successfully', async ({{ page }}) => {{
|
||||
await page.goto('{url}');
|
||||
await expect(page).toHaveURL(/{re.escape(route.path.replace('[', '').replace(']', '.*'))}/);
|
||||
// TODO: Add specific content assertions
|
||||
}});''')
|
||||
|
||||
# Page title test
|
||||
cases.append(f''' test('has correct title', async ({{ page }}) => {{
|
||||
await page.goto('{url}');
|
||||
// TODO: Update expected title
|
||||
await expect(page).toHaveTitle(/.*/);
|
||||
}});''')
|
||||
|
||||
# Auth-related tests
|
||||
if route.has_auth:
|
||||
cases.append(f''' test('redirects unauthenticated users', async ({{ page }}) => {{
|
||||
await page.goto('{url}');
|
||||
// TODO: Verify redirect to login
|
||||
// await expect(page).toHaveURL('/login');
|
||||
}});
|
||||
|
||||
test('allows authenticated access', async ({{ page }}) => {{
|
||||
// TODO: Set up authentication
|
||||
// await page.context().addCookies([{{ name: 'session', value: '...' }}]);
|
||||
await page.goto('{url}');
|
||||
await expect(page).toHaveURL(/{re.escape(route.path.replace('[', '').replace(']', '.*'))}/);
|
||||
}});''')
|
||||
|
||||
# Form tests
|
||||
if route.has_form:
|
||||
cases.append(f''' test('form submission works', async ({{ page }}) => {{
|
||||
await page.goto('{url}');
|
||||
|
||||
// TODO: Fill in form fields
|
||||
// await page.getByLabel('Email').fill('test@example.com');
|
||||
// await page.getByLabel('Password').fill('password123');
|
||||
|
||||
// Submit form
|
||||
// await page.getByRole('button', {{ name: 'Submit' }}).click();
|
||||
|
||||
// TODO: Assert success state
|
||||
// await expect(page.getByText('Success')).toBeVisible();
|
||||
}});
|
||||
|
||||
test('shows validation errors', async ({{ page }}) => {{
|
||||
await page.goto('{url}');
|
||||
|
||||
// Submit without filling required fields
|
||||
await page.getByRole('button', {{ name: /submit/i }}).click();
|
||||
|
||||
// TODO: Assert validation errors shown
|
||||
// await expect(page.getByText('Required')).toBeVisible();
|
||||
}});''')
|
||||
|
||||
# Click interaction tests
|
||||
if 'click' in route.interactions:
|
||||
cases.append(f''' test('button interactions work', async ({{ page }}) => {{
|
||||
await page.goto('{url}');
|
||||
|
||||
// TODO: Find and click interactive elements
|
||||
// const button = page.getByRole('button', {{ name: '...' }});
|
||||
// await button.click();
|
||||
// await expect(page.getByText('...')).toBeVisible();
|
||||
}});''')
|
||||
|
||||
# Navigation tests
|
||||
if 'navigation' in route.interactions:
|
||||
cases.append(f''' test('navigation works correctly', async ({{ page }}) => {{
|
||||
await page.goto('{url}');
|
||||
|
||||
// TODO: Click navigation links
|
||||
// await page.getByRole('link', {{ name: '...' }}).click();
|
||||
// await expect(page).toHaveURL('...');
|
||||
}});''')
|
||||
|
||||
# Modal tests
|
||||
if 'modal' in route.interactions:
|
||||
cases.append(f''' test('modal opens and closes', async ({{ page }}) => {{
|
||||
await page.goto('{url}');
|
||||
|
||||
// TODO: Open modal
|
||||
// await page.getByRole('button', {{ name: 'Open' }}).click();
|
||||
// await expect(page.getByRole('dialog')).toBeVisible();
|
||||
|
||||
// TODO: Close modal
|
||||
// await page.getByRole('button', {{ name: 'Close' }}).click();
|
||||
// await expect(page.getByRole('dialog')).not.toBeVisible();
|
||||
}});''')
|
||||
|
||||
# Dynamic route test
|
||||
if route.has_params:
|
||||
cases.append(f''' test('handles dynamic parameters', async ({{ page }}) => {{
|
||||
// TODO: Test with different parameter values
|
||||
await page.goto('{url}');
|
||||
await expect(page.locator('body')).toBeVisible();
|
||||
}});''')
|
||||
|
||||
return cases
|
||||
|
||||
def _get_test_url(self, route: RouteInfo) -> str:
|
||||
"""Get a testable URL for the route"""
|
||||
url = route.path
|
||||
|
||||
# Replace dynamic segments with example values
|
||||
for param in route.params:
|
||||
if param.startswith('...'):
|
||||
url = url.replace(f'[...{param[3:]}]', 'example/path')
|
||||
else:
|
||||
url = url.replace(f'[{param}]', 'test-id')
|
||||
|
||||
return url
|
||||
|
||||
def _get_page_class_name(self, route_path: str) -> str:
|
||||
"""Get Page Object class name from route path"""
|
||||
if route_path == '/':
|
||||
return 'HomePage'
|
||||
|
||||
# Remove leading slash and convert to PascalCase
|
||||
name = route_path.strip('/')
|
||||
name = re.sub(r'\[.*?\]', '', name) # Remove dynamic segments
|
||||
parts = name.split('/')
|
||||
return ''.join(p.title() for p in parts if p) + 'Page'
|
||||
|
||||
|
||||
class PageObjectGenerator:
|
||||
"""Generates Page Object Model classes"""
|
||||
|
||||
def __init__(self, verbose: bool = False):
|
||||
self.verbose = verbose
|
||||
|
||||
def generate(self, route: RouteInfo) -> str:
|
||||
"""Generate a Page Object class for a route"""
|
||||
class_name = self._get_class_name(route.path)
|
||||
url = route.path
|
||||
|
||||
# Replace dynamic segments
|
||||
for param in route.params:
|
||||
url = url.replace(f'[{param}]', f'${{{param}}}')
|
||||
|
||||
lines = []
|
||||
|
||||
# Imports
|
||||
lines.append("import { Page, Locator, expect } from '@playwright/test';")
|
||||
lines.append('')
|
||||
|
||||
# Class definition
|
||||
lines.append(f"export class {class_name} {{")
|
||||
lines.append(" readonly page: Page;")
|
||||
|
||||
# Common locators
|
||||
locators = self._get_locators(route)
|
||||
for name, selector, _ in locators:
|
||||
lines.append(f" readonly {name}: Locator;")
|
||||
|
||||
lines.append('')
|
||||
|
||||
# Constructor
|
||||
lines.append(" constructor(page: Page) {")
|
||||
lines.append(" this.page = page;")
|
||||
for name, selector, _ in locators:
|
||||
lines.append(f" this.{name} = page.{selector};")
|
||||
lines.append(" }")
|
||||
lines.append('')
|
||||
|
||||
# Navigation method
|
||||
if route.has_params:
|
||||
param_args = ', '.join(f'{p}: string' for p in route.params)
|
||||
url_parts = url.split('/')
|
||||
url_template = '/'.join(
|
||||
f'${{{p}}}' if f'${{{p}}}' in part else part
|
||||
for p, part in zip(route.params, url_parts)
|
||||
)
|
||||
lines.append(f" async goto({param_args}) {{")
|
||||
lines.append(f" await this.page.goto(`{url_template}`);")
|
||||
else:
|
||||
lines.append(" async goto() {")
|
||||
lines.append(f" await this.page.goto('{route.path}');")
|
||||
lines.append(" }")
|
||||
lines.append('')
|
||||
|
||||
# Add methods based on features
|
||||
methods = self._get_methods(route, locators)
|
||||
for method_name, method_code in methods:
|
||||
lines.append(method_code)
|
||||
lines.append('')
|
||||
|
||||
lines.append('}')
|
||||
lines.append('')
|
||||
|
||||
return '\n'.join(lines)
|
||||
|
||||
def _get_class_name(self, route_path: str) -> str:
|
||||
"""Get class name from route path"""
|
||||
if route_path == '/':
|
||||
return 'HomePage'
|
||||
|
||||
name = route_path.strip('/')
|
||||
name = re.sub(r'\[.*?\]', '', name)
|
||||
parts = name.split('/')
|
||||
return ''.join(p.title() for p in parts if p) + 'Page'
|
||||
|
||||
def _get_locators(self, route: RouteInfo) -> List[Tuple[str, str, str]]:
|
||||
"""Get common locators for a page"""
|
||||
locators = []
|
||||
|
||||
# Always add a heading locator
|
||||
locators.append(('heading', "getByRole('heading', { level: 1 })", 'Main heading'))
|
||||
|
||||
if route.has_form:
|
||||
locators.extend([
|
||||
('submitButton', "getByRole('button', { name: /submit/i })", 'Form submit button'),
|
||||
('form', "locator('form')", 'Main form element'),
|
||||
])
|
||||
|
||||
if route.has_auth:
|
||||
locators.extend([
|
||||
('emailInput', "getByLabel('Email')", 'Email input field'),
|
||||
('passwordInput', "getByLabel('Password')", 'Password input field'),
|
||||
])
|
||||
|
||||
if 'navigation' in route.interactions:
|
||||
locators.append(('navLinks', "getByRole('navigation').getByRole('link')", 'Navigation links'))
|
||||
|
||||
if 'modal' in route.interactions:
|
||||
locators.append(('modal', "getByRole('dialog')", 'Modal dialog'))
|
||||
|
||||
return locators
|
||||
|
||||
def _get_methods(
|
||||
self,
|
||||
route: RouteInfo,
|
||||
locators: List[Tuple[str, str, str]]
|
||||
) -> List[Tuple[str, str]]:
|
||||
"""Get methods for the page object"""
|
||||
methods = []
|
||||
|
||||
# Wait for load method
|
||||
methods.append(('waitForLoad', ''' async waitForLoad() {
|
||||
await expect(this.heading).toBeVisible();
|
||||
}'''))
|
||||
|
||||
if route.has_form:
|
||||
methods.append(('submitForm', ''' async submitForm() {
|
||||
await this.submitButton.click();
|
||||
}'''))
|
||||
|
||||
if route.has_auth:
|
||||
methods.append(('login', ''' async login(email: string, password: string) {
|
||||
await this.emailInput.fill(email);
|
||||
await this.passwordInput.fill(password);
|
||||
await this.submitButton.click();
|
||||
}'''))
|
||||
|
||||
if 'modal' in route.interactions:
|
||||
methods.append(('waitForModal', ''' async waitForModal() {
|
||||
await expect(this.modal).toBeVisible();
|
||||
}'''))
|
||||
methods.append(('closeModal', ''' async closeModal() {
|
||||
await this.page.keyboard.press('Escape');
|
||||
await expect(this.modal).not.toBeVisible();
|
||||
}'''))
|
||||
|
||||
return methods
|
||||
|
||||
|
||||
class ConfigGenerator:
|
||||
"""Generates Playwright configuration"""
|
||||
|
||||
def generate_config(self) -> str:
|
||||
"""Generate playwright.config.ts"""
|
||||
return '''import { defineConfig, devices } from '@playwright/test';
|
||||
|
||||
/**
|
||||
* Playwright Test Configuration
|
||||
* @see https://playwright.dev/docs/test-configuration
|
||||
*/
|
||||
export default defineConfig({
|
||||
testDir: './e2e',
|
||||
fullyParallel: true,
|
||||
forbidOnly: !!process.env.CI,
|
||||
retries: process.env.CI ? 2 : 0,
|
||||
workers: process.env.CI ? 1 : undefined,
|
||||
reporter: [
|
||||
['html', { open: 'never' }],
|
||||
['list'],
|
||||
],
|
||||
use: {
|
||||
baseURL: process.env.BASE_URL || 'http://localhost:3000',
|
||||
trace: 'on-first-retry',
|
||||
screenshot: 'only-on-failure',
|
||||
},
|
||||
projects: [
|
||||
{
|
||||
name: 'chromium',
|
||||
use: { ...devices['Desktop Chrome'] },
|
||||
},
|
||||
{
|
||||
name: 'firefox',
|
||||
use: { ...devices['Desktop Firefox'] },
|
||||
},
|
||||
{
|
||||
name: 'webkit',
|
||||
use: { ...devices['Desktop Safari'] },
|
||||
},
|
||||
{
|
||||
name: 'Mobile Chrome',
|
||||
use: { ...devices['Pixel 5'] },
|
||||
},
|
||||
],
|
||||
webServer: {
|
||||
command: 'npm run dev',
|
||||
url: 'http://localhost:3000',
|
||||
reuseExistingServer: !process.env.CI,
|
||||
timeout: 120 * 1000,
|
||||
},
|
||||
});
|
||||
'''
|
||||
|
||||
def generate_auth_fixture(self) -> str:
|
||||
"""Generate authentication fixture"""
|
||||
return '''import { test as base, Page } from '@playwright/test';
|
||||
|
||||
interface AuthFixtures {
|
||||
authenticatedPage: Page;
|
||||
}
|
||||
|
||||
export const test = base.extend<AuthFixtures>({
|
||||
authenticatedPage: async ({ page }, use) => {
|
||||
// Option 1: Login via UI
|
||||
// await page.goto('/login');
|
||||
// await page.getByLabel('Email').fill(process.env.TEST_EMAIL || 'test@example.com');
|
||||
// await page.getByLabel('Password').fill(process.env.TEST_PASSWORD || 'password');
|
||||
// await page.getByRole('button', { name: 'Sign in' }).click();
|
||||
// await page.waitForURL('/dashboard');
|
||||
|
||||
// Option 2: Login via API
|
||||
// const response = await page.request.post('/api/auth/login', {
|
||||
// data: {
|
||||
// email: process.env.TEST_EMAIL,
|
||||
// password: process.env.TEST_PASSWORD,
|
||||
// },
|
||||
// });
|
||||
// const { token } = await response.json();
|
||||
// await page.context().addCookies([
|
||||
// { name: 'auth-token', value: token, domain: 'localhost', path: '/' }
|
||||
// ]);
|
||||
|
||||
await use(page);
|
||||
},
|
||||
});
|
||||
|
||||
export { expect } from '@playwright/test';
|
||||
'''
|
||||
|
||||
|
||||
class E2ETestScaffolder:
|
||||
"""Main class for e2e test scaffolder functionality"""
|
||||
|
||||
def __init__(self, target_path: str, verbose: bool = False):
|
||||
self.target_path = Path(target_path)
|
||||
"""Main scaffolder class"""
|
||||
|
||||
def __init__(
|
||||
self,
|
||||
source_path: str,
|
||||
output_path: Optional[str] = None,
|
||||
include_pom: bool = False,
|
||||
routes: Optional[str] = None,
|
||||
verbose: bool = False
|
||||
):
|
||||
self.source_path = Path(source_path)
|
||||
self.output_path = Path(output_path) if output_path else Path('e2e')
|
||||
self.include_pom = include_pom
|
||||
self.routes_filter = routes.split(',') if routes else None
|
||||
self.verbose = verbose
|
||||
self.results = {}
|
||||
|
||||
self.results = {
|
||||
'status': 'success',
|
||||
'source': str(self.source_path),
|
||||
'routes': [],
|
||||
'generated_files': [],
|
||||
'summary': {}
|
||||
}
|
||||
|
||||
def run(self) -> Dict:
|
||||
"""Execute the main functionality"""
|
||||
print(f"🚀 Running {self.__class__.__name__}...")
|
||||
print(f"📁 Target: {self.target_path}")
|
||||
|
||||
try:
|
||||
self.validate_target()
|
||||
self.analyze()
|
||||
self.generate_report()
|
||||
|
||||
print("✅ Completed successfully!")
|
||||
return self.results
|
||||
|
||||
except Exception as e:
|
||||
print(f"❌ Error: {e}")
|
||||
sys.exit(1)
|
||||
|
||||
def validate_target(self):
|
||||
"""Validate the target path exists and is accessible"""
|
||||
if not self.target_path.exists():
|
||||
raise ValueError(f"Target path does not exist: {self.target_path}")
|
||||
|
||||
if self.verbose:
|
||||
print(f"✓ Target validated: {self.target_path}")
|
||||
|
||||
def analyze(self):
|
||||
"""Perform the main analysis or operation"""
|
||||
if self.verbose:
|
||||
print("📊 Analyzing...")
|
||||
|
||||
# Main logic here
|
||||
self.results['status'] = 'success'
|
||||
self.results['target'] = str(self.target_path)
|
||||
self.results['findings'] = []
|
||||
|
||||
# Add analysis results
|
||||
if self.verbose:
|
||||
print(f"✓ Analysis complete: {len(self.results.get('findings', []))} findings")
|
||||
|
||||
def generate_report(self):
|
||||
"""Generate and display the report"""
|
||||
print("\n" + "="*50)
|
||||
print("REPORT")
|
||||
print("="*50)
|
||||
print(f"Target: {self.results.get('target')}")
|
||||
print(f"Status: {self.results.get('status')}")
|
||||
print(f"Findings: {len(self.results.get('findings', []))}")
|
||||
print("="*50 + "\n")
|
||||
"""Run the scaffolder"""
|
||||
print(f"Scanning: {self.source_path}")
|
||||
|
||||
# Validate source path
|
||||
if not self.source_path.exists():
|
||||
raise ValueError(f"Source path does not exist: {self.source_path}")
|
||||
|
||||
# Scan for routes
|
||||
scanner = RouteScanner(self.source_path, self.verbose)
|
||||
routes = scanner.scan(self.routes_filter)
|
||||
|
||||
print(f"Found {len(routes)} routes")
|
||||
|
||||
# Create output directories
|
||||
self.output_path.mkdir(parents=True, exist_ok=True)
|
||||
if self.include_pom:
|
||||
(self.output_path / 'pages').mkdir(exist_ok=True)
|
||||
|
||||
# Generate test files
|
||||
test_generator = TestGenerator(self.include_pom, self.verbose)
|
||||
pom_generator = PageObjectGenerator(self.verbose) if self.include_pom else None
|
||||
config_generator = ConfigGenerator()
|
||||
|
||||
# Generate tests for each route
|
||||
for route in routes:
|
||||
# Generate test file
|
||||
test_content = test_generator.generate(route)
|
||||
test_filename = self._get_test_filename(route.path)
|
||||
test_path = self.output_path / test_filename
|
||||
|
||||
test_path.write_text(test_content, encoding='utf-8')
|
||||
|
||||
self.results['generated_files'].append({
|
||||
'type': 'test',
|
||||
'route': route.path,
|
||||
'path': str(test_path)
|
||||
})
|
||||
|
||||
print(f" {test_filename}")
|
||||
|
||||
# Generate Page Object if enabled
|
||||
if self.include_pom:
|
||||
pom_content = pom_generator.generate(route)
|
||||
pom_filename = self._get_pom_filename(route.path)
|
||||
pom_path = self.output_path / 'pages' / pom_filename
|
||||
|
||||
pom_path.write_text(pom_content, encoding='utf-8')
|
||||
|
||||
self.results['generated_files'].append({
|
||||
'type': 'page_object',
|
||||
'route': route.path,
|
||||
'path': str(pom_path)
|
||||
})
|
||||
|
||||
print(f" pages/{pom_filename}")
|
||||
|
||||
# Generate config files if not exists
|
||||
config_path = Path('playwright.config.ts')
|
||||
if not config_path.exists():
|
||||
config_content = config_generator.generate_config()
|
||||
config_path.write_text(config_content, encoding='utf-8')
|
||||
self.results['generated_files'].append({
|
||||
'type': 'config',
|
||||
'path': str(config_path)
|
||||
})
|
||||
print(f" playwright.config.ts")
|
||||
|
||||
# Generate auth fixture
|
||||
fixtures_dir = self.output_path / 'fixtures'
|
||||
fixtures_dir.mkdir(exist_ok=True)
|
||||
auth_fixture_path = fixtures_dir / 'auth.ts'
|
||||
if not auth_fixture_path.exists():
|
||||
auth_content = config_generator.generate_auth_fixture()
|
||||
auth_fixture_path.write_text(auth_content, encoding='utf-8')
|
||||
self.results['generated_files'].append({
|
||||
'type': 'fixture',
|
||||
'path': str(auth_fixture_path)
|
||||
})
|
||||
print(f" fixtures/auth.ts")
|
||||
|
||||
# Store route info
|
||||
self.results['routes'] = [asdict(r) for r in routes]
|
||||
|
||||
# Summary
|
||||
self.results['summary'] = {
|
||||
'total_routes': len(routes),
|
||||
'total_files': len(self.results['generated_files']),
|
||||
'output_directory': str(self.output_path),
|
||||
'include_pom': self.include_pom
|
||||
}
|
||||
|
||||
print('')
|
||||
print(f"Summary: {len(routes)} routes, {len(self.results['generated_files'])} files generated")
|
||||
|
||||
return self.results
|
||||
|
||||
def _get_test_filename(self, route_path: str) -> str:
|
||||
"""Get test filename from route path"""
|
||||
if route_path == '/':
|
||||
return 'home.spec.ts'
|
||||
|
||||
name = route_path.strip('/')
|
||||
name = re.sub(r'\[([^\]]+)\]', r'\1', name) # [id] -> id
|
||||
name = name.replace('/', '-')
|
||||
return f"{name}.spec.ts"
|
||||
|
||||
def _get_pom_filename(self, route_path: str) -> str:
|
||||
"""Get Page Object filename from route path"""
|
||||
if route_path == '/':
|
||||
return 'HomePage.ts'
|
||||
|
||||
name = route_path.strip('/')
|
||||
name = re.sub(r'\[.*?\]', '', name)
|
||||
parts = name.split('/')
|
||||
class_name = ''.join(p.title() for p in parts if p) + 'Page'
|
||||
return f"{class_name}.ts"
|
||||
|
||||
|
||||
def main():
|
||||
"""Main entry point"""
|
||||
parser = argparse.ArgumentParser(
|
||||
description="E2E Test Scaffolder"
|
||||
description="Generate Playwright E2E tests from Next.js routes",
|
||||
formatter_class=argparse.RawDescriptionHelpFormatter,
|
||||
epilog="""
|
||||
Examples:
|
||||
# Scaffold E2E tests for App Router
|
||||
python e2e_test_scaffolder.py src/app/ --output e2e/
|
||||
|
||||
# Include Page Object Models
|
||||
python e2e_test_scaffolder.py src/app/ --include-pom
|
||||
|
||||
# Generate for specific routes only
|
||||
python e2e_test_scaffolder.py src/app/ --routes "/login,/dashboard,/checkout"
|
||||
|
||||
# Verbose output
|
||||
python e2e_test_scaffolder.py pages/ -v
|
||||
"""
|
||||
)
|
||||
parser.add_argument(
|
||||
'target',
|
||||
help='Target path to analyze or process'
|
||||
'source',
|
||||
help='Source directory (app/ or pages/)'
|
||||
)
|
||||
parser.add_argument(
|
||||
'--output', '-o',
|
||||
default='e2e',
|
||||
help='Output directory for test files (default: e2e/)'
|
||||
)
|
||||
parser.add_argument(
|
||||
'--include-pom',
|
||||
action='store_true',
|
||||
help='Generate Page Object Model classes'
|
||||
)
|
||||
parser.add_argument(
|
||||
'--routes',
|
||||
help='Comma-separated list of routes to generate tests for'
|
||||
)
|
||||
parser.add_argument(
|
||||
'--verbose', '-v',
|
||||
@@ -87,28 +794,27 @@ def main():
|
||||
action='store_true',
|
||||
help='Output results as JSON'
|
||||
)
|
||||
parser.add_argument(
|
||||
'--output', '-o',
|
||||
help='Output file path'
|
||||
)
|
||||
|
||||
|
||||
args = parser.parse_args()
|
||||
|
||||
tool = E2ETestScaffolder(
|
||||
args.target,
|
||||
verbose=args.verbose
|
||||
)
|
||||
|
||||
results = tool.run()
|
||||
|
||||
if args.json:
|
||||
output = json.dumps(results, indent=2)
|
||||
if args.output:
|
||||
with open(args.output, 'w') as f:
|
||||
f.write(output)
|
||||
print(f"Results written to {args.output}")
|
||||
else:
|
||||
print(output)
|
||||
|
||||
try:
|
||||
scaffolder = E2ETestScaffolder(
|
||||
source_path=args.source,
|
||||
output_path=args.output,
|
||||
include_pom=args.include_pom,
|
||||
routes=args.routes,
|
||||
verbose=args.verbose
|
||||
)
|
||||
|
||||
results = scaffolder.run()
|
||||
|
||||
if args.json:
|
||||
print(json.dumps(results, indent=2))
|
||||
|
||||
except Exception as e:
|
||||
print(f"Error: {e}")
|
||||
sys.exit(1)
|
||||
|
||||
|
||||
if __name__ == '__main__':
|
||||
main()
|
||||
|
||||
@@ -1,81 +1,572 @@
|
||||
#!/usr/bin/env python3
|
||||
"""
|
||||
Test Suite Generator
|
||||
Automated tool for senior qa tasks
|
||||
|
||||
Scans React/TypeScript components and generates Jest + React Testing Library
|
||||
test stubs with proper structure, accessibility tests, and common patterns.
|
||||
|
||||
Usage:
|
||||
python test_suite_generator.py src/components/ --output __tests__/
|
||||
python test_suite_generator.py src/ --include-a11y --scan-only
|
||||
"""
|
||||
|
||||
import os
|
||||
import sys
|
||||
import json
|
||||
import argparse
|
||||
import re
|
||||
from pathlib import Path
|
||||
from typing import Dict, List, Optional
|
||||
from typing import Dict, List, Optional, Tuple, Set
|
||||
from dataclasses import dataclass, field, asdict
|
||||
from datetime import datetime
|
||||
|
||||
|
||||
@dataclass
|
||||
class ComponentInfo:
|
||||
"""Information about a detected React component"""
|
||||
name: str
|
||||
file_path: str
|
||||
component_type: str # 'functional', 'class', 'forwardRef', 'memo'
|
||||
has_props: bool
|
||||
props: List[str]
|
||||
has_hooks: List[str]
|
||||
has_context: bool
|
||||
has_effects: bool
|
||||
has_state: bool
|
||||
has_callbacks: bool
|
||||
exports: List[str]
|
||||
imports: List[str]
|
||||
|
||||
|
||||
@dataclass
|
||||
class TestCase:
|
||||
"""A single test case to generate"""
|
||||
name: str
|
||||
description: str
|
||||
test_type: str # 'render', 'interaction', 'a11y', 'props', 'state'
|
||||
code: str
|
||||
|
||||
|
||||
@dataclass
|
||||
class TestFile:
|
||||
"""A complete test file to generate"""
|
||||
component: ComponentInfo
|
||||
test_cases: List[TestCase] = field(default_factory=list)
|
||||
imports: Set[str] = field(default_factory=set)
|
||||
|
||||
|
||||
class ComponentScanner:
    """Scans a directory tree for React components.

    Uses regex heuristics (no real JS/TS parsing) to find functional,
    arrow-function, memo/forwardRef and class components, and to detect
    features (hooks, props, state, effects, callbacks) that decide which
    test cases the generator emits.
    """

    # Patterns for detecting React components. Component names are assumed
    # to start with an uppercase letter, per React convention.
    FUNCTIONAL_COMPONENT = re.compile(
        r'^(?:export\s+)?(?:const|function)\s+([A-Z][a-zA-Z0-9]*)\s*[=:]?\s*(?:\([^)]*\)\s*(?::\s*[^=]+)?\s*=>|function\s*\([^)]*\))',
        re.MULTILINE
    )

    ARROW_COMPONENT = re.compile(
        r'^(?:export\s+)?const\s+([A-Z][a-zA-Z0-9]*)\s*=\s*(?:React\.)?(?:memo|forwardRef)?\s*\(',
        re.MULTILINE
    )

    CLASS_COMPONENT = re.compile(
        r'^(?:export\s+)?class\s+([A-Z][a-zA-Z0-9]*)\s+extends\s+(?:React\.)?(?:Component|PureComponent)',
        re.MULTILINE
    )

    # Feature-detection heuristics; each drives a flag on ComponentInfo.
    HOOK_PATTERN = re.compile(r'use([A-Z][a-zA-Z0-9]*)\s*\(')
    PROPS_PATTERN = re.compile(r'(?:props\.|{\s*([^}]+)\s*}\s*=\s*props|:\s*([A-Z][a-zA-Z0-9]*Props))')
    CONTEXT_PATTERN = re.compile(r'useContext\s*\(|\.Provider|\.Consumer')
    EFFECT_PATTERN = re.compile(r'useEffect\s*\(|useLayoutEffect\s*\(')
    STATE_PATTERN = re.compile(r'useState\s*\(|useReducer\s*\(|this\.state')
    CALLBACK_PATTERN = re.compile(r'on[A-Z][a-zA-Z]*\s*[=:]|handle[A-Z][a-zA-Z]*\s*[=:]')

    def __init__(self, source_path: Path, verbose: bool = False):
        """
        Args:
            source_path: Root directory to walk for component files.
            verbose: Print each discovered component / unreadable file.
        """
        self.source_path = source_path
        self.verbose = verbose
        # Accumulated results; populated by scan().
        self.components: List[ComponentInfo] = []

    def scan(self) -> List[ComponentInfo]:
        """Walk the source path and return all React components found."""
        # Only JS/TS source files can contain components.
        extensions = {'.tsx', '.jsx', '.ts', '.js'}

        for root, dirs, files in os.walk(self.source_path):
            # Skip node_modules and test directories (in-place prune so
            # os.walk does not descend into them).
            dirs[:] = [d for d in dirs if d not in {'node_modules', '__tests__', 'test', 'tests', '.git'}]

            for file in files:
                if Path(file).suffix in extensions:
                    file_path = Path(root) / file
                    self._scan_file(file_path)

        return self.components

    def _scan_file(self, file_path: Path):
        """Scan a single file and record any components it defines."""
        try:
            content = file_path.read_text(encoding='utf-8')
        except Exception as e:
            # Best-effort: unreadable files (binary, bad encoding) are skipped.
            if self.verbose:
                print(f"Warning: Could not read {file_path}: {e}")
            return

        # Skip test files themselves; we only generate tests for sources.
        if '.test.' in file_path.name or '.spec.' in file_path.name:
            return

        # Skip files without JSX indicators (cheap pre-filter before regexes).
        if 'return' not in content or ('<' not in content and 'jsx' not in content.lower()):
            # Could still be a hook file (useXxx definitions) — keep those.
            if not self.HOOK_PATTERN.search(content):
                return

        # Find functional components
        for match in self.FUNCTIONAL_COMPONENT.finditer(content):
            name = match.group(1)
            self._add_component(name, file_path, content, 'functional')

        # Find arrow function components
        for match in self.ARROW_COMPONENT.finditer(content):
            name = match.group(1)
            component_type = 'functional'
            # NOTE(review): these checks look at the whole file, not the
            # matched declaration — a single memo()/forwardRef() anywhere
            # tags every arrow component in the file. Confirm if per-match
            # classification was intended.
            if 'memo(' in content:
                component_type = 'memo'
            elif 'forwardRef(' in content:
                component_type = 'forwardRef'
            self._add_component(name, file_path, content, component_type)

        # Find class components
        for match in self.CLASS_COMPONENT.finditer(content):
            name = match.group(1)
            self._add_component(name, file_path, content, 'class')

    def _add_component(self, name: str, file_path: Path, content: str, component_type: str):
        """Build a ComponentInfo for *name* and append it, de-duplicating.

        De-duplication is by (name, file path): the same declaration can be
        matched by both FUNCTIONAL_COMPONENT and ARROW_COMPONENT.
        """
        # Check if already added
        for comp in self.components:
            if comp.name == name and comp.file_path == str(file_path):
                return

        # Extract hooks used (names are whole-file, not per-component).
        hooks = list(set(self.HOOK_PATTERN.findall(content)))

        # Extract prop names (simplified).
        # NOTE(review): only group(1) — the `{ a, b } = props` destructuring
        # capture — is consumed; the TypeScript `: FooProps` capture
        # (group 2) is ignored, so props stays empty for that style.
        props = []
        props_match = self.PROPS_PATTERN.search(content)
        if props_match:
            props_str = props_match.group(1) or ''
            # Strip TS type annotations: "a: string" -> "a".
            props = [p.strip().split(':')[0].strip() for p in props_str.split(',') if p.strip()]

        # Extract imports (module specifiers only)
        imports = re.findall(r"import\s+(?:{[^}]+}|[^;]+)\s+from\s+['\"]([^'\"]+)['\"]", content)

        # Extract exports (declared names)
        exports = re.findall(r"export\s+(?:default\s+)?(?:const|function|class)\s+(\w+)", content)

        component = ComponentInfo(
            name=name,
            file_path=str(file_path),
            component_type=component_type,
            has_props=bool(props) or 'props' in content.lower(),
            props=props[:10],  # Limit props
            has_hooks=hooks[:10],  # Limit hooks
            has_context=bool(self.CONTEXT_PATTERN.search(content)),
            has_effects=bool(self.EFFECT_PATTERN.search(content)),
            has_state=bool(self.STATE_PATTERN.search(content)),
            has_callbacks=bool(self.CALLBACK_PATTERN.search(content)),
            exports=exports[:5],
            imports=imports[:10]
        )

        self.components.append(component)

        if self.verbose:
            print(f"  Found: {name} ({component_type}) in {file_path.name}")
|
||||
|
||||
|
||||
class TestGenerator:
    """Generates Jest + React Testing Library test files.

    Given a ComponentInfo, produces a TestFile whose imports and test
    cases match the component's detected features (props, callbacks,
    state/effects, optional jest-axe accessibility checks).
    """

    def __init__(self, include_a11y: bool = False, template: Optional[str] = None):
        # Whether to emit jest-axe accessibility tests.
        self.include_a11y = include_a11y
        # Optional custom template file path.
        # NOTE(review): stored but never read by this class — confirm
        # whether template support is still meant to be wired in.
        self.template = template

    def generate(self, component: ComponentInfo) -> TestFile:
        """Generate a test file (imports + test cases) for a component."""
        test_file = TestFile(component=component)

        # Build imports — RTL is always needed; the rest are feature-driven.
        test_file.imports.add("import { render, screen } from '@testing-library/react';")

        if component.has_callbacks:
            test_file.imports.add("import userEvent from '@testing-library/user-event';")

        if component.has_effects or component.has_state:
            test_file.imports.add("import { waitFor } from '@testing-library/react';")

        if self.include_a11y:
            test_file.imports.add("import { axe, toHaveNoViolations } from 'jest-axe';")

        # Add component import
        relative_path = self._get_relative_import(component.file_path)
        test_file.imports.add(f"import {{ {component.name} }} from '{relative_path}';")

        # Generate test cases: render always, others by detected feature.
        test_file.test_cases.append(self._generate_render_test(component))

        if component.has_props:
            test_file.test_cases.append(self._generate_props_test(component))

        if component.has_callbacks:
            test_file.test_cases.append(self._generate_interaction_test(component))

        if component.has_state:
            test_file.test_cases.append(self._generate_state_test(component))

        if self.include_a11y:
            test_file.test_cases.append(self._generate_a11y_test(component))

        return test_file

    def _get_relative_import(self, file_path: str) -> str:
        """Get the relative import path for a component.

        NOTE(review): assumes the test file lives in a sibling directory
        one level below the component's parent (e.g. __tests__/) —
        confirm against the chosen output layout.
        """
        path = Path(file_path)
        # Remove extension; index files import their directory instead.
        stem = path.stem
        if stem == 'index':
            return f"../{path.parent.name}"
        return f"../{path.parent.name}/{stem}"

    def _generate_render_test(self, component: ComponentInfo) -> TestCase:
        """Generate basic smoke-render tests (always emitted)."""
        props_str = self._get_mock_props(component)

        code = f'''  it('renders without crashing', () => {{
    render(<{component.name}{props_str} />);
  }});

  it('renders expected content', () => {{
    render(<{component.name}{props_str} />);
    // TODO: Add specific content assertions
    // expect(screen.getByRole('...')).toBeInTheDocument();
  }});'''

        return TestCase(
            name='render',
            description='Basic render tests',
            test_type='render',
            code=code
        )

    def _generate_props_test(self, component: ComponentInfo) -> TestCase:
        """Generate one stub test per detected prop (capped at three)."""
        props = component.props[:3] if component.props else ['prop1']

        prop_tests = []
        for prop in props:
            prop_tests.append(f'''  it('renders with {prop} prop', () => {{
    render(<{component.name} {prop}="test-value" />);
    // TODO: Assert that {prop} affects rendering
  }});''')

        code = '\n\n'.join(prop_tests)

        return TestCase(
            name='props',
            description='Props handling tests',
            test_type='props',
            code=code
        )

    def _generate_interaction_test(self, component: ComponentInfo) -> TestCase:
        """Generate click and keyboard interaction test stubs."""
        code = f'''  it('handles user interaction', async () => {{
    const user = userEvent.setup();
    const handleClick = jest.fn();

    render(<{component.name} onClick={{handleClick}} />);

    // TODO: Find the interactive element
    const button = screen.getByRole('button');
    await user.click(button);

    expect(handleClick).toHaveBeenCalledTimes(1);
  }});

  it('handles keyboard navigation', async () => {{
    const user = userEvent.setup();
    render(<{component.name} />);

    // TODO: Add keyboard interaction tests
    // await user.tab();
    // expect(screen.getByRole('...')).toHaveFocus();
  }});'''

        return TestCase(
            name='interaction',
            description='User interaction tests',
            test_type='interaction',
            code=code
        )

    def _generate_state_test(self, component: ComponentInfo) -> TestCase:
        """Generate a state-change test stub (waitFor-based)."""
        code = f'''  it('updates state correctly', async () => {{
    const user = userEvent.setup();
    render(<{component.name} />);

    // TODO: Trigger state change
    // await user.click(screen.getByRole('button'));

    // TODO: Assert state change is reflected in UI
    await waitFor(() => {{
      // expect(screen.getByText('...')).toBeInTheDocument();
    }});
  }});'''

        return TestCase(
            name='state',
            description='State management tests',
            test_type='state',
            code=code
        )

    def _generate_a11y_test(self, component: ComponentInfo) -> TestCase:
        """Generate a jest-axe accessibility test."""
        props_str = self._get_mock_props(component)

        code = f'''  it('has no accessibility violations', async () => {{
    const {{ container }} = render(<{component.name}{props_str} />);
    const results = await axe(container);
    expect(results).toHaveNoViolations();
  }});'''

        return TestCase(
            name='accessibility',
            description='Accessibility tests',
            test_type='a11y',
            code=code
        )

    def _get_mock_props(self, component: ComponentInfo) -> str:
        """Generate the mock-props JSX fragment for a component."""
        if not component.has_props or not component.props:
            return ''

        # Spread a placeholder object; the user fills in mockProps.
        return ' {...mockProps}'

    def format_test_file(self, test_file: TestFile) -> str:
        """Render a TestFile into complete test-file source text."""
        lines = []

        # Imports (jest-dom matchers first, then sorted feature imports).
        lines.append("import '@testing-library/jest-dom';")
        for imp in sorted(test_file.imports):
            lines.append(imp)

        lines.append('')

        # A11y setup if needed
        if self.include_a11y:
            lines.append('expect.extend(toHaveNoViolations);')
            lines.append('')

        # Mock props if component has props
        if test_file.component.has_props:
            lines.append('// TODO: Define mock props')
            lines.append('const mockProps = {};')
            lines.append('')

        # Describe block
        lines.append(f"describe('{test_file.component.name}', () => {{")

        # Test cases grouped by type (preserves first-seen type order).
        test_types = {}
        for test_case in test_file.test_cases:
            if test_case.test_type not in test_types:
                test_types[test_case.test_type] = []
            test_types[test_case.test_type].append(test_case)

        for test_type, cases in test_types.items():
            for case in cases:
                lines.append('')
                lines.append(f'  // {case.description}')
                lines.append(case.code)

        lines.append('});')
        lines.append('')

        return '\n'.join(lines)
|
||||
|
||||
|
||||
class TestSuiteGenerator:
    """Orchestrates scanning for components and writing Jest + RTL test files.

    Fix: the original block was a botched merge — two interleaved __init__
    definitions, a run() that mixed a dead old scaffold (validate_target /
    analyze / generate_report) with the new generation logic, and an
    `except` with no matching `try` — and was not valid Python. This is
    the reconstructed current version; the dead scaffold methods are gone.
    """

    def __init__(
        self,
        source_path: str,
        output_path: Optional[str] = None,
        include_a11y: bool = False,
        scan_only: bool = False,
        verbose: bool = False,
        template: Optional[str] = None
    ):
        """
        Args:
            source_path: Directory to scan for React components.
            output_path: Directory for generated test files
                (default: <source>/__tests__/).
            include_a11y: Also emit jest-axe accessibility tests.
            scan_only: Report discovered components without writing files.
            verbose: Print per-file progress while scanning.
            template: Optional custom template file, forwarded to
                TestGenerator.
        """
        self.source_path = Path(source_path)
        self.output_path = Path(output_path) if output_path else None
        self.include_a11y = include_a11y
        self.scan_only = scan_only
        self.verbose = verbose
        self.template = template
        # Machine-readable results; printed as JSON by the CLI with --json.
        self.results = {
            'status': 'success',
            'source': str(self.source_path),
            'components': [],
            'generated_files': [],
            'summary': {}
        }

    def run(self) -> Dict:
        """Execute the test suite generation.

        Returns:
            The results dict (components, generated files, summary).

        Raises:
            ValueError: If the source path does not exist.
        """
        print(f"Scanning: {self.source_path}")

        # Validate source path
        if not self.source_path.exists():
            raise ValueError(f"Source path does not exist: {self.source_path}")

        # Scan for components
        scanner = ComponentScanner(self.source_path, self.verbose)
        components = scanner.scan()

        print(f"Found {len(components)} React components")

        if self.scan_only:
            self._report_scan_results(components)
            return self.results

        # Generate tests
        if not self.output_path:
            # Default to __tests__ in the source directory
            self.output_path = self.source_path / '__tests__'

        self.output_path.mkdir(parents=True, exist_ok=True)

        generator = TestGenerator(self.include_a11y, self.template)

        total_tests = 0
        for component in components:
            test_file = generator.generate(component)
            content = generator.format_test_file(test_file)

            # One <Component>.test.tsx file per component.
            test_filename = f"{component.name}.test.tsx"
            test_path = self.output_path / test_filename

            test_path.write_text(content, encoding='utf-8')

            test_count = len(test_file.test_cases)
            total_tests += test_count

            self.results['generated_files'].append({
                'component': component.name,
                'path': str(test_path),
                'test_cases': test_count
            })

            print(f"  {test_filename} ({test_count} test cases)")

        # Store component info for JSON output.
        self.results['components'] = [asdict(c) for c in components]

        # Summary
        self.results['summary'] = {
            'total_components': len(components),
            'total_files': len(self.results['generated_files']),
            'total_test_cases': total_tests,
            'output_directory': str(self.output_path)
        }

        print('')
        print(f"Summary: {len(components)} test files, {total_tests} test cases")

        return self.results

    def _report_scan_results(self, components: List[ComponentInfo]):
        """Report scan results (grouped by component type) without generating tests."""
        print('')
        print("=" * 60)
        print("COMPONENT SCAN RESULTS")
        print("=" * 60)

        # Group by type
        by_type = {}
        for comp in components:
            comp_type = comp.component_type
            if comp_type not in by_type:
                by_type[comp_type] = []
            by_type[comp_type].append(comp)

        for comp_type, comps in sorted(by_type.items()):
            print(f"\n{comp_type.upper()} COMPONENTS ({len(comps)}):")
            for comp in comps:
                hooks_str = f" [hooks: {', '.join(comp.has_hooks[:3])}]" if comp.has_hooks else ""
                state_str = " [stateful]" if comp.has_state else ""
                print(f"  - {comp.name}{hooks_str}{state_str}")
                print(f"    {comp.file_path}")

        print('')
        print("=" * 60)
        print(f"Total: {len(components)} components")
        print("=" * 60)

        self.results['components'] = [asdict(c) for c in components]
        self.results['summary'] = {
            'total_components': len(components),
            'by_type': {k: len(v) for k, v in by_type.items()}
        }
|
||||
|
||||
|
||||
def main():
    """Main entry point: parse CLI arguments and run the generator.

    Fix: the original block was a botched merge — argparse received two
    `description=` keyword arguments (a syntax error), both a 'target'
    and a 'source' positional were registered, `--output` was added twice
    (argparse raises on duplicate option strings), a stray diff hunk
    header (`@@ -87,28 +578,28 @@`) sat in the middle of the code, and
    both the old and new invocation paths were present. Reconstructed
    the new CLI.
    """
    parser = argparse.ArgumentParser(
        description="Generate Jest + React Testing Library test stubs for React components",
        formatter_class=argparse.RawDescriptionHelpFormatter,
        epilog="""
Examples:
  # Scan and generate tests
  python test_suite_generator.py src/components/ --output __tests__/

  # Scan only (don't generate)
  python test_suite_generator.py src/components/ --scan-only

  # Include accessibility tests
  python test_suite_generator.py src/ --include-a11y --output tests/

  # Verbose output
  python test_suite_generator.py src/components/ -v
"""
    )
    parser.add_argument(
        'source',
        help='Source directory containing React components'
    )
    parser.add_argument(
        '--output', '-o',
        help='Output directory for test files (default: <source>/__tests__/)'
    )
    parser.add_argument(
        '--include-a11y',
        action='store_true',
        help='Include accessibility tests using jest-axe'
    )
    parser.add_argument(
        '--scan-only',
        action='store_true',
        help='Scan and report components without generating tests'
    )
    parser.add_argument(
        '--template',
        help='Custom template file for test generation'
    )
    parser.add_argument(
        '--verbose', '-v',
        action='store_true',
        help='Enable verbose output'
    )
    parser.add_argument(
        '--json',
        action='store_true',
        help='Output results as JSON'
    )

    args = parser.parse_args()

    try:
        generator = TestSuiteGenerator(
            args.source,
            output_path=args.output,
            include_a11y=args.include_a11y,
            scan_only=args.scan_only,
            verbose=args.verbose,
            template=args.template
        )

        results = generator.run()

        if args.json:
            print(json.dumps(results, indent=2))

    except Exception as e:
        # Top-level CLI boundary: report and exit non-zero.
        print(f"Error: {e}")
        sys.exit(1)
|
||||
|
||||
|
||||
# Script entry point when executed directly.
if __name__ == '__main__':
    main()
|
||||
|
||||
Reference in New Issue
Block a user