diff --git a/.codex/skills-index.json b/.codex/skills-index.json index 229cfb6..187df5a 100644 --- a/.codex/skills-index.json +++ b/.codex/skills-index.json @@ -99,7 +99,7 @@ "name": "senior-qa", "source": "../../engineering-team/senior-qa", "category": "engineering", - "description": "Comprehensive QA and testing skill for quality assurance, test automation, and testing strategies for ReactJS, NextJS, NodeJS applications. Includes test suite generation, coverage analysis, E2E testing setup, and quality metrics. Use when designing test strategies, writing test cases, implementing test automation, performing manual testing, or analyzing test coverage." + "description": "This skill should be used when the user asks to \"generate tests\", \"write unit tests\", \"analyze test coverage\", \"scaffold E2E tests\", \"set up Playwright\", \"configure Jest\", \"implement testing patterns\", or \"improve test quality\". Use for React/Next.js testing with Jest, React Testing Library, and Playwright." }, { "name": "senior-secops", diff --git a/engineering-team/README.md b/engineering-team/README.md index b0bdb6b..aa7cc0b 100644 --- a/engineering-team/README.md +++ b/engineering-team/README.md @@ -196,31 +196,36 @@ skill-name/ ### 5. 
Senior QA Testing Engineer (`senior-qa.zip`) -**Purpose:** Quality assurance and test automation +**Purpose:** Quality assurance and test automation for React/Next.js applications + +**Tech Stack Focus:** +- Jest + React Testing Library (unit/integration) +- Playwright (E2E testing) +- Istanbul/NYC (coverage analysis) +- MSW (API mocking) **Key Capabilities:** -- Test suite generation -- Coverage analysis -- E2E test setup (Playwright, Cypress) -- Unit/Integration testing -- Test automation strategies -- Quality metrics tracking +- Component test generation with accessibility checks +- Coverage gap analysis with critical path detection +- E2E test scaffolding with Page Object Model +- Test pyramid implementation (70/20/10 ratio) +- CI/CD integration patterns **Scripts:** -- `test_suite_generator.py` - Generate test suites -- `coverage_analyzer.py` - Analyze test coverage -- `e2e_test_scaffolder.py` - Setup E2E tests +- `test_suite_generator.py` - Scans React components, generates Jest + RTL tests with accessibility assertions +- `coverage_analyzer.py` - Parses Istanbul/LCOV reports, identifies untested critical paths, generates HTML reports +- `e2e_test_scaffolder.py` - Scans Next.js routes, generates Playwright tests with Page Object Model classes **References:** -- `testing_strategies.md` - Testing approaches and pyramid -- `test_automation_patterns.md` - Automation best practices -- `qa_best_practices.md` - QA processes and standards +- `testing_strategies.md` - Test pyramid, coverage targets, CI/CD integration patterns +- `test_automation_patterns.md` - Page Object Model, fixtures, mocking strategies, async testing +- `qa_best_practices.md` - Test naming, isolation, flaky test handling, debugging strategies **Use When:** -- Setting up testing infrastructure -- Writing test cases -- Analyzing test coverage -- Implementing test automation +- Setting up React/Next.js testing infrastructure +- Generating component test suites with RTL +- Analyzing coverage gaps in 
critical paths +- Scaffolding Playwright E2E tests for Next.js routes --- diff --git a/engineering-team/senior-qa/README.md b/engineering-team/senior-qa/README.md new file mode 100644 index 0000000..7e7b304 --- /dev/null +++ b/engineering-team/senior-qa/README.md @@ -0,0 +1,196 @@ +# Senior QA Testing Engineer Skill + +Production-ready quality assurance and test automation skill for React/Next.js applications. + +## Tech Stack Focus + +| Category | Technologies | +|----------|--------------| +| Unit/Integration | Jest, React Testing Library | +| E2E Testing | Playwright | +| Coverage Analysis | Istanbul, NYC, LCOV | +| API Mocking | MSW (Mock Service Worker) | +| Accessibility | jest-axe, @axe-core/playwright | + +## Quick Start + +```bash +# Generate component tests +python scripts/test_suite_generator.py src/components --include-a11y + +# Analyze coverage gaps +python scripts/coverage_analyzer.py coverage/coverage-final.json --threshold 80 --strict + +# Scaffold E2E tests for Next.js +python scripts/e2e_test_scaffolder.py src/app --page-objects +``` + +## Scripts + +### test_suite_generator.py + +Scans React/TypeScript components and generates Jest + React Testing Library test stubs. + +**Features:** +- Detects functional, class, memo, and forwardRef components +- Generates render, interaction, and accessibility tests +- Identifies props requiring mock data +- Optional `--include-a11y` for jest-axe assertions + +**Usage:** +```bash +python scripts/test_suite_generator.py [options] + +Options: + --scan-only List components without generating tests + --include-a11y Add accessibility test assertions + --output DIR Output directory for test files +``` + +### coverage_analyzer.py + +Parses Istanbul JSON or LCOV coverage reports and identifies testing gaps. 
+ +**Features:** +- Calculates line, branch, function, and statement coverage +- Identifies critical untested paths (auth, payment, API routes) +- Generates text and HTML reports +- Threshold enforcement with `--strict` flag + +**Usage:** +```bash +python scripts/coverage_analyzer.py [options] + +Options: + --threshold N Minimum coverage percentage (default: 80) + --strict Exit with error if below threshold + --format FORMAT Output format: text, json, html + --output FILE Output file path +``` + +### e2e_test_scaffolder.py + +Scans Next.js App Router or Pages Router directories and generates Playwright tests. + +**Features:** +- Detects routes, dynamic parameters, and layouts +- Generates test files per route with navigation and content checks +- Optional Page Object Model class generation +- Generates `playwright.config.ts` and auth fixtures + +**Usage:** +```bash +python scripts/e2e_test_scaffolder.py [options] + +Options: + --page-objects Generate Page Object Model classes + --output DIR Output directory for E2E tests + --base-url URL Base URL for tests (default: http://localhost:3000) +``` + +## References + +### testing_strategies.md (650 lines) + +Comprehensive testing strategy guide covering: +- Test pyramid and distribution (70% unit, 20% integration, 10% E2E) +- Coverage targets by project type +- Testing types (unit, integration, E2E, visual, accessibility) +- CI/CD integration patterns +- Testing decision framework + +### test_automation_patterns.md (1010 lines) + +React/Next.js test automation patterns: +- Page Object Model implementation for Playwright +- Test data factories and builder patterns +- Fixture management (Playwright and Jest) +- Mocking strategies (MSW, Jest module mocking) +- Custom test utilities (`renderWithProviders`) +- Async testing patterns +- Snapshot testing guidelines + +### qa_best_practices.md (965 lines) + +Quality assurance best practices: +- Writing testable React code +- Test naming conventions (Describe-It pattern) +- 
Arrange-Act-Assert structure +- Test isolation principles +- Handling flaky tests +- Debugging failed tests +- Quality metrics and KPIs + +## Workflows + +### Workflow 1: New Component Testing + +1. Create component in `src/components/` +2. Run `test_suite_generator.py` to generate test stub +3. Fill in test assertions based on component behavior +4. Run `npm test` to verify tests pass +5. Check coverage with `coverage_analyzer.py` + +### Workflow 2: E2E Test Setup + +1. Run `e2e_test_scaffolder.py` on your Next.js app directory +2. Review generated tests in `e2e/` directory +3. Customize Page Objects for complex interactions +4. Run `npx playwright test` to execute +5. Configure CI/CD with generated `playwright.config.ts` + +### Workflow 3: Coverage Gap Analysis + +1. Run tests with coverage: `npm test -- --coverage` +2. Analyze with `coverage_analyzer.py --strict --threshold 80` +3. Review critical untested paths in report +4. Prioritize tests for auth, payment, and API routes +5. Re-run analysis to verify improvement + +## Test Pyramid Targets + +| Test Type | Ratio | Focus | +|-----------|-------|-------| +| Unit | 70% | Individual functions, utilities, hooks | +| Integration | 20% | Component interactions, API calls, state | +| E2E | 10% | Critical user journeys, happy paths | + +## Coverage Targets + +| Project Type | Line | Branch | Function | +|--------------|------|--------|----------| +| Startup/MVP | 60% | 50% | 70% | +| Production | 80% | 70% | 85% | +| Enterprise | 90% | 85% | 95% | + +## CI/CD Integration + +```yaml +# .github/workflows/test.yml +jobs: + test: + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v4 + - name: Install dependencies + run: npm ci + - name: Run unit tests + run: npm test -- --coverage + - name: Run E2E tests + run: npx playwright test + - name: Upload coverage + uses: codecov/codecov-action@v4 +``` + +## Related Skills + +- **senior-frontend** - React/Next.js component development +- **senior-fullstack** - Full 
application architecture +- **senior-devops** - CI/CD pipeline setup +- **code-reviewer** - Code review with testing focus + +--- + +**Version:** 2.0.0 +**Last Updated:** January 2026 +**Tech Focus:** React 18+, Next.js 14+, Jest 29+, Playwright 1.40+ diff --git a/engineering-team/senior-qa/SKILL.md b/engineering-team/senior-qa/SKILL.md index d94d2d4..5776ac8 100644 --- a/engineering-team/senior-qa/SKILL.md +++ b/engineering-team/senior-qa/SKILL.md @@ -1,209 +1,395 @@ --- name: senior-qa -description: Comprehensive QA and testing skill for quality assurance, test automation, and testing strategies for ReactJS, NextJS, NodeJS applications. Includes test suite generation, coverage analysis, E2E testing setup, and quality metrics. Use when designing test strategies, writing test cases, implementing test automation, performing manual testing, or analyzing test coverage. +description: This skill should be used when the user asks to "generate tests", "write unit tests", "analyze test coverage", "scaffold E2E tests", "set up Playwright", "configure Jest", "implement testing patterns", or "improve test quality". Use for React/Next.js testing with Jest, React Testing Library, and Playwright. --- -# Senior Qa +# Senior QA Engineer -Complete toolkit for senior qa with modern tools and best practices. +Test automation, coverage analysis, and quality assurance patterns for React and Next.js applications. 
+ +## Table of Contents + +- [Quick Start](#quick-start) +- [Tools Overview](#tools-overview) + - [Test Suite Generator](#1-test-suite-generator) + - [Coverage Analyzer](#2-coverage-analyzer) + - [E2E Test Scaffolder](#3-e2e-test-scaffolder) +- [QA Workflows](#qa-workflows) + - [Unit Test Generation Workflow](#unit-test-generation-workflow) + - [Coverage Analysis Workflow](#coverage-analysis-workflow) + - [E2E Test Setup Workflow](#e2e-test-setup-workflow) +- [Reference Documentation](#reference-documentation) +- [Common Patterns Quick Reference](#common-patterns-quick-reference) + +--- ## Quick Start -### Main Capabilities - -This skill provides three core capabilities through automated scripts: - ```bash -# Script 1: Test Suite Generator -python scripts/test_suite_generator.py [options] +# Generate Jest test stubs for React components +python scripts/test_suite_generator.py src/components/ --output __tests__/ -# Script 2: Coverage Analyzer -python scripts/coverage_analyzer.py [options] +# Analyze test coverage from Jest/Istanbul reports +python scripts/coverage_analyzer.py coverage/coverage-final.json --threshold 80 -# Script 3: E2E Test Scaffolder -python scripts/e2e_test_scaffolder.py [options] +# Scaffold Playwright E2E tests for Next.js routes +python scripts/e2e_test_scaffolder.py src/app/ --output e2e/ ``` -## Core Capabilities +--- + +## Tools Overview ### 1. Test Suite Generator -Automated tool for test suite generator tasks. +Scans React/TypeScript components and generates Jest + React Testing Library test stubs with proper structure. 
-**Features:** -- Automated scaffolding -- Best practices built-in -- Configurable templates -- Quality checks +**Input:** Source directory containing React components +**Output:** Test files with describe blocks, render tests, interaction tests **Usage:** ```bash -python scripts/test_suite_generator.py [options] +# Basic usage - scan components and generate tests +python scripts/test_suite_generator.py src/components/ --output __tests__/ + +# Output: +# Scanning: src/components/ +# Found 24 React components +# +# Generated tests: +# __tests__/Button.test.tsx (render, click handler, disabled state) +# __tests__/Modal.test.tsx (render, open/close, keyboard events) +# __tests__/Form.test.tsx (render, validation, submission) +# ... +# +# Summary: 24 test files, 87 test cases + +# Include accessibility tests +python scripts/test_suite_generator.py src/ --output __tests__/ --include-a11y + +# Generate with custom template +python scripts/test_suite_generator.py src/ --template custom-template.tsx ``` +**Supported Patterns:** +- Functional components with hooks +- Components with Context providers +- Components with data fetching +- Form components with validation + +--- + ### 2. Coverage Analyzer -Comprehensive analysis and optimization tool. +Parses Jest/Istanbul coverage reports and identifies gaps, uncovered branches, and provides actionable recommendations. 
-**Features:** -- Deep analysis -- Performance metrics -- Recommendations -- Automated fixes +**Input:** Coverage report (JSON or LCOV format) +**Output:** Coverage analysis with recommendations **Usage:** ```bash -python scripts/coverage_analyzer.py [--verbose] +# Analyze coverage report +python scripts/coverage_analyzer.py coverage/coverage-final.json + +# Output: +# === Coverage Analysis Report === +# Overall: 72.4% (target: 80%) +# +# BY TYPE: +# Statements: 74.2% +# Branches: 68.1% +# Functions: 71.8% +# Lines: 73.5% +# +# CRITICAL GAPS (uncovered business logic): +# src/services/payment.ts:45-67 - Payment processing +# src/hooks/useAuth.ts:23-41 - Authentication flow +# +# RECOMMENDATIONS: +# 1. Add tests for payment service error handling +# 2. Cover authentication edge cases +# 3. Test form validation branches +# +# Files below threshold (80%): +# src/components/Checkout.tsx: 45% +# src/services/api.ts: 62% + +# Enforce threshold (exit 1 if below) +python scripts/coverage_analyzer.py coverage/ --threshold 80 --strict + +# Generate HTML report +python scripts/coverage_analyzer.py coverage/ --format html --output report.html ``` +--- + ### 3. E2E Test Scaffolder -Advanced tooling for specialized tasks. +Scans Next.js pages/app directory and generates Playwright test files with common interactions. 
-**Features:** -- Expert-level automation -- Custom configurations -- Integration ready -- Production-grade output +**Input:** Next.js pages or app directory +**Output:** Playwright test files organized by route **Usage:** ```bash -python scripts/e2e_test_scaffolder.py [arguments] [options] +# Scaffold E2E tests for Next.js App Router +python scripts/e2e_test_scaffolder.py src/app/ --output e2e/ + +# Output: +# Scanning: src/app/ +# Found 12 routes +# +# Generated E2E tests: +# e2e/home.spec.ts (navigation, hero section) +# e2e/auth/login.spec.ts (form submission, validation) +# e2e/auth/register.spec.ts (registration flow) +# e2e/dashboard.spec.ts (authenticated routes) +# e2e/products/[id].spec.ts (dynamic routes) +# ... +# +# Generated: playwright.config.ts +# Generated: e2e/fixtures/auth.ts + +# Include Page Object Model classes +python scripts/e2e_test_scaffolder.py src/app/ --output e2e/ --include-pom + +# Generate for specific routes +python scripts/e2e_test_scaffolder.py src/app/ --routes "/login,/dashboard,/checkout" ``` +--- + +## QA Workflows + +### Unit Test Generation Workflow + +Use when setting up tests for new or existing React components. 
+ +**Step 1: Scan project for untested components** +```bash +python scripts/test_suite_generator.py src/components/ --scan-only +``` + +**Step 2: Generate test stubs** +```bash +python scripts/test_suite_generator.py src/components/ --output __tests__/ +``` + +**Step 3: Review and customize generated tests** +```typescript +// __tests__/Button.test.tsx (generated) +import { render, screen, fireEvent } from '@testing-library/react'; +import { Button } from '../src/components/Button'; + +describe('Button', () => { + it('renders with label', () => { + render(); + expect(screen.getByRole('button', { name: /click me/i })).toBeInTheDocument(); + }); + + it('calls onClick when clicked', () => { + const handleClick = jest.fn(); + render(); + fireEvent.click(screen.getByRole('button')); + expect(handleClick).toHaveBeenCalledTimes(1); + }); + + // TODO: Add your specific test cases +}); +``` + +**Step 4: Run tests and check coverage** +```bash +npm test -- --coverage +python scripts/coverage_analyzer.py coverage/coverage-final.json +``` + +--- + +### Coverage Analysis Workflow + +Use when improving test coverage or preparing for release. + +**Step 1: Generate coverage report** +```bash +npm test -- --coverage --coverageReporters=json +``` + +**Step 2: Analyze coverage gaps** +```bash +python scripts/coverage_analyzer.py coverage/coverage-final.json --threshold 80 +``` + +**Step 3: Identify critical paths** +```bash +python scripts/coverage_analyzer.py coverage/ --critical-paths +``` + +**Step 4: Generate missing test stubs** +```bash +python scripts/test_suite_generator.py src/ --uncovered-only --output __tests__/ +``` + +**Step 5: Verify improvement** +```bash +npm test -- --coverage +python scripts/coverage_analyzer.py coverage/ --compare previous-coverage.json +``` + +--- + +### E2E Test Setup Workflow + +Use when setting up Playwright for a Next.js project. 
+ +**Step 1: Initialize Playwright (if not installed)** +```bash +npm init playwright@latest +``` + +**Step 2: Scaffold E2E tests from routes** +```bash +python scripts/e2e_test_scaffolder.py src/app/ --output e2e/ +``` + +**Step 3: Configure authentication fixtures** +```typescript +// e2e/fixtures/auth.ts (generated) +import { test as base } from '@playwright/test'; + +export const test = base.extend({ + authenticatedPage: async ({ page }, use) => { + await page.goto('/login'); + await page.fill('[name="email"]', 'test@example.com'); + await page.fill('[name="password"]', 'password'); + await page.click('button[type="submit"]'); + await page.waitForURL('/dashboard'); + await use(page); + }, +}); +``` + +**Step 4: Run E2E tests** +```bash +npx playwright test +npx playwright show-report +``` + +**Step 5: Add to CI pipeline** +```yaml +# .github/workflows/e2e.yml +- name: Run E2E tests + run: npx playwright test +- name: Upload report + uses: actions/upload-artifact@v3 + with: + name: playwright-report + path: playwright-report/ +``` + +--- + ## Reference Documentation -### Testing Strategies +| File | Contains | Use When | +|------|----------|----------| +| `references/testing_strategies.md` | Test pyramid, testing types, coverage targets, CI/CD integration | Designing test strategy | +| `references/test_automation_patterns.md` | Page Object Model, mocking (MSW), fixtures, async patterns | Writing test code | +| `references/qa_best_practices.md` | Testable code, flaky tests, debugging, quality metrics | Improving test quality | -Comprehensive guide available in `references/testing_strategies.md`: +--- -- Detailed patterns and practices -- Code examples -- Best practices -- Anti-patterns to avoid -- Real-world scenarios +## Common Patterns Quick Reference -### Test Automation Patterns +### React Testing Library Queries -Complete workflow documentation in `references/test_automation_patterns.md`: +```typescript +// Preferred (accessible) +screen.getByRole('button', 
{ name: /submit/i }) +screen.getByLabelText(/email/i) +screen.getByPlaceholderText(/search/i) -- Step-by-step processes -- Optimization strategies -- Tool integrations -- Performance tuning -- Troubleshooting guide - -### Qa Best Practices - -Technical reference guide in `references/qa_best_practices.md`: - -- Technology stack details -- Configuration examples -- Integration patterns -- Security considerations -- Scalability guidelines - -## Tech Stack - -**Languages:** TypeScript, JavaScript, Python, Go, Swift, Kotlin -**Frontend:** React, Next.js, React Native, Flutter -**Backend:** Node.js, Express, GraphQL, REST APIs -**Database:** PostgreSQL, Prisma, NeonDB, Supabase -**DevOps:** Docker, Kubernetes, Terraform, GitHub Actions, CircleCI -**Cloud:** AWS, GCP, Azure - -## Development Workflow - -### 1. Setup and Configuration - -```bash -# Install dependencies -npm install -# or -pip install -r requirements.txt - -# Configure environment -cp .env.example .env +// Fallback +screen.getByTestId('custom-element') ``` -### 2. Run Quality Checks +### Async Testing -```bash -# Use the analyzer script -python scripts/coverage_analyzer.py . +```typescript +// Wait for element +await screen.findByText(/loaded/i); -# Review recommendations -# Apply fixes +// Wait for removal +await waitForElementToBeRemoved(() => screen.queryByText(/loading/i)); + +// Wait for condition +await waitFor(() => { + expect(mockFn).toHaveBeenCalled(); +}); ``` -### 3. 
Implement Best Practices +### Mocking with MSW -Follow the patterns and practices documented in: -- `references/testing_strategies.md` -- `references/test_automation_patterns.md` -- `references/qa_best_practices.md` +```typescript +import { rest } from 'msw'; +import { setupServer } from 'msw/node'; -## Best Practices Summary +const server = setupServer( + rest.get('/api/users', (req, res, ctx) => { + return res(ctx.json([{ id: 1, name: 'John' }])); + }) +); -### Code Quality -- Follow established patterns -- Write comprehensive tests -- Document decisions -- Review regularly +beforeAll(() => server.listen()); +afterEach(() => server.resetHandlers()); +afterAll(() => server.close()); +``` -### Performance -- Measure before optimizing -- Use appropriate caching -- Optimize critical paths -- Monitor in production +### Playwright Locators -### Security -- Validate all inputs -- Use parameterized queries -- Implement proper authentication -- Keep dependencies updated +```typescript +// Preferred +page.getByRole('button', { name: 'Submit' }) +page.getByLabel('Email') +page.getByText('Welcome') -### Maintainability -- Write clear code -- Use consistent naming -- Add helpful comments -- Keep it simple +// Chaining +page.getByRole('listitem').filter({ hasText: 'Product' }) +``` + +### Coverage Thresholds (jest.config.js) + +```javascript +module.exports = { + coverageThreshold: { + global: { + branches: 80, + functions: 80, + lines: 80, + statements: 80, + }, + }, +}; +``` + +--- ## Common Commands ```bash -# Development -npm run dev -npm run build -npm run test -npm run lint +# Jest +npm test # Run all tests +npm test -- --watch # Watch mode +npm test -- --coverage # With coverage +npm test -- Button.test.tsx # Single file -# Analysis -python scripts/coverage_analyzer.py . 
-python scripts/e2e_test_scaffolder.py --analyze +# Playwright +npx playwright test # Run all E2E tests +npx playwright test --ui # UI mode +npx playwright test --debug # Debug mode +npx playwright codegen # Generate tests -# Deployment -docker build -t app:latest . -docker-compose up -d -kubectl apply -f k8s/ +# Coverage +npm test -- --coverage --coverageReporters=lcov,json +python scripts/coverage_analyzer.py coverage/coverage-final.json ``` - -## Troubleshooting - -### Common Issues - -Check the comprehensive troubleshooting section in `references/qa_best_practices.md`. - -### Getting Help - -- Review reference documentation -- Check script output messages -- Consult tech stack documentation -- Review error logs - -## Resources - -- Pattern Reference: `references/testing_strategies.md` -- Workflow Guide: `references/test_automation_patterns.md` -- Technical Guide: `references/qa_best_practices.md` -- Tool Scripts: `scripts/` directory diff --git a/engineering-team/senior-qa/references/qa_best_practices.md b/engineering-team/senior-qa/references/qa_best_practices.md index a014e93..23f29f5 100644 --- a/engineering-team/senior-qa/references/qa_best_practices.md +++ b/engineering-team/senior-qa/references/qa_best_practices.md @@ -1,103 +1,964 @@ -# Qa Best Practices +# QA Best Practices for React and Next.js -## Overview +Guidelines for writing maintainable tests, debugging failures, and measuring test quality. -This reference guide provides comprehensive information for senior qa. 
+--- -## Patterns and Practices +## Table of Contents -### Pattern 1: Best Practice Implementation +- [Writing Testable Code](#writing-testable-code) +- [Test Naming Conventions](#test-naming-conventions) +- [Arrange-Act-Assert Pattern](#arrange-act-assert-pattern) +- [Test Isolation Principles](#test-isolation-principles) +- [Handling Flaky Tests](#handling-flaky-tests) +- [Code Review for Testability](#code-review-for-testability) +- [Test Maintenance Strategies](#test-maintenance-strategies) +- [Debugging Failed Tests](#debugging-failed-tests) +- [Quality Metrics and KPIs](#quality-metrics-and-kpis) -**Description:** -Detailed explanation of the pattern. +--- -**When to Use:** -- Scenario 1 -- Scenario 2 -- Scenario 3 +## Writing Testable Code + +Testable code is easy to understand, has clear boundaries, and minimizes dependencies. + +### Dependency Injection + +Instead of creating dependencies inside functions, pass them as parameters. + +**Hard to Test:** -**Implementation:** ```typescript -// Example code implementation -export class Example { - // Implementation details +// src/services/userService.ts +import { prisma } from '../lib/prisma'; +import { sendEmail } from '../lib/email'; + +export async function createUser(data: UserInput) { + const user = await prisma.user.create({ data }); + await sendEmail(user.email, 'Welcome!'); + return user; } ``` -**Benefits:** -- Benefit 1 -- Benefit 2 -- Benefit 3 +**Easy to Test:** -**Trade-offs:** -- Consider 1 -- Consider 2 -- Consider 3 - -### Pattern 2: Advanced Technique - -**Description:** -Another important pattern for senior qa. 
- -**Implementation:** ```typescript -// Advanced example -async function advancedExample() { - // Code here +// src/services/userService.ts +export function createUserService( + db: PrismaClient, + emailService: EmailService +) { + return { + async createUser(data: UserInput) { + const user = await db.user.create({ data }); + await emailService.send(user.email, 'Welcome!'); + return user; + }, + }; +} + +// Usage in app +const userService = createUserService(prisma, emailService); + +// Usage in tests +const mockDb = { user: { create: jest.fn() } }; +const mockEmail = { send: jest.fn() }; +const testService = createUserService(mockDb, mockEmail); +``` + +### Pure Functions + +Pure functions are deterministic and have no side effects, making them trivial to test. + +**Impure (Hard to Test):** + +```typescript +function formatTimestamp() { + const now = new Date(); + return `${now.getFullYear()}-${now.getMonth() + 1}-${now.getDate()}`; } ``` -## Guidelines +**Pure (Easy to Test):** -### Code Organization -- Clear structure -- Logical separation -- Consistent naming -- Proper documentation +```typescript +function formatTimestamp(date: Date): string { + return `${date.getFullYear()}-${date.getMonth() + 1}-${date.getDate()}`; +} -### Performance Considerations -- Optimization strategies -- Bottleneck identification -- Monitoring approaches -- Scaling techniques +// Test +expect(formatTimestamp(new Date('2024-03-15'))).toBe('2024-3-15'); +``` -### Security Best Practices -- Input validation -- Authentication -- Authorization -- Data protection +### Separation of Concerns -## Common Patterns +Separate business logic from UI and I/O operations. -### Pattern A -Implementation details and examples. +**Mixed Concerns (Hard to Test):** -### Pattern B -Implementation details and examples. +```typescript +// Component with embedded business logic +function CheckoutForm() { + const [total, setTotal] = useState(0); -### Pattern C -Implementation details and examples. 
+ const handleSubmit = async (items: CartItem[]) => { + // Business logic mixed with UI + let sum = 0; + for (const item of items) { + sum += item.price * item.quantity; + if (item.category === 'electronics') { + sum *= 0.9; // 10% discount + } + } + const tax = sum * 0.08; + const finalTotal = sum + tax; -## Anti-Patterns to Avoid + // API call + await fetch('/api/orders', { + method: 'POST', + body: JSON.stringify({ items, total: finalTotal }), + }); -### Anti-Pattern 1 -What not to do and why. + setTotal(finalTotal); + }; -### Anti-Pattern 2 -What not to do and why. + return
<form onSubmit={() => handleSubmit(items)}>...</form>
; +} +``` -## Tools and Resources +**Separated Concerns (Easy to Test):** -### Recommended Tools -- Tool 1: Purpose -- Tool 2: Purpose -- Tool 3: Purpose +```typescript +// Pure business logic (easy to unit test) +export function calculateOrderTotal(items: CartItem[]): number { + return items.reduce((sum, item) => { + const subtotal = item.price * item.quantity; + const discount = item.category === 'electronics' ? 0.9 : 1; + return sum + subtotal * discount; + }, 0); +} -### Further Reading -- Resource 1 -- Resource 2 -- Resource 3 +export function calculateTax(subtotal: number, rate = 0.08): number { + return subtotal * rate; +} -## Conclusion +// Custom hook for order logic (testable with renderHook) +export function useCheckout() { + const [total, setTotal] = useState(0); + const mutation = useMutation(createOrder); -Key takeaways for using this reference guide effectively. + const checkout = async (items: CartItem[]) => { + const subtotal = calculateOrderTotal(items); + const tax = calculateTax(subtotal); + const finalTotal = subtotal + tax; + + await mutation.mutateAsync({ items, total: finalTotal }); + setTotal(finalTotal); + }; + + return { checkout, total, isLoading: mutation.isLoading }; +} + +// Component (integration testable) +function CheckoutForm() { + const { checkout, total, isLoading } = useCheckout(); + return
<form onSubmit={() => checkout(items)}>...</form>
; +} +``` + +### Component Design for Testability + +| Pattern | Testability | Example | +|---------|-------------|---------| +| Props over context | High | ` + ); + expect(container.firstChild).toMatchSnapshot(); + }); + + it('renders secondary variant', () => { + const { container } = render( + + ); + expect(container.firstChild).toMatchSnapshot(); + }); + + it('renders disabled state', () => { + const { container } = render( + + ); + expect(container.firstChild).toMatchSnapshot(); + }); +}); +``` + +### Inline Snapshots + +```typescript +// Good for small, stable outputs +it('formats date correctly', () => { + const result = formatDate(new Date('2024-01-15')); + expect(result).toMatchInlineSnapshot(`"January 15, 2024"`); +}); + +it('generates expected error message', () => { + const error = new ValidationError('email', 'Invalid format'); + expect(error.message).toMatchInlineSnapshot( + `"Validation failed for 'email': Invalid format"` + ); +}); +``` + +### Snapshot Best Practices + +1. **Keep snapshots small** - Snapshot specific elements, not entire pages +2. **Use inline snapshots for small outputs** - Easier to review in code +3. **Review snapshot changes carefully** - Don't blindly update +4. **Avoid snapshots for dynamic content** - Filter out timestamps, IDs +5. **Combine with other assertions** - Snapshots complement, not replace + +```typescript +// Filtering dynamic content from snapshots +it('renders user card', () => { + const { container } = render(); + + // Remove dynamic elements before snapshot + const card = container.firstChild; + const timestamp = card.querySelector('.timestamp'); + timestamp?.remove(); + + expect(card).toMatchSnapshot(); +}); +``` + +--- + +## Summary + +1. **Use Page Objects** for complex, reusable page interactions +2. **Build factories** for consistent test data creation +3. **Leverage MSW** for realistic API mocking +4. **Create custom render utilities** for provider wrapping +5. 
**Master async patterns** to avoid flaky tests +6. **Use snapshots wisely** for stable, static content only diff --git a/engineering-team/senior-qa/references/testing_strategies.md b/engineering-team/senior-qa/references/testing_strategies.md index 76ef9ba..c429012 100644 --- a/engineering-team/senior-qa/references/testing_strategies.md +++ b/engineering-team/senior-qa/references/testing_strategies.md @@ -1,103 +1,649 @@ -# Testing Strategies +# Testing Strategies for React and Next.js Applications -## Overview +Comprehensive guide to test architecture, coverage targets, and CI/CD integration patterns. -This reference guide provides comprehensive information for senior qa. +--- -## Patterns and Practices +## Table of Contents -### Pattern 1: Best Practice Implementation +- [The Testing Pyramid](#the-testing-pyramid) +- [Testing Types Deep Dive](#testing-types-deep-dive) +- [Coverage Targets and Thresholds](#coverage-targets-and-thresholds) +- [Test Organization Patterns](#test-organization-patterns) +- [CI/CD Integration Strategies](#cicd-integration-strategies) +- [Testing Decision Framework](#testing-decision-framework) -**Description:** -Detailed explanation of the pattern. +--- -**When to Use:** -- Scenario 1 -- Scenario 2 -- Scenario 3 +## The Testing Pyramid -**Implementation:** -```typescript -// Example code implementation -export class Example { - // Implementation details -} +The testing pyramid guides how to distribute testing effort across different test types for optimal ROI. 
+ +### Classic Pyramid Structure + +``` + /\ + / \ E2E Tests (5-10%) + /----\ - User journey validation + / \ - Critical path coverage + /--------\ Integration Tests (20-30%) + / \ - Component interactions + / \ - API integration + /--------------\ Unit Tests (60-70%) +/ \ - Individual functions +------------------ - Isolated components ``` -**Benefits:** -- Benefit 1 -- Benefit 2 -- Benefit 3 +### React/Next.js Adapted Pyramid -**Trade-offs:** -- Consider 1 -- Consider 2 -- Consider 3 +For frontend applications, the pyramid shifts slightly: -### Pattern 2: Advanced Technique +| Level | Percentage | Tools | Focus | +|-------|------------|-------|-------| +| Unit | 50-60% | Jest, RTL | Pure functions, hooks, isolated components | +| Integration | 25-35% | RTL, MSW | Component trees, API calls, context | +| E2E | 10-15% | Playwright | Critical user flows, cross-page navigation | -**Description:** -Another important pattern for senior qa. +### Why This Distribution? + +**Unit tests are fast and cheap:** +- Execute in milliseconds +- Pinpoint failures precisely +- Easy to maintain +- Run on every commit + +**Integration tests balance coverage and cost:** +- Test realistic scenarios +- Catch component interaction bugs +- Moderate execution time +- Run on every PR + +**E2E tests are expensive but essential:** +- Validate real user experience +- Catch deployment issues +- Slow and brittle +- Run on staging/production + +--- + +## Testing Types Deep Dive + +### Unit Testing + +**Purpose:** Verify individual units of code work correctly in isolation. 
+ +**What to Unit Test:** +- Pure utility functions +- Custom hooks (with renderHook) +- Individual component rendering +- State reducers +- Validation logic +- Data transformers + +**Example: Testing a Pure Function** -**Implementation:** ```typescript -// Advanced example -async function advancedExample() { - // Code here +// utils/formatPrice.ts +export function formatPrice(cents: number, currency = 'USD'): string { + const formatter = new Intl.NumberFormat('en-US', { + style: 'currency', + currency, + }); + return formatter.format(cents / 100); } + +// utils/formatPrice.test.ts +describe('formatPrice', () => { + it('formats cents to USD by default', () => { + expect(formatPrice(1999)).toBe('$19.99'); + }); + + it('handles zero', () => { + expect(formatPrice(0)).toBe('$0.00'); + }); + + it('supports different currencies', () => { + expect(formatPrice(1999, 'EUR')).toContain('€'); + }); + + it('handles large numbers', () => { + expect(formatPrice(100000000)).toBe('$1,000,000.00'); + }); +}); ``` -## Guidelines +**Example: Testing a Custom Hook** -### Code Organization -- Clear structure -- Logical separation -- Consistent naming -- Proper documentation +```typescript +// hooks/useCounter.ts +export function useCounter(initial = 0) { + const [count, setCount] = useState(initial); + const increment = () => setCount(c => c + 1); + const decrement = () => setCount(c => c - 1); + const reset = () => setCount(initial); + return { count, increment, decrement, reset }; +} -### Performance Considerations -- Optimization strategies -- Bottleneck identification -- Monitoring approaches -- Scaling techniques +// hooks/useCounter.test.ts +import { renderHook, act } from '@testing-library/react'; +import { useCounter } from './useCounter'; -### Security Best Practices -- Input validation -- Authentication -- Authorization -- Data protection +describe('useCounter', () => { + it('starts with initial value', () => { + const { result } = renderHook(() => useCounter(5)); + 
expect(result.current.count).toBe(5); + }); -## Common Patterns + it('increments count', () => { + const { result } = renderHook(() => useCounter(0)); + act(() => result.current.increment()); + expect(result.current.count).toBe(1); + }); -### Pattern A -Implementation details and examples. + it('decrements count', () => { + const { result } = renderHook(() => useCounter(5)); + act(() => result.current.decrement()); + expect(result.current.count).toBe(4); + }); -### Pattern B -Implementation details and examples. + it('resets to initial value', () => { + const { result } = renderHook(() => useCounter(10)); + act(() => result.current.increment()); + act(() => result.current.reset()); + expect(result.current.count).toBe(10); + }); +}); +``` -### Pattern C -Implementation details and examples. +### Integration Testing -## Anti-Patterns to Avoid +**Purpose:** Verify multiple units work together correctly. -### Anti-Pattern 1 -What not to do and why. +**What to Integration Test:** +- Component trees with multiple children +- Components with context providers +- Form submission flows +- API call and response handling +- State management interactions +- Router-dependent components -### Anti-Pattern 2 -What not to do and why. +**Example: Testing Component with API Call** -## Tools and Resources +```typescript +// components/UserProfile.tsx +export function UserProfile({ userId }: { userId: string }) { + const [user, setUser] = useState(null); + const [loading, setLoading] = useState(true); + const [error, setError] = useState(null); -### Recommended Tools -- Tool 1: Purpose -- Tool 2: Purpose -- Tool 3: Purpose + useEffect(() => { + fetch(`/api/users/${userId}`) + .then(res => res.json()) + .then(data => setUser(data)) + .catch(err => setError(err.message)) + .finally(() => setLoading(false)); + }, [userId]); -### Further Reading -- Resource 1 -- Resource 2 -- Resource 3 + if (loading) return
Loading...
; + if (error) return
Error: {error}
; + return
{user?.name}
; +} -## Conclusion +// components/UserProfile.test.tsx +import { render, screen, waitFor } from '@testing-library/react'; +import { rest } from 'msw'; +import { setupServer } from 'msw/node'; +import { UserProfile } from './UserProfile'; -Key takeaways for using this reference guide effectively. +const server = setupServer( + rest.get('/api/users/:id', (req, res, ctx) => { + return res(ctx.json({ id: req.params.id, name: 'John Doe' })); + }) +); + +beforeAll(() => server.listen()); +afterEach(() => server.resetHandlers()); +afterAll(() => server.close()); + +describe('UserProfile', () => { + it('shows loading state initially', () => { + render(); + expect(screen.getByText('Loading...')).toBeInTheDocument(); + }); + + it('displays user name after loading', async () => { + render(); + await waitFor(() => { + expect(screen.getByText('John Doe')).toBeInTheDocument(); + }); + }); + + it('displays error on API failure', async () => { + server.use( + rest.get('/api/users/:id', (req, res, ctx) => { + return res(ctx.status(500)); + }) + ); + render(); + await waitFor(() => { + expect(screen.getByText(/Error/)).toBeInTheDocument(); + }); + }); +}); +``` + +### End-to-End Testing + +**Purpose:** Verify complete user flows work in a real browser environment. 
+ +**What to E2E Test:** +- Critical business flows (checkout, signup, login) +- Cross-page navigation sequences +- Authentication flows +- Third-party integrations +- Payment processing +- Form wizards + +**Example: Testing Checkout Flow** + +```typescript +// e2e/checkout.spec.ts +import { test, expect } from '@playwright/test'; + +test.describe('Checkout Flow', () => { + test.beforeEach(async ({ page }) => { + await page.goto('/'); + }); + + test('completes purchase successfully', async ({ page }) => { + // Add product to cart + await page.goto('/products/widget-pro'); + await page.getByRole('button', { name: 'Add to Cart' }).click(); + + // Verify cart updated + await expect(page.getByTestId('cart-count')).toHaveText('1'); + + // Go to checkout + await page.getByRole('link', { name: 'Checkout' }).click(); + + // Fill shipping info + await page.getByLabel('Email').fill('test@example.com'); + await page.getByLabel('Address').fill('123 Test St'); + await page.getByLabel('City').fill('Test City'); + await page.getByLabel('Zip').fill('12345'); + + // Fill payment info (test card) + await page.getByLabel('Card Number').fill('4242424242424242'); + await page.getByLabel('Expiry').fill('12/25'); + await page.getByLabel('CVC').fill('123'); + + // Submit order + await page.getByRole('button', { name: 'Place Order' }).click(); + + // Verify confirmation + await expect(page).toHaveURL(/\/orders\/\w+/); + await expect(page.getByText('Order Confirmed')).toBeVisible(); + }); + + test('shows validation errors for invalid input', async ({ page }) => { + await page.goto('/checkout'); + await page.getByRole('button', { name: 'Place Order' }).click(); + + await expect(page.getByText('Email is required')).toBeVisible(); + await expect(page.getByText('Address is required')).toBeVisible(); + }); +}); +``` + +### Visual Regression Testing + +**Purpose:** Catch unintended visual changes to UI components. 
+ +**Tools:** Playwright visual comparisons, Percy, Chromatic + +**Example: Visual Snapshot Test** + +```typescript +// e2e/visual/components.spec.ts +import { test, expect } from '@playwright/test'; + +test.describe('Visual Regression', () => { + test('button variants render correctly', async ({ page }) => { + await page.goto('/storybook/button'); + await expect(page).toHaveScreenshot('button-variants.png'); + }); + + test('responsive header', async ({ page }) => { + // Desktop + await page.setViewportSize({ width: 1280, height: 720 }); + await page.goto('/'); + await expect(page.locator('header')).toHaveScreenshot('header-desktop.png'); + + // Mobile + await page.setViewportSize({ width: 375, height: 667 }); + await expect(page.locator('header')).toHaveScreenshot('header-mobile.png'); + }); +}); +``` + +### Accessibility Testing + +**Purpose:** Ensure application is usable by people with disabilities. + +**Tools:** jest-axe, @axe-core/playwright + +**Example: Automated A11y Testing** + +```typescript +// Unit/Integration level with jest-axe +import { render } from '@testing-library/react'; +import { axe, toHaveNoViolations } from 'jest-axe'; +import { Button } from './Button'; + +expect.extend(toHaveNoViolations); + +describe('Button accessibility', () => { + it('has no accessibility violations', async () => { + const { container } = render(); + const results = await axe(container); + expect(results).toHaveNoViolations(); + }); +}); + +// E2E level with Playwright + Axe +import { test, expect } from '@playwright/test'; +import AxeBuilder from '@axe-core/playwright'; + +test('homepage has no a11y violations', async ({ page }) => { + await page.goto('/'); + const results = await new AxeBuilder({ page }).analyze(); + expect(results.violations).toEqual([]); +}); +``` + +--- + +## Coverage Targets and Thresholds + +### Recommended Thresholds by Project Type + +| Project Type | Statements | Branches | Functions | Lines | 
+|--------------|------------|----------|-----------|-------| +| Startup/MVP | 60% | 50% | 60% | 60% | +| Growing Product | 75% | 70% | 75% | 75% | +| Enterprise | 85% | 80% | 85% | 85% | +| Safety Critical | 95% | 90% | 95% | 95% | + +### Coverage by Code Type + +**High Coverage Priority (80%+):** +- Business logic +- State management +- API handlers +- Form validation +- Authentication/authorization +- Payment processing + +**Medium Coverage Priority (60-80%):** +- UI components +- Utility functions +- Data transformers +- Custom hooks + +**Lower Coverage Priority (40-60%):** +- Static pages +- Simple wrappers +- Configuration files +- Types/interfaces + +### Jest Coverage Configuration + +```javascript +// jest.config.js +module.exports = { + collectCoverageFrom: [ + 'src/**/*.{ts,tsx}', + '!src/**/*.d.ts', + '!src/**/*.stories.{ts,tsx}', + '!src/**/index.{ts,tsx}', // barrel files + '!src/types/**', + ], + coverageThreshold: { + global: { + statements: 80, + branches: 75, + functions: 80, + lines: 80, + }, + // Higher thresholds for critical paths + './src/services/payment/': { + statements: 95, + branches: 90, + functions: 95, + lines: 95, + }, + './src/services/auth/': { + statements: 90, + branches: 85, + functions: 90, + lines: 90, + }, + }, + coverageReporters: ['text', 'lcov', 'html', 'json'], +}; +``` + +--- + +## Test Organization Patterns + +### Co-located Tests (Recommended for React) + +``` +src/ +├── components/ +│ ├── Button/ +│ │ ├── Button.tsx +│ │ ├── Button.test.tsx # Unit tests +│ │ ├── Button.stories.tsx # Storybook +│ │ └── index.ts +│ └── Form/ +│ ├── Form.tsx +│ ├── Form.test.tsx +│ └── Form.integration.test.tsx # Integration tests +├── hooks/ +│ ├── useAuth.ts +│ └── useAuth.test.ts +└── utils/ + ├── formatters.ts + └── formatters.test.ts +``` + +### Separate Test Directory + +``` +src/ +├── components/ +├── hooks/ +└── utils/ + +__tests__/ +├── unit/ +│ ├── components/ +│ ├── hooks/ +│ └── utils/ +├── integration/ +│ └── flows/ +└── 
fixtures/ + ├── users.json + └── products.json + +e2e/ +├── specs/ +│ ├── auth.spec.ts +│ └── checkout.spec.ts +├── fixtures/ +│ └── auth.ts +└── pages/ # Page Object Models + ├── LoginPage.ts + └── CheckoutPage.ts +``` + +### Test File Naming Conventions + +| Pattern | Use Case | +|---------|----------| +| `*.test.ts` | Unit tests | +| `*.spec.ts` | Integration/E2E tests | +| `*.integration.test.ts` | Explicit integration tests | +| `*.e2e.spec.ts` | Explicit E2E tests | +| `*.a11y.test.ts` | Accessibility tests | +| `*.visual.spec.ts` | Visual regression tests | + +--- + +## CI/CD Integration Strategies + +### Pipeline Stages + +```yaml +# .github/workflows/test.yml +name: Test Pipeline + +on: + push: + branches: [main, dev] + pull_request: + branches: [main, dev] + +jobs: + unit: + name: Unit Tests + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v4 + - uses: actions/setup-node@v4 + with: + node-version: 20 + cache: 'npm' + - run: npm ci + - run: npm run test:unit -- --coverage + - uses: codecov/codecov-action@v4 + with: + files: coverage/lcov.info + fail_ci_if_error: true + + integration: + name: Integration Tests + runs-on: ubuntu-latest + needs: unit + steps: + - uses: actions/checkout@v4 + - uses: actions/setup-node@v4 + with: + node-version: 20 + cache: 'npm' + - run: npm ci + - run: npm run test:integration + + e2e: + name: E2E Tests + runs-on: ubuntu-latest + needs: integration + steps: + - uses: actions/checkout@v4 + - uses: actions/setup-node@v4 + with: + node-version: 20 + cache: 'npm' + - run: npm ci + - run: npx playwright install --with-deps + - run: npm run build + - run: npm run test:e2e + - uses: actions/upload-artifact@v4 + if: failure() + with: + name: playwright-report + path: playwright-report/ +``` + +### Test Splitting for Speed + +```yaml +# Run E2E tests in parallel across multiple machines +e2e: + strategy: + matrix: + shard: [1, 2, 3, 4] + steps: + - run: npx playwright test --shard=${{ matrix.shard }}/4 +``` + +### PR 
Gating Rules + +| Test Type | When to Run | Block Merge? | +|-----------|-------------|--------------| +| Unit | Every commit | Yes | +| Integration | Every PR | Yes | +| E2E (smoke) | Every PR | Yes | +| E2E (full) | Merge to main | No (alert only) | +| Visual | Every PR | No (review required) | +| Performance | Weekly/Release | No (alert only) | + +--- + +## Testing Decision Framework + +### When to Write Which Test + +``` +Is it a pure function with no side effects? +├── Yes → Unit test +└── No + ├── Does it make API calls or use context? + │ ├── Yes → Integration test with mocking + │ └── No + │ ├── Is it a critical user flow? + │ │ ├── Yes → E2E test + │ │ └── No → Integration test + └── Is it UI-focused with many visual states? + ├── Yes → Storybook + Visual test + └── No → Component unit test +``` + +### Test ROI Matrix + +| Test Type | Write Time | Run Time | Maintenance | Confidence | +|-----------|------------|----------|-------------|------------| +| Unit | Low | Very Fast | Low | Medium | +| Integration | Medium | Fast | Medium | High | +| E2E | High | Slow | High | Very High | +| Visual | Low | Medium | Medium | High (UI) | + +### When NOT to Test + +- Generated code (GraphQL types, Prisma client) +- Third-party library internals +- Implementation details (internal state, private methods) +- Simple pass-through wrappers +- Type definitions + +### Red Flags in Testing Strategy + +| Red Flag | Problem | Solution | +|----------|---------|----------| +| E2E tests > 30% | Slow CI, flaky tests | Push logic down to integration | +| Only unit tests | Missing interaction bugs | Add integration tests | +| Testing mocks | Not testing real behavior | Test behavior, not implementation | +| 100% coverage goal | Diminishing returns | Focus on critical paths | +| No E2E tests | Missing deployment issues | Add smoke tests for critical flows | + +--- + +## Summary + +1. **Follow the pyramid:** 60% unit, 30% integration, 10% E2E +2. 
**Set thresholds by risk:** Higher coverage for critical paths +3. **Co-locate tests:** Keep tests close to source code +4. **Automate in CI:** Run tests on every PR, gate merges on failure +5. **Decide wisely:** Not everything needs every type of test diff --git a/engineering-team/senior-qa/scripts/coverage_analyzer.py b/engineering-team/senior-qa/scripts/coverage_analyzer.py index 73e7c08..874428d 100755 --- a/engineering-team/senior-qa/scripts/coverage_analyzer.py +++ b/engineering-team/senior-qa/scripts/coverage_analyzer.py @@ -1,81 +1,799 @@ #!/usr/bin/env python3 """ Coverage Analyzer -Automated tool for senior qa tasks + +Parses Jest/Istanbul coverage reports and identifies gaps, uncovered branches, +and provides actionable recommendations for improving test coverage. + +Usage: + python coverage_analyzer.py coverage/coverage-final.json --threshold 80 + python coverage_analyzer.py coverage/ --format html --output report.html + python coverage_analyzer.py coverage/ --critical-paths """ import os import sys import json import argparse +import re from pathlib import Path -from typing import Dict, List, Optional +from typing import Dict, List, Optional, Tuple, Any +from dataclasses import dataclass, field, asdict +from datetime import datetime +from collections import defaultdict + + +@dataclass +class FileCoverage: + """Coverage data for a single file""" + path: str + statements: Tuple[int, int] # (covered, total) + branches: Tuple[int, int] + functions: Tuple[int, int] + lines: Tuple[int, int] + uncovered_lines: List[int] = field(default_factory=list) + uncovered_branches: List[str] = field(default_factory=list) + + @property + def statement_pct(self) -> float: + return (self.statements[0] / self.statements[1] * 100) if self.statements[1] > 0 else 100 + + @property + def branch_pct(self) -> float: + return (self.branches[0] / self.branches[1] * 100) if self.branches[1] > 0 else 100 + + @property + def function_pct(self) -> float: + return (self.functions[0] / 
self.functions[1] * 100) if self.functions[1] > 0 else 100 + + @property + def line_pct(self) -> float: + return (self.lines[0] / self.lines[1] * 100) if self.lines[1] > 0 else 100 + + +@dataclass +class CoverageGap: + """An identified coverage gap""" + file: str + gap_type: str # 'statements', 'branches', 'functions', 'lines' + lines: List[int] + severity: str # 'critical', 'high', 'medium', 'low' + description: str + recommendation: str + + +@dataclass +class CoverageSummary: + """Overall coverage summary""" + statements: Tuple[int, int] + branches: Tuple[int, int] + functions: Tuple[int, int] + lines: Tuple[int, int] + files_analyzed: int + files_below_threshold: int = 0 + + +class CoverageParser: + """Parses various coverage report formats""" + + def __init__(self, verbose: bool = False): + self.verbose = verbose + + def parse(self, path: Path) -> Tuple[Dict[str, FileCoverage], CoverageSummary]: + """Parse coverage data from file or directory""" + if path.is_file(): + if path.suffix == '.json': + return self._parse_istanbul_json(path) + elif path.suffix == '.info' or 'lcov' in path.name: + return self._parse_lcov(path) + elif path.is_dir(): + # Look for common coverage files + for filename in ['coverage-final.json', 'coverage-summary.json', 'lcov.info']: + candidate = path / filename + if candidate.exists(): + return self.parse(candidate) + + # Check for coverage-final.json in coverage directory + coverage_json = path / 'coverage-final.json' + if coverage_json.exists(): + return self._parse_istanbul_json(coverage_json) + + raise ValueError(f"Could not find or parse coverage data at: {path}") + + def _parse_istanbul_json(self, path: Path) -> Tuple[Dict[str, FileCoverage], CoverageSummary]: + """Parse Istanbul/Jest JSON coverage format""" + with open(path, 'r') as f: + data = json.load(f) + + files = {} + total_statements = [0, 0] + total_branches = [0, 0] + total_functions = [0, 0] + total_lines = [0, 0] + + for file_path, file_data in data.items(): + # Skip 
node_modules + if 'node_modules' in file_path: + continue + + # Parse statement coverage + s_map = file_data.get('statementMap', {}) + s_hits = file_data.get('s', {}) + covered_statements = sum(1 for h in s_hits.values() if h > 0) + total_statements[0] += covered_statements + total_statements[1] += len(s_map) + + # Parse branch coverage + b_map = file_data.get('branchMap', {}) + b_hits = file_data.get('b', {}) + covered_branches = sum( + sum(1 for h in hits if h > 0) + for hits in b_hits.values() + ) + total_branch_count = sum(len(b['locations']) for b in b_map.values()) + total_branches[0] += covered_branches + total_branches[1] += total_branch_count + + # Parse function coverage + fn_map = file_data.get('fnMap', {}) + fn_hits = file_data.get('f', {}) + covered_functions = sum(1 for h in fn_hits.values() if h > 0) + total_functions[0] += covered_functions + total_functions[1] += len(fn_map) + + # Determine uncovered lines + uncovered_lines = [] + for stmt_id, hits in s_hits.items(): + if hits == 0 and stmt_id in s_map: + stmt = s_map[stmt_id] + start_line = stmt.get('start', {}).get('line', 0) + if start_line not in uncovered_lines: + uncovered_lines.append(start_line) + + # Count lines + line_coverage = self._calculate_line_coverage(s_map, s_hits) + total_lines[0] += line_coverage[0] + total_lines[1] += line_coverage[1] + + # Identify uncovered branches + uncovered_branches = [] + for branch_id, hits in b_hits.items(): + for idx, hit in enumerate(hits): + if hit == 0: + uncovered_branches.append(f"{branch_id}:{idx}") + + files[file_path] = FileCoverage( + path=file_path, + statements=(covered_statements, len(s_map)), + branches=(covered_branches, total_branch_count), + functions=(covered_functions, len(fn_map)), + lines=line_coverage, + uncovered_lines=sorted(uncovered_lines)[:50], # Limit + uncovered_branches=uncovered_branches[:20] + ) + + summary = CoverageSummary( + statements=tuple(total_statements), + branches=tuple(total_branches), + 
functions=tuple(total_functions), + lines=tuple(total_lines), + files_analyzed=len(files) + ) + + return files, summary + + def _calculate_line_coverage(self, s_map: Dict, s_hits: Dict) -> Tuple[int, int]: + """Calculate line coverage from statement data""" + lines = set() + covered_lines = set() + + for stmt_id, stmt in s_map.items(): + start_line = stmt.get('start', {}).get('line', 0) + end_line = stmt.get('end', {}).get('line', start_line) + for line in range(start_line, end_line + 1): + lines.add(line) + if s_hits.get(stmt_id, 0) > 0: + covered_lines.add(line) + + return (len(covered_lines), len(lines)) + + def _parse_lcov(self, path: Path) -> Tuple[Dict[str, FileCoverage], CoverageSummary]: + """Parse LCOV format coverage data""" + with open(path, 'r') as f: + content = f.read() + + files = {} + current_file = None + current_data = {} + + total = { + 'statements': [0, 0], + 'branches': [0, 0], + 'functions': [0, 0], + 'lines': [0, 0] + } + + for line in content.split('\n'): + line = line.strip() + + if line.startswith('SF:'): + current_file = line[3:] + current_data = { + 'lines_hit': 0, 'lines_total': 0, + 'functions_hit': 0, 'functions_total': 0, + 'branches_hit': 0, 'branches_total': 0, + 'uncovered_lines': [] + } + elif line.startswith('DA:'): + parts = line[3:].split(',') + if len(parts) >= 2: + line_num = int(parts[0]) + hits = int(parts[1]) + current_data['lines_total'] += 1 + if hits > 0: + current_data['lines_hit'] += 1 + else: + current_data['uncovered_lines'].append(line_num) + elif line.startswith('FN:'): + current_data['functions_total'] += 1 + elif line.startswith('FNDA:'): + parts = line[5:].split(',') + if len(parts) >= 1 and int(parts[0]) > 0: + current_data['functions_hit'] += 1 + elif line.startswith('BRDA:'): + parts = line[5:].split(',') + current_data['branches_total'] += 1 + if len(parts) >= 4 and parts[3] != '-' and int(parts[3]) > 0: + current_data['branches_hit'] += 1 + elif line == 'end_of_record' and current_file: + # Skip 
node_modules + if 'node_modules' not in current_file: + files[current_file] = FileCoverage( + path=current_file, + statements=(current_data['lines_hit'], current_data['lines_total']), + branches=(current_data['branches_hit'], current_data['branches_total']), + functions=(current_data['functions_hit'], current_data['functions_total']), + lines=(current_data['lines_hit'], current_data['lines_total']), + uncovered_lines=current_data['uncovered_lines'][:50] + ) + + for key in total: + if key == 'statements' or key == 'lines': + total[key][0] += current_data['lines_hit'] + total[key][1] += current_data['lines_total'] + elif key == 'branches': + total[key][0] += current_data['branches_hit'] + total[key][1] += current_data['branches_total'] + elif key == 'functions': + total[key][0] += current_data['functions_hit'] + total[key][1] += current_data['functions_total'] + + current_file = None + + summary = CoverageSummary( + statements=tuple(total['statements']), + branches=tuple(total['branches']), + functions=tuple(total['functions']), + lines=tuple(total['lines']), + files_analyzed=len(files) + ) + + return files, summary + class CoverageAnalyzer: - """Main class for coverage analyzer functionality""" - - def __init__(self, target_path: str, verbose: bool = False): - self.target_path = Path(target_path) + """Analyzes coverage data and generates recommendations""" + + CRITICAL_PATTERNS = [ + r'auth', r'payment', r'security', r'login', r'register', + r'checkout', r'order', r'transaction', r'billing' + ] + + SERVICE_PATTERNS = [ + r'service', r'api', r'handler', r'controller', r'middleware' + ] + + def __init__( + self, + threshold: int = 80, + critical_paths: bool = False, + verbose: bool = False + ): + self.threshold = threshold + self.critical_paths = critical_paths self.verbose = verbose - self.results = {} - - def run(self) -> Dict: - """Execute the main functionality""" - print(f"🚀 Running {self.__class__.__name__}...") - print(f"📁 Target: {self.target_path}") - - try: 
- self.validate_target() - self.analyze() - self.generate_report() - - print("✅ Completed successfully!") - return self.results - - except Exception as e: - print(f"❌ Error: {e}") + + def analyze( + self, + files: Dict[str, FileCoverage], + summary: CoverageSummary + ) -> Tuple[List[CoverageGap], Dict[str, Any]]: + """Analyze coverage and return gaps and recommendations""" + gaps = [] + recommendations = { + 'critical': [], + 'high': [], + 'medium': [], + 'low': [] + } + + # Analyze each file + for file_path, coverage in files.items(): + file_gaps = self._analyze_file(file_path, coverage) + gaps.extend(file_gaps) + + # Sort gaps by severity + severity_order = {'critical': 0, 'high': 1, 'medium': 2, 'low': 3} + gaps.sort(key=lambda g: (severity_order[g.severity], -len(g.lines))) + + # Generate recommendations + for gap in gaps: + recommendations[gap.severity].append({ + 'file': gap.file, + 'type': gap.gap_type, + 'lines': gap.lines[:10], # Limit + 'description': gap.description, + 'recommendation': gap.recommendation + }) + + # Add summary stats + stats = { + 'overall_statement_pct': (summary.statements[0] / summary.statements[1] * 100) if summary.statements[1] > 0 else 100, + 'overall_branch_pct': (summary.branches[0] / summary.branches[1] * 100) if summary.branches[1] > 0 else 100, + 'overall_function_pct': (summary.functions[0] / summary.functions[1] * 100) if summary.functions[1] > 0 else 100, + 'overall_line_pct': (summary.lines[0] / summary.lines[1] * 100) if summary.lines[1] > 0 else 100, + 'files_analyzed': summary.files_analyzed, + 'files_below_threshold': sum( + 1 for f in files.values() + if f.line_pct < self.threshold + ), + 'total_gaps': len(gaps), + 'critical_gaps': len(recommendations['critical']), + 'threshold': self.threshold, + 'meets_threshold': (summary.lines[0] / summary.lines[1] * 100) >= self.threshold if summary.lines[1] > 0 else True + } + + return gaps, { + 'recommendations': recommendations, + 'stats': stats + } + + def _analyze_file(self, 
file_path: str, coverage: FileCoverage) -> List[CoverageGap]: + """Analyze a single file for coverage gaps""" + gaps = [] + + # Determine if file is critical + is_critical = any( + re.search(pattern, file_path.lower()) + for pattern in self.CRITICAL_PATTERNS + ) + + is_service = any( + re.search(pattern, file_path.lower()) + for pattern in self.SERVICE_PATTERNS + ) + + # Determine severity based on file type and coverage level + if is_critical: + base_severity = 'critical' + target_threshold = 95 + elif is_service: + base_severity = 'high' + target_threshold = 85 + else: + base_severity = 'medium' + target_threshold = self.threshold + + # Check line coverage + if coverage.line_pct < target_threshold: + severity = base_severity if coverage.line_pct < 50 else self._lower_severity(base_severity) + + gaps.append(CoverageGap( + file=file_path, + gap_type='lines', + lines=coverage.uncovered_lines[:20], + severity=severity, + description=f"Line coverage at {coverage.line_pct:.1f}% (target: {target_threshold}%)", + recommendation=self._get_line_recommendation(coverage) + )) + + # Check branch coverage + if coverage.branch_pct < target_threshold - 5: # Allow 5% less for branches + severity = base_severity if coverage.branch_pct < 40 else self._lower_severity(base_severity) + + gaps.append(CoverageGap( + file=file_path, + gap_type='branches', + lines=[], + severity=severity, + description=f"Branch coverage at {coverage.branch_pct:.1f}%", + recommendation=f"Add tests for conditional logic. {len(coverage.uncovered_branches)} uncovered branches." + )) + + # Check function coverage + if coverage.function_pct < target_threshold: + severity = self._lower_severity(base_severity) + + gaps.append(CoverageGap( + file=file_path, + gap_type='functions', + lines=[], + severity=severity, + description=f"Function coverage at {coverage.function_pct:.1f}%", + recommendation="Add tests for uncovered functions/methods." 
+ )) + + return gaps + + def _lower_severity(self, severity: str) -> str: + """Lower severity by one level""" + mapping = { + 'critical': 'high', + 'high': 'medium', + 'medium': 'low', + 'low': 'low' + } + return mapping[severity] + + def _get_line_recommendation(self, coverage: FileCoverage) -> str: + """Generate recommendation for line coverage gaps""" + if coverage.line_pct < 30: + return "This file has very low coverage. Consider adding basic render/unit tests first." + elif coverage.line_pct < 60: + return "Add tests covering the main functionality and happy paths." + else: + return "Focus on edge cases and error handling paths." + + +class ReportGenerator: + """Generates coverage reports in various formats""" + + def __init__(self, verbose: bool = False): + self.verbose = verbose + + def generate_text_report( + self, + files: Dict[str, FileCoverage], + summary: CoverageSummary, + analysis: Dict[str, Any], + threshold: int + ) -> str: + """Generate a text report""" + lines = [] + + # Header + lines.append("=" * 60) + lines.append("COVERAGE ANALYSIS REPORT") + lines.append(f"Generated: {datetime.now().strftime('%Y-%m-%d %H:%M:%S')}") + lines.append("=" * 60) + lines.append("") + + # Overall summary + stats = analysis['stats'] + lines.append("OVERALL COVERAGE:") + lines.append(f" Statements: {stats['overall_statement_pct']:.1f}%") + lines.append(f" Branches: {stats['overall_branch_pct']:.1f}%") + lines.append(f" Functions: {stats['overall_function_pct']:.1f}%") + lines.append(f" Lines: {stats['overall_line_pct']:.1f}%") + lines.append("") + + # Threshold check + threshold_status = "PASS" if stats['meets_threshold'] else "FAIL" + lines.append(f"Threshold ({threshold}%): {threshold_status}") + lines.append(f"Files analyzed: {stats['files_analyzed']}") + lines.append(f"Files below threshold: {stats['files_below_threshold']}") + lines.append("") + + # Critical gaps + recs = analysis['recommendations'] + if recs['critical']: + lines.append("-" * 60) + 
lines.append("CRITICAL GAPS (requires immediate attention):") + for rec in recs['critical'][:5]: + lines.append(f" - {rec['file']}") + lines.append(f" {rec['description']}") + if rec['lines']: + lines.append(f" Uncovered lines: {', '.join(map(str, rec['lines'][:5]))}") + lines.append("") + + # High priority gaps + if recs['high']: + lines.append("-" * 60) + lines.append("HIGH PRIORITY GAPS:") + for rec in recs['high'][:5]: + lines.append(f" - {rec['file']}") + lines.append(f" {rec['description']}") + lines.append("") + + # Files below threshold + below_threshold = [ + (path, cov) for path, cov in files.items() + if cov.line_pct < threshold + ] + below_threshold.sort(key=lambda x: x[1].line_pct) + + if below_threshold: + lines.append("-" * 60) + lines.append(f"FILES BELOW {threshold}% THRESHOLD:") + for path, cov in below_threshold[:10]: + short_path = path.split('/')[-1] if '/' in path else path + lines.append(f" {cov.line_pct:5.1f}% {short_path}") + if len(below_threshold) > 10: + lines.append(f" ... and {len(below_threshold) - 10} more files") + lines.append("") + + # Recommendations + lines.append("-" * 60) + lines.append("RECOMMENDATIONS:") + all_recs = ( + recs['critical'][:2] + recs['high'][:2] + recs['medium'][:2] + ) + for i, rec in enumerate(all_recs[:5], 1): + lines.append(f" {i}. {rec['recommendation']}") + lines.append(f" File: {rec['file']}") + lines.append("") + + lines.append("=" * 60) + return '\n'.join(lines) + + def generate_html_report( + self, + files: Dict[str, FileCoverage], + summary: CoverageSummary, + analysis: Dict[str, Any], + threshold: int + ) -> str: + """Generate an HTML report""" + stats = analysis['stats'] + recs = analysis['recommendations'] + + html = f""" + + + + + Coverage Analysis Report + + + +

Coverage Analysis Report

+

Generated: {datetime.now().strftime('%Y-%m-%d %H:%M:%S')}

+ +
+
+
{stats['overall_statement_pct']:.1f}%
+
Statements
+
+
+
{stats['overall_branch_pct']:.1f}%
+
Branches
+
+
+
{stats['overall_function_pct']:.1f}%
+
Functions
+
+
+
{stats['overall_line_pct']:.1f}%
+
Lines
+
+
+ +

Threshold Status: {'PASS' if stats['meets_threshold'] else 'FAIL'}

+

Target: {threshold}% | Files Analyzed: {stats['files_analyzed']} | Below Threshold: {stats['files_below_threshold']}

+ +

Coverage Gaps

+ + + + + + + + + + +""" + + # Add gaps to table + all_gaps = ( + [(g, 'critical') for g in recs['critical']] + + [(g, 'high') for g in recs['high']] + + [(g, 'medium') for g in recs['medium'][:5]] + ) + + for gap, severity in all_gaps[:15]: + row_class = f"gap-{severity}" if severity in ['critical', 'high'] else "" + html += f""" + + + + + +""" + + html += """ +
SeverityFileIssueRecommendation
{severity.upper()}{gap['file'].split('/')[-1]}{gap['description']}{gap['recommendation']}
+ +

File Coverage Details

+ + + + + + + + + + + +""" + + # Sort files by line coverage + sorted_files = sorted(files.items(), key=lambda x: x[1].line_pct) + + for path, cov in sorted_files[:20]: + short_path = path.split('/')[-1] if '/' in path else path + html += f""" + + + + + + +""" + + html += """ +
FileStatementsBranchesFunctionsLines
{short_path}{cov.statement_pct:.1f}%{cov.branch_pct:.1f}%{cov.function_pct:.1f}%{cov.line_pct:.1f}%
+ + +""" + return html + + +class CoverageAnalyzerTool: + """Main tool class""" + + def __init__( + self, + coverage_path: str, + threshold: int = 80, + critical_paths: bool = False, + strict: bool = False, + output_format: str = 'text', + output_path: Optional[str] = None, + verbose: bool = False + ): + self.coverage_path = Path(coverage_path) + self.threshold = threshold + self.critical_paths = critical_paths + self.strict = strict + self.output_format = output_format + self.output_path = output_path + self.verbose = verbose + + def run(self) -> Dict[str, Any]: + """Run the coverage analysis""" + print(f"Analyzing coverage from: {self.coverage_path}") + + # Parse coverage data + parser = CoverageParser(self.verbose) + files, summary = parser.parse(self.coverage_path) + + print(f"Found coverage data for {len(files)} files") + + # Analyze coverage + analyzer = CoverageAnalyzer( + threshold=self.threshold, + critical_paths=self.critical_paths, + verbose=self.verbose + ) + gaps, analysis = analyzer.analyze(files, summary) + + # Generate report + reporter = ReportGenerator(self.verbose) + + if self.output_format == 'html': + report = reporter.generate_html_report(files, summary, analysis, self.threshold) + else: + report = reporter.generate_text_report(files, summary, analysis, self.threshold) + + # Output report + if self.output_path: + with open(self.output_path, 'w') as f: + f.write(report) + print(f"Report written to: {self.output_path}") + else: + print(report) + + # Return results + results = { + 'status': 'pass' if analysis['stats']['meets_threshold'] else 'fail', + 'threshold': self.threshold, + 'coverage': { + 'statements': analysis['stats']['overall_statement_pct'], + 'branches': analysis['stats']['overall_branch_pct'], + 'functions': analysis['stats']['overall_function_pct'], + 'lines': analysis['stats']['overall_line_pct'] + }, + 'files_analyzed': summary.files_analyzed, + 'files_below_threshold': analysis['stats']['files_below_threshold'], + 'total_gaps': 
analysis['stats']['total_gaps'], + 'critical_gaps': analysis['stats']['critical_gaps'] + } + + # Exit with error if strict mode and below threshold + if self.strict and not analysis['stats']['meets_threshold']: + print(f"\nFailed: Coverage {analysis['stats']['overall_line_pct']:.1f}% below threshold {self.threshold}%") sys.exit(1) - - def validate_target(self): - """Validate the target path exists and is accessible""" - if not self.target_path.exists(): - raise ValueError(f"Target path does not exist: {self.target_path}") - - if self.verbose: - print(f"✓ Target validated: {self.target_path}") - - def analyze(self): - """Perform the main analysis or operation""" - if self.verbose: - print("📊 Analyzing...") - - # Main logic here - self.results['status'] = 'success' - self.results['target'] = str(self.target_path) - self.results['findings'] = [] - - # Add analysis results - if self.verbose: - print(f"✓ Analysis complete: {len(self.results.get('findings', []))} findings") - - def generate_report(self): - """Generate and display the report""" - print("\n" + "="*50) - print("REPORT") - print("="*50) - print(f"Target: {self.results.get('target')}") - print(f"Status: {self.results.get('status')}") - print(f"Findings: {len(self.results.get('findings', []))}") - print("="*50 + "\n") + + return results + def main(): """Main entry point""" parser = argparse.ArgumentParser( - description="Coverage Analyzer" + description="Analyze Jest/Istanbul coverage reports and identify gaps", + formatter_class=argparse.RawDescriptionHelpFormatter, + epilog=""" +Examples: + # Basic analysis + python coverage_analyzer.py coverage/coverage-final.json + + # With threshold enforcement + python coverage_analyzer.py coverage/ --threshold 80 --strict + + # Generate HTML report + python coverage_analyzer.py coverage/ --format html --output report.html + + # Focus on critical paths + python coverage_analyzer.py coverage/ --critical-paths + """ ) parser.add_argument( - 'target', - help='Target path to 
analyze or process' + 'coverage', + help='Path to coverage file or directory' + ) + parser.add_argument( + '--threshold', '-t', + type=int, + default=80, + help='Coverage threshold percentage (default: 80)' + ) + parser.add_argument( + '--strict', + action='store_true', + help='Exit with error if coverage is below threshold' + ) + parser.add_argument( + '--critical-paths', + action='store_true', + help='Focus analysis on critical business paths' + ) + parser.add_argument( + '--format', '-f', + choices=['text', 'html', 'json'], + default='text', + help='Output format (default: text)' + ) + parser.add_argument( + '--output', '-o', + help='Output file path' ) parser.add_argument( '--verbose', '-v', @@ -85,30 +803,34 @@ def main(): parser.add_argument( '--json', action='store_true', - help='Output results as JSON' + help='Output results as JSON (summary only)' ) - parser.add_argument( - '--output', '-o', - help='Output file path' - ) - + args = parser.parse_args() - - tool = CoverageAnalyzer( - args.target, - verbose=args.verbose - ) - - results = tool.run() - - if args.json: - output = json.dumps(results, indent=2) - if args.output: - with open(args.output, 'w') as f: - f.write(output) - print(f"Results written to {args.output}") - else: - print(output) + + try: + tool = CoverageAnalyzerTool( + coverage_path=args.coverage, + threshold=args.threshold, + critical_paths=args.critical_paths, + strict=args.strict, + output_format=args.format, + output_path=args.output, + verbose=args.verbose + ) + + results = tool.run() + + if args.json: + print(json.dumps(results, indent=2)) + + except Exception as e: + print(f"Error: {e}") + if args.verbose: + import traceback + traceback.print_exc() + sys.exit(1) + if __name__ == '__main__': main() diff --git a/engineering-team/senior-qa/scripts/e2e_test_scaffolder.py b/engineering-team/senior-qa/scripts/e2e_test_scaffolder.py index e28610a..87e566e 100755 --- a/engineering-team/senior-qa/scripts/e2e_test_scaffolder.py +++ 
b/engineering-team/senior-qa/scripts/e2e_test_scaffolder.py @@ -1,81 +1,788 @@ #!/usr/bin/env python3 """ E2E Test Scaffolder -Automated tool for senior qa tasks + +Scans Next.js pages/app directory and generates Playwright test files +with common interactions, Page Object Model classes, and configuration. + +Usage: + python e2e_test_scaffolder.py src/app/ --output e2e/ + python e2e_test_scaffolder.py pages/ --include-pom --routes "/login,/dashboard" """ import os import sys import json import argparse +import re from pathlib import Path -from typing import Dict, List, Optional +from typing import Dict, List, Optional, Tuple, Set +from dataclasses import dataclass, field, asdict +from datetime import datetime + + +@dataclass +class RouteInfo: + """Information about a detected route""" + path: str # URL path e.g., /dashboard + file_path: str # File system path + route_type: str # 'page', 'layout', 'api', 'dynamic' + has_params: bool + params: List[str] + has_form: bool + has_auth: bool + interactions: List[str] + + +@dataclass +class TestSpec: + """A Playwright test specification""" + route: RouteInfo + test_cases: List[str] + imports: Set[str] = field(default_factory=set) + + +@dataclass +class PageObject: + """Page Object Model class definition""" + name: str + route: str + locators: List[Tuple[str, str, str]] # (name, selector, description) + methods: List[Tuple[str, str]] # (name, code) + + +class RouteScanner: + """Scans Next.js directories for routes""" + + # Pattern to detect page files + PAGE_PATTERNS = { + 'page.tsx', 'page.ts', 'page.jsx', 'page.js', # App Router + 'index.tsx', 'index.ts', 'index.jsx', 'index.js' # Pages Router + } + + # Patterns indicating specific features + FORM_PATTERNS = [ + r' bool: + """Detect if using App Router or Pages Router""" + # App Router: has 'app' directory with page.tsx files + # Pages Router: has 'pages' directory with index.tsx files + app_dir = self.source_path / 'app' + if app_dir.exists() and 
list(app_dir.rglob('page.*')): + return True + + return 'app' in str(self.source_path).lower() + + def scan(self, filter_routes: Optional[List[str]] = None) -> List[RouteInfo]: + """Scan for all routes""" + self._scan_directory(self.source_path) + + # Filter if specific routes requested + if filter_routes: + self.routes = [ + r for r in self.routes + if any(fr in r.path for fr in filter_routes) + ] + + return self.routes + + def _scan_directory(self, directory: Path, url_path: str = ''): + """Recursively scan directory for routes""" + if not directory.exists(): + return + + for item in directory.iterdir(): + if item.name.startswith('.') or item.name == 'node_modules': + continue + + if item.is_dir(): + # Handle route groups (parentheses) and dynamic routes + dir_name = item.name + + if dir_name.startswith('(') and dir_name.endswith(')'): + # Route group - doesn't add to URL path + self._scan_directory(item, url_path) + elif dir_name.startswith('[') and dir_name.endswith(']'): + # Dynamic route + param_name = dir_name[1:-1] + if param_name.startswith('...'): + # Catch-all route + new_path = f"{url_path}/[...{param_name[3:]}]" + else: + new_path = f"{url_path}/[{param_name}]" + self._scan_directory(item, new_path) + elif dir_name == 'api': + # API routes - scan but mark differently + self._scan_api_directory(item, '/api') + else: + new_path = f"{url_path}/{dir_name}" + self._scan_directory(item, new_path) + + elif item.is_file(): + self._process_file(item, url_path) + + def _process_file(self, file_path: Path, url_path: str): + """Process a potential page file""" + if file_path.name not in self.PAGE_PATTERNS: + return + + # Skip if it's a layout or other special file + if any(x in file_path.name for x in ['layout', 'loading', 'error', 'template']): + return + + try: + content = file_path.read_text(encoding='utf-8') + except Exception: + return + + # Determine route path + if url_path == '': + route_path = '/' + else: + route_path = url_path + + # Detect dynamic 
parameters + params = re.findall(r'\[([^\]]+)\]', route_path) + has_params = len(params) > 0 + + # Detect features + has_form = any(re.search(p, content) for p in self.FORM_PATTERNS) + has_auth = any(re.search(p, content, re.IGNORECASE) for p in self.AUTH_PATTERNS) + + # Detect interactions + interactions = [] + for interaction, pattern in self.INTERACTION_PATTERNS.items(): + if re.search(pattern, content): + interactions.append(interaction) + + route = RouteInfo( + path=route_path, + file_path=str(file_path), + route_type='dynamic' if has_params else 'page', + has_params=has_params, + params=params, + has_form=has_form, + has_auth=has_auth, + interactions=interactions + ) + + self.routes.append(route) + + if self.verbose: + print(f" Found route: {route_path}") + + def _scan_api_directory(self, directory: Path, url_path: str): + """Scan API routes (mark them differently)""" + for item in directory.iterdir(): + if item.is_dir(): + new_path = f"{url_path}/{item.name}" + self._scan_api_directory(item, new_path) + elif item.is_file() and item.suffix in {'.ts', '.tsx', '.js', '.jsx'}: + # API routes don't get E2E tests typically + pass + + +class TestGenerator: + """Generates Playwright test files""" + + def __init__(self, include_pom: bool = False, verbose: bool = False): + self.include_pom = include_pom + self.verbose = verbose + + def generate(self, route: RouteInfo) -> str: + """Generate a test file for a route""" + lines = [] + + # Imports + lines.append("import { test, expect } from '@playwright/test';") + + if self.include_pom: + page_class = self._get_page_class_name(route.path) + lines.append(f"import {{ {page_class} }} from './pages/{page_class}';") + + lines.append('') + + # Test describe block + route_name = route.path if route.path != '/' else 'Home' + lines.append(f"test.describe('{route_name}', () => {{") + + # Generate test cases based on route features + test_cases = self._generate_test_cases(route) + + for test_case in test_cases: + lines.append('') + 
lines.append(test_case) + + lines.append('});') + lines.append('') + + return '\n'.join(lines) + + def _generate_test_cases(self, route: RouteInfo) -> List[str]: + """Generate test cases based on route features""" + cases = [] + url = self._get_test_url(route) + + # Basic navigation test + cases.append(f''' test('loads successfully', async ({{ page }}) => {{ + await page.goto('{url}'); + await expect(page).toHaveURL(/{re.escape(route.path.replace('[', '').replace(']', '.*'))}/); + // TODO: Add specific content assertions + }});''') + + # Page title test + cases.append(f''' test('has correct title', async ({{ page }}) => {{ + await page.goto('{url}'); + // TODO: Update expected title + await expect(page).toHaveTitle(/.*/); + }});''') + + # Auth-related tests + if route.has_auth: + cases.append(f''' test('redirects unauthenticated users', async ({{ page }}) => {{ + await page.goto('{url}'); + // TODO: Verify redirect to login + // await expect(page).toHaveURL('/login'); + }}); + + test('allows authenticated access', async ({{ page }}) => {{ + // TODO: Set up authentication + // await page.context().addCookies([{{ name: 'session', value: '...' 
}}]); + await page.goto('{url}'); + await expect(page).toHaveURL(/{re.escape(route.path.replace('[', '').replace(']', '.*'))}/); + }});''') + + # Form tests + if route.has_form: + cases.append(f''' test('form submission works', async ({{ page }}) => {{ + await page.goto('{url}'); + + // TODO: Fill in form fields + // await page.getByLabel('Email').fill('test@example.com'); + // await page.getByLabel('Password').fill('password123'); + + // Submit form + // await page.getByRole('button', {{ name: 'Submit' }}).click(); + + // TODO: Assert success state + // await expect(page.getByText('Success')).toBeVisible(); + }}); + + test('shows validation errors', async ({{ page }}) => {{ + await page.goto('{url}'); + + // Submit without filling required fields + await page.getByRole('button', {{ name: /submit/i }}).click(); + + // TODO: Assert validation errors shown + // await expect(page.getByText('Required')).toBeVisible(); + }});''') + + # Click interaction tests + if 'click' in route.interactions: + cases.append(f''' test('button interactions work', async ({{ page }}) => {{ + await page.goto('{url}'); + + // TODO: Find and click interactive elements + // const button = page.getByRole('button', {{ name: '...' }}); + // await button.click(); + // await expect(page.getByText('...')).toBeVisible(); + }});''') + + # Navigation tests + if 'navigation' in route.interactions: + cases.append(f''' test('navigation works correctly', async ({{ page }}) => {{ + await page.goto('{url}'); + + // TODO: Click navigation links + // await page.getByRole('link', {{ name: '...' 
}}).click(); + // await expect(page).toHaveURL('...'); + }});''') + + # Modal tests + if 'modal' in route.interactions: + cases.append(f''' test('modal opens and closes', async ({{ page }}) => {{ + await page.goto('{url}'); + + // TODO: Open modal + // await page.getByRole('button', {{ name: 'Open' }}).click(); + // await expect(page.getByRole('dialog')).toBeVisible(); + + // TODO: Close modal + // await page.getByRole('button', {{ name: 'Close' }}).click(); + // await expect(page.getByRole('dialog')).not.toBeVisible(); + }});''') + + # Dynamic route test + if route.has_params: + cases.append(f''' test('handles dynamic parameters', async ({{ page }}) => {{ + // TODO: Test with different parameter values + await page.goto('{url}'); + await expect(page.locator('body')).toBeVisible(); + }});''') + + return cases + + def _get_test_url(self, route: RouteInfo) -> str: + """Get a testable URL for the route""" + url = route.path + + # Replace dynamic segments with example values + for param in route.params: + if param.startswith('...'): + url = url.replace(f'[...{param[3:]}]', 'example/path') + else: + url = url.replace(f'[{param}]', 'test-id') + + return url + + def _get_page_class_name(self, route_path: str) -> str: + """Get Page Object class name from route path""" + if route_path == '/': + return 'HomePage' + + # Remove leading slash and convert to PascalCase + name = route_path.strip('/') + name = re.sub(r'\[.*?\]', '', name) # Remove dynamic segments + parts = name.split('/') + return ''.join(p.title() for p in parts if p) + 'Page' + + +class PageObjectGenerator: + """Generates Page Object Model classes""" + + def __init__(self, verbose: bool = False): + self.verbose = verbose + + def generate(self, route: RouteInfo) -> str: + """Generate a Page Object class for a route""" + class_name = self._get_class_name(route.path) + url = route.path + + # Replace dynamic segments + for param in route.params: + url = url.replace(f'[{param}]', f'${{{param}}}') + + lines = [] + + 
# Imports + lines.append("import { Page, Locator, expect } from '@playwright/test';") + lines.append('') + + # Class definition + lines.append(f"export class {class_name} {{") + lines.append(" readonly page: Page;") + + # Common locators + locators = self._get_locators(route) + for name, selector, _ in locators: + lines.append(f" readonly {name}: Locator;") + + lines.append('') + + # Constructor + lines.append(" constructor(page: Page) {") + lines.append(" this.page = page;") + for name, selector, _ in locators: + lines.append(f" this.{name} = page.{selector};") + lines.append(" }") + lines.append('') + + # Navigation method + if route.has_params: + param_args = ', '.join(f'{p}: string' for p in route.params) + url_parts = url.split('/') + url_template = '/'.join( + f'${{{p}}}' if f'${{{p}}}' in part else part + for p, part in zip(route.params, url_parts) + ) + lines.append(f" async goto({param_args}) {{") + lines.append(f" await this.page.goto(`{url_template}`);") + else: + lines.append(" async goto() {") + lines.append(f" await this.page.goto('{route.path}');") + lines.append(" }") + lines.append('') + + # Add methods based on features + methods = self._get_methods(route, locators) + for method_name, method_code in methods: + lines.append(method_code) + lines.append('') + + lines.append('}') + lines.append('') + + return '\n'.join(lines) + + def _get_class_name(self, route_path: str) -> str: + """Get class name from route path""" + if route_path == '/': + return 'HomePage' + + name = route_path.strip('/') + name = re.sub(r'\[.*?\]', '', name) + parts = name.split('/') + return ''.join(p.title() for p in parts if p) + 'Page' + + def _get_locators(self, route: RouteInfo) -> List[Tuple[str, str, str]]: + """Get common locators for a page""" + locators = [] + + # Always add a heading locator + locators.append(('heading', "getByRole('heading', { level: 1 })", 'Main heading')) + + if route.has_form: + locators.extend([ + ('submitButton', "getByRole('button', { name: 
/submit/i })", 'Form submit button'), + ('form', "locator('form')", 'Main form element'), + ]) + + if route.has_auth: + locators.extend([ + ('emailInput', "getByLabel('Email')", 'Email input field'), + ('passwordInput', "getByLabel('Password')", 'Password input field'), + ]) + + if 'navigation' in route.interactions: + locators.append(('navLinks', "getByRole('navigation').getByRole('link')", 'Navigation links')) + + if 'modal' in route.interactions: + locators.append(('modal', "getByRole('dialog')", 'Modal dialog')) + + return locators + + def _get_methods( + self, + route: RouteInfo, + locators: List[Tuple[str, str, str]] + ) -> List[Tuple[str, str]]: + """Get methods for the page object""" + methods = [] + + # Wait for load method + methods.append(('waitForLoad', ''' async waitForLoad() { + await expect(this.heading).toBeVisible(); + }''')) + + if route.has_form: + methods.append(('submitForm', ''' async submitForm() { + await this.submitButton.click(); + }''')) + + if route.has_auth: + methods.append(('login', ''' async login(email: string, password: string) { + await this.emailInput.fill(email); + await this.passwordInput.fill(password); + await this.submitButton.click(); + }''')) + + if 'modal' in route.interactions: + methods.append(('waitForModal', ''' async waitForModal() { + await expect(this.modal).toBeVisible(); + }''')) + methods.append(('closeModal', ''' async closeModal() { + await this.page.keyboard.press('Escape'); + await expect(this.modal).not.toBeVisible(); + }''')) + + return methods + + +class ConfigGenerator: + """Generates Playwright configuration""" + + def generate_config(self) -> str: + """Generate playwright.config.ts""" + return '''import { defineConfig, devices } from '@playwright/test'; + +/** + * Playwright Test Configuration + * @see https://playwright.dev/docs/test-configuration + */ +export default defineConfig({ + testDir: './e2e', + fullyParallel: true, + forbidOnly: !!process.env.CI, + retries: process.env.CI ? 
2 : 0, + workers: process.env.CI ? 1 : undefined, + reporter: [ + ['html', { open: 'never' }], + ['list'], + ], + use: { + baseURL: process.env.BASE_URL || 'http://localhost:3000', + trace: 'on-first-retry', + screenshot: 'only-on-failure', + }, + projects: [ + { + name: 'chromium', + use: { ...devices['Desktop Chrome'] }, + }, + { + name: 'firefox', + use: { ...devices['Desktop Firefox'] }, + }, + { + name: 'webkit', + use: { ...devices['Desktop Safari'] }, + }, + { + name: 'Mobile Chrome', + use: { ...devices['Pixel 5'] }, + }, + ], + webServer: { + command: 'npm run dev', + url: 'http://localhost:3000', + reuseExistingServer: !process.env.CI, + timeout: 120 * 1000, + }, +}); +''' + + def generate_auth_fixture(self) -> str: + """Generate authentication fixture""" + return '''import { test as base, Page } from '@playwright/test'; + +interface AuthFixtures { + authenticatedPage: Page; +} + +export const test = base.extend({ + authenticatedPage: async ({ page }, use) => { + // Option 1: Login via UI + // await page.goto('/login'); + // await page.getByLabel('Email').fill(process.env.TEST_EMAIL || 'test@example.com'); + // await page.getByLabel('Password').fill(process.env.TEST_PASSWORD || 'password'); + // await page.getByRole('button', { name: 'Sign in' }).click(); + // await page.waitForURL('/dashboard'); + + // Option 2: Login via API + // const response = await page.request.post('/api/auth/login', { + // data: { + // email: process.env.TEST_EMAIL, + // password: process.env.TEST_PASSWORD, + // }, + // }); + // const { token } = await response.json(); + // await page.context().addCookies([ + // { name: 'auth-token', value: token, domain: 'localhost', path: '/' } + // ]); + + await use(page); + }, +}); + +export { expect } from '@playwright/test'; +''' + class E2ETestScaffolder: - """Main class for e2e test scaffolder functionality""" - - def __init__(self, target_path: str, verbose: bool = False): - self.target_path = Path(target_path) + """Main scaffolder 
class""" + + def __init__( + self, + source_path: str, + output_path: Optional[str] = None, + include_pom: bool = False, + routes: Optional[str] = None, + verbose: bool = False + ): + self.source_path = Path(source_path) + self.output_path = Path(output_path) if output_path else Path('e2e') + self.include_pom = include_pom + self.routes_filter = routes.split(',') if routes else None self.verbose = verbose - self.results = {} - + self.results = { + 'status': 'success', + 'source': str(self.source_path), + 'routes': [], + 'generated_files': [], + 'summary': {} + } + def run(self) -> Dict: - """Execute the main functionality""" - print(f"🚀 Running {self.__class__.__name__}...") - print(f"📁 Target: {self.target_path}") - - try: - self.validate_target() - self.analyze() - self.generate_report() - - print("✅ Completed successfully!") - return self.results - - except Exception as e: - print(f"❌ Error: {e}") - sys.exit(1) - - def validate_target(self): - """Validate the target path exists and is accessible""" - if not self.target_path.exists(): - raise ValueError(f"Target path does not exist: {self.target_path}") - - if self.verbose: - print(f"✓ Target validated: {self.target_path}") - - def analyze(self): - """Perform the main analysis or operation""" - if self.verbose: - print("📊 Analyzing...") - - # Main logic here - self.results['status'] = 'success' - self.results['target'] = str(self.target_path) - self.results['findings'] = [] - - # Add analysis results - if self.verbose: - print(f"✓ Analysis complete: {len(self.results.get('findings', []))} findings") - - def generate_report(self): - """Generate and display the report""" - print("\n" + "="*50) - print("REPORT") - print("="*50) - print(f"Target: {self.results.get('target')}") - print(f"Status: {self.results.get('status')}") - print(f"Findings: {len(self.results.get('findings', []))}") - print("="*50 + "\n") + """Run the scaffolder""" + print(f"Scanning: {self.source_path}") + + # Validate source path + if not 
self.source_path.exists(): + raise ValueError(f"Source path does not exist: {self.source_path}") + + # Scan for routes + scanner = RouteScanner(self.source_path, self.verbose) + routes = scanner.scan(self.routes_filter) + + print(f"Found {len(routes)} routes") + + # Create output directories + self.output_path.mkdir(parents=True, exist_ok=True) + if self.include_pom: + (self.output_path / 'pages').mkdir(exist_ok=True) + + # Generate test files + test_generator = TestGenerator(self.include_pom, self.verbose) + pom_generator = PageObjectGenerator(self.verbose) if self.include_pom else None + config_generator = ConfigGenerator() + + # Generate tests for each route + for route in routes: + # Generate test file + test_content = test_generator.generate(route) + test_filename = self._get_test_filename(route.path) + test_path = self.output_path / test_filename + + test_path.write_text(test_content, encoding='utf-8') + + self.results['generated_files'].append({ + 'type': 'test', + 'route': route.path, + 'path': str(test_path) + }) + + print(f" {test_filename}") + + # Generate Page Object if enabled + if self.include_pom: + pom_content = pom_generator.generate(route) + pom_filename = self._get_pom_filename(route.path) + pom_path = self.output_path / 'pages' / pom_filename + + pom_path.write_text(pom_content, encoding='utf-8') + + self.results['generated_files'].append({ + 'type': 'page_object', + 'route': route.path, + 'path': str(pom_path) + }) + + print(f" pages/{pom_filename}") + + # Generate config files if not exists + config_path = Path('playwright.config.ts') + if not config_path.exists(): + config_content = config_generator.generate_config() + config_path.write_text(config_content, encoding='utf-8') + self.results['generated_files'].append({ + 'type': 'config', + 'path': str(config_path) + }) + print(f" playwright.config.ts") + + # Generate auth fixture + fixtures_dir = self.output_path / 'fixtures' + fixtures_dir.mkdir(exist_ok=True) + auth_fixture_path = 
fixtures_dir / 'auth.ts' + if not auth_fixture_path.exists(): + auth_content = config_generator.generate_auth_fixture() + auth_fixture_path.write_text(auth_content, encoding='utf-8') + self.results['generated_files'].append({ + 'type': 'fixture', + 'path': str(auth_fixture_path) + }) + print(f" fixtures/auth.ts") + + # Store route info + self.results['routes'] = [asdict(r) for r in routes] + + # Summary + self.results['summary'] = { + 'total_routes': len(routes), + 'total_files': len(self.results['generated_files']), + 'output_directory': str(self.output_path), + 'include_pom': self.include_pom + } + + print('') + print(f"Summary: {len(routes)} routes, {len(self.results['generated_files'])} files generated") + + return self.results + + def _get_test_filename(self, route_path: str) -> str: + """Get test filename from route path""" + if route_path == '/': + return 'home.spec.ts' + + name = route_path.strip('/') + name = re.sub(r'\[([^\]]+)\]', r'\1', name) # [id] -> id + name = name.replace('/', '-') + return f"{name}.spec.ts" + + def _get_pom_filename(self, route_path: str) -> str: + """Get Page Object filename from route path""" + if route_path == '/': + return 'HomePage.ts' + + name = route_path.strip('/') + name = re.sub(r'\[.*?\]', '', name) + parts = name.split('/') + class_name = ''.join(p.title() for p in parts if p) + 'Page' + return f"{class_name}.ts" + def main(): """Main entry point""" parser = argparse.ArgumentParser( - description="E2E Test Scaffolder" + description="Generate Playwright E2E tests from Next.js routes", + formatter_class=argparse.RawDescriptionHelpFormatter, + epilog=""" +Examples: + # Scaffold E2E tests for App Router + python e2e_test_scaffolder.py src/app/ --output e2e/ + + # Include Page Object Models + python e2e_test_scaffolder.py src/app/ --include-pom + + # Generate for specific routes only + python e2e_test_scaffolder.py src/app/ --routes "/login,/dashboard,/checkout" + + # Verbose output + python e2e_test_scaffolder.py pages/ -v 
+ """ ) parser.add_argument( - 'target', - help='Target path to analyze or process' + 'source', + help='Source directory (app/ or pages/)' + ) + parser.add_argument( + '--output', '-o', + default='e2e', + help='Output directory for test files (default: e2e/)' + ) + parser.add_argument( + '--include-pom', + action='store_true', + help='Generate Page Object Model classes' + ) + parser.add_argument( + '--routes', + help='Comma-separated list of routes to generate tests for' ) parser.add_argument( '--verbose', '-v', @@ -87,28 +794,27 @@ def main(): action='store_true', help='Output results as JSON' ) - parser.add_argument( - '--output', '-o', - help='Output file path' - ) - + args = parser.parse_args() - - tool = E2ETestScaffolder( - args.target, - verbose=args.verbose - ) - - results = tool.run() - - if args.json: - output = json.dumps(results, indent=2) - if args.output: - with open(args.output, 'w') as f: - f.write(output) - print(f"Results written to {args.output}") - else: - print(output) + + try: + scaffolder = E2ETestScaffolder( + source_path=args.source, + output_path=args.output, + include_pom=args.include_pom, + routes=args.routes, + verbose=args.verbose + ) + + results = scaffolder.run() + + if args.json: + print(json.dumps(results, indent=2)) + + except Exception as e: + print(f"Error: {e}") + sys.exit(1) + if __name__ == '__main__': main() diff --git a/engineering-team/senior-qa/scripts/test_suite_generator.py b/engineering-team/senior-qa/scripts/test_suite_generator.py index fed6e5e..45dd56e 100755 --- a/engineering-team/senior-qa/scripts/test_suite_generator.py +++ b/engineering-team/senior-qa/scripts/test_suite_generator.py @@ -1,81 +1,572 @@ #!/usr/bin/env python3 """ Test Suite Generator -Automated tool for senior qa tasks + +Scans React/TypeScript components and generates Jest + React Testing Library +test stubs with proper structure, accessibility tests, and common patterns. 
+ +Usage: + python test_suite_generator.py src/components/ --output __tests__/ + python test_suite_generator.py src/ --include-a11y --scan-only """ import os import sys import json import argparse +import re from pathlib import Path -from typing import Dict, List, Optional +from typing import Dict, List, Optional, Tuple, Set +from dataclasses import dataclass, field, asdict +from datetime import datetime + + +@dataclass +class ComponentInfo: + """Information about a detected React component""" + name: str + file_path: str + component_type: str # 'functional', 'class', 'forwardRef', 'memo' + has_props: bool + props: List[str] + has_hooks: List[str] + has_context: bool + has_effects: bool + has_state: bool + has_callbacks: bool + exports: List[str] + imports: List[str] + + +@dataclass +class TestCase: + """A single test case to generate""" + name: str + description: str + test_type: str # 'render', 'interaction', 'a11y', 'props', 'state' + code: str + + +@dataclass +class TestFile: + """A complete test file to generate""" + component: ComponentInfo + test_cases: List[TestCase] = field(default_factory=list) + imports: Set[str] = field(default_factory=set) + + +class ComponentScanner: + """Scans source files for React components""" + + # Patterns for detecting React components + FUNCTIONAL_COMPONENT = re.compile( + r'^(?:export\s+)?(?:const|function)\s+([A-Z][a-zA-Z0-9]*)\s*[=:]?\s*(?:\([^)]*\)\s*(?::\s*[^=]+)?\s*=>|function\s*\([^)]*\))', + re.MULTILINE + ) + + ARROW_COMPONENT = re.compile( + r'^(?:export\s+)?const\s+([A-Z][a-zA-Z0-9]*)\s*=\s*(?:React\.)?(?:memo|forwardRef)?\s*\(', + re.MULTILINE + ) + + CLASS_COMPONENT = re.compile( + r'^(?:export\s+)?class\s+([A-Z][a-zA-Z0-9]*)\s+extends\s+(?:React\.)?(?:Component|PureComponent)', + re.MULTILINE + ) + + HOOK_PATTERN = re.compile(r'use([A-Z][a-zA-Z0-9]*)\s*\(') + PROPS_PATTERN = re.compile(r'(?:props\.|{\s*([^}]+)\s*}\s*=\s*props|:\s*([A-Z][a-zA-Z0-9]*Props))') + CONTEXT_PATTERN = 
re.compile(r'useContext\s*\(|\.Provider|\.Consumer') + EFFECT_PATTERN = re.compile(r'useEffect\s*\(|useLayoutEffect\s*\(') + STATE_PATTERN = re.compile(r'useState\s*\(|useReducer\s*\(|this\.state') + CALLBACK_PATTERN = re.compile(r'on[A-Z][a-zA-Z]*\s*[=:]|handle[A-Z][a-zA-Z]*\s*[=:]') + + def __init__(self, source_path: Path, verbose: bool = False): + self.source_path = source_path + self.verbose = verbose + self.components: List[ComponentInfo] = [] + + def scan(self) -> List[ComponentInfo]: + """Scan the source path for React components""" + extensions = {'.tsx', '.jsx', '.ts', '.js'} + + for root, dirs, files in os.walk(self.source_path): + # Skip node_modules and test directories + dirs[:] = [d for d in dirs if d not in {'node_modules', '__tests__', 'test', 'tests', '.git'}] + + for file in files: + if Path(file).suffix in extensions: + file_path = Path(root) / file + self._scan_file(file_path) + + return self.components + + def _scan_file(self, file_path: Path): + """Scan a single file for components""" + try: + content = file_path.read_text(encoding='utf-8') + except Exception as e: + if self.verbose: + print(f"Warning: Could not read {file_path}: {e}") + return + + # Skip test files + if '.test.' in file_path.name or '.spec.' 
in file_path.name: + return + + # Skip files without JSX indicators + if 'return' not in content or ('<' not in content and 'jsx' not in content.lower()): + # Could still be a hook + if not self.HOOK_PATTERN.search(content): + return + + # Find functional components + for match in self.FUNCTIONAL_COMPONENT.finditer(content): + name = match.group(1) + self._add_component(name, file_path, content, 'functional') + + # Find arrow function components + for match in self.ARROW_COMPONENT.finditer(content): + name = match.group(1) + component_type = 'functional' + if 'memo(' in content: + component_type = 'memo' + elif 'forwardRef(' in content: + component_type = 'forwardRef' + self._add_component(name, file_path, content, component_type) + + # Find class components + for match in self.CLASS_COMPONENT.finditer(content): + name = match.group(1) + self._add_component(name, file_path, content, 'class') + + def _add_component(self, name: str, file_path: Path, content: str, component_type: str): + """Add a component to the list if not already present""" + # Check if already added + for comp in self.components: + if comp.name == name and comp.file_path == str(file_path): + return + + # Extract hooks used + hooks = list(set(self.HOOK_PATTERN.findall(content))) + + # Extract prop names (simplified) + props = [] + props_match = self.PROPS_PATTERN.search(content) + if props_match: + props_str = props_match.group(1) or '' + props = [p.strip().split(':')[0].strip() for p in props_str.split(',') if p.strip()] + + # Extract imports + imports = re.findall(r"import\s+(?:{[^}]+}|[^;]+)\s+from\s+['\"]([^'\"]+)['\"]", content) + + # Extract exports + exports = re.findall(r"export\s+(?:default\s+)?(?:const|function|class)\s+(\w+)", content) + + component = ComponentInfo( + name=name, + file_path=str(file_path), + component_type=component_type, + has_props=bool(props) or 'props' in content.lower(), + props=props[:10], # Limit props + has_hooks=hooks[:10], # Limit hooks + 
has_context=bool(self.CONTEXT_PATTERN.search(content)), + has_effects=bool(self.EFFECT_PATTERN.search(content)), + has_state=bool(self.STATE_PATTERN.search(content)), + has_callbacks=bool(self.CALLBACK_PATTERN.search(content)), + exports=exports[:5], + imports=imports[:10] + ) + + self.components.append(component) + + if self.verbose: + print(f" Found: {name} ({component_type}) in {file_path.name}") + + +class TestGenerator: + """Generates Jest + React Testing Library test files""" + + def __init__(self, include_a11y: bool = False, template: Optional[str] = None): + self.include_a11y = include_a11y + self.template = template + + def generate(self, component: ComponentInfo) -> TestFile: + """Generate a test file for a component""" + test_file = TestFile(component=component) + + # Build imports + test_file.imports.add("import { render, screen } from '@testing-library/react';") + + if component.has_callbacks: + test_file.imports.add("import userEvent from '@testing-library/user-event';") + + if component.has_effects or component.has_state: + test_file.imports.add("import { waitFor } from '@testing-library/react';") + + if self.include_a11y: + test_file.imports.add("import { axe, toHaveNoViolations } from 'jest-axe';") + + # Add component import + relative_path = self._get_relative_import(component.file_path) + test_file.imports.add(f"import {{ {component.name} }} from '{relative_path}';") + + # Generate test cases + test_file.test_cases.append(self._generate_render_test(component)) + + if component.has_props: + test_file.test_cases.append(self._generate_props_test(component)) + + if component.has_callbacks: + test_file.test_cases.append(self._generate_interaction_test(component)) + + if component.has_state: + test_file.test_cases.append(self._generate_state_test(component)) + + if self.include_a11y: + test_file.test_cases.append(self._generate_a11y_test(component)) + + return test_file + + def _get_relative_import(self, file_path: str) -> str: + """Get the relative 
import path for a component""" + path = Path(file_path) + # Remove extension + stem = path.stem + if stem == 'index': + return f"../{path.parent.name}" + return f"../{path.parent.name}/{stem}" + + def _generate_render_test(self, component: ComponentInfo) -> TestCase: + """Generate a basic render test""" + props_str = self._get_mock_props(component) + + code = f''' it('renders without crashing', () => {{ + render(<{component.name}{props_str} />); + }}); + + it('renders expected content', () => {{ + render(<{component.name}{props_str} />); + // TODO: Add specific content assertions + // expect(screen.getByRole('...')).toBeInTheDocument(); + }});''' + + return TestCase( + name='render', + description='Basic render tests', + test_type='render', + code=code + ) + + def _generate_props_test(self, component: ComponentInfo) -> TestCase: + """Generate props-related tests""" + props = component.props[:3] if component.props else ['prop1'] + + prop_tests = [] + for prop in props: + prop_tests.append(f''' it('renders with {prop} prop', () => {{ + render(<{component.name} {prop}="test-value" />); + // TODO: Assert that {prop} affects rendering + }});''') + + code = '\n\n'.join(prop_tests) + + return TestCase( + name='props', + description='Props handling tests', + test_type='props', + code=code + ) + + def _generate_interaction_test(self, component: ComponentInfo) -> TestCase: + """Generate user interaction tests""" + code = f''' it('handles user interaction', async () => {{ + const user = userEvent.setup(); + const handleClick = jest.fn(); + + render(<{component.name} onClick={{handleClick}} />); + + // TODO: Find the interactive element + const button = screen.getByRole('button'); + await user.click(button); + + expect(handleClick).toHaveBeenCalledTimes(1); + }}); + + it('handles keyboard navigation', async () => {{ + const user = userEvent.setup(); + render(<{component.name} />); + + // TODO: Add keyboard interaction tests + // await user.tab(); + // 
expect(screen.getByRole('...')).toHaveFocus(); + }});''' + + return TestCase( + name='interaction', + description='User interaction tests', + test_type='interaction', + code=code + ) + + def _generate_state_test(self, component: ComponentInfo) -> TestCase: + """Generate state-related tests""" + code = f''' it('updates state correctly', async () => {{ + const user = userEvent.setup(); + render(<{component.name} />); + + // TODO: Trigger state change + // await user.click(screen.getByRole('button')); + + // TODO: Assert state change is reflected in UI + await waitFor(() => {{ + // expect(screen.getByText('...')).toBeInTheDocument(); + }}); + }});''' + + return TestCase( + name='state', + description='State management tests', + test_type='state', + code=code + ) + + def _generate_a11y_test(self, component: ComponentInfo) -> TestCase: + """Generate accessibility test""" + props_str = self._get_mock_props(component) + + code = f''' it('has no accessibility violations', async () => {{ + const {{ container }} = render(<{component.name}{props_str} />); + const results = await axe(container); + expect(results).toHaveNoViolations(); + }});''' + + return TestCase( + name='accessibility', + description='Accessibility tests', + test_type='a11y', + code=code + ) + + def _get_mock_props(self, component: ComponentInfo) -> str: + """Generate mock props string for a component""" + if not component.has_props or not component.props: + return '' + + # Return empty for simplicity, user should fill in + return ' {...mockProps}' + + def format_test_file(self, test_file: TestFile) -> str: + """Format the complete test file content""" + lines = [] + + # Imports + lines.append("import '@testing-library/jest-dom';") + for imp in sorted(test_file.imports): + lines.append(imp) + + lines.append('') + + # A11y setup if needed + if self.include_a11y: + lines.append('expect.extend(toHaveNoViolations);') + lines.append('') + + # Mock props if component has props + if test_file.component.has_props: + 
lines.append('// TODO: Define mock props') + lines.append('const mockProps = {};') + lines.append('') + + # Describe block + lines.append(f"describe('{test_file.component.name}', () => {{") + + # Test cases grouped by type + test_types = {} + for test_case in test_file.test_cases: + if test_case.test_type not in test_types: + test_types[test_case.test_type] = [] + test_types[test_case.test_type].append(test_case) + + for test_type, cases in test_types.items(): + for case in cases: + lines.append('') + lines.append(f' // {case.description}') + lines.append(case.code) + + lines.append('});') + lines.append('') + + return '\n'.join(lines) + class TestSuiteGenerator: - """Main class for test suite generator functionality""" - - def __init__(self, target_path: str, verbose: bool = False): - self.target_path = Path(target_path) + """Main class for generating test suites""" + + def __init__( + self, + source_path: str, + output_path: Optional[str] = None, + include_a11y: bool = False, + scan_only: bool = False, + verbose: bool = False, + template: Optional[str] = None + ): + self.source_path = Path(source_path) + self.output_path = Path(output_path) if output_path else None + self.include_a11y = include_a11y + self.scan_only = scan_only self.verbose = verbose - self.results = {} - + self.template = template + self.results = { + 'status': 'success', + 'source': str(self.source_path), + 'components': [], + 'generated_files': [], + 'summary': {} + } + def run(self) -> Dict: - """Execute the main functionality""" - print(f"🚀 Running {self.__class__.__name__}...") - print(f"📁 Target: {self.target_path}") - - try: - self.validate_target() - self.analyze() - self.generate_report() - - print("✅ Completed successfully!") + """Execute the test suite generation""" + print(f"Scanning: {self.source_path}") + + # Validate source path + if not self.source_path.exists(): + raise ValueError(f"Source path does not exist: {self.source_path}") + + # Scan for components + scanner = 
ComponentScanner(self.source_path, self.verbose) + components = scanner.scan() + + print(f"Found {len(components)} React components") + + if self.scan_only: + self._report_scan_results(components) return self.results - - except Exception as e: - print(f"❌ Error: {e}") - sys.exit(1) - - def validate_target(self): - """Validate the target path exists and is accessible""" - if not self.target_path.exists(): - raise ValueError(f"Target path does not exist: {self.target_path}") - - if self.verbose: - print(f"✓ Target validated: {self.target_path}") - - def analyze(self): - """Perform the main analysis or operation""" - if self.verbose: - print("📊 Analyzing...") - - # Main logic here - self.results['status'] = 'success' - self.results['target'] = str(self.target_path) - self.results['findings'] = [] - - # Add analysis results - if self.verbose: - print(f"✓ Analysis complete: {len(self.results.get('findings', []))} findings") - - def generate_report(self): - """Generate and display the report""" - print("\n" + "="*50) - print("REPORT") - print("="*50) - print(f"Target: {self.results.get('target')}") - print(f"Status: {self.results.get('status')}") - print(f"Findings: {len(self.results.get('findings', []))}") - print("="*50 + "\n") + + # Generate tests + if not self.output_path: + # Default to __tests__ in source directory + self.output_path = self.source_path / '__tests__' + + self.output_path.mkdir(parents=True, exist_ok=True) + + generator = TestGenerator(self.include_a11y, self.template) + + total_tests = 0 + for component in components: + test_file = generator.generate(component) + content = generator.format_test_file(test_file) + + # Write test file + test_filename = f"{component.name}.test.tsx" + test_path = self.output_path / test_filename + + test_path.write_text(content, encoding='utf-8') + + test_count = len(test_file.test_cases) + total_tests += test_count + + self.results['generated_files'].append({ + 'component': component.name, + 'path': str(test_path), + 
'test_cases': test_count + }) + + print(f" {test_filename} ({test_count} test cases)") + + # Store component info + self.results['components'] = [asdict(c) for c in components] + + # Summary + self.results['summary'] = { + 'total_components': len(components), + 'total_files': len(self.results['generated_files']), + 'total_test_cases': total_tests, + 'output_directory': str(self.output_path) + } + + print('') + print(f"Summary: {len(components)} test files, {total_tests} test cases") + + return self.results + + def _report_scan_results(self, components: List[ComponentInfo]): + """Report scan results without generating tests""" + print('') + print("=" * 60) + print("COMPONENT SCAN RESULTS") + print("=" * 60) + + # Group by type + by_type = {} + for comp in components: + comp_type = comp.component_type + if comp_type not in by_type: + by_type[comp_type] = [] + by_type[comp_type].append(comp) + + for comp_type, comps in sorted(by_type.items()): + print(f"\n{comp_type.upper()} COMPONENTS ({len(comps)}):") + for comp in comps: + hooks_str = f" [hooks: {', '.join(comp.has_hooks[:3])}]" if comp.has_hooks else "" + state_str = " [stateful]" if comp.has_state else "" + print(f" - {comp.name}{hooks_str}{state_str}") + print(f" {comp.file_path}") + + print('') + print("=" * 60) + print(f"Total: {len(components)} components") + print("=" * 60) + + self.results['components'] = [asdict(c) for c in components] + self.results['summary'] = { + 'total_components': len(components), + 'by_type': {k: len(v) for k, v in by_type.items()} + } + def main(): """Main entry point""" parser = argparse.ArgumentParser( - description="Test Suite Generator" + description="Generate Jest + React Testing Library test stubs for React components", + formatter_class=argparse.RawDescriptionHelpFormatter, + epilog=""" +Examples: + # Scan and generate tests + python test_suite_generator.py src/components/ --output __tests__/ + + # Scan only (don't generate) + python test_suite_generator.py src/components/ 
--scan-only + + # Include accessibility tests + python test_suite_generator.py src/ --include-a11y --output tests/ + + # Verbose output + python test_suite_generator.py src/components/ -v + """ ) parser.add_argument( - 'target', - help='Target path to analyze or process' + 'source', + help='Source directory containing React components' + ) + parser.add_argument( + '--output', '-o', + help='Output directory for test files (default: /__tests__/)' + ) + parser.add_argument( + '--include-a11y', + action='store_true', + help='Include accessibility tests using jest-axe' + ) + parser.add_argument( + '--scan-only', + action='store_true', + help='Scan and report components without generating tests' + ) + parser.add_argument( + '--template', + help='Custom template file for test generation' ) parser.add_argument( '--verbose', '-v', @@ -87,28 +578,28 @@ def main(): action='store_true', help='Output results as JSON' ) - parser.add_argument( - '--output', '-o', - help='Output file path' - ) - + args = parser.parse_args() - - tool = TestSuiteGenerator( - args.target, - verbose=args.verbose - ) - - results = tool.run() - - if args.json: - output = json.dumps(results, indent=2) - if args.output: - with open(args.output, 'w') as f: - f.write(output) - print(f"Results written to {args.output}") - else: - print(output) + + try: + generator = TestSuiteGenerator( + args.source, + output_path=args.output, + include_a11y=args.include_a11y, + scan_only=args.scan_only, + verbose=args.verbose, + template=args.template + ) + + results = generator.run() + + if args.json: + print(json.dumps(results, indent=2)) + + except Exception as e: + print(f"Error: {e}") + sys.exit(1) + if __name__ == '__main__': main()