From 885fe8b023f2d04618dca6e356b80a18a5b09df8 Mon Sep 17 00:00:00 2001 From: Reza Rezvani Date: Mon, 9 Mar 2026 08:14:11 +0100 Subject: [PATCH] docs: update all documentation with accurate counts and regenerated skill pages - Update skill count to 170, Python tools to 213, references to 314 across all docs - Regenerate all 170 skill doc pages from latest SKILL.md sources - Update CLAUDE.md with v2.1.1 highlights, accurate architecture tree, and roadmap - Update README.md badges and overview table - Update marketplace.json metadata description and version - Update mkdocs.yml, index.md, getting-started.md with correct numbers --- .claude-plugin/marketplace.json | 6 +- CLAUDE.md | 52 +- README.md | 8 +- docs/getting-started.md | 2 +- docs/index.md | 8 +- .../contract-and-proposal-writer.md | 2 + .../customer-success-manager.md | 140 +-- .../business-growth/revenue-operations.md | 73 +- docs/skills/business-growth/sales-engineer.md | 222 ++--- .../skills/c-level-advisor/c-level-advisor.md | 98 ++ docs/skills/c-level-advisor/cto-advisor.md | 108 ++- .../executive-mentor-board-prep.md | 2 + .../executive-mentor-challenge.md | 2 + .../executive-mentor-hard-call.md | 2 + .../executive-mentor-postmortem.md | 2 + .../executive-mentor-stress-test.md | 2 + .../aws-solution-architect.md | 175 +++- .../email-template-builder.md | 16 +- .../engineering-team/incident-commander.md | 203 +--- .../engineering-team/ms365-tenant-manager.md | 320 +++---- .../playwright-pro-browserstack.md | 6 +- .../engineering-team/playwright-pro-fix.md | 2 +- .../engineering-team/playwright-pro-init.md | 16 +- .../engineering-team/playwright-pro-review.md | 2 +- .../skills/engineering-team/playwright-pro.md | 39 + .../self-improving-agent-extract.md | 2 +- .../skills/engineering-team/senior-backend.md | 79 +- .../senior-computer-vision.md | 94 +- .../engineering-team/senior-data-engineer.md | 802 +--------------- .../engineering-team/senior-data-scientist.md | 373 ++++---- 
docs/skills/engineering-team/senior-devops.md | 346 ++++--- .../engineering-team/senior-frontend.md | 2 +- docs/skills/engineering-team/senior-qa.md | 78 +- docs/skills/engineering-team/senior-secops.md | 169 +--- .../engineering-team/senior-security.md | 137 +-- .../stripe-integration-expert.md | 6 +- docs/skills/engineering-team/tdd-guide.md | 98 +- docs/skills/engineering/agent-designer.md | 2 + .../engineering/agent-workflow-designer.md | 8 +- .../skills/engineering/api-design-reviewer.md | 8 +- .../engineering/api-test-suite-builder.md | 508 +---------- .../skills/engineering/changelog-generator.md | 2 + .../engineering/ci-cd-pipeline-builder.md | 2 + .../skills/engineering/codebase-onboarding.md | 43 +- docs/skills/engineering/database-designer.md | 478 +--------- .../engineering/database-schema-designer.md | 289 +----- docs/skills/engineering/dependency-auditor.md | 2 + .../skills/engineering/env-secrets-manager.md | 358 +------- .../engineering/git-worktree-manager.md | 2 + docs/skills/engineering/mcp-server-builder.md | 2 + .../skills/engineering/migration-architect.md | 2 + docs/skills/engineering/monorepo-navigator.md | 516 +---------- .../engineering/observability-designer.md | 2 + .../engineering/performance-profiler.md | 475 +--------- docs/skills/engineering/pr-review-expert.md | 2 + docs/skills/engineering/rag-architect.md | 2 + docs/skills/engineering/release-manager.md | 2 + docs/skills/engineering/runbook-generator.md | 2 + .../engineering/skill-security-auditor.md | 2 +- docs/skills/engineering/skill-tester.md | 10 +- docs/skills/engineering/tech-debt-tracker.md | 488 +--------- docs/skills/finance/financial-analyst.md | 38 +- .../marketing-skill/analytics-tracking.md | 6 +- .../marketing-skill/app-store-optimization.md | 65 +- .../marketing-skill/brand-guidelines.md | 260 +----- .../marketing-skill/campaign-analytics.md | 93 +- .../marketing-skill/content-strategy.md | 276 +----- docs/skills/marketing-skill/email-sequence.md | 207 +---- 
docs/skills/marketing-skill/form-cro.md | 269 +----- .../skills/marketing-skill/launch-strategy.md | 315 +------ .../marketing-demand-acquisition.md | 27 - .../marketing-skill/marketing-strategy-pmm.md | 19 +- docs/skills/marketing-skill/popup-cro.md | 259 +----- .../prompt-engineer-toolkit.md | 88 +- docs/skills/marketing-skill/seo-audit.md | 287 +----- .../skills/marketing-skill/signup-flow-cro.md | 178 +--- .../product-team/competitive-teardown.md | 409 ++------- .../product-team/landing-page-generator.md | 290 +----- .../product-team/product-manager-toolkit.md | 154 +--- .../skills/product-team/product-strategist.md | 250 +---- docs/skills/product-team/saas-scaffolder.md | 118 +-- .../project-management/atlassian-admin.md | 410 +++------ .../project-management/atlassian-templates.md | 863 ++++-------------- .../project-management/confluence-expert.md | 220 +---- docs/skills/project-management/jira-expert.md | 153 ++-- .../skills/project-management/scrum-master.md | 528 +++-------- docs/skills/project-management/senior-pm.md | 209 ++--- docs/skills/ra-qm-team/isms-audit-expert.md | 58 +- .../quality-manager-qms-iso13485.md | 88 +- .../ra-qm-team/regulatory-affairs-head.md | 244 ++--- .../ra-qm-team/risk-management-specialist.md | 75 +- mkdocs.yml | 2 +- 92 files changed, 2465 insertions(+), 10894 deletions(-) diff --git a/.claude-plugin/marketplace.json b/.claude-plugin/marketplace.json index a3a5184..f232c83 100644 --- a/.claude-plugin/marketplace.json +++ b/.claude-plugin/marketplace.json @@ -4,12 +4,12 @@ "name": "Alireza Rezvani", "url": "https://alirezarezvani.com" }, - "description": "Production-ready skill packages for Claude AI - 86 expert skills across marketing, engineering, product, C-level advisory, project management, regulatory compliance, business growth, and finance", + "description": "Production-ready skill packages for Claude AI - 170 expert skills across marketing, engineering, product, C-level advisory, project management, regulatory 
compliance, business growth, and finance", "homepage": "https://github.com/alirezarezvani/claude-skills", "repository": "https://github.com/alirezarezvani/claude-skills", "metadata": { - "description": "87+ production-ready skill packages across 9 domains: marketing, engineering, engineering-advanced, product, C-level advisory, project management, regulatory compliance, business growth, and finance", - "version": "2.1.0" + "description": "170 production-ready skill packages across 9 domains: marketing, engineering, engineering-advanced, product, C-level advisory, project management, regulatory compliance, business growth, and finance", + "version": "2.1.1" }, "plugins": [ { diff --git a/CLAUDE.md b/CLAUDE.md index 4896ea8..0d60333 100644 --- a/CLAUDE.md +++ b/CLAUDE.md @@ -6,7 +6,7 @@ This file provides guidance to Claude Code (claude.ai/code) when working with co This is a **comprehensive skills library** for Claude AI and Claude Code - reusable, production-ready skill packages that bundle domain expertise, best practices, analysis tools, and strategic frameworks. The repository provides modular skills that teams can download and use directly in their workflows. -**Current Scope:** 134 production-ready skills across 9 domains with 185+ Python automation tools and 250+ reference guides. +**Current Scope:** 170 production-ready skills across 9 domains with 210+ Python automation tools, 310+ reference guides, 12 agents, and 5 slash commands. **Key Distinction**: This is NOT a traditional application. It's a library of skill packages meant to be extracted and deployed by users into their own Claude workflows. @@ -36,18 +36,22 @@ This repository uses **modular documentation**. 
For domain-specific guidance, se ``` claude-code-skills/ ├── .claude-plugin/ # Plugin registry (marketplace.json) -├── agents/ # cs-* prefixed agents (in development) -├── marketing-skill/ # 7 marketing skills + Python tools +├── agents/ # 12 cs-* prefixed agents across all domains +├── commands/ # 5 slash commands (changelog, tdd, tech-debt, etc.) +├── engineering-team/ # 23 core engineering skills + Playwright Pro + Self-Improving Agent +├── engineering/ # 25 POWERFUL-tier advanced skills ├── product-team/ # 8 product skills + Python tools -├── engineering-team/ # 22 core engineering skills + Python tools -├── engineering/ # 25 POWERFUL-tier advanced skills (v2.0.0) -├── c-level-advisor/ # 2 C-level skills -├── project-management/ # 6 PM skills + Atlassian MCP + packaged-skills +├── marketing-skill/ # 42 marketing skills (7 pods) + Python tools +├── c-level-advisor/ # 28 C-level advisory skills (10 roles + orchestration) +├── project-management/ # 6 PM skills + Atlassian MCP ├── ra-qm-team/ # 12 RA/QM compliance skills ├── business-growth/ # 4 business & growth skills + Python tools ├── finance/ # 1 finance skill + Python tools +├── eval-workspace/ # Skill evaluation results (Tessl) ├── standards/ # 5 standards library files ├── templates/ # Reusable templates +├── docs/ # MkDocs Material documentation site +├── scripts/ # Build scripts (docs generation) └── documentation/ # Implementation plans, sprints, delivery ``` @@ -120,29 +124,29 @@ See [standards/git/git-workflow-standards.md](standards/git/git-workflow-standar ## Current Version -**Version:** v2.0.0 (released 2026-02-16) +**Version:** v2.1.1 (latest) -**v2.0.0 Highlights:** +**v2.1.1 Highlights:** +- 18 skills optimized from 66-83% to 85-100% via Tessl quality review +- 21 over-500-line skills split into SKILL.md + references/ +- YAML frontmatter (name + description) added to all SKILL.md files +- 6 new agents + 5 slash commands for full domain coverage +- MkDocs Material documentation site at 
alirezarezvani.github.io/claude-skills + +**v2.0.0 (2026-02-16):** - 25 POWERFUL-tier engineering skills added (engineering/ folder) - Plugin marketplace infrastructure (.claude-plugin/marketplace.json) -- VirusTotal security scanning for skills (CI) - Multi-platform support: Claude Code, OpenAI Codex, OpenClaw -- Skills enhanced with production Python scripts and Anthropic best practices -**Past Sprints:** See [documentation/delivery/](documentation/delivery/) for sprint history. +**Past Sprints:** See [documentation/delivery/](documentation/delivery/) and [CHANGELOG.md](CHANGELOG.md) for history. ## Roadmap -**Phase 1-2 Complete:** 134 production-ready skills deployed across 9 domains -- Marketing (7), C-Level (33), Product (8), PM (6), Engineering Core (23), Engineering Advanced (14), RA/QM (12), Business & Growth (4), Finance (1) -- 160+ Python automation tools, 200+ reference guides -- Complete enterprise coverage from marketing through regulatory compliance, sales, customer success, and finance - -**Next Priorities:** -- **Phase 3 (Q2 2026):** Marketing expansion - SEO optimizer, social media manager, growth marketer -- **Phase 4 (Q3 2026):** Specialized domains - Mobile, blockchain, web3, advanced analytics - -**Target:** 100+ skills by Q3 2026 +**Phase 1-2 Complete:** 170 production-ready skills deployed across 9 domains +- Engineering Core (23), Engineering POWERFUL (25), Product (8), Marketing (42), PM (6), C-Level (28), RA/QM (12), Business & Growth (4), Finance (1) +- 210+ Python automation tools, 310+ reference guides, 12 agents, 5 commands +- Complete enterprise coverage from engineering through regulatory compliance, sales, customer success, and finance +- MkDocs Material docs site with 170+ indexed pages for SEO See domain-specific roadmaps in each skill folder's README.md or roadmap files. @@ -181,5 +185,5 @@ See domain-specific roadmaps in each skill folder's README.md or roadmap files. 
--- **Last Updated:** March 2026 -**Version:** v2.0.0 -**Status:** 134 skills deployed across 9 domains, plugin marketplace active +**Version:** v2.1.1 +**Status:** 170 skills deployed across 9 domains, 18 marketplace plugins, docs site live diff --git a/README.md b/README.md index 54410ff..955b75a 100644 --- a/README.md +++ b/README.md @@ -4,7 +4,7 @@ [![License: MIT](https://img.shields.io/badge/License-MIT-yellow.svg)](https://opensource.org/licenses/MIT) [![Skills](https://img.shields.io/badge/Skills-170-brightgreen.svg)](#skills-overview) -[![Agents](https://img.shields.io/badge/Agents-11-blue.svg)](#agents) +[![Agents](https://img.shields.io/badge/Agents-12-blue.svg)](#agents) [![Commands](https://img.shields.io/badge/Commands-5-orange.svg)](#commands) [![Stars](https://img.shields.io/github/stars/alirezarezvani/claude-skills?style=flat)](https://github.com/alirezarezvani/claude-skills/stargazers) [![SkillCheck Validated](https://img.shields.io/badge/SkillCheck-Validated-4c1)](https://getskillcheck.com) @@ -71,7 +71,7 @@ git clone https://github.com/alirezarezvani/claude-skills.git ## Skills Overview -**169 skills across 9 domains:** +**170 skills across 9 domains:** | Domain | Skills | Highlights | Details | |--------|--------|------------|---------| @@ -175,7 +175,7 @@ for MDR Annex II compliance gaps. ## Python Analysis Tools -160+ CLI tools ship with the skills: +210+ CLI tools ship with the skills: ```bash # Brand voice analysis @@ -211,7 +211,7 @@ Add the marketplace with `/plugin marketplace add alirezarezvani/claude-skills`, Yes. Skills work natively with Claude Code, OpenAI Codex, and OpenClaw. See Quick Install above. **Are the Python tools dependency-free?** -Yes. All 160+ Python CLI tools use the standard library only — zero pip installs required. +Yes. All 210+ Python CLI tools use the standard library only — zero pip installs required. 
**How do I create my own Claude Code skill?** Each skill is a folder with a `SKILL.md` (frontmatter + instructions), optional `scripts/`, `references/`, and `assets/`. See the [Skills & Agents Factory](https://github.com/alirezarezvani/claude-code-skills-agents-factory) for a step-by-step guide. diff --git a/docs/getting-started.md b/docs/getting-started.md index 4e7cbd7..fe65271 100644 --- a/docs/getting-started.md +++ b/docs/getting-started.md @@ -99,7 +99,7 @@ AI-augmented development. Optimize for SEO. ## Python Tools -All 160+ tools use the standard library only — zero pip installs. +All 210+ tools use the standard library only — zero pip installs. ```bash # Security audit a skill before installing diff --git a/docs/index.md b/docs/index.md index 1decd8a..f2afccc 100644 --- a/docs/index.md +++ b/docs/index.md @@ -1,6 +1,6 @@ --- title: Claude Code Skills & Plugins -description: "169 production-ready skills and plugins for Claude Code, OpenAI Codex, and OpenClaw. Reusable expertise bundles for engineering, product, marketing, compliance, and more." +description: "170 production-ready skills and plugins for Claude Code, OpenAI Codex, and OpenClaw. Reusable expertise bundles for engineering, product, marketing, compliance, and more." hide: - toc - edit @@ -14,7 +14,7 @@ hide: # Claude Code Skills -169 production-ready skills that transform AI coding agents into specialized professionals. +170 production-ready skills that transform AI coding agents into specialized professionals. { .hero-subtitle } **Claude Code** | **OpenAI Codex** | **OpenClaw** @@ -27,13 +27,13 @@ hide:
-- :material-counter:{ .lg .middle } **169** +- :material-counter:{ .lg .middle } **170** --- Production-ready skills -- :material-language-python:{ .lg .middle } **160+** +- :material-language-python:{ .lg .middle } **210+** --- diff --git a/docs/skills/business-growth/contract-and-proposal-writer.md b/docs/skills/business-growth/contract-and-proposal-writer.md index af0b816..1df09fe 100644 --- a/docs/skills/business-growth/contract-and-proposal-writer.md +++ b/docs/skills/business-growth/contract-and-proposal-writer.md @@ -10,6 +10,8 @@ description: "Contract & Proposal Writer - Claude Code skill from the Business & --- +# Contract & Proposal Writer + **Tier:** POWERFUL **Category:** Business Growth **Domain:** Legal Documents, Business Development, Client Relations diff --git a/docs/skills/business-growth/customer-success-manager.md b/docs/skills/business-growth/customer-success-manager.md index 2c0f18b..030119e 100644 --- a/docs/skills/business-growth/customer-success-manager.md +++ b/docs/skills/business-growth/customer-success-manager.md @@ -18,7 +18,6 @@ Production-grade customer success analytics with multi-dimensional health scorin ## Table of Contents -- [Capabilities](#capabilities) - [Input Requirements](#input-requirements) - [Output Formats](#output-formats) - [How to Use](#how-to-use) @@ -30,135 +29,21 @@ Production-grade customer success analytics with multi-dimensional health scorin --- -## Capabilities - -- **Customer Health Scoring**: Multi-dimensional weighted scoring across usage, engagement, support, and relationship dimensions with Red/Yellow/Green classification -- **Churn Risk Analysis**: Behavioral signal detection with tier-based intervention playbooks and time-to-renewal urgency multipliers -- **Expansion Opportunity Scoring**: Adoption depth analysis, whitespace mapping, and revenue opportunity estimation with effort-vs-impact prioritization -- **Segment-Aware Benchmarking**: Configurable thresholds for Enterprise, Mid-Market, and SMB 
customer segments -- **Trend Analysis**: Period-over-period comparison to detect improving or declining trajectories -- **Executive Reporting**: QBR templates, success plans, and executive business review templates - ---- - ## Input Requirements -All scripts accept a JSON file as positional input argument. See `assets/sample_customer_data.json` for complete examples. +All scripts accept a JSON file as positional input argument. See `assets/sample_customer_data.json` for complete schema examples and sample data. ### Health Score Calculator -```json -{ - "customers": [ - { - "customer_id": "CUST-001", - "name": "Acme Corp", - "segment": "enterprise", - "arr": 120000, - "usage": { - "login_frequency": 85, - "feature_adoption": 72, - "dau_mau_ratio": 0.45 - }, - "engagement": { - "support_ticket_volume": 3, - "meeting_attendance": 90, - "nps_score": 8, - "csat_score": 4.2 - }, - "support": { - "open_tickets": 2, - "escalation_rate": 0.05, - "avg_resolution_hours": 18 - }, - "relationship": { - "executive_sponsor_engagement": 80, - "multi_threading_depth": 4, - "renewal_sentiment": "positive" - }, - "previous_period": { - "usage_score": 70, - "engagement_score": 65, - "support_score": 75, - "relationship_score": 60 - } - } - ] -} -``` +Required fields per customer object: `customer_id`, `name`, `segment`, `arr`, and nested objects `usage` (login_frequency, feature_adoption, dau_mau_ratio), `engagement` (support_ticket_volume, meeting_attendance, nps_score, csat_score), `support` (open_tickets, escalation_rate, avg_resolution_hours), `relationship` (executive_sponsor_engagement, multi_threading_depth, renewal_sentiment), and `previous_period` scores for trend analysis. 
### Churn Risk Analyzer -```json -{ - "customers": [ - { - "customer_id": "CUST-001", - "name": "Acme Corp", - "segment": "enterprise", - "arr": 120000, - "contract_end_date": "2026-06-30", - "usage_decline": { - "login_trend": -15, - "feature_adoption_change": -10, - "dau_mau_change": -0.08 - }, - "engagement_drop": { - "meeting_cancellations": 2, - "response_time_days": 5, - "nps_change": -3 - }, - "support_issues": { - "open_escalations": 1, - "unresolved_critical": 0, - "satisfaction_trend": "declining" - }, - "relationship_signals": { - "champion_left": false, - "sponsor_change": false, - "competitor_mentions": 1 - }, - "commercial_factors": { - "contract_type": "annual", - "pricing_complaints": false, - "budget_cuts_mentioned": false - } - } - ] -} -``` +Required fields per customer object: `customer_id`, `name`, `segment`, `arr`, `contract_end_date`, and nested objects `usage_decline`, `engagement_drop`, `support_issues`, `relationship_signals`, and `commercial_factors`. ### Expansion Opportunity Scorer -```json -{ - "customers": [ - { - "customer_id": "CUST-001", - "name": "Acme Corp", - "segment": "enterprise", - "arr": 120000, - "contract": { - "licensed_seats": 100, - "active_seats": 95, - "plan_tier": "professional", - "available_tiers": ["professional", "enterprise", "enterprise_plus"] - }, - "product_usage": { - "core_platform": {"adopted": true, "usage_pct": 85}, - "analytics_module": {"adopted": true, "usage_pct": 60}, - "integrations_module": {"adopted": false, "usage_pct": 0}, - "api_access": {"adopted": true, "usage_pct": 40}, - "advanced_reporting": {"adopted": false, "usage_pct": 0} - }, - "departments": { - "current": ["engineering", "product"], - "potential": ["marketing", "sales", "support"] - } - } - ] -} -``` +Required fields per customer object: `customer_id`, `name`, `segment`, `arr`, and nested objects `contract` (licensed_seats, active_seats, plan_tier, available_tiers), `product_usage` (per-module adoption flags and usage 
percentages), and `departments` (current and potential). --- @@ -194,17 +79,26 @@ python scripts/expansion_opportunity_scorer.py assets/sample_customer_data.json ```bash # 1. Score customer health across portfolio python scripts/health_score_calculator.py customer_portfolio.json --format json > health_results.json +# Verify: confirm health_results.json contains the expected number of customer records before continuing # 2. Identify at-risk accounts python scripts/churn_risk_analyzer.py customer_portfolio.json --format json > risk_results.json +# Verify: confirm risk_results.json is non-empty and risk tiers are present for each customer # 3. Find expansion opportunities in healthy accounts python scripts/expansion_opportunity_scorer.py customer_portfolio.json --format json > expansion_results.json +# Verify: confirm expansion_results.json lists opportunities ranked by priority # 4. Prepare QBR using templates # Reference: assets/qbr_template.md ``` +**Error handling:** If a script exits with an error, check that: +- The input JSON matches the required schema for that script (see Input Requirements above) +- All required fields are present and correctly typed +- Python 3.7+ is being used (`python --version`) +- Output files from prior steps are non-empty before piping into subsequent steps + --- ## Scripts @@ -297,12 +191,10 @@ python scripts/expansion_opportunity_scorer.py customer_data.json --format json ## Best Practices -1. **Score regularly**: Run health scoring weekly for Enterprise, bi-weekly for Mid-Market, monthly for SMB +1. **Combine signals**: Use all three scripts together for a complete customer picture 2. **Act on trends, not snapshots**: A declining Green is more urgent than a stable Yellow -3. **Combine signals**: Use all three scripts together for a complete customer picture -4. **Calibrate thresholds**: Adjust segment benchmarks based on your product and industry -5. 
**Document interventions**: Track what actions you took and outcomes for playbook refinement -6. **Prepare with data**: Run scripts before every QBR and executive meeting +3. **Calibrate thresholds**: Adjust segment benchmarks based on your product and industry per `references/health-scoring-framework.md` +4. **Prepare with data**: Run scripts before every QBR and executive meeting; reference `references/cs-playbooks.md` for intervention guidance --- diff --git a/docs/skills/business-growth/revenue-operations.md b/docs/skills/business-growth/revenue-operations.md index d183e81..e7d63e8 100644 --- a/docs/skills/business-growth/revenue-operations.md +++ b/docs/skills/business-growth/revenue-operations.md @@ -14,20 +14,7 @@ description: "Revenue Operations - Claude Code skill from the Business & Growth Pipeline analysis, forecast accuracy tracking, and GTM efficiency measurement for SaaS revenue teams. -## Table of Contents - -- [Quick Start](#quick-start) -- [Tools Overview](#tools-overview) - - [Pipeline Analyzer](#1-pipeline-analyzer) - - [Forecast Accuracy Tracker](#2-forecast-accuracy-tracker) - - [GTM Efficiency Calculator](#3-gtm-efficiency-calculator) -- [Revenue Operations Workflows](#revenue-operations-workflows) - - [Weekly Pipeline Review](#weekly-pipeline-review) - - [Forecast Accuracy Review](#forecast-accuracy-review) - - [GTM Efficiency Audit](#gtm-efficiency-audit) - - [Quarterly Business Review](#quarterly-business-review) -- [Reference Documentation](#reference-documentation) -- [Templates](#templates) +> **Output formats:** All scripts support `--format text` (human-readable) and `--format json` (dashboards/integrations). 
--- @@ -58,11 +45,7 @@ Analyzes sales pipeline health including coverage ratios, stage conversion rates **Usage:** ```bash -# Text report (human-readable) python scripts/pipeline_analyzer.py --input pipeline.json --format text - -# JSON output (for dashboards/integrations) -python scripts/pipeline_analyzer.py --input pipeline.json --format json ``` **Key Metrics Calculated:** @@ -104,15 +87,11 @@ Tracks forecast accuracy over time using MAPE, detects systematic bias, analyzes **Usage:** ```bash -# Track forecast accuracy python scripts/forecast_accuracy_tracker.py forecast_data.json --format text - -# JSON output for trend analysis -python scripts/forecast_accuracy_tracker.py forecast_data.json --format json ``` **Key Metrics Calculated:** -- **MAPE** -- Mean Absolute Percentage Error: mean(|actual - forecast| / |actual|) x 100 +- **MAPE** -- mean(|actual - forecast| / |actual|) x 100 - **Forecast Bias** -- Over-forecasting (positive) vs under-forecasting (negative) tendency - **Weighted Accuracy** -- MAPE weighted by deal value for materiality - **Period Trends** -- Improving, stable, or declining accuracy over time @@ -153,11 +132,7 @@ Calculates core SaaS GTM efficiency metrics with industry benchmarking, ratings, **Usage:** ```bash -# Calculate all GTM efficiency metrics python scripts/gtm_efficiency_calculator.py gtm_data.json --format text - -# JSON output for dashboards -python scripts/gtm_efficiency_calculator.py gtm_data.json --format json ``` **Key Metrics Calculated:** @@ -208,57 +183,69 @@ python scripts/gtm_efficiency_calculator.py gtm_data.json --format json Use this workflow for your weekly pipeline inspection cadence. -1. **Generate pipeline report:** +1. **Verify input data:** Confirm pipeline export is current and all required fields (stage, value, close_date, owner) are populated before proceeding. + +2. **Generate pipeline report:** ```bash python scripts/pipeline_analyzer.py --input current_pipeline.json --format text ``` -2. 
**Review key indicators:** +3. **Cross-check output totals** against your CRM source system to confirm data integrity. + +4. **Review key indicators:** - Pipeline coverage ratio (is it above 3x quota?) - Deals aging beyond threshold (which deals need intervention?) - Concentration risk (are we over-reliant on a few large deals?) - Stage distribution (is there a healthy funnel shape?) -3. **Document using template:** Use `assets/pipeline_review_template.md` +5. **Document using template:** Use `assets/pipeline_review_template.md` -4. **Action items:** Address aging deals, redistribute pipeline concentration, fill coverage gaps +6. **Action items:** Address aging deals, redistribute pipeline concentration, fill coverage gaps ### Forecast Accuracy Review Use monthly or quarterly to evaluate and improve forecasting discipline. -1. **Generate accuracy report:** +1. **Verify input data:** Confirm all forecast periods have corresponding actuals and no periods are missing before running. + +2. **Generate accuracy report:** ```bash python scripts/forecast_accuracy_tracker.py forecast_history.json --format text ``` -2. **Analyze patterns:** +3. **Cross-check actuals** against closed-won records in your CRM before drawing conclusions. + +4. **Analyze patterns:** - Is MAPE trending down (improving)? - Which reps or segments have the highest error rates? - Is there systematic over- or under-forecasting? -3. **Document using template:** Use `assets/forecast_report_template.md` +5. **Document using template:** Use `assets/forecast_report_template.md` -4. **Improvement actions:** Coach high-bias reps, adjust methodology, improve data hygiene +6. **Improvement actions:** Coach high-bias reps, adjust methodology, improve data hygiene ### GTM Efficiency Audit Use quarterly or during board prep to evaluate go-to-market efficiency. -1. **Calculate efficiency metrics:** +1. **Verify input data:** Confirm revenue, cost, and customer figures reconcile with finance records before running. 
+ +2. **Calculate efficiency metrics:** ```bash python scripts/gtm_efficiency_calculator.py quarterly_data.json --format text ``` -2. **Benchmark against targets:** - - Magic Number signals GTM spend efficiency - - LTV:CAC validates unit economics - - CAC Payback shows capital efficiency - - Rule of 40 balances growth and profitability +3. **Cross-check computed ARR and spend totals** against your finance system before sharing results. -3. **Document using template:** Use `assets/gtm_dashboard_template.md` +4. **Benchmark against targets:** + - Magic Number (>0.75) + - LTV:CAC (>3:1) + - CAC Payback (<18 months) + - Rule of 40 (>40%) -4. **Strategic decisions:** Adjust spend allocation, optimize channels, improve retention +5. **Document using template:** Use `assets/gtm_dashboard_template.md` + +6. **Strategic decisions:** Adjust spend allocation, optimize channels, improve retention ### Quarterly Business Review diff --git a/docs/skills/business-growth/sales-engineer.md b/docs/skills/business-growth/sales-engineer.md index 10612c9..3870da3 100644 --- a/docs/skills/business-growth/sales-engineer.md +++ b/docs/skills/business-growth/sales-engineer.md @@ -12,112 +12,125 @@ description: "Sales Engineer Skill - Claude Code skill from the Business & Growt # Sales Engineer Skill -A production-ready skill package for pre-sales engineering that bridges technical expertise and sales execution. Provides automated analysis for RFP/RFI responses, competitive positioning, and proof-of-concept planning. 
- -## Overview - -**Role:** Sales Engineer / Solutions Architect -**Domain:** Pre-Sales Engineering, Solution Design, Technical Demos, Proof of Concepts -**Business Type:** SaaS / Pre-Sales Engineering - -### What This Skill Does - -- **RFP/RFI Response Analysis** - Score requirement coverage, identify gaps, generate bid/no-bid recommendations -- **Competitive Technical Positioning** - Build feature comparison matrices, identify differentiators and vulnerabilities -- **POC Planning** - Generate timelines, resource plans, success criteria, and evaluation scorecards -- **Demo Preparation** - Structure demo scripts with talking points and objection handling -- **Technical Proposal Creation** - Framework for solution architecture and implementation planning -- **Win/Loss Analysis** - Data-driven competitive assessment for deal strategy - -### Key Metrics - -| Metric | Description | Target | -|--------|-------------|--------| -| Win Rate | Deals won / total opportunities | >30% | -| Sales Cycle Length | Average days from discovery to close | <90 days | -| POC Conversion Rate | POCs resulting in closed deals | >60% | -| Customer Engagement Score | Stakeholder participation in evaluation | >75% | -| RFP Coverage Score | Requirements fully addressed | >80% | - ## 5-Phase Workflow ### Phase 1: Discovery & Research **Objective:** Understand customer requirements, technical environment, and business drivers. -**Activities:** -1. Conduct technical discovery calls with stakeholders -2. Map customer's current architecture and pain points -3. Identify integration requirements and constraints -4. Document security and compliance requirements -5. 
Assess competitive landscape for this opportunity +**Checklist:** +- [ ] Conduct technical discovery calls with stakeholders +- [ ] Map customer's current architecture and pain points +- [ ] Identify integration requirements and constraints +- [ ] Document security and compliance requirements +- [ ] Assess competitive landscape for this opportunity -**Tools:** Use `rfp_response_analyzer.py` to score initial requirement alignment. +**Tools:** Run `rfp_response_analyzer.py` to score initial requirement alignment. + +```bash +python scripts/rfp_response_analyzer.py assets/sample_rfp_data.json --format json > phase1_rfp_results.json +``` **Output:** Technical discovery document, requirement map, initial coverage assessment. +**Validation checkpoint:** Coverage score must be >50% and must-have gaps ≤3 before proceeding to Phase 2. Check with: +```bash +python scripts/rfp_response_analyzer.py assets/sample_rfp_data.json --format json | python -c "import sys,json; r=json.load(sys.stdin); print('PROCEED' if r['coverage_score']>50 and r['must_have_gaps']<=3 else 'REVIEW')" +``` + +--- + ### Phase 2: Solution Design **Objective:** Design a solution architecture that addresses customer requirements. -**Activities:** -1. Map product capabilities to customer requirements -2. Design integration architecture -3. Identify customization needs and development effort -4. Build competitive differentiation strategy -5. Create solution architecture diagrams +**Checklist:** +- [ ] Map product capabilities to customer requirements +- [ ] Design integration architecture +- [ ] Identify customization needs and development effort +- [ ] Build competitive differentiation strategy +- [ ] Create solution architecture diagrams -**Tools:** Use `competitive_matrix_builder.py` to identify differentiators and vulnerabilities. +**Tools:** Run `competitive_matrix_builder.py` using Phase 1 data to identify differentiators and vulnerabilities. 
+ +```bash +python scripts/competitive_matrix_builder.py competitive_data.json --format json > phase2_competitive.json + +python -c "import json; d=json.load(open('phase2_competitive.json')); print('Differentiators:', d['differentiators']); print('Vulnerabilities:', d['vulnerabilities'])" +``` **Output:** Solution architecture, competitive positioning, technical differentiation strategy. +**Validation checkpoint:** Confirm at least one strong differentiator exists per customer priority before proceeding to Phase 3. If no differentiators found, escalate to Product Team (see Integration Points). + +--- + ### Phase 3: Demo Preparation & Delivery **Objective:** Deliver compelling technical demonstrations tailored to stakeholder priorities. -**Activities:** -1. Build demo environment matching customer's use case -2. Create demo script with talking points per stakeholder role -3. Prepare objection handling responses -4. Rehearse failure scenarios and recovery paths -5. Collect feedback and adjust approach +**Checklist:** +- [ ] Build demo environment matching customer's use case +- [ ] Create demo script with talking points per stakeholder role +- [ ] Prepare objection handling responses +- [ ] Rehearse failure scenarios and recovery paths +- [ ] Collect feedback and adjust approach -**Templates:** Use `demo_script_template.md` for structured demo preparation. +**Templates:** Use `assets/demo_script_template.md` for structured demo preparation. **Output:** Customized demo, stakeholder-specific talking points, feedback capture. +**Validation checkpoint:** Demo script must cover every must-have requirement flagged in `phase1_rfp_results.json` before delivery. Cross-reference with: +```bash +python -c "import json; rfp=json.load(open('phase1_rfp_results.json')); [print('UNCOVERED:', r) for r in rfp['must_have_requirements'] if r['coverage']=='Gap']" +``` + +--- + ### Phase 4: POC & Evaluation **Objective:** Execute a structured proof-of-concept that validates the solution. 
-**Activities:** -1. Define POC scope, success criteria, and timeline -2. Allocate resources and set up environment -3. Execute phased testing (core, advanced, edge cases) -4. Track progress against success criteria -5. Generate evaluation scorecard +**Checklist:** +- [ ] Define POC scope, success criteria, and timeline +- [ ] Allocate resources and set up environment +- [ ] Execute phased testing (core, advanced, edge cases) +- [ ] Track progress against success criteria +- [ ] Generate evaluation scorecard -**Tools:** Use `poc_planner.py` to generate the complete POC plan. +**Tools:** Run `poc_planner.py` to generate the complete POC plan. -**Templates:** Use `poc_scorecard_template.md` for evaluation tracking. +```bash +python scripts/poc_planner.py poc_data.json --format json > phase4_poc_plan.json + +python -c "import json; p=json.load(open('phase4_poc_plan.json')); print('Go/No-Go:', p['recommendation'])" +``` + +**Templates:** Use `assets/poc_scorecard_template.md` for evaluation tracking. **Output:** POC plan, evaluation scorecard, go/no-go recommendation. +**Validation checkpoint:** POC conversion requires scorecard score >60% across all evaluation dimensions (functionality, performance, integration, usability, support). If score <60%, document gaps and loop back to Phase 2 for solution redesign. + +--- + ### Phase 5: Proposal & Closing **Objective:** Deliver a technical proposal that supports the commercial close. -**Activities:** -1. Compile POC results and success metrics -2. Create technical proposal with implementation plan -3. Address outstanding objections with evidence -4. Support pricing and packaging discussions -5. 
Conduct win/loss analysis post-decision +**Checklist:** +- [ ] Compile POC results and success metrics +- [ ] Create technical proposal with implementation plan +- [ ] Address outstanding objections with evidence +- [ ] Support pricing and packaging discussions +- [ ] Conduct win/loss analysis post-decision -**Templates:** Use `technical_proposal_template.md` for the proposal document. +**Templates:** Use `assets/technical_proposal_template.md` for the proposal document. **Output:** Technical proposal, implementation timeline, risk mitigation plan. +--- + ## Python Automation Tools ### 1. RFP Response Analyzer @@ -126,63 +139,42 @@ A production-ready skill package for pre-sales engineering that bridges technica **Purpose:** Parse RFP/RFI requirements, score coverage, identify gaps, and generate bid/no-bid recommendations. -**Coverage Categories:** -- **Full (100%)** - Requirement fully met by current product -- **Partial (50%)** - Requirement partially met, workaround or configuration needed -- **Planned (25%)** - On product roadmap, not yet available -- **Gap (0%)** - Not supported, no current plan - -**Priority Weighting:** -- Must-Have: 3x weight -- Should-Have: 2x weight -- Nice-to-Have: 1x weight +**Coverage Categories:** Full (100%), Partial (50%), Planned (25%), Gap (0%). +**Priority Weighting:** Must-Have 3×, Should-Have 2×, Nice-to-Have 1×. 
**Bid/No-Bid Logic:** -- **Bid:** Coverage score >70% AND must-have gaps <=3 -- **Conditional Bid:** Coverage score 50-70% OR must-have gaps 2-3 -- **No-Bid:** Coverage score <50% OR must-have gaps >3 +- **Bid:** Coverage >70% AND must-have gaps ≤3 +- **Conditional Bid:** Coverage 50–70% OR must-have gaps 2–3 +- **No-Bid:** Coverage <50% OR must-have gaps >3 **Usage:** ```bash -# Human-readable output -python scripts/rfp_response_analyzer.py assets/sample_rfp_data.json - -# JSON output -python scripts/rfp_response_analyzer.py assets/sample_rfp_data.json --format json - -# Help +python scripts/rfp_response_analyzer.py assets/sample_rfp_data.json # human-readable +python scripts/rfp_response_analyzer.py assets/sample_rfp_data.json --format json # JSON output python scripts/rfp_response_analyzer.py --help ``` **Input Format:** See `assets/sample_rfp_data.json` for the complete schema. +--- + ### 2. Competitive Matrix Builder **Script:** `scripts/competitive_matrix_builder.py` **Purpose:** Generate feature comparison matrices, calculate competitive scores, identify differentiators and vulnerabilities. -**Feature Scoring:** -- **Full (3)** - Complete feature support -- **Partial (2)** - Partial or limited feature support -- **Limited (1)** - Minimal or basic feature support -- **None (0)** - Feature not available +**Feature Scoring:** Full (3), Partial (2), Limited (1), None (0). 
**Usage:** ```bash -# Human-readable output -python scripts/competitive_matrix_builder.py competitive_data.json - -# JSON output -python scripts/competitive_matrix_builder.py competitive_data.json --format json +python scripts/competitive_matrix_builder.py competitive_data.json # human-readable +python scripts/competitive_matrix_builder.py competitive_data.json --format json # JSON output ``` -**Output Includes:** -- Feature comparison matrix with scores -- Weighted competitive scores per product -- Differentiators (features where our product leads) -- Vulnerabilities (features where competitors lead) -- Win themes based on differentiators +**Output Includes:** Feature comparison matrix, weighted competitive scores, differentiators, vulnerabilities, and win themes. + +--- ### 3. POC Planner @@ -191,27 +183,20 @@ python scripts/competitive_matrix_builder.py competitive_data.json --format json **Purpose:** Generate structured POC plans with timeline, resource allocation, success criteria, and evaluation scorecards. 
**Default Phase Breakdown:** -- **Week 1:** Setup - Environment provisioning, data migration, configuration -- **Weeks 2-3:** Core Testing - Primary use cases, integration testing -- **Week 4:** Advanced Testing - Edge cases, performance, security -- **Week 5:** Evaluation - Scorecard completion, stakeholder review, go/no-go +- **Week 1:** Setup — environment provisioning, data migration, configuration +- **Weeks 2–3:** Core Testing — primary use cases, integration testing +- **Week 4:** Advanced Testing — edge cases, performance, security +- **Week 5:** Evaluation — scorecard completion, stakeholder review, go/no-go **Usage:** ```bash -# Human-readable output -python scripts/poc_planner.py poc_data.json - -# JSON output -python scripts/poc_planner.py poc_data.json --format json +python scripts/poc_planner.py poc_data.json # human-readable +python scripts/poc_planner.py poc_data.json --format json # JSON output ``` -**Output Includes:** -- POC plan with phased timeline -- Resource allocation (SE, engineering, customer) -- Success criteria with measurable metrics -- Evaluation scorecard (functionality, performance, integration, usability, support) -- Risk register with mitigation strategies -- Go/No-Go recommendation framework +**Output Includes:** Phased POC plan, resource allocation, success criteria, evaluation scorecard, risk register, and go/no-go recommendation framework. 
+ +--- ## Reference Knowledge Bases @@ -231,13 +216,6 @@ python scripts/poc_planner.py poc_data.json --format json | `assets/sample_rfp_data.json` | Sample RFP data for testing the analyzer | | `assets/expected_output.json` | Expected output from rfp_response_analyzer.py | -## Communication Style - -- **Technical yet accessible** - Translate complex concepts for business stakeholders -- **Confident and consultative** - Position as trusted advisor, not vendor -- **Evidence-based** - Back every claim with data, demos, or case studies -- **Stakeholder-aware** - Tailor depth and focus to audience (CTO vs. end user vs. procurement) - ## Integration Points - **Marketing Skills** - Leverage competitive intelligence and messaging frameworks from `../../marketing-skill/` diff --git a/docs/skills/c-level-advisor/c-level-advisor.md b/docs/skills/c-level-advisor/c-level-advisor.md index c6cd637..0f9812a 100644 --- a/docs/skills/c-level-advisor/c-level-advisor.md +++ b/docs/skills/c-level-advisor/c-level-advisor.md @@ -18,8 +18,106 @@ A complete virtual board of directors for founders and executives. ``` 1. Run /cs:setup → creates company-context.md (all agents read this) + ✓ Verify company-context.md was created and contains your company name, + stage, and core metrics before proceeding. 2. Ask any strategic question → Chief of Staff routes to the right role 3. For big decisions → /cs:board triggers a multi-role board meeting + ✓ Confirm at least 3 roles have weighed in before accepting a conclusion. +``` + +### Commands + +#### `/cs:setup` — Onboarding Questionnaire + +Walks through the following prompts and writes `company-context.md` to the project root. Run once per company or when context changes significantly. + +``` +Q1. What is your company name and one-line description? +Q2. What stage are you at? (Idea / Pre-seed / Seed / Series A / Series B+) +Q3. What is your current ARR (or MRR) and runway in months? +Q4. What is your team size and structure? +Q5. 
What industry and customer segment do you serve? +Q6. What are your top 3 priorities for the next 90 days? +Q7. What is your biggest current risk or blocker? +``` + +After collecting answers, the agent writes structured output: + +```markdown +# Company Context +- Name: +- Stage: +- Industry: +- Team size: +- Key metrics: +- Top priorities: +- Key risks: +``` + +#### `/cs:board` — Full Board Meeting + +Convenes all relevant executive roles in three phases: + +``` +Phase 1 — Framing: Chief of Staff states the decision and success criteria. +Phase 2 — Isolation: Each role produces independent analysis (no cross-talk). +Phase 3 — Debate: Roles surface conflicts, stress-test assumptions, align on + a recommendation. Dissenting views are preserved in the log. +``` + +Use for high-stakes or cross-functional decisions. Confirm at least 3 roles have weighed in before accepting a conclusion. + +### Chief of Staff Routing Matrix + +When a question arrives without a role prefix, the Chief of Staff maps it to the appropriate executive using these primary signals: + +| Topic Signal | Primary Role | Supporting Roles | +|---|---|---| +| Fundraising, valuation, burn | CFO | CEO, CRO | +| Architecture, build vs. 
buy, tech debt | CTO | CPO, CISO | +| Hiring, culture, performance | CHRO | CEO, Executive Mentor | +| GTM, demand gen, positioning | CMO | CRO, CPO | +| Revenue, pipeline, sales motion | CRO | CMO, CFO | +| Security, compliance, risk | CISO | CTO, CFO | +| Product roadmap, prioritisation | CPO | CTO, CMO | +| Ops, process, scaling | COO | CFO, CHRO | +| Vision, strategy, investor relations | CEO | Executive Mentor | +| Career, founder psychology, leadership | Executive Mentor | CEO, CHRO | +| Multi-domain / unclear | Chief of Staff convenes board | All relevant roles | + +### Invoking a Specific Role Directly + +To bypass Chief of Staff routing and address one executive directly, prefix your question with the role name: + +``` +CFO: What is our optimal burn rate heading into a Series A? +CTO: Should we rebuild our auth layer in-house or buy a solution? +CHRO: How do we design a performance review process for a 15-person team? +``` + +The Chief of Staff still logs the exchange; only routing is skipped. + +### Example: Strategic Question + +**Input:** "Should we raise a Series A now or extend runway and grow ARR first?" + +**Output format:** +- **Bottom Line:** Extend runway 6 months; raise at $2M ARR for better terms. +- **What:** Current $800K ARR is below the threshold most Series A investors benchmark. +- **Why:** Raising now increases dilution risk; 6-month extension is achievable with current burn. +- **How to Act:** Cut 2 low-ROI channels, hit $2M ARR, then run a 6-week fundraise sprint. +- **Your Decision:** Proceed with extension / Raise now anyway (choose one). + +### Example: company-context.md (after /cs:setup) + +```markdown +# Company Context +- Name: Acme Inc. 
+- Stage: Seed ($800K ARR) +- Industry: B2B SaaS +- Team size: 12 +- Key metrics: 15% MoM growth, 18-month runway +- Top priorities: Series A readiness, enterprise GTM ``` ## What's Included diff --git a/docs/skills/c-level-advisor/cto-advisor.md b/docs/skills/c-level-advisor/cto-advisor.md index 87e01d3..0ab6e8a 100644 --- a/docs/skills/c-level-advisor/cto-advisor.md +++ b/docs/skills/c-level-advisor/cto-advisor.md @@ -27,19 +27,19 @@ python scripts/team_scaling_calculator.py # Model engineering team growth and c ## Core Responsibilities ### 1. Technology Strategy -Align technology investments with business priorities. Not "what's exciting" — "what moves the needle." +Align technology investments with business priorities. **Strategy components:** - Technology vision (3-year: where the platform is going) - Architecture roadmap (what to build, refactor, or replace) - Innovation budget (10-20% of engineering capacity for experimentation) - Build vs buy decisions (default: buy unless it's your core IP) -- Technical debt strategy (not elimination — management) +- Technical debt strategy (management, not elimination) See `references/technology_evaluation_framework.md` for the full evaluation framework. ### 2. Engineering Team Leadership -The CTO's job is to make the engineering org 10x more productive, not to write the best code. +Scale the engineering org's productivity — not individual output. **Scaling engineering:** - Hire for the next stage, not the current one @@ -56,7 +56,7 @@ The CTO's job is to make the engineering org 10x more productive, not to write t See `references/engineering_metrics.md` for DORA metrics and the engineering health dashboard. ### 3. Architecture Governance -You don't make every architecture decision. You create the framework for making good ones. +Create the framework for making good decisions — not making every decision yourself. 
**Architecture Decision Records (ADRs):** - Every significant decision gets documented: context, options, decision, consequences @@ -73,11 +73,96 @@ Every vendor is a dependency. Every dependency is a risk. ### 5. Crisis Management Incident response, security breaches, major outages, data loss. -**Your role in a crisis:** Not to fix it yourself. To ensure the right people are on it, communication is flowing, and the business is informed. Post-crisis: blameless retrospective within 48 hours. +**Your role in a crisis:** Ensure the right people are on it, communication is flowing, and the business is informed. Post-crisis: blameless retrospective within 48 hours. + +## Workflows + +### Tech Debt Assessment Workflow + +**Step 1 — Run the analyzer** +```bash +python scripts/tech_debt_analyzer.py --output report.json +``` + +**Step 2 — Interpret results** +The analyzer produces a severity-scored inventory. Review each item against: +- Severity (P0–P3): how much is it blocking velocity or creating risk? +- Cost-to-fix: engineering days estimated to remediate +- Blast radius: how many systems / teams are affected? + +**Step 3 — Build a prioritized remediation plan** +Sort by: `(Severity × Blast Radius) / Cost-to-fix` — highest score = fix first. +Group items into: (a) immediate sprint, (b) next quarter, (c) tracked backlog. 
+ +**Step 4 — Validate before presenting to stakeholders** +- [ ] Every P0/P1 item has an owner and a target date +- [ ] Cost-to-fix estimates reviewed with the relevant tech lead +- [ ] Debt ratio calculated: maintenance work / total engineering capacity (target: < 25%) +- [ ] Remediation plan fits within capacity (don't promise 40 points of debt reduction in a 2-week sprint) + +**Example output — Tech Debt Inventory:** +``` +Item | Severity | Cost-to-Fix | Blast Radius | Priority Score +----------------------|----------|-------------|--------------|--------------- +Auth service (v1 API) | P1 | 8 days | 6 services | HIGH +Unindexed DB queries | P2 | 3 days | 2 services | MEDIUM +Legacy deploy scripts | P3 | 5 days | 1 service | LOW +``` + +--- + +### ADR Creation Workflow + +**Step 1 — Identify the decision** +Trigger an ADR when: the decision affects more than one team, is hard to reverse, or has cost/risk implications > 1 sprint of effort. + +**Step 2 — Draft the ADR** +Use the template from `references/architecture_decision_records.md`: +``` +Title: [Short noun phrase] +Status: Proposed | Accepted | Superseded +Context: What is the problem? What constraints exist? +Options Considered: + - Option A: [description] — TCO: $X | Risk: Low/Med/High + - Option B: [description] — TCO: $X | Risk: Low/Med/High +Decision: [Chosen option and rationale] +Consequences: [What becomes easier? What becomes harder?] +``` + +**Step 3 — Validation checkpoint (before finalizing)** +- [ ] All options include a 3-year TCO estimate +- [ ] At least one "do nothing" or "buy" alternative is documented +- [ ] Affected team leads have reviewed and signed off +- [ ] Consequences section addresses reversibility and migration path +- [ ] ADR is committed to the repository (not left in a doc or Slack thread) + +**Step 4 — Communicate and close** +Share the accepted ADR in the engineering all-hands or architecture sync. Link it from the relevant service's README. 
+ +--- + +### Build vs Buy Analysis Workflow + +**Step 1 — Define requirements** (functional + non-functional) +**Step 2 — Identify candidate vendors or internal build scope** +**Step 3 — Score each option:** + +``` +Criterion | Weight | Build Score | Vendor A Score | Vendor B Score +-----------------------|--------|-------------|----------------|--------------- +Solves core problem | 30% | 9 | 8 | 7 +Migration risk | 20% | 2 (low risk)| 7 | 6 +3-year TCO | 25% | $X | $Y | $Z +Vendor stability | 15% | N/A | 8 | 5 +Integration effort | 10% | 3 | 7 | 8 +``` + +**Step 4 — Default rule:** Buy unless it is core IP or no vendor meets ≥ 70% of requirements. +**Step 5 — Document the decision as an ADR** (see ADR workflow above). ## Key Questions a CTO Asks -- "What's our biggest technical risk right now? Not the most annoying — the most dangerous." +- "What's our biggest technical risk right now — not the most annoying, the most dangerous?" - "If we 10x our traffic tomorrow, what breaks first?" - "How much of our engineering time goes to maintenance vs new features?" - "What would a new engineer say about our codebase after their first week?" @@ -103,16 +188,13 @@ Incident response, security breaches, major outages, data loss. 
## Red Flags -- Tech debt is growing faster than you're paying it down -- Deployment frequency is declining (a leading indicator of team health) -- Senior engineers are leaving (they see problems before management does) -- "It works on my machine" is still a thing +- Tech debt ratio > 30% and growing faster than it's being paid down +- Deployment frequency declining over 4+ weeks - No ADRs for the last 3 major decisions - The CTO is the only person who can deploy to production -- Security audit hasn't happened in 12+ months -- The team dreads on-call rotation - Build times exceed 10 minutes -- No one can explain the system architecture to a new hire in 30 minutes +- Single points of failure on critical systems with no mitigation plan +- The team dreads on-call rotation ## Integration with C-Suite Roles diff --git a/docs/skills/c-level-advisor/executive-mentor-board-prep.md b/docs/skills/c-level-advisor/executive-mentor-board-prep.md index 5157e1b..b843fa2 100644 --- a/docs/skills/c-level-advisor/executive-mentor-board-prep.md +++ b/docs/skills/c-level-advisor/executive-mentor-board-prep.md @@ -10,6 +10,8 @@ description: "/em:board-prep — Board Meeting Preparation - Claude Code skill f --- +# /em:board-prep — Board Meeting Preparation + **Command:** `/em:board-prep ` Prepare for the adversarial version of your board, not the friendly one. Every hard question they'll ask. Every number you need cold. The narrative that acknowledges weakness without losing the room. 
diff --git a/docs/skills/c-level-advisor/executive-mentor-challenge.md b/docs/skills/c-level-advisor/executive-mentor-challenge.md index a86bbbf..7f27d08 100644 --- a/docs/skills/c-level-advisor/executive-mentor-challenge.md +++ b/docs/skills/c-level-advisor/executive-mentor-challenge.md @@ -10,6 +10,8 @@ description: "/em:challenge — Pre-Mortem Plan Analysis - Claude Code skill fro --- +# /em:challenge — Pre-Mortem Plan Analysis + **Command:** `/em:challenge ` Systematically finds weaknesses in any plan before reality does. Not to kill the plan — to make it survive contact with reality. diff --git a/docs/skills/c-level-advisor/executive-mentor-hard-call.md b/docs/skills/c-level-advisor/executive-mentor-hard-call.md index 248ced2..36229d8 100644 --- a/docs/skills/c-level-advisor/executive-mentor-hard-call.md +++ b/docs/skills/c-level-advisor/executive-mentor-hard-call.md @@ -10,6 +10,8 @@ description: "/em:hard-call — Framework for Decisions With No Good Options - C --- +# /em:hard-call — Framework for Decisions With No Good Options + **Command:** `/em:hard-call ` For the decisions that keep you up at 3am. Firing a co-founder. Laying off 20% of the team. Killing a product that customers love. Pivoting. Shutting down. diff --git a/docs/skills/c-level-advisor/executive-mentor-postmortem.md b/docs/skills/c-level-advisor/executive-mentor-postmortem.md index a73268c..d37fbad 100644 --- a/docs/skills/c-level-advisor/executive-mentor-postmortem.md +++ b/docs/skills/c-level-advisor/executive-mentor-postmortem.md @@ -10,6 +10,8 @@ description: "/em:postmortem — Honest Analysis of What Went Wrong - Claude Cod --- +# /em:postmortem — Honest Analysis of What Went Wrong + **Command:** `/em:postmortem ` Not blame. Understanding. The failed deal, the missed quarter, the feature that flopped, the hire that didn't work out. What actually happened, why, and what changes as a result. 
diff --git a/docs/skills/c-level-advisor/executive-mentor-stress-test.md b/docs/skills/c-level-advisor/executive-mentor-stress-test.md index e4b4d7c..f7abe34 100644 --- a/docs/skills/c-level-advisor/executive-mentor-stress-test.md +++ b/docs/skills/c-level-advisor/executive-mentor-stress-test.md @@ -10,6 +10,8 @@ description: "/em:stress-test — Business Assumption Stress Testing - Claude Co --- +# /em:stress-test — Business Assumption Stress Testing + **Command:** `/em:stress-test ` Take any business assumption and break it before the market does. Revenue projections. Market size. Competitive moat. Hiring velocity. Customer retention. diff --git a/docs/skills/engineering-team/aws-solution-architect.md b/docs/skills/engineering-team/aws-solution-architect.md index 8425e0b..cdb5e37 100644 --- a/docs/skills/engineering-team/aws-solution-architect.md +++ b/docs/skills/engineering-team/aws-solution-architect.md @@ -16,36 +16,6 @@ Design scalable, cost-effective AWS architectures for startups with infrastructu --- -## Table of Contents - -- [Trigger Terms](#trigger-terms) -- [Workflow](#workflow) -- [Tools](#tools) -- [Quick Start](#quick-start) -- [Input Requirements](#input-requirements) -- [Output Formats](#output-formats) - ---- - -## Trigger Terms - -Use this skill when you encounter: - -| Category | Terms | -|----------|-------| -| **Architecture Design** | serverless architecture, AWS architecture, cloud design, microservices, three-tier | -| **IaC Generation** | CloudFormation, CDK, Terraform, infrastructure as code, deploy template | -| **Serverless** | Lambda, API Gateway, DynamoDB, Step Functions, EventBridge, AppSync | -| **Containers** | ECS, Fargate, EKS, container orchestration, Docker on AWS | -| **Cost Optimization** | reduce AWS costs, optimize spending, right-sizing, Savings Plans | -| **Database** | Aurora, RDS, DynamoDB design, database migration, data modeling | -| **Security** | IAM policies, VPC design, encryption, Cognito, WAF | -| **CI/CD** | 
CodePipeline, CodeBuild, CodeDeploy, GitHub Actions AWS | -| **Monitoring** | CloudWatch, X-Ray, observability, alarms, dashboards | -| **Migration** | migrate to AWS, lift and shift, replatform, DMS | - ---- - ## Workflow ### Step 1: Gather Requirements @@ -69,6 +39,18 @@ Run the architecture designer to get pattern recommendations: python scripts/architecture_designer.py --input requirements.json ``` +**Example output:** + +```json +{ + "recommended_pattern": "serverless_web", + "service_stack": ["S3", "CloudFront", "API Gateway", "Lambda", "DynamoDB", "Cognito"], + "estimated_monthly_cost_usd": 35, + "pros": ["Low ops overhead", "Pay-per-use", "Auto-scaling"], + "cons": ["Cold starts", "15-min Lambda limit", "Eventual consistency"] +} +``` + Select from recommended patterns: - **Serverless Web**: S3 + CloudFront + API Gateway + Lambda + DynamoDB - **Event-Driven Microservices**: EventBridge + Lambda + SQS + Step Functions @@ -77,6 +59,8 @@ Select from recommended patterns: See `references/architecture_patterns.md` for detailed pattern specifications. +**Validation checkpoint:** Confirm the recommended pattern matches the team's operational maturity and compliance requirements before proceeding to Step 3. 
+ ### Step 3: Generate IaC Templates Create infrastructure-as-code for the selected pattern: @@ -84,8 +68,76 @@ Create infrastructure-as-code for the selected pattern: ```bash # Serverless stack (CloudFormation) python scripts/serverless_stack.py --app-name my-app --region us-east-1 +``` -# Output: CloudFormation YAML template ready to deploy +**Example CloudFormation YAML output (core serverless resources):** + +```yaml +AWSTemplateFormatVersion: '2010-09-09' +Transform: AWS::Serverless-2016-10-31 + +Parameters: + AppName: + Type: String + Default: my-app + +Resources: + ApiFunction: + Type: AWS::Serverless::Function + Properties: + Handler: index.handler + Runtime: nodejs20.x + MemorySize: 512 + Timeout: 30 + Environment: + Variables: + TABLE_NAME: !Ref DataTable + Policies: + - DynamoDBCrudPolicy: + TableName: !Ref DataTable + Events: + ApiEvent: + Type: Api + Properties: + Path: /{proxy+} + Method: ANY + + DataTable: + Type: AWS::DynamoDB::Table + Properties: + BillingMode: PAY_PER_REQUEST + AttributeDefinitions: + - AttributeName: pk + AttributeType: S + - AttributeName: sk + AttributeType: S + KeySchema: + - AttributeName: pk + KeyType: HASH + - AttributeName: sk + KeyType: RANGE +``` + +> Full templates including API Gateway, Cognito, IAM roles, and CloudWatch logging are generated by `serverless_stack.py` and also available in `references/architecture_patterns.md`. 
+ +**Example CDK TypeScript snippet (three-tier pattern):** + +```typescript +import * as ecs from 'aws-cdk-lib/aws-ecs'; +import * as ec2 from 'aws-cdk-lib/aws-ec2'; +import * as rds from 'aws-cdk-lib/aws-rds'; + +const vpc = new ec2.Vpc(this, 'AppVpc', { maxAzs: 2 }); + +const cluster = new ecs.Cluster(this, 'AppCluster', { vpc }); + +const db = new rds.ServerlessCluster(this, 'AppDb', { + engine: rds.DatabaseClusterEngine.auroraPostgres({ + version: rds.AuroraPostgresEngineVersion.VER_15_2, + }), + vpc, + scaling: { minCapacity: 0.5, maxCapacity: 4 }, +}); ``` ### Step 4: Review Costs @@ -96,6 +148,20 @@ Analyze estimated costs and optimization opportunities: python scripts/cost_optimizer.py --resources current_setup.json --monthly-spend 2000 ``` +**Example output:** + +```json +{ + "current_monthly_usd": 2000, + "recommendations": [ + { "action": "Right-size RDS db.r5.2xlarge → db.r5.large", "savings_usd": 420, "priority": "high" }, + { "action": "Purchase 1-yr Compute Savings Plan at 40% utilization", "savings_usd": 310, "priority": "high" }, + { "action": "Move S3 objects >90 days to Glacier Instant Retrieval", "savings_usd": 85, "priority": "medium" } + ], + "total_potential_savings_usd": 815 +} +``` + Output includes: - Monthly cost breakdown by service - Right-sizing recommendations @@ -120,7 +186,7 @@ cdk deploy terraform init && terraform apply ``` -### Step 6: Validate +### Step 6: Validate and Handle Failures Verify deployment and set up monitoring: @@ -132,6 +198,30 @@ aws cloudformation describe-stacks --stack-name my-app-stack aws cloudwatch put-metric-alarm --alarm-name high-errors ... ``` +**If stack creation fails:** + +1. Check the failure reason: + ```bash + aws cloudformation describe-stack-events \ + --stack-name my-app-stack \ + --query 'StackEvents[?ResourceStatus==`CREATE_FAILED`]' + ``` +2. Review CloudWatch Logs for Lambda or ECS errors. +3. Fix the template or resource configuration. +4. 
Delete the failed stack before retrying: + ```bash + aws cloudformation delete-stack --stack-name my-app-stack + # Wait for deletion + aws cloudformation wait stack-delete-complete --stack-name my-app-stack + # Redeploy + aws cloudformation create-stack ... + ``` + +**Common failure causes:** +- IAM permission errors → verify `--capabilities CAPABILITY_IAM` and role trust policies +- Resource limit exceeded → request quota increase via Service Quotas console +- Invalid template syntax → run `aws cloudformation validate-template --template-body file://template.yaml` before deploying + --- ## Tools @@ -274,10 +364,7 @@ Provide these details for architecture design: - Pattern recommendation with rationale - Service stack diagram (ASCII) -- Configuration specifications -- Monthly cost estimate -- Scaling characteristics -- Trade-offs and limitations +- Monthly cost estimate and trade-offs ### IaC Templates @@ -287,10 +374,8 @@ Provide these details for architecture design: ### Cost Analysis -- Current spend breakdown -- Optimization recommendations with savings -- Priority action list (high/medium/low) -- Implementation checklist +- Current spend breakdown with optimization recommendations +- Priority action list (high/medium/low) and implementation checklist --- @@ -301,13 +386,3 @@ Provide these details for architecture design: | `references/architecture_patterns.md` | 6 patterns: serverless, microservices, three-tier, data processing, GraphQL, multi-region | | `references/service_selection.md` | Decision matrices for compute, database, storage, messaging | | `references/best_practices.md` | Serverless design, cost optimization, security hardening, scalability | - ---- - -## Limitations - -- Lambda: 15-minute execution, 10GB memory max -- API Gateway: 29-second timeout, 10MB payload -- DynamoDB: 400KB item size, eventually consistent by default -- Regional availability varies by service -- Some services have AWS-specific lock-in diff --git 
a/docs/skills/engineering-team/email-template-builder.md b/docs/skills/engineering-team/email-template-builder.md index 6618298..ed903af 100644 --- a/docs/skills/engineering-team/email-template-builder.md +++ b/docs/skills/engineering-team/email-template-builder.md @@ -10,6 +10,8 @@ description: "Email Template Builder - Claude Code skill from the Engineering - --- +# Email Template Builder + **Tier:** POWERFUL **Category:** Engineering Team **Domain:** Transactional Email / Communications Infrastructure @@ -167,7 +169,7 @@ import { Button, Heading, Text } from "@react-email/components" import { EmailLayout } from "../components/layout/email-layout" interface WelcomeEmailProps { - name: string + name: "string" confirmUrl: string trialDays?: number } @@ -226,7 +228,7 @@ import { EmailLayout } from "../components/layout/email-layout" interface InvoiceItem { description: string; amount: number } interface InvoiceEmailProps { - name: string + name: "string" invoiceNumber: string invoiceDate: string dueDate: string @@ -333,7 +335,7 @@ export async function sendEmail(to: string, payload: EmailPayload) { to, subject: template.subject, html: trackedHtml, - tags: [{ name: "email_type", value: payload.type }], + tags: [{ name: "email-type", value: payload.type }], }) return result @@ -366,8 +368,8 @@ export async function sendEmail(to: string, payload: EmailPayload) { // emails/i18n/en.ts export const en = { welcome: { - preview: (name: string) => `Welcome to MyApp, ${name}!`, - heading: (name: string) => `Welcome to MyApp, ${name}!`, + preview: (name: "string-welcome-to-myapp-name" + heading: (name: "string-welcome-to-myapp-name" body: (days: number) => `You've got ${days} days to explore everything.`, cta: "Confirm Email Address", }, @@ -376,8 +378,8 @@ export const en = { // emails/i18n/de.ts export const de = { welcome: { - preview: (name: string) => `Willkommen bei MyApp, ${name}!`, - heading: (name: string) => `Willkommen bei MyApp, ${name}!`, + preview: (name: 
"string-willkommen-bei-myapp-name" + heading: (name: "string-willkommen-bei-myapp-name" body: (days: number) => `Du hast ${days} Tage Zeit, alles zu erkunden.`, cta: "E-Mail-Adresse bestätigen", }, diff --git a/docs/skills/engineering-team/incident-commander.md b/docs/skills/engineering-team/incident-commander.md index 22ab952..0b0e5ac 100644 --- a/docs/skills/engineering-team/incident-commander.md +++ b/docs/skills/engineering-team/incident-commander.md @@ -10,6 +10,8 @@ description: "Incident Commander Skill - Claude Code skill from the Engineering --- +# Incident Commander Skill + **Category:** Engineering Team **Tier:** POWERFUL **Author:** Claude Skills Team @@ -374,204 +376,7 @@ Status page: {link} - **{Pitfall}:** {description and how to avoid} ## Reference Information -- **Architecture Diagram:** {link} -- **Monitoring Dashboard:** {link} -- **Related Runbooks:** {links to dependent service runbooks} -``` - -### Post-Incident Review (PIR) Framework - -#### PIR Timeline and Ownership - -**Timeline:** -- **24 hours:** Initial PIR draft completed by Incident Commander -- **3 business days:** Final PIR published with all stakeholder input -- **1 week:** Action items assigned with owners and due dates -- **4 weeks:** Follow-up review on action item progress - -**Roles:** -- **PIR Owner:** Incident Commander (can delegate writing but owns completion) -- **Technical Contributors:** All engineers involved in response -- **Review Committee:** Engineering leadership, affected product teams -- **Action Item Owners:** Assigned based on expertise and capacity - -#### Root Cause Analysis Frameworks - -#### 1. Five Whys Method - -The Five Whys technique involves asking "why" repeatedly to drill down to root causes: - -**Example Application:** -- **Problem:** Database became unresponsive during peak traffic -- **Why 1:** Why did the database become unresponsive? → Connection pool was exhausted -- **Why 2:** Why was the connection pool exhausted? 
→ Application was creating more connections than usual -- **Why 3:** Why was the application creating more connections? → New feature wasn't properly connection pooling -- **Why 4:** Why wasn't the feature properly connection pooling? → Code review missed this pattern -- **Why 5:** Why did code review miss this? → No automated checks for connection pooling patterns - -**Best Practices:** -- Ask "why" at least 3 times, often need 5+ iterations -- Focus on process failures, not individual blame -- Each "why" should point to a actionable system improvement -- Consider multiple root cause paths, not just one linear chain - -#### 2. Fishbone (Ishikawa) Diagram - -Systematic analysis across multiple categories of potential causes: - -**Categories:** -- **People:** Training, experience, communication, handoffs -- **Process:** Procedures, change management, review processes -- **Technology:** Architecture, tooling, monitoring, automation -- **Environment:** Infrastructure, dependencies, external factors - -**Application Method:** -1. State the problem clearly at the "head" of the fishbone -2. For each category, brainstorm potential contributing factors -3. For each factor, ask what caused that factor (sub-causes) -4. Identify the factors most likely to be root causes -5. Validate root causes with evidence from the incident - -#### 3. Timeline Analysis - -Reconstruct the incident chronologically to identify decision points and missed opportunities: - -**Timeline Elements:** -- **Detection:** When was the issue first observable? When was it first detected? -- **Notification:** How quickly were the right people informed? -- **Response:** What actions were taken and how effective were they? -- **Communication:** When were stakeholders updated? -- **Resolution:** What finally resolved the issue? - -**Analysis Questions:** -- Where were there delays and what caused them? -- What decisions would we make differently with perfect information? -- Where did communication break down? 
-- What automation could have detected/resolved faster? - -### Escalation Paths - -#### Technical Escalation - -**Level 1:** On-call engineer -- **Responsibility:** Initial response and common issue resolution -- **Escalation Trigger:** Issue not resolved within SLA timeframe -- **Timeframe:** 15 minutes (SEV1), 30 minutes (SEV2) - -**Level 2:** Senior engineer/Team lead -- **Responsibility:** Complex technical issues requiring deeper expertise -- **Escalation Trigger:** Level 1 requests help or timeout occurs -- **Timeframe:** 30 minutes (SEV1), 1 hour (SEV2) - -**Level 3:** Engineering Manager/Staff Engineer -- **Responsibility:** Cross-team coordination and architectural decisions -- **Escalation Trigger:** Issue spans multiple systems or teams -- **Timeframe:** 45 minutes (SEV1), 2 hours (SEV2) - -**Level 4:** Director of Engineering/CTO -- **Responsibility:** Resource allocation and business impact decisions -- **Escalation Trigger:** Extended outage or significant business impact -- **Timeframe:** 1 hour (SEV1), 4 hours (SEV2) - -#### Business Escalation - -**Customer Impact Assessment:** -- **High:** Revenue loss, SLA breaches, customer churn risk -- **Medium:** User experience degradation, support ticket volume -- **Low:** Internal tools, development impact only - -**Escalation Matrix:** - -| Severity | Duration | Business Escalation | -|----------|----------|-------------------| -| SEV1 | Immediate | VP Engineering | -| SEV1 | 30 minutes | CTO + Customer Success VP | -| SEV1 | 1 hour | CEO + Full Executive Team | -| SEV2 | 2 hours | VP Engineering | -| SEV2 | 4 hours | CTO | -| SEV3 | 1 business day | Engineering Manager | - -### Status Page Management - -#### Update Principles - -1. **Transparency:** Provide factual information without speculation -2. **Timeliness:** Update within committed timeframes -3. **Clarity:** Use customer-friendly language, avoid technical jargon -4. 
**Completeness:** Include impact scope, status, and next update time - -#### Status Categories - -- **Operational:** All systems functioning normally -- **Degraded Performance:** Some users may experience slowness -- **Partial Outage:** Subset of features unavailable -- **Major Outage:** Service unavailable for most/all users -- **Under Maintenance:** Planned maintenance window - -#### Update Template - -``` -{Timestamp} - {Status Category} - -{Brief description of current state} - -Impact: {who is affected and how} -Cause: {root cause if known, "under investigation" if not} -Resolution: {what's being done to fix it} - -Next update: {specific time} - -We apologize for any inconvenience this may cause. -``` - -### Action Item Framework - -#### Action Item Categories - -1. **Immediate Fixes** - - Critical bugs discovered during incident - - Security vulnerabilities exposed - - Data integrity issues - -2. **Process Improvements** - - Communication gaps - - Escalation procedure updates - - Runbook additions/updates - -3. **Technical Debt** - - Architecture improvements - - Monitoring enhancements - - Automation opportunities - -4. **Organizational Changes** - - Team structure adjustments - - Training requirements - - Tool/platform investments - -#### Action Item Template - -``` -**Title:** {Concise description of the action} -**Priority:** {Critical/High/Medium/Low} -**Category:** {Fix/Process/Technical/Organizational} -**Owner:** {Assigned person} -**Due Date:** {Specific date} -**Success Criteria:** {How will we know this is complete} -**Dependencies:** {What needs to happen first} -**Related PIRs:** {Links to other incidents this addresses} - -**Description:** -{Detailed description of what needs to be done and why} - -**Implementation Plan:** -1. {Step 1} -2. {Step 2} -3. 
{Validation step} - -**Progress Updates:** -- {Date}: {Progress update} -- {Date}: {Progress update} -``` +→ See references/reference-information.md for details ## Usage Examples @@ -675,4 +480,4 @@ The Incident Commander skill provides a comprehensive framework for managing inc The key to successful incident management is preparation, practice, and continuous learning. Use this framework as a starting point, but adapt it to your organization's specific needs, culture, and technical environment. -Remember: The goal isn't to prevent all incidents (which is impossible), but to detect them quickly, respond effectively, communicate clearly, and learn continuously. \ No newline at end of file +Remember: The goal isn't to prevent all incidents (which is impossible), but to detect them quickly, respond effectively, communicate clearly, and learn continuously. diff --git a/docs/skills/engineering-team/ms365-tenant-manager.md b/docs/skills/engineering-team/ms365-tenant-manager.md index 46ea28a..d43b934 100644 --- a/docs/skills/engineering-team/ms365-tenant-manager.md +++ b/docs/skills/engineering-team/ms365-tenant-manager.md @@ -16,136 +16,38 @@ Expert guidance and automation for Microsoft 365 Global Administrators managing --- -## Table of Contents - -- [Trigger Phrases](#trigger-phrases) -- [Quick Start](#quick-start) -- [Tools](#tools) -- [Workflows](#workflows) -- [Best Practices](#best-practices) -- [Reference Guides](#reference-guides) -- [Limitations](#limitations) - ---- - -## Trigger Phrases - -Use this skill when you hear: -- "set up Microsoft 365 tenant" -- "create Office 365 users" -- "configure Azure AD" -- "generate PowerShell script for M365" -- "set up Conditional Access" -- "bulk user provisioning" -- "M365 security audit" -- "license management" -- "Exchange Online configuration" -- "Teams administration" - ---- - ## Quick Start -### Generate Security Audit Script +### Run a Security Audit -```bash -python scripts/powershell_generator.py --action audit 
--output audit_script.ps1
+```powershell
+Connect-MgGraph -Scopes "Directory.Read.All","Policy.Read.All","AuditLog.Read.All"
+Get-MgSubscribedSku | Select-Object SkuPartNumber, ConsumedUnits, @{N="Total";E={$_.PrepaidUnits.Enabled}}
+Get-MgPolicyAuthorizationPolicy | Select-Object AllowInvitesFrom, DefaultUserRolePermissions
 ```
 
-### Create Bulk User Provisioning Script
+### Bulk Provision Users from CSV
 
-```bash
-python scripts/user_management.py --action provision --csv users.csv --license E3
+```powershell
+# CSV columns: DisplayName, UserPrincipalName, Department, LicenseSku
+Import-Csv .\new_users.csv | ForEach-Object {
+    $passwordProfile = @{ Password = (New-Guid).ToString().Substring(0,16) + "!"; ForceChangePasswordNextSignIn = $true }
+    New-MgUser -DisplayName $_.DisplayName -UserPrincipalName $_.UserPrincipalName -MailNickname ($_.UserPrincipalName.Split("@")[0]) `
+        -Department $_.Department -AccountEnabled -PasswordProfile $passwordProfile
+}
 ```
 
-### Configure Conditional Access Policy
+### Create a Conditional Access Policy (MFA for Admins)
 
-```bash
-python scripts/powershell_generator.py --action conditional-access --require-mfa --include-admins
-```
-
----
-
-## Tools
-
-### powershell_generator.py
-
-Generates ready-to-use PowerShell scripts for Microsoft 365 administration. 
- -**Usage:** - -```bash -# Generate security audit script -python scripts/powershell_generator.py --action audit - -# Generate Conditional Access policy script -python scripts/powershell_generator.py --action conditional-access \ - --policy-name "Require MFA for Admins" \ - --require-mfa \ - --include-users "All" - -# Generate bulk license assignment script -python scripts/powershell_generator.py --action license \ - --csv users.csv \ - --sku "ENTERPRISEPACK" -``` - -**Parameters:** - -| Parameter | Required | Description | -|-----------|----------|-------------| -| `--action` | Yes | Script type: `audit`, `conditional-access`, `license`, `users` | -| `--policy-name` | No | Name for Conditional Access policy | -| `--require-mfa` | No | Require MFA in policy | -| `--include-users` | No | Users to include: `All` or specific UPNs | -| `--csv` | No | CSV file path for bulk operations | -| `--sku` | No | License SKU for assignment | -| `--output` | No | Output file path (default: stdout) | - -**Output:** Complete PowerShell scripts with error handling, logging, and best practices. - -### user_management.py - -Automates user lifecycle operations and bulk provisioning. 
- -**Usage:** - -```bash -# Provision users from CSV -python scripts/user_management.py --action provision --csv new_users.csv - -# Offboard user securely -python scripts/user_management.py --action offboard --user john.doe@company.com - -# Generate inactive users report -python scripts/user_management.py --action report-inactive --days 90 -``` - -**Parameters:** - -| Parameter | Required | Description | -|-----------|----------|-------------| -| `--action` | Yes | Operation: `provision`, `offboard`, `report-inactive`, `sync` | -| `--csv` | No | CSV file for bulk operations | -| `--user` | No | Single user UPN | -| `--days` | No | Days for inactivity threshold (default: 90) | -| `--license` | No | License SKU to assign | - -### tenant_setup.py - -Initial tenant configuration and service provisioning automation. - -**Usage:** - -```bash -# Generate tenant setup checklist -python scripts/tenant_setup.py --action checklist --company "Acme Inc" --users 50 - -# Generate DNS records configuration -python scripts/tenant_setup.py --action dns --domain acme.com - -# Generate security baseline script -python scripts/tenant_setup.py --action security-baseline +```powershell +$adminRoles = (Get-MgDirectoryRole | Where-Object { $_.DisplayName -match "Admin" }).Id +$policy = @{ + DisplayName = "Require MFA for Admins" + State = "enabledForReportingButNotEnforced" # Start in report-only mode + Conditions = @{ Users = @{ IncludeRoles = $adminRoles } } + GrantControls = @{ Operator = "OR"; BuiltInControls = @("mfa") } +} +New-MgIdentityConditionalAccessPolicy -BodyParameter $policy ``` --- @@ -156,69 +58,150 @@ python scripts/tenant_setup.py --action security-baseline **Step 1: Generate Setup Checklist** -```bash -python scripts/tenant_setup.py --action checklist --company "Company Name" --users 100 +Confirm prerequisites before provisioning: +- Global Admin account created and secured with MFA +- Custom domain purchased and accessible for DNS edits +- License SKUs confirmed (E3 vs 
E5 feature requirements noted)
+
+**Step 2: Configure and Verify DNS Records**
+
+```powershell
+# After adding the domain in the M365 admin center, verify propagation before proceeding
+$domain = "company.com"
+Resolve-DnsName -Name $domain -Type MX -ErrorAction SilentlyContinue
+# Also run from a shell prompt:
+# nslookup -type=MX company.com
+# nslookup -type=TXT company.com # confirm SPF record
 ```
 
-**Step 2: Configure DNS Records**
-
-```bash
-python scripts/tenant_setup.py --action dns --domain company.com
-```
+Wait for DNS propagation (up to 48 h) before bulk user creation.
 
 **Step 3: Apply Security Baseline**
 
-```bash
-python scripts/powershell_generator.py --action audit > initial_audit.ps1
+```powershell
+# Disable legacy authentication (blocks Basic Auth protocols)
+$policy = @{
+    DisplayName = "Block Legacy Authentication"
+    State = "enabled"
+    Conditions = @{ Users = @{ IncludeUsers = @("All") }; Applications = @{ IncludeApplications = @("All") }; ClientAppTypes = @("exchangeActiveSync","other") }
+    GrantControls = @{ Operator = "OR"; BuiltInControls = @("block") }
+}
+New-MgIdentityConditionalAccessPolicy -BodyParameter $policy
+
+# Enable unified audit log
+Set-AdminAuditLogConfig -UnifiedAuditLogIngestionEnabled $true
 ```
 
 **Step 4: Provision Users**
 
-```bash
-python scripts/user_management.py --action provision --csv employees.csv --license E3
+```powershell
+$licenseSku = (Get-MgSubscribedSku | Where-Object { $_.SkuPartNumber -eq "ENTERPRISEPACK" }).SkuId
+
+Import-Csv .\employees.csv | ForEach-Object {
+    try {
+        $user = New-MgUser -DisplayName $_.DisplayName -UserPrincipalName $_.UserPrincipalName -MailNickname ($_.UserPrincipalName.Split("@")[0]) -UsageLocation "US" `
+            -AccountEnabled -PasswordProfile @{ Password = (New-Guid).ToString().Substring(0,12)+"!"; ForceChangePasswordNextSignIn = $true }
+        Set-MgUserLicense -UserId $user.Id -AddLicenses @(@{ SkuId = $licenseSku }) -RemoveLicenses @()
+        Write-Host "Provisioned: $($_.UserPrincipalName)"
+    } catch {
+        Write-Warning "Failed $($_.UserPrincipalName): $_"
+    }
+}
 ```
 
+**Validation:** Spot-check 3–5 accounts in the M365 admin portal; confirm 
licenses show "Active." + +--- + ### Workflow 2: Security Hardening **Step 1: Run Security Audit** -```bash -python scripts/powershell_generator.py --action audit --output security_audit.ps1 +```powershell +Connect-MgGraph -Scopes "Directory.Read.All","Policy.Read.All","AuditLog.Read.All","Reports.Read.All" + +# Export Conditional Access policy inventory +Get-MgIdentityConditionalAccessPolicy | Select-Object DisplayName, State | + Export-Csv .\ca_policies.csv -NoTypeInformation + +# Find accounts without MFA registered +$report = Get-MgReportAuthenticationMethodUserRegistrationDetail +$report | Where-Object { -not $_.IsMfaRegistered } | + Select-Object UserPrincipalName, IsMfaRegistered | + Export-Csv .\no_mfa_users.csv -NoTypeInformation + +Write-Host "Audit complete. Review ca_policies.csv and no_mfa_users.csv." ``` -**Step 2: Create MFA Policy** +**Step 2: Create MFA Policy (report-only first)** -```bash -python scripts/powershell_generator.py --action conditional-access \ - --policy-name "Require MFA All Users" \ - --require-mfa \ - --include-users "All" +```powershell +$policy = @{ + DisplayName = "Require MFA All Users" + State = "enabledForReportingButNotEnforced" + Conditions = @{ Users = @{ IncludeUsers = @("All") } } + GrantControls = @{ Operator = "OR"; BuiltInControls = @("mfa") } +} +New-MgIdentityConditionalAccessPolicy -BodyParameter $policy ``` -**Step 3: Review Results** +**Validation:** After 48 h, review Sign-in logs in Entra ID; confirm expected users would be challenged, then change `State` to `"enabled"`. -Execute generated scripts and review CSV reports in output directory. 
+**Step 3: Review Secure Score** + +```powershell +# Retrieve current Secure Score and top improvement actions +Get-MgSecuritySecureScore -Top 1 | Select-Object CurrentScore, MaxScore, ActiveUserCount +Get-MgSecuritySecureScoreControlProfile | Sort-Object -Property ActionType | + Select-Object Title, ImplementationStatus, MaxScore | Format-Table -AutoSize +``` + +--- ### Workflow 3: User Offboarding -**Step 1: Generate Offboarding Script** - -```bash -python scripts/user_management.py --action offboard --user departing.user@company.com -``` - -**Step 2: Execute Script with -WhatIf** +**Step 1: Block Sign-in and Revoke Sessions** ```powershell -.\offboard_user.ps1 -WhatIf +$upn = "departing.user@company.com" +$user = Get-MgUser -Filter "userPrincipalName eq '$upn'" + +# Block sign-in immediately +Update-MgUser -UserId $user.Id -AccountEnabled:$false + +# Revoke all active tokens +Invoke-MgInvalidateAllUserRefreshToken -UserId $user.Id +Write-Host "Sign-in blocked and sessions revoked for $upn" ``` -**Step 3: Execute for Real** +**Step 2: Preview with -WhatIf (license removal)** ```powershell -.\offboard_user.ps1 -Confirm:$false +# Identify assigned licenses +$licenses = (Get-MgUserLicenseDetail -UserId $user.Id).SkuId + +# Dry-run: print what would be removed +$licenses | ForEach-Object { Write-Host "[WhatIf] Would remove SKU: $_" } ``` +**Step 3: Execute Offboarding** + +```powershell +# Remove licenses +Set-MgUserLicense -UserId $user.Id -AddLicenses @() -RemoveLicenses $licenses + +# Convert mailbox to shared (requires ExchangeOnlineManagement module) +Set-Mailbox -Identity $upn -Type Shared + +# Remove from all groups +Get-MgUserMemberOf -UserId $user.Id | ForEach-Object { + try { Remove-MgGroupMemberByRef -GroupId $_.Id -DirectoryObjectId $user.Id } catch {} +} +Write-Host "Offboarding complete for $upn" +``` + +**Validation:** Confirm in the M365 admin portal that the account shows "Blocked," has no active licenses, and the mailbox type is "Shared." 
+ --- ## Best Practices @@ -228,47 +211,42 @@ python scripts/user_management.py --action offboard --user departing.user@compan 1. Enable MFA before adding users 2. Configure named locations for Conditional Access 3. Use separate admin accounts with PIM -4. Verify custom domains before bulk user creation +4. Verify custom domains (and DNS propagation) before bulk user creation 5. Apply Microsoft Secure Score recommendations ### Security Operations 1. Start Conditional Access policies in report-only mode -2. Use `-WhatIf` parameter before executing scripts -3. Never hardcode credentials in scripts -4. Enable audit logging for all operations -5. Regular quarterly security reviews +2. Review Sign-in logs for 48 h before enforcing a new policy +3. Never hardcode credentials in scripts — use Azure Key Vault or `Get-Credential` +4. Enable unified audit logging for all operations +5. Conduct quarterly security reviews and Secure Score check-ins ### PowerShell Automation -1. Prefer Microsoft Graph over legacy MSOnline modules -2. Include try/catch blocks for error handling -3. Implement logging for audit trails -4. Use Azure Key Vault for credential management -5. Test in non-production tenant first +1. Prefer Microsoft Graph (`Microsoft.Graph` module) over legacy MSOnline +2. Include `try/catch` blocks for error handling +3. Implement `Write-Host`/`Write-Warning` logging for audit trails +4. Use `-WhatIf` or dry-run output before bulk destructive operations +5. 
Test in a non-production tenant first --- ## Reference Guides -### When to Use Each Reference - **references/powershell-templates.md** - - Ready-to-use script templates - Conditional Access policy examples - Bulk user provisioning scripts - Security audit scripts **references/security-policies.md** - - Conditional Access configuration - MFA enforcement strategies - DLP and retention policies - Security baseline settings **references/troubleshooting.md** - - Common error resolutions - PowerShell module issues - Permission troubleshooting @@ -296,7 +274,7 @@ Install-Module MicrosoftTeams -Scope CurrentUser ### Required Permissions -- **Global Administrator** - Full tenant setup -- **User Administrator** - User management -- **Security Administrator** - Security policies -- **Exchange Administrator** - Mailbox management +- **Global Administrator** — Full tenant setup +- **User Administrator** — User management +- **Security Administrator** — Security policies +- **Exchange Administrator** — Mailbox management diff --git a/docs/skills/engineering-team/playwright-pro-browserstack.md b/docs/skills/engineering-team/playwright-pro-browserstack.md index 890d274..ec03dcf 100644 --- a/docs/skills/engineering-team/playwright-pro-browserstack.md +++ b/docs/skills/engineering-team/playwright-pro-browserstack.md @@ -44,7 +44,7 @@ export default defineConfig({ // ... existing config projects: isBS ? 
[ { - name: 'chrome@latest:Windows 11', + name: "chromelatestwindows-11", use: { connectOptions: { wsEndpoint: `wss://cdp.browserstack.com/playwright?caps=${encodeURIComponent(JSON.stringify({ @@ -59,7 +59,7 @@ export default defineConfig({ }, }, { - name: 'firefox@latest:Windows 11', + name: "firefoxlatestwindows-11", use: { connectOptions: { wsEndpoint: `wss://cdp.browserstack.com/playwright?caps=${encodeURIComponent(JSON.stringify({ @@ -74,7 +74,7 @@ export default defineConfig({ }, }, { - name: 'webkit@latest:OS X Ventura', + name: "webkitlatestos-x-ventura", use: { connectOptions: { wsEndpoint: `wss://cdp.browserstack.com/playwright?caps=${encodeURIComponent(JSON.stringify({ diff --git a/docs/skills/engineering-team/playwright-pro-fix.md b/docs/skills/engineering-team/playwright-pro-fix.md index 8139f98..bd17a6a 100644 --- a/docs/skills/engineering-team/playwright-pro-fix.md +++ b/docs/skills/engineering-team/playwright-pro-fix.md @@ -18,7 +18,7 @@ Diagnose and fix a Playwright test that fails or passes intermittently using a s `$ARGUMENTS` contains: - A test file path: `e2e/login.spec.ts` -- A test name: `"should redirect after login"` +- A test name: ""should redirect after login"` - A description: `"the checkout test fails in CI but passes locally"` ## Steps diff --git a/docs/skills/engineering-team/playwright-pro-init.md b/docs/skills/engineering-team/playwright-pro-init.md index acb6ee6..75a9e7f 100644 --- a/docs/skills/engineering-team/playwright-pro-init.md +++ b/docs/skills/engineering-team/playwright-pro-init.md @@ -65,9 +65,9 @@ export default defineConfig({ screenshot: 'only-on-failure', }, projects: [ - { name: 'chromium', use: { ...devices['Desktop Chrome'] } }, - { name: 'firefox', use: { ...devices['Desktop Firefox'] } }, - { name: 'webkit', use: { ...devices['Desktop Safari'] } }, + { name: "chromium", use: { ...devices['Desktop Chrome'] } }, + { name: "firefox", use: { ...devices['Desktop Firefox'] } }, + { name: "webkit", use: { 
...devices['Desktop Safari'] } }, ], webServer: { command: 'npm run dev', @@ -129,7 +129,7 @@ test.describe('Homepage', () => { If `.github/workflows/` exists, create `playwright.yml`: ```yaml -name: Playwright Tests +name: "playwright-tests" on: push: @@ -146,16 +146,16 @@ jobs: - uses: actions/setup-node@v4 with: node-version: lts/* - - name: Install dependencies + - name: "install-dependencies" run: npm ci - - name: Install Playwright Browsers + - name: "install-playwright-browsers" run: npx playwright install --with-deps - - name: Run Playwright tests + - name: "run-playwright-tests" run: npx playwright test - uses: actions/upload-artifact@v4 if: ${{ !cancelled() }} with: - name: playwright-report + name: "playwright-report" path: playwright-report/ retention-days: 30 ``` diff --git a/docs/skills/engineering-team/playwright-pro-review.md b/docs/skills/engineering-team/playwright-pro-review.md index 56ca5d8..10ee28d 100644 --- a/docs/skills/engineering-team/playwright-pro-review.md +++ b/docs/skills/engineering-team/playwright-pro-review.md @@ -76,7 +76,7 @@ For each file: ### Critical - Line 15: `waitForTimeout(2000)` → use `expect(locator).toBeVisible()` -- Line 28: CSS selector `.btn-submit` → `getByRole('button', { name: 'Submit' })` +- Line 28: CSS selector `.btn-submit` → `getByRole('button', { name: "submit" })` ### Warning - Line 42: Test name "test login" → "should redirect to dashboard after login" diff --git a/docs/skills/engineering-team/playwright-pro.md b/docs/skills/engineering-team/playwright-pro.md index 257a418..d2eebbb 100644 --- a/docs/skills/engineering-team/playwright-pro.md +++ b/docs/skills/engineering-team/playwright-pro.md @@ -30,6 +30,45 @@ When installed as a Claude Code plugin, these are available as `/pw:` commands: | `/pw:browserstack` | Run on BrowserStack, pull cross-browser reports | | `/pw:report` | Generate test report in your preferred format | +## Quick Start Workflow + +The recommended sequence for most projects: + +``` +1. 
/pw:init → scaffolds config, CI pipeline, and a first smoke test +2. /pw:generate → generates tests from your spec or URL +3. /pw:review → validates quality and flags anti-patterns ← always run after generate +4. /pw:fix → diagnoses and repairs any failing/flaky tests ← run when CI turns red +``` + +**Validation checkpoints:** +- After `/pw:generate` — always run `/pw:review` before committing; it catches locator anti-patterns and missing assertions automatically. +- After `/pw:fix` — re-run the full suite locally (`npx playwright test`) to confirm the fix doesn't introduce regressions. +- After `/pw:migrate` — run `/pw:coverage` to confirm parity with the old suite before decommissioning Cypress/Selenium tests. + +### Example: Generate → Review → Fix + +```bash +# 1. Generate tests from a user story +/pw:generate "As a user I can log in with email and password" + +# Generated: tests/auth/login.spec.ts +# → Playwright Pro creates the file using the auth template. + +# 2. Review the generated tests +/pw:review tests/auth/login.spec.ts + +# → Flags: one test used page.locator('input[type=password]') — suggests getByLabel('Password') +# → Fix applied automatically. + +# 3. Run locally to confirm +npx playwright test tests/auth/login.spec.ts --headed + +# 4. If a test is flaky in CI, diagnose it +/pw:fix tests/auth/login.spec.ts +# → Identifies missing web-first assertion; replaces waitForTimeout(2000) with expect(locator).toBeVisible() +``` + ## Golden Rules 1. `getByRole()` over CSS/XPath — resilient to markup changes diff --git a/docs/skills/engineering-team/self-improving-agent-extract.md b/docs/skills/engineering-team/self-improving-agent-extract.md index df95e1b..8c842e8 100644 --- a/docs/skills/engineering-team/self-improving-agent-extract.md +++ b/docs/skills/engineering-team/self-improving-agent-extract.md @@ -81,7 +81,7 @@ The generated SKILL.md must follow this format: ```markdown --- -name: +name: "skill-name" description: ". Use when: ." 
--- diff --git a/docs/skills/engineering-team/senior-backend.md b/docs/skills/engineering-team/senior-backend.md index 6599d78..6cd62cc 100644 --- a/docs/skills/engineering-team/senior-backend.md +++ b/docs/skills/engineering-team/senior-backend.md @@ -14,20 +14,6 @@ description: "Senior Backend Engineer - Claude Code skill from the Engineering - Backend development patterns, API design, database optimization, and security practices. -## Table of Contents - -- [Quick Start](#quick-start) -- [Tools Overview](#tools-overview) - - [API Scaffolder](#1-api-scaffolder) - - [Database Migration Tool](#2-database-migration-tool) - - [API Load Tester](#3-api-load-tester) -- [Backend Development Workflows](#backend-development-workflows) - - [API Design Workflow](#api-design-workflow) - - [Database Optimization Workflow](#database-optimization-workflow) - - [Security Hardening Workflow](#security-hardening-workflow) -- [Reference Documentation](#reference-documentation) -- [Common Patterns Quick Reference](#common-patterns-quick-reference) - --- ## Quick Start @@ -58,17 +44,7 @@ Generates API route handlers, middleware, and OpenAPI specifications from schema ```bash # Generate Express routes from OpenAPI spec python scripts/api_scaffolder.py openapi.yaml --framework express --output src/routes/ - -# Output: -# Generated 12 route handlers in src/routes/ -# - GET /users (listUsers) -# - POST /users (createUser) -# - GET /users/{id} (getUser) -# - PUT /users/{id} (updateUser) -# - DELETE /users/{id} (deleteUser) -# ... 
-# Created validation middleware: src/middleware/validators.ts -# Created TypeScript types: src/types/api.ts +# Output: Generated 12 route handlers, validation middleware, and TypeScript types # Generate from database schema python scripts/api_scaffolder.py --from-db postgres://localhost/mydb --output src/routes/ @@ -95,32 +71,12 @@ Analyzes database schemas, detects changes, and generates migration files with r ```bash # Analyze current schema and suggest optimizations python scripts/database_migration_tool.py --connection postgres://localhost/mydb --analyze - -# Output: -# === Database Analysis Report === -# Tables: 24 -# Total rows: 1,247,832 -# -# MISSING INDEXES (5 found): -# orders.user_id - 847ms avg query time, ADD INDEX recommended -# products.category_id - 234ms avg query time, ADD INDEX recommended -# -# N+1 QUERY RISKS (3 found): -# users -> orders relationship (no eager loading) -# -# SUGGESTED MIGRATIONS: -# 1. Add index on orders(user_id) -# 2. Add index on products(category_id) -# 3. 
Add composite index on order_items(order_id, product_id) +# Output: Missing indexes, N+1 query risks, and suggested migration files # Generate migration from schema diff python scripts/database_migration_tool.py --connection postgres://localhost/mydb \ --compare schema/v2.sql --output migrations/ -# Output: -# Generated migration: migrations/20240115_add_user_indexes.sql -# Generated rollback: migrations/20240115_add_user_indexes_rollback.sql - # Dry-run a migration python scripts/database_migration_tool.py --connection postgres://localhost/mydb \ --migrate migrations/20240115_add_user_indexes.sql --dry-run @@ -139,32 +95,7 @@ Performs HTTP load testing with configurable concurrency, measuring latency perc ```bash # Basic load test python scripts/api_load_tester.py https://api.example.com/users --concurrency 50 --duration 30 - -# Output: -# === Load Test Results === -# Target: https://api.example.com/users -# Duration: 30s | Concurrency: 50 -# -# THROUGHPUT: -# Total requests: 15,247 -# Requests/sec: 508.2 -# Successful: 15,102 (99.0%) -# Failed: 145 (1.0%) -# -# LATENCY (ms): -# Min: 12 -# Avg: 89 -# P50: 67 -# P95: 198 -# P99: 423 -# Max: 1,247 -# -# ERRORS: -# Connection timeout: 89 -# HTTP 503: 56 -# -# RECOMMENDATION: P99 latency (423ms) exceeds 200ms target. -# Consider: connection pooling, query optimization, or horizontal scaling. 
+# Output: Throughput (req/sec), latency percentiles (P50/P95/P99), error counts, and scaling recommendations # Test with custom headers and body python scripts/api_load_tester.py https://api.example.com/orders \ @@ -199,7 +130,7 @@ paths: get: summary: List users parameters: - - name: limit + - name: "limit" in: query schema: type: integer @@ -326,7 +257,7 @@ import { z } from 'zod'; const CreateUserSchema = z.object({ email: z.string().email().max(255), - name: z.string().min(1).max(100), + name: z.string().min(1).max(100), age: z.number().int().positive().optional() }); diff --git a/docs/skills/engineering-team/senior-computer-vision.md b/docs/skills/engineering-team/senior-computer-vision.md index af0b1d9..1c9e457 100644 --- a/docs/skills/engineering-team/senior-computer-vision.md +++ b/docs/skills/engineering-team/senior-computer-vision.md @@ -426,99 +426,7 @@ python scripts/dataset_pipeline_builder.py data/final/ \ | Positional encoding | Implicit | Explicit | ## Reference Documentation - -### 1. Computer Vision Architectures - -See `references/computer_vision_architectures.md` for: - -- CNN backbone architectures (ResNet, EfficientNet, ConvNeXt) - -Vision Transformer variants (ViT, DeiT, Swin) - -Detection heads (anchor-based vs anchor-free) - -Feature Pyramid Networks (FPN, BiFPN, PANet) - -Neck architectures for multi-scale detection - -### 2. Object Detection Optimization - -See `references/object_detection_optimization.md` for: - -- Non-Maximum Suppression variants (NMS, Soft-NMS, DIoU-NMS) - -Anchor optimization and anchor-free alternatives - -Loss function design (focal loss, GIoU, CIoU, DIoU) - -Training strategies (warmup, cosine annealing, EMA) - -Data augmentation for detection (mosaic, mixup, copy-paste) - -### 3. 
Production Vision Systems - -See `references/production_vision_systems.md` for: - -- ONNX export and optimization -- TensorRT deployment pipeline -- Batch inference optimization -- Edge device deployment (Jetson, Intel NCS) -- Model serving with Triton -- Video processing pipelines - -## Common Commands - -### Ultralytics YOLO - -```bash -# Training -yolo detect train data=coco.yaml model=yolov8m.pt epochs=100 imgsz=640 - -# Validation -yolo detect val model=best.pt data=coco.yaml - -# Inference -yolo detect predict model=best.pt source=images/ save=True - -# Export -yolo export model=best.pt format=onnx simplify=True dynamic=True -``` - -### Detectron2 - -```bash -# Training -python train_net.py --config-file configs/COCO-Detection/faster_rcnn_R_50_FPN_3x.yaml \ - --num-gpus 1 OUTPUT_DIR ./output - -# Evaluation -python train_net.py --config-file configs/faster_rcnn.yaml --eval-only \ - MODEL.WEIGHTS output/model_final.pth - -# Inference -python demo.py --config-file configs/faster_rcnn.yaml \ - --input images/*.jpg --output results/ \ - --opts MODEL.WEIGHTS output/model_final.pth -``` - -### MMDetection - -```bash -# Training -python tools/train.py configs/faster_rcnn/faster-rcnn_r50_fpn_1x_coco.py - -# Testing -python tools/test.py configs/faster_rcnn.py checkpoints/latest.pth --eval bbox - -# Inference -python demo/image_demo.py demo.jpg configs/faster_rcnn.py checkpoints/latest.pth -``` - -### Model Optimization - -```bash -# ONNX export and simplify -python -c "import torch; model = torch.load('model.pt'); torch.onnx.export(model, torch.randn(1,3,640,640), 'model.onnx', opset_version=17)" -python -m onnxsim model.onnx model_sim.onnx - -# TensorRT conversion -trtexec --onnx=model.onnx --saveEngine=model.engine --fp16 --workspace=4096 - -# Benchmark -trtexec --loadEngine=model.engine --batch=1 --iterations=1000 --avgRuns=100 -``` +→ See references/reference-docs-and-commands.md for details ## Performance Targets diff --git 
a/docs/skills/engineering-team/senior-data-engineer.md b/docs/skills/engineering-team/senior-data-engineer.md index df24fd4..6eac996 100644 --- a/docs/skills/engineering-team/senior-data-engineer.md +++ b/docs/skills/engineering-team/senior-data-engineer.md @@ -93,627 +93,7 @@ python scripts/etl_performance_optimizer.py analyze \ --- ## Workflows - -### Workflow 1: Building a Batch ETL Pipeline - -**Scenario:** Extract data from PostgreSQL, transform with dbt, load to Snowflake. - -#### Step 1: Define Source Schema - -```sql --- Document source tables -SELECT - table_name, - column_name, - data_type, - is_nullable -FROM information_schema.columns -WHERE table_schema = 'source_schema' -ORDER BY table_name, ordinal_position; -``` - -#### Step 2: Generate Extraction Config - -```bash -python scripts/pipeline_orchestrator.py generate \ - --type airflow \ - --source postgres \ - --tables orders,customers,products \ - --mode incremental \ - --watermark updated_at \ - --output dags/extract_source.py -``` - -#### Step 3: Create dbt Models - -```sql --- models/staging/stg_orders.sql -WITH source AS ( - SELECT * FROM {{ source('postgres', 'orders') }} -), - -renamed AS ( - SELECT - order_id, - customer_id, - order_date, - total_amount, - status, - _extracted_at - FROM source - WHERE order_date >= DATEADD(day, -3, CURRENT_DATE) -) - -SELECT * FROM renamed -``` - -```sql --- models/marts/fct_orders.sql -{{ - config( - materialized='incremental', - unique_key='order_id', - cluster_by=['order_date'] - ) -}} - -SELECT - o.order_id, - o.customer_id, - c.customer_segment, - o.order_date, - o.total_amount, - o.status -FROM {{ ref('stg_orders') }} o -LEFT JOIN {{ ref('dim_customers') }} c - ON o.customer_id = c.customer_id - -{% if is_incremental() %} -WHERE o._extracted_at > (SELECT MAX(_extracted_at) FROM {{ this }}) -{% endif %} -``` - -#### Step 4: Configure Data Quality Tests - -```yaml -# models/marts/schema.yml -version: 2 - -models: - - name: fct_orders - description: "Order 
fact table" - columns: - - name: order_id - tests: - - unique - - not_null - - name: total_amount - tests: - - not_null - - dbt_utils.accepted_range: - min_value: 0 - max_value: 1000000 - - name: order_date - tests: - - not_null - - dbt_utils.recency: - datepart: day - field: order_date - interval: 1 -``` - -#### Step 5: Create Airflow DAG - -```python -# dags/daily_etl.py -from airflow import DAG -from airflow.providers.postgres.operators.postgres import PostgresOperator -from airflow.operators.bash import BashOperator -from airflow.utils.dates import days_ago -from datetime import timedelta - -default_args = { - 'owner': 'data-team', - 'depends_on_past': False, - 'email_on_failure': True, - 'email': ['data-alerts@company.com'], - 'retries': 2, - 'retry_delay': timedelta(minutes=5), -} - -with DAG( - 'daily_etl_pipeline', - default_args=default_args, - description='Daily ETL from PostgreSQL to Snowflake', - schedule_interval='0 5 * * *', - start_date=days_ago(1), - catchup=False, - tags=['etl', 'daily'], -) as dag: - - extract = BashOperator( - task_id='extract_source_data', - bash_command='python /opt/airflow/scripts/extract.py --date {{ ds }}', - ) - - transform = BashOperator( - task_id='run_dbt_models', - bash_command='cd /opt/airflow/dbt && dbt run --select marts.*', - ) - - test = BashOperator( - task_id='run_dbt_tests', - bash_command='cd /opt/airflow/dbt && dbt test --select marts.*', - ) - - notify = BashOperator( - task_id='send_notification', - bash_command='python /opt/airflow/scripts/notify.py --status success', - trigger_rule='all_success', - ) - - extract >> transform >> test >> notify -``` - -#### Step 6: Validate Pipeline - -```bash -# Test locally -dbt run --select stg_orders fct_orders -dbt test --select fct_orders - -# Validate data quality -python scripts/data_quality_validator.py validate \ - --table fct_orders \ - --checks all \ - --output reports/quality_report.json -``` - ---- - -### Workflow 2: Implementing Real-Time Streaming - 
-**Scenario:** Stream events from Kafka, process with Flink/Spark Streaming, sink to data lake. - -#### Step 1: Define Event Schema - -```json -{ - "$schema": "http://json-schema.org/draft-07/schema#", - "title": "UserEvent", - "type": "object", - "required": ["event_id", "user_id", "event_type", "timestamp"], - "properties": { - "event_id": {"type": "string", "format": "uuid"}, - "user_id": {"type": "string"}, - "event_type": {"type": "string", "enum": ["page_view", "click", "purchase"]}, - "timestamp": {"type": "string", "format": "date-time"}, - "properties": {"type": "object"} - } -} -``` - -#### Step 2: Create Kafka Topic - -```bash -# Create topic with appropriate partitions -kafka-topics.sh --create \ - --bootstrap-server localhost:9092 \ - --topic user-events \ - --partitions 12 \ - --replication-factor 3 \ - --config retention.ms=604800000 \ - --config cleanup.policy=delete - -# Verify topic -kafka-topics.sh --describe \ - --bootstrap-server localhost:9092 \ - --topic user-events -``` - -#### Step 3: Implement Spark Streaming Job - -```python -# streaming/user_events_processor.py -from pyspark.sql import SparkSession -from pyspark.sql.functions import ( - from_json, col, window, count, avg, - to_timestamp, current_timestamp -) -from pyspark.sql.types import ( - StructType, StructField, StringType, - TimestampType, MapType -) - -# Initialize Spark -spark = SparkSession.builder \ - .appName("UserEventsProcessor") \ - .config("spark.sql.streaming.checkpointLocation", "/checkpoints/user-events") \ - .config("spark.sql.shuffle.partitions", "12") \ - .getOrCreate() - -# Define schema -event_schema = StructType([ - StructField("event_id", StringType(), False), - StructField("user_id", StringType(), False), - StructField("event_type", StringType(), False), - StructField("timestamp", StringType(), False), - StructField("properties", MapType(StringType(), StringType()), True) -]) - -# Read from Kafka -events_df = spark.readStream \ - .format("kafka") \ - 
.option("kafka.bootstrap.servers", "localhost:9092") \ - .option("subscribe", "user-events") \ - .option("startingOffsets", "latest") \ - .option("failOnDataLoss", "false") \ - .load() - -# Parse JSON -parsed_df = events_df \ - .select(from_json(col("value").cast("string"), event_schema).alias("data")) \ - .select("data.*") \ - .withColumn("event_timestamp", to_timestamp(col("timestamp"))) - -# Windowed aggregation -aggregated_df = parsed_df \ - .withWatermark("event_timestamp", "10 minutes") \ - .groupBy( - window(col("event_timestamp"), "5 minutes"), - col("event_type") - ) \ - .agg( - count("*").alias("event_count"), - approx_count_distinct("user_id").alias("unique_users") - ) - -# Write to Delta Lake -query = aggregated_df.writeStream \ - .format("delta") \ - .outputMode("append") \ - .option("checkpointLocation", "/checkpoints/user-events-aggregated") \ - .option("path", "/data/lake/user_events_aggregated") \ - .trigger(processingTime="1 minute") \ - .start() - -query.awaitTermination() -``` - -#### Step 4: Handle Late Data and Errors - -```python -# Dead letter queue for failed records -from pyspark.sql.functions import current_timestamp, lit - -def process_with_error_handling(batch_df, batch_id): - try: - # Attempt processing - valid_df = batch_df.filter(col("event_id").isNotNull()) - invalid_df = batch_df.filter(col("event_id").isNull()) - - # Write valid records - valid_df.write \ - .format("delta") \ - .mode("append") \ - .save("/data/lake/user_events") - - # Write invalid to DLQ - if invalid_df.count() > 0: - invalid_df \ - .withColumn("error_timestamp", current_timestamp()) \ - .withColumn("error_reason", lit("missing_event_id")) \ - .write \ - .format("delta") \ - .mode("append") \ - .save("/data/lake/dlq/user_events") - - except Exception as e: - # Log error, alert, continue - logger.error(f"Batch {batch_id} failed: {e}") - raise - -# Use foreachBatch for custom processing -query = parsed_df.writeStream \ - .foreachBatch(process_with_error_handling) \ 
- .option("checkpointLocation", "/checkpoints/user-events") \ - .start() -``` - -#### Step 5: Monitor Stream Health - -```python -# monitoring/stream_metrics.py -from prometheus_client import Gauge, Counter, start_http_server - -# Define metrics -RECORDS_PROCESSED = Counter( - 'stream_records_processed_total', - 'Total records processed', - ['stream_name', 'status'] -) - -PROCESSING_LAG = Gauge( - 'stream_processing_lag_seconds', - 'Current processing lag', - ['stream_name'] -) - -BATCH_DURATION = Gauge( - 'stream_batch_duration_seconds', - 'Last batch processing duration', - ['stream_name'] -) - -def emit_metrics(query): - """Emit Prometheus metrics from streaming query.""" - progress = query.lastProgress - if progress: - RECORDS_PROCESSED.labels( - stream_name='user-events', - status='success' - ).inc(progress['numInputRows']) - - if progress['sources']: - # Calculate lag from latest offset - for source in progress['sources']: - end_offset = source.get('endOffset', {}) - # Parse Kafka offsets and calculate lag -``` - ---- - -### Workflow 3: Data Quality Framework Setup - -**Scenario:** Implement comprehensive data quality monitoring with Great Expectations. 
- -#### Step 1: Initialize Great Expectations - -```bash -# Install and initialize -pip install great_expectations - -great_expectations init - -# Connect to data source -great_expectations datasource new -``` - -#### Step 2: Create Expectation Suite - -```python -# expectations/orders_suite.py -import great_expectations as gx - -context = gx.get_context() - -# Create expectation suite -suite = context.add_expectation_suite("orders_quality_suite") - -# Add expectations -validator = context.get_validator( - batch_request={ - "datasource_name": "warehouse", - "data_asset_name": "orders", - }, - expectation_suite_name="orders_quality_suite" -) - -# Schema expectations -validator.expect_table_columns_to_match_ordered_list( - column_list=[ - "order_id", "customer_id", "order_date", - "total_amount", "status", "created_at" - ] -) - -# Completeness expectations -validator.expect_column_values_to_not_be_null("order_id") -validator.expect_column_values_to_not_be_null("customer_id") -validator.expect_column_values_to_not_be_null("order_date") - -# Uniqueness expectations -validator.expect_column_values_to_be_unique("order_id") - -# Range expectations -validator.expect_column_values_to_be_between( - "total_amount", - min_value=0, - max_value=1000000 -) - -# Categorical expectations -validator.expect_column_values_to_be_in_set( - "status", - ["pending", "confirmed", "shipped", "delivered", "cancelled"] -) - -# Freshness expectation -validator.expect_column_max_to_be_between( - "order_date", - min_value={"$PARAMETER": "now - timedelta(days=1)"}, - max_value={"$PARAMETER": "now"} -) - -# Referential integrity -validator.expect_column_values_to_be_in_set( - "customer_id", - value_set={"$PARAMETER": "valid_customer_ids"} -) - -validator.save_expectation_suite(discard_failed_expectations=False) -``` - -#### Step 3: Create Data Quality Checks with dbt - -```yaml -# models/marts/schema.yml -version: 2 - -models: - - name: fct_orders - description: "Order fact table with data quality 
checks" - - tests: - # Row count check - - dbt_utils.equal_rowcount: - compare_model: ref('stg_orders') - - # Freshness check - - dbt_utils.recency: - datepart: hour - field: created_at - interval: 24 - - columns: - - name: order_id - description: "Unique order identifier" - tests: - - unique - - not_null - - relationships: - to: ref('dim_orders') - field: order_id - - - name: total_amount - tests: - - not_null - - dbt_utils.accepted_range: - min_value: 0 - max_value: 1000000 - inclusive: true - - dbt_expectations.expect_column_values_to_be_between: - min_value: 0 - row_condition: "status != 'cancelled'" - - - name: customer_id - tests: - - not_null - - relationships: - to: ref('dim_customers') - field: customer_id - severity: warn -``` - -#### Step 4: Implement Data Contracts - -```yaml -# contracts/orders_contract.yaml -contract: - name: orders_data_contract - version: "1.0.0" - owner: data-team@company.com - -schema: - type: object - properties: - order_id: - type: string - format: uuid - description: "Unique order identifier" - customer_id: - type: string - not_null: true - order_date: - type: date - not_null: true - total_amount: - type: decimal - precision: 10 - scale: 2 - minimum: 0 - status: - type: string - enum: ["pending", "confirmed", "shipped", "delivered", "cancelled"] - -sla: - freshness: - max_delay_hours: 1 - completeness: - min_percentage: 99.9 - accuracy: - duplicate_tolerance: 0.01 - -consumers: - - name: analytics-team - usage: "Daily reporting dashboards" - - name: ml-team - usage: "Churn prediction model" -``` - -#### Step 5: Set Up Quality Monitoring Dashboard - -```python -# monitoring/quality_dashboard.py -from datetime import datetime, timedelta -import pandas as pd - -def generate_quality_report(connection, table_name: str) -> dict: - """Generate comprehensive data quality report.""" - - report = { - "table": table_name, - "timestamp": datetime.now().isoformat(), - "checks": {} - } - - # Row count check - row_count = connection.execute( 
- f"SELECT COUNT(*) FROM {table_name}" - ).fetchone()[0] - report["checks"]["row_count"] = { - "value": row_count, - "status": "pass" if row_count > 0 else "fail" - } - - # Freshness check - max_date = connection.execute( - f"SELECT MAX(created_at) FROM {table_name}" - ).fetchone()[0] - hours_old = (datetime.now() - max_date).total_seconds() / 3600 - report["checks"]["freshness"] = { - "max_timestamp": max_date.isoformat(), - "hours_old": round(hours_old, 2), - "status": "pass" if hours_old < 24 else "fail" - } - - # Null rate check - null_query = f""" - SELECT - SUM(CASE WHEN order_id IS NULL THEN 1 ELSE 0 END) as null_order_id, - SUM(CASE WHEN customer_id IS NULL THEN 1 ELSE 0 END) as null_customer_id, - COUNT(*) as total - FROM {table_name} - """ - null_result = connection.execute(null_query).fetchone() - report["checks"]["null_rates"] = { - "order_id": null_result[0] / null_result[2] if null_result[2] > 0 else 0, - "customer_id": null_result[1] / null_result[2] if null_result[2] > 0 else 0, - "status": "pass" if null_result[0] == 0 and null_result[1] == 0 else "fail" - } - - # Duplicate check - dup_query = f""" - SELECT COUNT(*) - COUNT(DISTINCT order_id) as duplicates - FROM {table_name} - """ - duplicates = connection.execute(dup_query).fetchone()[0] - report["checks"]["duplicates"] = { - "count": duplicates, - "status": "pass" if duplicates == 0 else "fail" - } - - # Overall status - all_passed = all( - check["status"] == "pass" - for check in report["checks"].values() - ) - report["overall_status"] = "pass" if all_passed else "fail" - - return report -``` - ---- +→ See references/workflows.md for details ## Architecture Decision Framework @@ -817,183 +197,5 @@ See `references/dataops_best_practices.md` for: --- ## Troubleshooting +→ See references/troubleshooting.md for details -### Pipeline Failures - -**Symptom:** Airflow DAG fails with timeout -``` -Task exceeded max execution time -``` - -**Solution:** -1. Check resource allocation -2. 
Profile slow operations -3. Add incremental processing -```python -# Increase timeout -default_args = { - 'execution_timeout': timedelta(hours=2), -} - -# Or use incremental loads -WHERE updated_at > '{{ prev_ds }}' -``` - ---- - -**Symptom:** Spark job OOM -``` -java.lang.OutOfMemoryError: Java heap space -``` - -**Solution:** -1. Increase executor memory -2. Reduce partition size -3. Use disk spill -```python -spark.conf.set("spark.executor.memory", "8g") -spark.conf.set("spark.sql.shuffle.partitions", "200") -spark.conf.set("spark.memory.fraction", "0.8") -``` - ---- - -**Symptom:** Kafka consumer lag increasing -``` -Consumer lag: 1000000 messages -``` - -**Solution:** -1. Increase consumer parallelism -2. Optimize processing logic -3. Scale consumer group -```bash -# Add more partitions -kafka-topics.sh --alter \ - --bootstrap-server localhost:9092 \ - --topic user-events \ - --partitions 24 -``` - ---- - -### Data Quality Issues - -**Symptom:** Duplicate records appearing -``` -Expected unique, found 150 duplicates -``` - -**Solution:** -1. Add deduplication logic -2. Use merge/upsert operations -```sql --- dbt incremental with dedup -{{ - config( - materialized='incremental', - unique_key='order_id' - ) -}} - -SELECT * FROM ( - SELECT - *, - ROW_NUMBER() OVER ( - PARTITION BY order_id - ORDER BY updated_at DESC - ) as rn - FROM {{ source('raw', 'orders') }} -) WHERE rn = 1 -``` - ---- - -**Symptom:** Stale data in tables -``` -Last update: 3 days ago -``` - -**Solution:** -1. Check upstream pipeline status -2. Verify source availability -3. Add freshness monitoring -```yaml -# dbt freshness check -sources: - - name: raw - freshness: - warn_after: {count: 12, period: hour} - error_after: {count: 24, period: hour} - loaded_at_field: _loaded_at -``` - ---- - -**Symptom:** Schema drift detected -``` -Column 'new_field' not in expected schema -``` - -**Solution:** -1. Update data contract -2. Modify transformations -3. 
Communicate with producers -```python -# Handle schema evolution -df = spark.read.format("delta") \ - .option("mergeSchema", "true") \ - .load("/data/orders") -``` - ---- - -### Performance Issues - -**Symptom:** Query takes hours -``` -Query runtime: 4 hours (expected: 30 minutes) -``` - -**Solution:** -1. Check query plan -2. Add proper partitioning -3. Optimize joins -```sql --- Before: Full table scan -SELECT * FROM orders WHERE order_date = '2024-01-15'; - --- After: Partition pruning --- Table partitioned by order_date -SELECT * FROM orders WHERE order_date = '2024-01-15'; - --- Add clustering for frequent filters -ALTER TABLE orders CLUSTER BY (customer_id); -``` - ---- - -**Symptom:** dbt model takes too long -``` -Model fct_orders completed in 45 minutes -``` - -**Solution:** -1. Use incremental materialization -2. Reduce upstream dependencies -3. Pre-aggregate where possible -```sql --- Convert to incremental -{{ - config( - materialized='incremental', - unique_key='order_id', - on_schema_change='sync_all_columns' - ) -}} - -SELECT * FROM {{ ref('stg_orders') }} -{% if is_incremental() %} -WHERE _loaded_at > (SELECT MAX(_loaded_at) FROM {{ this }}) -{% endif %} -``` diff --git a/docs/skills/engineering-team/senior-data-scientist.md b/docs/skills/engineering-team/senior-data-scientist.md index 9344016..4854dff 100644 --- a/docs/skills/engineering-team/senior-data-scientist.md +++ b/docs/skills/engineering-team/senior-data-scientist.md @@ -14,170 +14,208 @@ description: "Senior Data Scientist - Claude Code skill from the Engineering - C World-class senior data scientist skill for production-grade AI/ML/Data systems. -## Quick Start +## Core Workflows -### Main Capabilities +### 1. 
Design an A/B Test -```bash -# Core Tool 1 -python scripts/experiment_designer.py --input data/ --output results/ +```python +import numpy as np +from scipy import stats -# Core Tool 2 -python scripts/feature_engineering_pipeline.py --target project/ --analyze +def calculate_sample_size(baseline_rate, mde, alpha=0.05, power=0.8): + """ + Calculate required sample size per variant. + baseline_rate: current conversion rate (e.g. 0.10) + mde: minimum detectable effect (relative, e.g. 0.05 = 5% lift) + """ + p1 = baseline_rate + p2 = baseline_rate * (1 + mde) + effect_size = abs(p2 - p1) / np.sqrt((p1 * (1 - p1) + p2 * (1 - p2)) / 2) + z_alpha = stats.norm.ppf(1 - alpha / 2) + z_beta = stats.norm.ppf(power) + n = ((z_alpha + z_beta) / effect_size) ** 2 + return int(np.ceil(n)) -# Core Tool 3 -python scripts/model_evaluation_suite.py --config config.yaml --deploy +def analyze_experiment(control, treatment, alpha=0.05): + """ + Run two-proportion z-test and return structured results. + control/treatment: dicts with 'conversions' and 'visitors'. + """ + p_c = control["conversions"] / control["visitors"] + p_t = treatment["conversions"] / treatment["visitors"] + pooled = (control["conversions"] + treatment["conversions"]) / (control["visitors"] + treatment["visitors"]) + se = np.sqrt(pooled * (1 - pooled) * (1 / control["visitors"] + 1 / treatment["visitors"])) + z = (p_t - p_c) / se + p_value = 2 * (1 - stats.norm.cdf(abs(z))) + ci_low = (p_t - p_c) - stats.norm.ppf(1 - alpha / 2) * se + ci_high = (p_t - p_c) + stats.norm.ppf(1 - alpha / 2) * se + return { + "lift": (p_t - p_c) / p_c, + "p_value": p_value, + "significant": p_value < alpha, + "ci_95": (ci_low, ci_high), + } + +# --- Experiment checklist --- +# 1. Define ONE primary metric and pre-register secondary metrics. +# 2. Calculate sample size BEFORE starting: calculate_sample_size(0.10, 0.05) +# 3. Randomise at the user (not session) level to avoid leakage. +# 4. 
Run for at least 1 full business cycle (typically 2 weeks). +# 5. Check for sample ratio mismatch: abs(n_control - n_treatment) / expected < 0.01 +# 6. Analyze with analyze_experiment() and report lift + CI, not just p-value. +# 7. Apply Bonferroni correction if testing multiple metrics: alpha / n_metrics ``` -## Core Expertise +### 2. Build a Feature Engineering Pipeline -This skill covers world-class capabilities in: +```python +import pandas as pd +import numpy as np +from sklearn.pipeline import Pipeline +from sklearn.preprocessing import StandardScaler, OneHotEncoder +from sklearn.impute import SimpleImputer +from sklearn.compose import ColumnTransformer -- Advanced production patterns and architectures -- Scalable system design and implementation -- Performance optimization at scale -- MLOps and DataOps best practices -- Real-time processing and inference -- Distributed computing frameworks -- Model deployment and monitoring -- Security and compliance -- Cost optimization -- Team leadership and mentoring +def build_feature_pipeline(numeric_cols, categorical_cols, date_cols=None): + """ + Returns a fitted-ready ColumnTransformer for structured tabular data. 
+ """ + numeric_pipeline = Pipeline([ + ("impute", SimpleImputer(strategy="median")), + ("scale", StandardScaler()), + ]) + categorical_pipeline = Pipeline([ + ("impute", SimpleImputer(strategy="most_frequent")), + ("encode", OneHotEncoder(handle_unknown="ignore", sparse_output=False)), + ]) + transformers = [ + ("num", numeric_pipeline, numeric_cols), + ("cat", categorical_pipeline, categorical_cols), + ] + return ColumnTransformer(transformers, remainder="drop") -## Tech Stack +def add_time_features(df, date_col): + """Extract cyclical and lag features from a datetime column.""" + df = df.copy() + df[date_col] = pd.to_datetime(df[date_col]) + df["dow_sin"] = np.sin(2 * np.pi * df[date_col].dt.dayofweek / 7) + df["dow_cos"] = np.cos(2 * np.pi * df[date_col].dt.dayofweek / 7) + df["month_sin"] = np.sin(2 * np.pi * df[date_col].dt.month / 12) + df["month_cos"] = np.cos(2 * np.pi * df[date_col].dt.month / 12) + df["is_weekend"] = (df[date_col].dt.dayofweek >= 5).astype(int) + return df -**Languages:** Python, SQL, R, Scala, Go -**ML Frameworks:** PyTorch, TensorFlow, Scikit-learn, XGBoost -**Data Tools:** Spark, Airflow, dbt, Kafka, Databricks -**LLM Frameworks:** LangChain, LlamaIndex, DSPy -**Deployment:** Docker, Kubernetes, AWS/GCP/Azure -**Monitoring:** MLflow, Weights & Biases, Prometheus -**Databases:** PostgreSQL, BigQuery, Snowflake, Pinecone +# --- Feature engineering checklist --- +# 1. Never fit transformers on the full dataset — fit on train, transform test. +# 2. Log-transform right-skewed numeric features before scaling. +# 3. For high-cardinality categoricals (>50 levels), use target encoding or embeddings. +# 4. Generate lag/rolling features BEFORE the train/test split to avoid leakage. +# 5. Document each feature's business meaning alongside its code. +``` + +### 3. 
Train, Evaluate, and Select a Prediction Model + +```python +from sklearn.model_selection import StratifiedKFold, cross_validate +from sklearn.metrics import make_scorer, roc_auc_score, average_precision_score +import xgboost as xgb +import mlflow + +SCORERS = { + "roc_auc": make_scorer(roc_auc_score, needs_proba=True), + "avg_prec": make_scorer(average_precision_score, needs_proba=True), +} + +def evaluate_model(model, X, y, cv=5): + """ + Cross-validate and return mean ± std for each scorer. + Use StratifiedKFold for classification to preserve class balance. + """ + cv_results = cross_validate( + model, X, y, + cv=StratifiedKFold(n_splits=cv, shuffle=True, random_state=42), + scoring=SCORERS, + return_train_score=True, + ) + summary = {} + for metric in SCORERS: + test_scores = cv_results[f"test_{metric}"] + summary[metric] = {"mean": test_scores.mean(), "std": test_scores.std()} + # Flag overfitting: large gap between train and test score + train_mean = cv_results[f"train_{metric}"].mean() + summary[metric]["overfit_gap"] = train_mean - test_scores.mean() + return summary + +def train_and_log(model, X_train, y_train, X_test, y_test, run_name): + """Train model and log all artefacts to MLflow.""" + with mlflow.start_run(run_name=run_name): + model.fit(X_train, y_train) + proba = model.predict_proba(X_test)[:, 1] + metrics = { + "roc_auc": roc_auc_score(y_test, proba), + "avg_prec": average_precision_score(y_test, proba), + } + mlflow.log_params(model.get_params()) + mlflow.log_metrics(metrics) + mlflow.sklearn.log_model(model, "model") + return metrics + +# --- Model evaluation checklist --- +# 1. Always report AUC-PR alongside AUC-ROC for imbalanced datasets. +# 2. Check overfit_gap > 0.05 as a warning sign of overfitting. +# 3. Calibrate probabilities (Platt scaling / isotonic) before production use. +# 4. Compute SHAP values to validate feature importance makes business sense. +# 5. Run a baseline (e.g. DummyClassifier) and verify the model beats it. +# 6. 
Log every run to MLflow — never rely on notebook output for comparison. +``` + +### 4. Causal Inference: Difference-in-Differences + +```python +import statsmodels.formula.api as smf + +def diff_in_diff(df, outcome, treatment_col, post_col, controls=None): + """ + Estimate ATT via OLS DiD with optional covariates. + df must have: outcome, treatment_col (0/1), post_col (0/1). + Returns the interaction coefficient (treatment × post) and its p-value. + """ + covariates = " + ".join(controls) if controls else "" + formula = ( + f"{outcome} ~ {treatment_col} * {post_col}" + + (f" + {covariates}" if covariates else "") + ) + result = smf.ols(formula, data=df).fit(cov_type="HC3") + interaction = f"{treatment_col}:{post_col}" + return { + "att": result.params[interaction], + "p_value": result.pvalues[interaction], + "ci_95": result.conf_int().loc[interaction].tolist(), + "summary": result.summary(), + } + +# --- Causal inference checklist --- +# 1. Validate parallel trends in pre-period before trusting DiD estimates. +# 2. Use HC3 robust standard errors to handle heteroskedasticity. +# 3. For panel data, cluster SEs at the unit level (add groups= param to fit). +# 4. Consider propensity score matching if groups differ at baseline. +# 5. Report the ATT with confidence interval, not just statistical significance. +``` ## Reference Documentation -### 1. Statistical Methods Advanced - -Comprehensive guide available in `references/statistical_methods_advanced.md` covering: - -- Advanced patterns and best practices -- Production implementation strategies -- Performance optimization techniques -- Scalability considerations -- Security and compliance -- Real-world case studies - -### 2. Experiment Design Frameworks - -Complete workflow documentation in `references/experiment_design_frameworks.md` including: - -- Step-by-step processes -- Architecture design patterns -- Tool integration guides -- Performance tuning strategies -- Troubleshooting procedures - -### 3. 
Feature Engineering Patterns - -Technical reference guide in `references/feature_engineering_patterns.md` with: - -- System design principles -- Implementation examples -- Configuration best practices -- Deployment strategies -- Monitoring and observability - -## Production Patterns - -### Pattern 1: Scalable Data Processing - -Enterprise-scale data processing with distributed computing: - -- Horizontal scaling architecture -- Fault-tolerant design -- Real-time and batch processing -- Data quality validation -- Performance monitoring - -### Pattern 2: ML Model Deployment - -Production ML system with high availability: - -- Model serving with low latency -- A/B testing infrastructure -- Feature store integration -- Model monitoring and drift detection -- Automated retraining pipelines - -### Pattern 3: Real-Time Inference - -High-throughput inference system: - -- Batching and caching strategies -- Load balancing -- Auto-scaling -- Latency optimization -- Cost optimization - -## Best Practices - -### Development - -- Test-driven development -- Code reviews and pair programming -- Documentation as code -- Version control everything -- Continuous integration - -### Production - -- Monitor everything critical -- Automate deployments -- Feature flags for releases -- Canary deployments -- Comprehensive logging - -### Team Leadership - -- Mentor junior engineers -- Drive technical decisions -- Establish coding standards -- Foster learning culture -- Cross-functional collaboration - -## Performance Targets - -**Latency:** -- P50: < 50ms -- P95: < 100ms -- P99: < 200ms - -**Throughput:** -- Requests/second: > 1000 -- Concurrent users: > 10,000 - -**Availability:** -- Uptime: 99.9% -- Error rate: < 0.1% - -## Security & Compliance - -- Authentication & authorization -- Data encryption (at rest & in transit) -- PII handling and anonymization -- GDPR/CCPA compliance -- Regular security audits -- Vulnerability management +- **Statistical Methods:** 
`references/statistical_methods_advanced.md` +- **Experiment Design Frameworks:** `references/experiment_design_frameworks.md` +- **Feature Engineering Patterns:** `references/feature_engineering_patterns.md` ## Common Commands ```bash -# Development -python -m pytest tests/ -v --cov -python -m black src/ -python -m pylint src/ +# Testing & linting +python -m pytest tests/ -v --cov=src/ +python -m black src/ && python -m pylint src/ -# Training +# Training & evaluation python scripts/train.py --config prod.yaml python scripts/evaluate.py --model best.pth @@ -186,48 +224,7 @@ docker build -t service:v1 . kubectl apply -f k8s/ helm upgrade service ./charts/ -# Monitoring +# Monitoring & health kubectl logs -f deployment/service python scripts/health_check.py ``` - -## Resources - -- Advanced Patterns: `references/statistical_methods_advanced.md` -- Implementation Guide: `references/experiment_design_frameworks.md` -- Technical Reference: `references/feature_engineering_patterns.md` -- Automation Scripts: `scripts/` directory - -## Senior-Level Responsibilities - -As a world-class senior professional: - -1. **Technical Leadership** - - Drive architectural decisions - - Mentor team members - - Establish best practices - - Ensure code quality - -2. **Strategic Thinking** - - Align with business goals - - Evaluate trade-offs - - Plan for scale - - Manage technical debt - -3. **Collaboration** - - Work across teams - - Communicate effectively - - Build consensus - - Share knowledge - -4. **Innovation** - - Stay current with research - - Experiment with new approaches - - Contribute to community - - Drive continuous improvement - -5. 
**Production Excellence** - - Ensure high availability - - Monitor proactively - - Optimize performance - - Respond to incidents diff --git a/docs/skills/engineering-team/senior-devops.md b/docs/skills/engineering-team/senior-devops.md index f4f878f..7477d37 100644 --- a/docs/skills/engineering-team/senior-devops.md +++ b/docs/skills/engineering-team/senior-devops.md @@ -21,196 +21,262 @@ Complete toolkit for senior devops with modern tools and best practices. This skill provides three core capabilities through automated scripts: ```bash -# Script 1: Pipeline Generator -python scripts/pipeline_generator.py [options] +# Script 1: Pipeline Generator — scaffolds CI/CD pipelines for GitHub Actions or CircleCI +python scripts/pipeline_generator.py ./app --platform=github --stages=build,test,deploy -# Script 2: Terraform Scaffolder -python scripts/terraform_scaffolder.py [options] +# Script 2: Terraform Scaffolder — generates and validates IaC modules for AWS/GCP/Azure +python scripts/terraform_scaffolder.py ./infra --provider=aws --module=ecs-service --verbose -# Script 3: Deployment Manager -python scripts/deployment_manager.py [options] +# Script 3: Deployment Manager — orchestrates container deployments with rollback support +python scripts/deployment_manager.py deploy --env=production --image=app:1.2.3 --strategy=blue-green ``` ## Core Capabilities ### 1. Pipeline Generator -Automated tool for pipeline generator tasks. +Scaffolds CI/CD pipeline configurations for GitHub Actions or CircleCI, with stages for build, test, security scan, and deploy. 
-**Features:** -- Automated scaffolding -- Best practices built-in -- Configurable templates -- Quality checks +**Example — GitHub Actions workflow:** +```yaml +# .github/workflows/ci.yml +name: CI/CD Pipeline +on: + push: + branches: [main, develop] + pull_request: + branches: [main] + +jobs: + build-and-test: + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v4 + - name: Set up Node.js + uses: actions/setup-node@v4 + with: + node-version: '20' + cache: 'npm' + - run: npm ci + - run: npm run lint + - run: npm test -- --coverage + - name: Upload coverage + uses: codecov/codecov-action@v4 + + build-docker: + needs: build-and-test + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v4 + - name: Build and push image + uses: docker/build-push-action@v5 + with: + push: ${{ github.ref == 'refs/heads/main' }} + tags: ghcr.io/${{ github.repository }}:${{ github.sha }} + + deploy: + needs: build-docker + if: github.ref == 'refs/heads/main' + runs-on: ubuntu-latest + steps: + - name: Deploy to ECS + run: | + aws ecs update-service \ + --cluster production \ + --service app-service \ + --force-new-deployment +``` **Usage:** ```bash -python scripts/pipeline_generator.py [options] +python scripts/pipeline_generator.py --platform=github|circleci --stages=build,test,deploy ``` ### 2. Terraform Scaffolder -Comprehensive analysis and optimization tool. +Generates, validates, and plans Terraform modules. Enforces consistent module structure and runs `terraform validate` + `terraform plan` before any apply. 
-**Features:** -- Deep analysis -- Performance metrics -- Recommendations -- Automated fixes +**Example — AWS ECS service module:** +```hcl +# modules/ecs-service/main.tf +resource "aws_ecs_task_definition" "app" { + family = var.service_name + requires_compatibilities = ["FARGATE"] + network_mode = "awsvpc" + cpu = var.cpu + memory = var.memory + + container_definitions = jsonencode([{ + name = var.service_name + image = var.container_image + essential = true + portMappings = [{ + containerPort = var.container_port + protocol = "tcp" + }] + environment = [for k, v in var.env_vars : { name = k, value = v }] + logConfiguration = { + logDriver = "awslogs" + options = { + awslogs-group = "/ecs/${var.service_name}" + awslogs-region = var.aws_region + awslogs-stream-prefix = "ecs" + } + } + }]) +} + +resource "aws_ecs_service" "app" { + name = var.service_name + cluster = var.cluster_id + task_definition = aws_ecs_task_definition.app.arn + desired_count = var.desired_count + launch_type = "FARGATE" + + network_configuration { + subnets = var.private_subnet_ids + security_groups = [aws_security_group.app.id] + assign_public_ip = false + } + + load_balancer { + target_group_arn = aws_lb_target_group.app.arn + container_name = var.service_name + container_port = var.container_port + } +} +``` **Usage:** ```bash -python scripts/terraform_scaffolder.py [--verbose] +python scripts/terraform_scaffolder.py --provider=aws|gcp|azure --module=ecs-service|gke-deployment|aks-service [--verbose] ``` ### 3. Deployment Manager -Advanced tooling for specialized tasks. +Orchestrates deployments with blue/green or rolling strategies, health-check gates, and automatic rollback on failure. 
-**Features:** -- Expert-level automation -- Custom configurations -- Integration ready -- Production-grade output +**Example — Kubernetes blue/green deployment (blue-slot specific elements):** +```yaml +# k8s/deployment-blue.yaml +apiVersion: apps/v1 +kind: Deployment +metadata: + name: app-blue + labels: + app: myapp + slot: blue # slot label distinguishes blue from green +spec: + replicas: 3 + selector: + matchLabels: + app: myapp + slot: blue + template: + metadata: + labels: + app: myapp + slot: blue + spec: + containers: + - name: app + image: ghcr.io/org/app:1.2.3 + readinessProbe: # gate: pod must pass before traffic switches + httpGet: + path: /healthz + port: 8080 + initialDelaySeconds: 10 + periodSeconds: 5 + resources: + requests: + cpu: "250m" + memory: "256Mi" + limits: + cpu: "500m" + memory: "512Mi" +``` **Usage:** ```bash -python scripts/deployment_manager.py [arguments] [options] +python scripts/deployment_manager.py deploy \ + --env=staging|production \ + --image=app:1.2.3 \ + --strategy=blue-green|rolling \ + --health-check-url=https://app.example.com/healthz + +python scripts/deployment_manager.py rollback --env=production --to-version=1.2.2 +python scripts/deployment_manager.py --analyze --env=production # audit current state ``` -## Reference Documentation +## Resources -### Cicd Pipeline Guide - -Comprehensive guide available in `references/cicd_pipeline_guide.md`: - -- Detailed patterns and practices -- Code examples -- Best practices -- Anti-patterns to avoid -- Real-world scenarios - -### Infrastructure As Code - -Complete workflow documentation in `references/infrastructure_as_code.md`: - -- Step-by-step processes -- Optimization strategies -- Tool integrations -- Performance tuning -- Troubleshooting guide - -### Deployment Strategies - -Technical reference guide in `references/deployment_strategies.md`: - -- Technology stack details -- Configuration examples -- Integration patterns -- Security considerations -- Scalability guidelines - 
-## Tech Stack - -**Languages:** TypeScript, JavaScript, Python, Go, Swift, Kotlin -**Frontend:** React, Next.js, React Native, Flutter -**Backend:** Node.js, Express, GraphQL, REST APIs -**Database:** PostgreSQL, Prisma, NeonDB, Supabase -**DevOps:** Docker, Kubernetes, Terraform, GitHub Actions, CircleCI -**Cloud:** AWS, GCP, Azure +- Pattern Reference: `references/cicd_pipeline_guide.md` — detailed CI/CD patterns, best practices, anti-patterns +- Workflow Guide: `references/infrastructure_as_code.md` — IaC step-by-step processes, optimization, troubleshooting +- Technical Guide: `references/deployment_strategies.md` — deployment strategy configs, security considerations, scalability +- Tool Scripts: `scripts/` directory ## Development Workflow -### 1. Setup and Configuration +### 1. Infrastructure Changes (Terraform) ```bash -# Install dependencies -npm install -# or -pip install -r requirements.txt +# Scaffold or update module +python scripts/terraform_scaffolder.py ./infra --provider=aws --module=ecs-service --verbose -# Configure environment -cp .env.example .env +# Validate and plan — review diff before applying +terraform -chdir=infra init +terraform -chdir=infra validate +terraform -chdir=infra plan -out=tfplan + +# Apply only after plan review +terraform -chdir=infra apply tfplan + +# Verify resources are healthy +aws ecs describe-services --cluster production --services app-service \ + --query 'services[0].{Status:status,Running:runningCount,Desired:desiredCount}' ``` -### 2. Run Quality Checks +### 2. Application Deployment ```bash -# Use the analyzer script -python scripts/terraform_scaffolder.py . +# Generate or update pipeline config +python scripts/pipeline_generator.py . --platform=github --stages=build,test,security,deploy -# Review recommendations -# Apply fixes +# Build and tag image +docker build -t ghcr.io/org/app:$(git rev-parse --short HEAD) . 
+docker push ghcr.io/org/app:$(git rev-parse --short HEAD) + +# Deploy with health-check gate +python scripts/deployment_manager.py deploy \ + --env=production \ + --image=app:$(git rev-parse --short HEAD) \ + --strategy=blue-green \ + --health-check-url=https://app.example.com/healthz + +# Verify pods are running +kubectl get pods -n production -l app=myapp +kubectl rollout status deployment/app-blue -n production + +# Switch traffic after verification +kubectl patch service app-svc -n production \ + -p '{"spec":{"selector":{"slot":"blue"}}}' ``` -### 3. Implement Best Practices - -Follow the patterns and practices documented in: -- `references/cicd_pipeline_guide.md` -- `references/infrastructure_as_code.md` -- `references/deployment_strategies.md` - -## Best Practices Summary - -### Code Quality -- Follow established patterns -- Write comprehensive tests -- Document decisions -- Review regularly - -### Performance -- Measure before optimizing -- Use appropriate caching -- Optimize critical paths -- Monitor in production - -### Security -- Validate all inputs -- Use parameterized queries -- Implement proper authentication -- Keep dependencies updated - -### Maintainability -- Write clear code -- Use consistent naming -- Add helpful comments -- Keep it simple - -## Common Commands +### 3. Rollback Procedure ```bash -# Development -npm run dev -npm run build -npm run test -npm run lint +# Immediate rollback via deployment manager +python scripts/deployment_manager.py rollback --env=production --to-version=1.2.2 -# Analysis -python scripts/terraform_scaffolder.py . -python scripts/deployment_manager.py --analyze +# Or via kubectl +kubectl rollout undo deployment/app -n production +kubectl rollout status deployment/app -n production -# Deployment -docker build -t app:latest . 
-docker-compose up -d
-kubectl apply -f k8s/
+# Verify rollback succeeded
+kubectl get pods -n production -l app=myapp
+curl -sf https://app.example.com/healthz || echo "ROLLBACK FAILED — escalate"
 ```
 
 ## Troubleshooting
 
-### Common Issues
-
 Check the comprehensive troubleshooting section in `references/deployment_strategies.md`.
-
-### Getting Help
-
-- Review reference documentation
-- Check script output messages
-- Consult tech stack documentation
-- Review error logs
-
-## Resources
-
-- Pattern Reference: `references/cicd_pipeline_guide.md`
-- Workflow Guide: `references/infrastructure_as_code.md`
-- Technical Guide: `references/deployment_strategies.md`
-- Tool Scripts: `scripts/` directory
diff --git a/docs/skills/engineering-team/senior-frontend.md b/docs/skills/engineering-team/senior-frontend.md
index 0a3f961..5bf4f5c 100644
--- a/docs/skills/engineering-team/senior-frontend.md
+++ b/docs/skills/engineering-team/senior-frontend.md
@@ -429,7 +429,7 @@ test('dialog is accessible', async () => {
 // next.config.js
 const nextConfig = {
   images: {
-    remotePatterns: [{ hostname: 'cdn.example.com' }],
+    remotePatterns: [{ hostname: 'cdn.example.com' }],
     formats: ['image/avif', 'image/webp'],
   },
   experimental: {
diff --git a/docs/skills/engineering-team/senior-qa.md b/docs/skills/engineering-team/senior-qa.md
index 42f13ce..bf48622 100644
--- a/docs/skills/engineering-team/senior-qa.md
+++ b/docs/skills/engineering-team/senior-qa.md
@@ -14,20 +14,6 @@ description: "Senior QA Engineer - Claude Code skill from the Engineering - Core
 
 Test automation, coverage analysis, and quality assurance patterns for React and Next.js applications.
-## Table of Contents - -- [Quick Start](#quick-start) -- [Tools Overview](#tools-overview) - - [Test Suite Generator](#1-test-suite-generator) - - [Coverage Analyzer](#2-coverage-analyzer) - - [E2E Test Scaffolder](#3-e2e-test-scaffolder) -- [QA Workflows](#qa-workflows) - - [Unit Test Generation Workflow](#unit-test-generation-workflow) - - [Coverage Analysis Workflow](#coverage-analysis-workflow) - - [E2E Test Setup Workflow](#e2e-test-setup-workflow) -- [Reference Documentation](#reference-documentation) -- [Common Patterns Quick Reference](#common-patterns-quick-reference) - --- ## Quick Start @@ -59,18 +45,6 @@ Scans React/TypeScript components and generates Jest + React Testing Library tes # Basic usage - scan components and generate tests python scripts/test_suite_generator.py src/components/ --output __tests__/ -# Output: -# Scanning: src/components/ -# Found 24 React components -# -# Generated tests: -# __tests__/Button.test.tsx (render, click handler, disabled state) -# __tests__/Modal.test.tsx (render, open/close, keyboard events) -# __tests__/Form.test.tsx (render, validation, submission) -# ... -# -# Summary: 24 test files, 87 test cases - # Include accessibility tests python scripts/test_suite_generator.py src/ --output __tests__/ --include-a11y @@ -98,29 +72,6 @@ Parses Jest/Istanbul coverage reports and identifies gaps, uncovered branches, a # Analyze coverage report python scripts/coverage_analyzer.py coverage/coverage-final.json -# Output: -# === Coverage Analysis Report === -# Overall: 72.4% (target: 80%) -# -# BY TYPE: -# Statements: 74.2% -# Branches: 68.1% -# Functions: 71.8% -# Lines: 73.5% -# -# CRITICAL GAPS (uncovered business logic): -# src/services/payment.ts:45-67 - Payment processing -# src/hooks/useAuth.ts:23-41 - Authentication flow -# -# RECOMMENDATIONS: -# 1. Add tests for payment service error handling -# 2. Cover authentication edge cases -# 3. 
Test form validation branches
-#
-# Files below threshold (80%):
-#   src/components/Checkout.tsx: 45%
-#   src/services/api.ts: 62%
-
 # Enforce threshold (exit 1 if below)
 python scripts/coverage_analyzer.py coverage/ --threshold 80 --strict
 
@@ -142,21 +93,6 @@ Scans Next.js pages/app directory and generates Playwright test files with commo
 
 # Scaffold E2E tests for Next.js App Router
 python scripts/e2e_test_scaffolder.py src/app/ --output e2e/
 
-# Output:
-#   Scanning: src/app/
-#   Found 12 routes
-#
-#   Generated E2E tests:
-#     e2e/home.spec.ts (navigation, hero section)
-#     e2e/auth/login.spec.ts (form submission, validation)
-#     e2e/auth/register.spec.ts (registration flow)
-#     e2e/dashboard.spec.ts (authenticated routes)
-#     e2e/products/[id].spec.ts (dynamic routes)
-#     ...
-#
-#   Generated: playwright.config.ts
-#   Generated: e2e/fixtures/auth.ts
-
 # Include Page Object Model classes
 python scripts/e2e_test_scaffolder.py src/app/ --output e2e/ --include-pom
 
@@ -191,7 +127,7 @@ import { Button } from '../src/components/Button';
 describe('Button', () => {
   it('renders with label', () => {
     render(<Button label="Click me" />);
-    expect(screen.getByRole('button', { name: /click me/i })).toBeInTheDocument();
+    expect(screen.getByRole('button', { name: /click me/i })).toBeInTheDocument();
   });
 
   it('calls onClick when clicked', () => {
@@ -285,12 +221,12 @@ npx playwright show-report
 
 **Step 5: Add to CI pipeline**
 ```yaml
 # .github/workflows/e2e.yml
-- name: Run E2E tests
+- name: Run E2E tests
   run: npx playwright test
-- name: Upload report
+- name: Upload report
   uses: actions/upload-artifact@v3
   with:
-    name: playwright-report
+    name: playwright-report
     path: playwright-report/
 ```
 
@@ -312,7 +248,7 @@ npx playwright show-report
 
 ```typescript
 // Preferred (accessible)
-screen.getByRole('button', { name: /submit/i })
+screen.getByRole('button', { name: /submit/i })
 screen.getByLabelText(/email/i)
 screen.getByPlaceholderText(/search/i)
 
@@ -343,7 +279,7 @@ import { setupServer } from 'msw/node';
 
 const server =
setupServer(
   rest.get('/api/users', (req, res, ctx) => {
-    return res(ctx.json([{ id: 1, name: 'John' }]));
+    return res(ctx.json([{ id: 1, name: 'John' }]));
   })
 );
 
@@ -356,7 +292,7 @@ afterAll(() => server.close());
 
 ```typescript
 // Preferred
-page.getByRole('button', { name: 'Submit' })
+page.getByRole('button', { name: 'Submit' })
 page.getByLabel('Email')
 page.getByText('Welcome')
 
diff --git a/docs/skills/engineering-team/senior-secops.md b/docs/skills/engineering-team/senior-secops.md
index fe49ee6..966b03a 100644
--- a/docs/skills/engineering-team/senior-secops.md
+++ b/docs/skills/engineering-team/senior-secops.md
@@ -18,7 +18,6 @@ Complete toolkit for Security Operations including vulnerability management, com
 
 ## Table of Contents
 
-- [Trigger Terms](#trigger-terms)
 - [Core Capabilities](#core-capabilities)
 - [Workflows](#workflows)
 - [Tool Reference](#tool-reference)
@@ -28,27 +27,6 @@ Complete toolkit for Security Operations including vulnerability management, com
 
 ---
 
-## Trigger Terms
-
-Use this skill when you encounter:
-
-| Category | Terms |
-|----------|-------|
-| **Vulnerability Management** | CVE, CVSS, vulnerability scan, security patch, dependency audit, npm audit, pip-audit |
-| **OWASP Top 10** | injection, XSS, CSRF, broken authentication, security misconfiguration, sensitive data exposure |
-| **Compliance** | SOC 2, PCI-DSS, HIPAA, GDPR, compliance audit, security controls, access control |
-| **Secure Coding** | input validation, output encoding, parameterized queries, prepared statements, sanitization |
-| **Secrets Management** | API key, secrets vault, environment variables, HashiCorp Vault, AWS Secrets Manager |
-| **Authentication** | JWT, OAuth, MFA, 2FA, TOTP, password hashing, bcrypt, argon2, session management |
-| **Security Testing** | SAST, DAST, penetration test, security scan, Snyk, Semgrep, CodeQL, Trivy |
-| **Incident Response** | security incident, breach notification, incident response, forensics, containment |
-|
**Network Security** | TLS, HTTPS, HSTS, CSP, CORS, security headers, firewall rules, WAF |
-| **Infrastructure Security** | container security, Kubernetes security, IAM, least privilege, zero trust |
-| **Cryptography** | encryption at rest, encryption in transit, AES-256, RSA, key management, KMS |
-| **Monitoring** | security monitoring, SIEM, audit logging, intrusion detection, anomaly detection |
-
----
-
 ## Core Capabilities
 
 ### 1. Security Scanner
@@ -136,14 +114,23 @@ Complete security assessment of a codebase.
 
 ```bash
 # Step 1: Scan for code vulnerabilities
 python scripts/security_scanner.py . --severity medium
+# STOP if exit code 2 — resolve critical findings before continuing
+```
 
+```bash
 # Step 2: Check dependency vulnerabilities
 python scripts/vulnerability_assessor.py . --severity high
+# STOP if exit code 2 — patch critical CVEs before continuing
+```
 
+```bash
 # Step 3: Verify compliance controls
 python scripts/compliance_checker.py . --framework all
+# STOP if exit code 2 — address critical gaps before proceeding
+```
 
-# Step 4: Generate combined report
+```bash
+# Step 4: Generate combined reports
 python scripts/security_scanner.py . --json --output security.json
 python scripts/vulnerability_assessor.py . --json --output vulns.json
 python scripts/compliance_checker.py . --json --output compliance.json
@@ -155,7 +142,7 @@ Integrate security checks into deployment pipeline.
 
 ```yaml
 # .github/workflows/security.yml
-name: Security Scan
+name: Security Scan
 
 on:
   pull_request:
@@ -167,21 +154,23 @@ jobs:
     steps:
       - uses: actions/checkout@v4
 
-      - name: Set up Python
+      - name: Set up Python
         uses: actions/setup-python@v5
         with:
           python-version: '3.11'
 
-      - name: Security Scanner
+      - name: Security Scanner
         run: python scripts/security_scanner.py . --severity high
 
-      - name: Vulnerability Assessment
+      - name: Vulnerability Assessment
         run: python scripts/vulnerability_assessor.py .
--severity critical - - name: Compliance Check + - name: "compliance-check" run: python scripts/compliance_checker.py . --framework soc2 ``` +Each step fails the pipeline on its respective exit code — no deployment proceeds past a critical finding. + ### Workflow 3: CVE Triage Respond to a new CVE affecting your application. @@ -191,6 +180,7 @@ Respond to a new CVE affecting your application. - Identify affected systems using vulnerability_assessor.py - Check if CVE is being actively exploited - Determine CVSS environmental score for your context + - STOP if CVSS 9.0+ on internet-facing system — escalate immediately 2. PRIORITIZE - Critical (CVSS 9.0+, internet-facing): 24 hours @@ -200,7 +190,8 @@ Respond to a new CVE affecting your application. 3. REMEDIATE - Update affected dependency to fixed version - - Run security_scanner.py to verify fix + - Run security_scanner.py to verify fix (must return exit code 0) + - STOP if scanner still flags the CVE — do not deploy - Test for regressions - Deploy with enhanced monitoring @@ -230,7 +221,7 @@ PHASE 2: CONTAIN (15-60 min) PHASE 3: ERADICATE (1-4 hours) - Root cause identified - Malware/backdoors removed -- Vulnerabilities patched (run security_scanner.py) +- Vulnerabilities patched (run security_scanner.py; must return exit code 0) - Systems hardened PHASE 4: RECOVER (4-24 hours) @@ -261,10 +252,7 @@ PHASE 5: POST-INCIDENT (24-72 hours) | `--json` | Output results as JSON | | `--output, -o` | Write results to file | -**Exit Codes:** -- `0`: No critical/high findings -- `1`: High severity findings -- `2`: Critical severity findings +**Exit Codes:** `0` = no critical/high findings · `1` = high severity findings · `2` = critical severity findings ### vulnerability_assessor.py @@ -276,10 +264,7 @@ PHASE 5: POST-INCIDENT (24-72 hours) | `--json` | Output results as JSON | | `--output, -o` | Write results to file | -**Exit Codes:** -- `0`: No critical/high vulnerabilities -- `1`: High severity vulnerabilities -- `2`: 
Critical severity vulnerabilities +**Exit Codes:** `0` = no critical/high vulnerabilities · `1` = high severity vulnerabilities · `2` = critical severity vulnerabilities ### compliance_checker.py @@ -291,29 +276,13 @@ PHASE 5: POST-INCIDENT (24-72 hours) | `--json` | Output results as JSON | | `--output, -o` | Write results to file | -**Exit Codes:** -- `0`: Compliant (90%+ score) -- `1`: Non-compliant (50-69% score) -- `2`: Critical gaps (<50% score) +**Exit Codes:** `0` = compliant (90%+ score) · `1` = non-compliant (50-69% score) · `2` = critical gaps (<50% score) --- ## Security Standards -### OWASP Top 10 Prevention - -| Vulnerability | Prevention | -|--------------|------------| -| **A01: Broken Access Control** | Implement RBAC, deny by default, validate permissions server-side | -| **A02: Cryptographic Failures** | Use TLS 1.2+, AES-256 encryption, secure key management | -| **A03: Injection** | Parameterized queries, input validation, escape output | -| **A04: Insecure Design** | Threat modeling, secure design patterns, defense in depth | -| **A05: Security Misconfiguration** | Hardening guides, remove defaults, disable unused features | -| **A06: Vulnerable Components** | Dependency scanning, automated updates, SBOM | -| **A07: Authentication Failures** | MFA, rate limiting, secure password storage | -| **A08: Data Integrity Failures** | Code signing, integrity checks, secure CI/CD | -| **A09: Security Logging Failures** | Comprehensive audit logs, SIEM integration, alerting | -| **A10: SSRF** | URL validation, allowlist destinations, network segmentation | +See `references/security_standards.md` for OWASP Top 10 full guidance, secure coding standards, authentication requirements, and API security controls. ### Secure Coding Checklist @@ -353,47 +322,28 @@ PHASE 5: POST-INCIDENT (24-72 hours) ## Compliance Frameworks -### SOC 2 Type II Controls +See `references/compliance_requirements.md` for full control mappings. 
Run `compliance_checker.py` to verify the controls below: -| Control | Category | Description | -|---------|----------|-------------| -| CC1 | Control Environment | Security policies, org structure | -| CC2 | Communication | Security awareness, documentation | -| CC3 | Risk Assessment | Vulnerability scanning, threat modeling | -| CC6 | Logical Access | Authentication, authorization, MFA | -| CC7 | System Operations | Monitoring, logging, incident response | -| CC8 | Change Management | CI/CD, code review, deployment controls | +### SOC 2 Type II +- **CC6** Logical Access: authentication, authorization, MFA +- **CC7** System Operations: monitoring, logging, incident response +- **CC8** Change Management: CI/CD, code review, deployment controls -### PCI-DSS v4.0 Requirements - -| Requirement | Description | -|-------------|-------------| -| Req 3 | Protect stored cardholder data (encryption at rest) | -| Req 4 | Encrypt transmission (TLS 1.2+) | -| Req 6 | Secure development (input validation, secure coding) | -| Req 8 | Strong authentication (MFA, password policy) | -| Req 10 | Audit logging (all access to cardholder data) | -| Req 11 | Security testing (SAST, DAST, penetration testing) | +### PCI-DSS v4.0 +- **Req 3/4**: Encryption at rest and in transit (TLS 1.2+) +- **Req 6**: Secure development (input validation, secure coding) +- **Req 8**: Strong authentication (MFA, password policy) +- **Req 10/11**: Audit logging, SAST/DAST/penetration testing ### HIPAA Security Rule +- Unique user IDs and audit trails for PHI access (164.312(a)(1), 164.312(b)) +- MFA for person/entity authentication (164.312(d)) +- Transmission encryption via TLS (164.312(e)(1)) -| Safeguard | Requirement | -|-----------|-------------| -| 164.312(a)(1) | Unique user identification for PHI access | -| 164.312(b) | Audit trails for PHI access | -| 164.312(c)(1) | Data integrity controls | -| 164.312(d) | Person/entity authentication (MFA) | -| 164.312(e)(1) | Transmission encryption (TLS) | 
- -### GDPR Requirements - -| Article | Requirement | -|---------|-------------| -| Art 25 | Privacy by design, data minimization | -| Art 32 | Security measures, encryption, pseudonymization | -| Art 33 | Breach notification (72 hours) | -| Art 17 | Right to erasure (data deletion) | -| Art 20 | Data portability (export capability) | +### GDPR +- **Art 25/32**: Privacy by design, encryption, pseudonymization +- **Art 33**: Breach notification within 72 hours +- **Art 17/20**: Right to erasure and data portability --- @@ -476,37 +426,4 @@ app.use((req, res, next) => { |----------|-------------| | `references/security_standards.md` | OWASP Top 10, secure coding, authentication, API security | | `references/vulnerability_management_guide.md` | CVE triage, CVSS scoring, remediation workflows | -| `references/compliance_requirements.md` | SOC 2, PCI-DSS, HIPAA, GDPR requirements | - ---- - -## Tech Stack - -**Security Scanning:** -- Snyk (dependency scanning) -- Semgrep (SAST) -- CodeQL (code analysis) -- Trivy (container scanning) -- OWASP ZAP (DAST) - -**Secrets Management:** -- HashiCorp Vault -- AWS Secrets Manager -- Azure Key Vault -- 1Password Secrets Automation - -**Authentication:** -- bcrypt, argon2 (password hashing) -- jsonwebtoken (JWT) -- passport.js (authentication middleware) -- speakeasy (TOTP/MFA) - -**Logging & Monitoring:** -- Winston, Pino (Node.js logging) -- Datadog, Splunk (SIEM) -- PagerDuty (alerting) - -**Compliance:** -- Vanta (SOC 2 automation) -- Drata (compliance management) -- AWS Config (configuration compliance) +| `references/compliance_requirements.md` | SOC 2, PCI-DSS, HIPAA, GDPR full control mappings | diff --git a/docs/skills/engineering-team/senior-security.md b/docs/skills/engineering-team/senior-security.md index f486119..d410a58 100644 --- a/docs/skills/engineering-team/senior-security.md +++ b/docs/skills/engineering-team/senior-security.md @@ -43,13 +43,7 @@ Identify and analyze security threats using STRIDE methodology. 
- Processes (application components) - Data stores (databases, caches) - Data flows (APIs, network connections) -3. Apply STRIDE to each DFD element: - - Spoofing: Can identity be faked? - - Tampering: Can data be modified? - - Repudiation: Can actions be denied? - - Information Disclosure: Can data leak? - - Denial of Service: Can availability be affected? - - Elevation of Privilege: Can access be escalated? +3. Apply STRIDE to each DFD element (see [STRIDE per Element Matrix](#stride-per-element-matrix) below) 4. Score risks using DREAD: - Damage potential (1-10) - Reproducibility (1-10) @@ -63,14 +57,14 @@ Identify and analyze security threats using STRIDE methodology. ### STRIDE Threat Categories -| Category | Description | Security Property | Mitigation Focus | -|----------|-------------|-------------------|------------------| -| Spoofing | Impersonating users or systems | Authentication | MFA, certificates, strong auth | -| Tampering | Modifying data or code | Integrity | Signing, checksums, validation | -| Repudiation | Denying actions | Non-repudiation | Audit logs, digital signatures | -| Information Disclosure | Exposing data | Confidentiality | Encryption, access controls | -| Denial of Service | Disrupting availability | Availability | Rate limiting, redundancy | -| Elevation of Privilege | Gaining unauthorized access | Authorization | RBAC, least privilege | +| Category | Security Property | Mitigation Focus | +|----------|-------------------|------------------| +| Spoofing | Authentication | MFA, certificates, strong auth | +| Tampering | Integrity | Signing, checksums, validation | +| Repudiation | Non-repudiation | Audit logs, digital signatures | +| Information Disclosure | Confidentiality | Encryption, access controls | +| Denial of Service | Availability | Rate limiting, redundancy | +| Elevation of Privilege | Authorization | RBAC, least privilege | ### STRIDE per Element Matrix @@ -189,24 +183,11 @@ Identify and remediate security 
vulnerabilities in applications. 7. Verify fixes and document 8. **Validation:** Scope defined; automated and manual testing complete; findings classified; remediation tracked -### OWASP Top 10 Mapping - -| Rank | Vulnerability | Testing Approach | -|------|---------------|------------------| -| A01 | Broken Access Control | Manual IDOR testing, authorization checks | -| A02 | Cryptographic Failures | Algorithm review, key management audit | -| A03 | Injection | SAST + manual payload testing | -| A04 | Insecure Design | Threat modeling, architecture review | -| A05 | Security Misconfiguration | Configuration audit, CIS benchmarks | -| A06 | Vulnerable Components | Dependency scanning, CVE monitoring | -| A07 | Authentication Failures | Password policy, session management review | -| A08 | Software/Data Integrity | CI/CD security, code signing verification | -| A09 | Logging Failures | Log review, SIEM configuration check | -| A10 | SSRF | Manual URL manipulation testing | +For OWASP Top 10 vulnerability descriptions and testing guidance, refer to [owasp.org/Top10](https://owasp.org/Top10). ### Vulnerability Severity Matrix -| Impact / Exploitability | Easy | Moderate | Difficult | +| Impact \ Exploitability | Easy | Moderate | Difficult | |-------------------------|------|----------|-----------| | Critical | Critical | Critical | High | | High | Critical | High | Medium | @@ -274,6 +255,55 @@ Review code for security vulnerabilities before deployment. | MD5/SHA1 for passwords | Weak hashing | Use Argon2id or bcrypt | | Math.random for tokens | Predictable values | Use crypto.getRandomValues | +### Inline Code Examples + +**SQL Injection — insecure vs. 
secure (Python):** + +```python +# ❌ Insecure: string formatting allows SQL injection +query = f"SELECT * FROM users WHERE username = '{username}'" +cursor.execute(query) + +# ✅ Secure: parameterized query — user input never interpreted as SQL +query = "SELECT * FROM users WHERE username = %s" +cursor.execute(query, (username,)) +``` + +**Password Hashing with Argon2id (Python):** + +```python +from argon2 import PasswordHasher + +ph = PasswordHasher() # uses secure defaults (time_cost, memory_cost) + +# On registration +hashed = ph.hash(plain_password) + +# On login — raises argon2.exceptions.VerifyMismatchError on failure +ph.verify(hashed, plain_password) +``` + +**Secret Scanning — core pattern matching (Python):** + +```python +import re, pathlib + +SECRET_PATTERNS = { + "aws_access_key": re.compile(r"AKIA[0-9A-Z]{16}"), + "github_token": re.compile(r"ghp_[A-Za-z0-9]{36}"), + "private_key": re.compile(r"-----BEGIN (RSA |EC )?PRIVATE KEY-----"), + "generic_secret": re.compile(r'(?i)(password|secret|api_key)\s*=\s*["\']?\S{8,}'), +} + +def scan_file(path: pathlib.Path) -> list[dict]: + findings = [] + for lineno, line in enumerate(path.read_text(errors="replace").splitlines(), 1): + for name, pattern in SECRET_PATTERNS.items(): + if pattern.search(line): + findings.append({"file": str(path), "line": lineno, "type": name}) + return findings +``` + --- ## Incident Response Workflow @@ -311,12 +341,12 @@ Respond to and contain security incidents. 
### Incident Severity Levels -| Level | Description | Response Time | Escalation | -|-------|-------------|---------------|------------| -| P1 - Critical | Active breach, data exfiltration | Immediate | CISO, Legal, Executive | -| P2 - High | Confirmed compromise, contained | 1 hour | Security Lead, IT Director | -| P3 - Medium | Potential compromise, under investigation | 4 hours | Security Team | -| P4 - Low | Suspicious activity, low impact | 24 hours | On-call engineer | +| Level | Response Time | Escalation | +|-------|---------------|------------| +| P1 - Critical (active breach/exfiltration) | Immediate | CISO, Legal, Executive | +| P2 - High (confirmed, contained) | 1 hour | Security Lead, IT Director | +| P3 - Medium (potential, under investigation) | 4 hours | Security Team | +| P4 - Low (suspicious, low impact) | 24 hours | On-call engineer | ### Incident Response Checklist @@ -364,24 +394,12 @@ See: [references/cryptography-implementation.md](references/cryptography-impleme ### Scripts -| Script | Purpose | Usage | -|--------|---------|-------| -| [threat_modeler.py](scripts/threat_modeler.py) | STRIDE threat analysis with risk scoring | `python threat_modeler.py --component "Authentication"` | -| [secret_scanner.py](scripts/secret_scanner.py) | Detect hardcoded secrets and credentials | `python secret_scanner.py /path/to/project` | +| Script | Purpose | +|--------|---------| +| [threat_modeler.py](scripts/threat_modeler.py) | STRIDE threat analysis with DREAD risk scoring; JSON and text output; interactive guided mode | +| [secret_scanner.py](scripts/secret_scanner.py) | Detect hardcoded secrets and credentials across 20+ patterns; CI/CD integration ready | -**Threat Modeler Features:** -- STRIDE analysis for any system component -- DREAD risk scoring -- Mitigation recommendations -- JSON and text output formats -- Interactive mode for guided analysis - -**Secret Scanner Features:** -- Detects AWS, GCP, Azure credentials -- Finds API keys and tokens 
(GitHub, Slack, Stripe) -- Identifies private keys and passwords -- Supports 20+ secret patterns -- CI/CD integration ready +For usage, see the inline code examples in [Secure Code Review Workflow](#inline-code-examples) and the script source files directly. ### References @@ -395,17 +413,6 @@ See: [references/cryptography-implementation.md](references/cryptography-impleme ## Security Standards Reference -### Compliance Frameworks - -| Framework | Focus | Applicable To | -|-----------|-------|---------------| -| OWASP ASVS | Application security | Web applications | -| CIS Benchmarks | System hardening | Servers, containers, cloud | -| NIST CSF | Risk management | Enterprise security programs | -| PCI-DSS | Payment card data | Payment processing | -| HIPAA | Healthcare data | Healthcare applications | -| SOC 2 | Service organization controls | SaaS providers | - ### Security Headers Checklist | Header | Recommended Value | @@ -417,6 +424,8 @@ See: [references/cryptography-implementation.md](references/cryptography-impleme | Referrer-Policy | strict-origin-when-cross-origin | | Permissions-Policy | geolocation=(), microphone=(), camera=() | +For compliance framework requirements (OWASP ASVS, CIS Benchmarks, NIST CSF, PCI-DSS, HIPAA, SOC 2), refer to the respective official documentation. 
+
 ---

 ## Related Skills

diff --git a/docs/skills/engineering-team/stripe-integration-expert.md b/docs/skills/engineering-team/stripe-integration-expert.md
index 782a4e6..b7d5b71 100644
--- a/docs/skills/engineering-team/stripe-integration-expert.md
+++ b/docs/skills/engineering-team/stripe-integration-expert.md
@@ -10,6 +10,8 @@ description: "Stripe Integration Expert - Claude Code skill from the Engineering

 ---

+# Stripe Integration Expert
+
 **Tier:** POWERFUL
 **Category:** Engineering Team
 **Domain:** Payments / Billing Infrastructure
@@ -77,7 +79,7 @@ export const stripe = new Stripe(process.env.STRIPE_SECRET_KEY!, {
   apiVersion: "2024-04-10",
   typescript: true,
   appInfo: {
-    name: "MyApp",
+    name: "myapp",
     version: "1.0.0",
   },
 })
@@ -119,7 +121,7 @@ export async function POST(req: Request) {
   if (!stripeCustomerId) {
     const customer = await stripe.customers.create({
       email: user.email,
-      name: user.name ?? undefined,
+      name: user.name ?? undefined,
       metadata: { userId: user.id },
     })
     stripeCustomerId = customer.id
diff --git a/docs/skills/engineering-team/tdd-guide.md b/docs/skills/engineering-team/tdd-guide.md
index e6f59cf..67b3361 100644
--- a/docs/skills/engineering-team/tdd-guide.md
+++ b/docs/skills/engineering-team/tdd-guide.md
@@ -14,27 +14,6 @@ description: "TDD Guide - Claude Code skill from the Engineering - Core domain."

 Test-driven development skill for generating tests, analyzing coverage, and guiding red-green-refactor workflows across Jest, Pytest, JUnit, and Vitest.
-## Table of Contents - -- [Capabilities](#capabilities) -- [Workflows](#workflows) -- [Tools](#tools) -- [Input Requirements](#input-requirements) -- [Limitations](#limitations) - ---- - -## Capabilities - -| Capability | Description | -|------------|-------------| -| Test Generation | Convert requirements or code into test cases with proper structure | -| Coverage Analysis | Parse LCOV/JSON/XML reports, identify gaps, prioritize fixes | -| TDD Workflow | Guide red-green-refactor cycles with validation | -| Framework Adapters | Generate tests for Jest, Pytest, JUnit, Vitest, Mocha | -| Quality Scoring | Assess test isolation, assertions, naming, detect test smells | -| Fixture Generation | Create realistic test data, mocks, and factories | - --- ## Workflows @@ -66,18 +45,85 @@ Test-driven development skill for generating tests, analyzing coverage, and guid --- -## Tools +## Examples + +### Test Generation — Input → Output (Pytest) + +**Input source function (`math_utils.py`):** +```python +def divide(a: float, b: float) -> float: + if b == 0: + raise ValueError("Cannot divide by zero") + return a / b +``` + +**Command:** +```bash +python scripts/test_generator.py --input math_utils.py --framework pytest +``` + +**Generated test output (`test_math_utils.py`):** +```python +import pytest +from math_utils import divide + +class TestDivide: + def test_divide_positive_numbers(self): + assert divide(10, 2) == 5.0 + + def test_divide_negative_numerator(self): + assert divide(-10, 2) == -5.0 + + def test_divide_float_result(self): + assert divide(1, 3) == pytest.approx(0.333, rel=1e-3) + + def test_divide_by_zero_raises_value_error(self): + with pytest.raises(ValueError, match="Cannot divide by zero"): + divide(10, 0) + + def test_divide_zero_numerator(self): + assert divide(0, 5) == 0.0 +``` + +--- + +### Coverage Analysis — Sample P0/P1/P2 Output + +**Command:** +```bash +python scripts/coverage_analyzer.py --report lcov.info --threshold 80 +``` + +**Sample output:** 
+``` +Coverage Report — Overall: 63% (threshold: 80%) + +P0 — Critical gaps (uncovered error paths): + auth/login.py:42-58 handle_expired_token() 0% covered + payments/process.py:91-110 handle_payment_failure() 0% covered + +P1 — High-value gaps (core logic branches): + users/service.py:77 update_profile() — else branch 0% covered + orders/cart.py:134 apply_discount() — zero-qty guard 0% covered + +P2 — Low-risk gaps (utility / helper functions): + utils/formatting.py:12 format_currency() 0% covered + +Recommended: Generate tests for P0 items first to reach 80% threshold. +``` + +--- + +## Key Tools | Tool | Purpose | Usage | |------|---------|-------| | `test_generator.py` | Generate test cases from code/requirements | `python scripts/test_generator.py --input source.py --framework pytest` | | `coverage_analyzer.py` | Parse and analyze coverage reports | `python scripts/coverage_analyzer.py --report lcov.info --threshold 80` | | `tdd_workflow.py` | Guide red-green-refactor cycles | `python scripts/tdd_workflow.py --phase red --test test_auth.py` | -| `framework_adapter.py` | Convert tests between frameworks | `python scripts/framework_adapter.py --from jest --to pytest` | | `fixture_generator.py` | Generate test data and mocks | `python scripts/fixture_generator.py --entity User --count 5` | -| `metrics_calculator.py` | Calculate test quality metrics | `python scripts/metrics_calculator.py --tests tests/` | -| `format_detector.py` | Detect language and framework | `python scripts/format_detector.py --file source.ts` | -| `output_formatter.py` | Format output for CLI/desktop/CI | `python scripts/output_formatter.py --format markdown` | + +Additional scripts: `framework_adapter.py` (convert between frameworks), `metrics_calculator.py` (quality metrics), `format_detector.py` (detect language/framework), `output_formatter.py` (CLI/desktop/CI output). 
 ---

diff --git a/docs/skills/engineering/agent-designer.md b/docs/skills/engineering/agent-designer.md
index 8029dc8..22eae26 100644
--- a/docs/skills/engineering/agent-designer.md
+++ b/docs/skills/engineering/agent-designer.md
@@ -10,6 +10,8 @@ description: "Agent Designer - Multi-Agent System Architecture - Claude Code ski

 ---

+# Agent Designer - Multi-Agent System Architecture
+
 **Tier:** POWERFUL
 **Category:** Engineering
 **Tags:** AI agents, architecture, system design, orchestration, multi-agent systems
diff --git a/docs/skills/engineering/agent-workflow-designer.md b/docs/skills/engineering/agent-workflow-designer.md
index 7271cb6..c444c18 100644
--- a/docs/skills/engineering/agent-workflow-designer.md
+++ b/docs/skills/engineering/agent-workflow-designer.md
@@ -10,6 +10,8 @@ description: "Agent Workflow Designer - Claude Code skill from the Engineering -

 ---

+# Agent Workflow Designer
+
 **Tier:** POWERFUL
 **Category:** Engineering
 **Domain:** Multi-Agent Systems / AI Orchestration
@@ -72,7 +74,7 @@ import anthropic

 @dataclass
 class PipelineStage:
-    name: str
+    name: str
     system_prompt: str
     input_key: str   # what to take from state
     output_key: str  # what to write to state
@@ -141,7 +143,7 @@ import asyncio
 import anthropic
 from typing import Any

-async def run_agent(client, task_name: str, system: str, user: str, model: str = "claude-3-5-sonnet-20241022") -> dict:
+async def run_agent(client, task_name: str, system: str, user: str, model: str = "claude-3-5-sonnet-20241022") -> dict:
     """Single async agent call"""
     loop = asyncio.get_event_loop()
@@ -407,7 +409,7 @@ class ContextBudget:
     def remaining(self):
         return self.total - self.reserve - self.used

-    def allocate(self, step_name: str, requested: int) -> int:
+    def allocate(self, step_name: str, requested: int) -> int:
         allocated = min(requested, int(self.remaining * 0.6))  # max 60% of remaining
         print(f"[Budget] {step_name}: allocated {allocated:,} tokens (remaining: {self.remaining:,})")
         return allocated
diff --git 
a/docs/skills/engineering/api-design-reviewer.md b/docs/skills/engineering/api-design-reviewer.md index 73eb062..2d1758c 100644 --- a/docs/skills/engineering/api-design-reviewer.md +++ b/docs/skills/engineering/api-design-reviewer.md @@ -10,6 +10,8 @@ description: "API Design Reviewer - Claude Code skill from the Engineering - POW --- +# API Design Reviewer + **Tier:** POWERFUL **Category:** Engineering / Architecture **Maintainer:** Claude Skills Team @@ -373,13 +375,13 @@ Provides comprehensive scoring of API design quality. ### CI/CD Integration ```yaml -- name: API Linting +- name: "api-linting" run: python scripts/api_linter.py openapi.json -- name: Breaking Change Detection +- name: "breaking-change-detection" run: python scripts/breaking_change_detector.py openapi-v1.json openapi-v2.json -- name: API Scorecard +- name: "api-scorecard" run: python scripts/api_scorecard.py openapi.json ``` diff --git a/docs/skills/engineering/api-test-suite-builder.md b/docs/skills/engineering/api-test-suite-builder.md index f168ff4..3c29c5b 100644 --- a/docs/skills/engineering/api-test-suite-builder.md +++ b/docs/skills/engineering/api-test-suite-builder.md @@ -10,6 +10,8 @@ description: "API Test Suite Builder - Claude Code skill from the Engineering - --- +# API Test Suite Builder + **Tier:** POWERFUL **Category:** Engineering **Domain:** Testing / API Quality @@ -140,511 +142,7 @@ For every POST/PUT/PATCH endpoint with a request body: --- ## Example Test Files - -### Example 1 — Node.js: Vitest + Supertest (Next.js API Route) - -```typescript -// tests/api/users.test.ts -import { describe, it, expect, beforeAll, afterAll } from 'vitest' -import request from 'supertest' -import { createServer } from '@/test/helpers/server' -import { generateJWT, generateExpiredJWT } from '@/test/helpers/auth' -import { createTestUser, cleanupTestUsers } from '@/test/helpers/db' - -const app = createServer() - -describe('GET /api/users/:id', () => { - let validToken: string - let adminToken: 
string - let testUserId: string - - beforeAll(async () => { - const user = await createTestUser({ role: 'user' }) - const admin = await createTestUser({ role: 'admin' }) - testUserId = user.id - validToken = generateJWT(user) - adminToken = generateJWT(admin) - }) - - afterAll(async () => { - await cleanupTestUsers() - }) - - // --- Auth tests --- - it('returns 401 with no auth header', async () => { - const res = await request(app).get(`/api/users/${testUserId}`) - expect(res.status).toBe(401) - expect(res.body).toHaveProperty('error') - }) - - it('returns 401 with malformed token', async () => { - const res = await request(app) - .get(`/api/users/${testUserId}`) - .set('Authorization', 'Bearer not-a-real-jwt') - expect(res.status).toBe(401) - }) - - it('returns 401 with expired token', async () => { - const expiredToken = generateExpiredJWT({ id: testUserId }) - const res = await request(app) - .get(`/api/users/${testUserId}`) - .set('Authorization', `Bearer ${expiredToken}`) - expect(res.status).toBe(401) - expect(res.body.error).toMatch(/expired/i) - }) - - it('returns 403 when accessing another user\'s profile without admin', async () => { - const otherUser = await createTestUser({ role: 'user' }) - const otherToken = generateJWT(otherUser) - const res = await request(app) - .get(`/api/users/${testUserId}`) - .set('Authorization', `Bearer ${otherToken}`) - expect(res.status).toBe(403) - await cleanupTestUsers([otherUser.id]) - }) - - it('returns 200 with valid token for own profile', async () => { - const res = await request(app) - .get(`/api/users/${testUserId}`) - .set('Authorization', `Bearer ${validToken}`) - expect(res.status).toBe(200) - expect(res.body).toMatchObject({ id: testUserId }) - expect(res.body).not.toHaveProperty('password') - expect(res.body).not.toHaveProperty('hashedPassword') - }) - - it('returns 404 for non-existent user', async () => { - const res = await request(app) - .get('/api/users/00000000-0000-0000-0000-000000000000') - 
.set('Authorization', `Bearer ${adminToken}`) - expect(res.status).toBe(404) - }) - - // --- Input validation --- - it('returns 400 for invalid UUID format', async () => { - const res = await request(app) - .get('/api/users/not-a-uuid') - .set('Authorization', `Bearer ${adminToken}`) - expect(res.status).toBe(400) - }) -}) - -describe('POST /api/users', () => { - let adminToken: string - - beforeAll(async () => { - const admin = await createTestUser({ role: 'admin' }) - adminToken = generateJWT(admin) - }) - - afterAll(cleanupTestUsers) - - // --- Input validation --- - it('returns 422 when body is empty', async () => { - const res = await request(app) - .post('/api/users') - .set('Authorization', `Bearer ${adminToken}`) - .send({}) - expect(res.status).toBe(422) - expect(res.body.errors).toBeDefined() - }) - - it('returns 422 when email is missing', async () => { - const res = await request(app) - .post('/api/users') - .set('Authorization', `Bearer ${adminToken}`) - .send({ name: 'Test User', role: 'user' }) - expect(res.status).toBe(422) - expect(res.body.errors).toContainEqual( - expect.objectContaining({ field: 'email' }) - ) - }) - - it('returns 422 for invalid email format', async () => { - const res = await request(app) - .post('/api/users') - .set('Authorization', `Bearer ${adminToken}`) - .send({ email: 'not-an-email', name: 'Test', role: 'user' }) - expect(res.status).toBe(422) - }) - - it('returns 422 for SQL injection attempt in email field', async () => { - const res = await request(app) - .post('/api/users') - .set('Authorization', `Bearer ${adminToken}`) - .send({ email: "' OR '1'='1", name: 'Hacker', role: 'user' }) - expect(res.status).toBe(422) - }) - - it('returns 409 when email already exists', async () => { - const existing = await createTestUser({ role: 'user' }) - const res = await request(app) - .post('/api/users') - .set('Authorization', `Bearer ${adminToken}`) - .send({ email: existing.email, name: 'Duplicate', role: 'user' }) - 
expect(res.status).toBe(409) - }) - - it('creates user successfully with valid data', async () => { - const res = await request(app) - .post('/api/users') - .set('Authorization', `Bearer ${adminToken}`) - .send({ email: 'newuser@example.com', name: 'New User', role: 'user' }) - expect(res.status).toBe(201) - expect(res.body).toHaveProperty('id') - expect(res.body.email).toBe('newuser@example.com') - expect(res.body).not.toHaveProperty('password') - }) -}) - -describe('GET /api/users (pagination)', () => { - let adminToken: string - - beforeAll(async () => { - const admin = await createTestUser({ role: 'admin' }) - adminToken = generateJWT(admin) - // Create 15 test users for pagination - await Promise.all(Array.from({ length: 15 }, (_, i) => - createTestUser({ email: `pagtest${i}@example.com` }) - )) - }) - - afterAll(cleanupTestUsers) - - it('returns first page with default limit', async () => { - const res = await request(app) - .get('/api/users') - .set('Authorization', `Bearer ${adminToken}`) - expect(res.status).toBe(200) - expect(res.body.data).toBeInstanceOf(Array) - expect(res.body).toHaveProperty('total') - expect(res.body).toHaveProperty('page') - expect(res.body).toHaveProperty('pageSize') - }) - - it('returns empty array for page beyond total', async () => { - const res = await request(app) - .get('/api/users?page=9999') - .set('Authorization', `Bearer ${adminToken}`) - expect(res.status).toBe(200) - expect(res.body.data).toHaveLength(0) - }) - - it('returns 400 for negative page number', async () => { - const res = await request(app) - .get('/api/users?page=-1') - .set('Authorization', `Bearer ${adminToken}`) - expect(res.status).toBe(400) - }) - - it('caps pageSize at maximum allowed value', async () => { - const res = await request(app) - .get('/api/users?pageSize=9999') - .set('Authorization', `Bearer ${adminToken}`) - expect(res.status).toBe(200) - expect(res.body.data.length).toBeLessThanOrEqual(100) - }) -}) -``` - ---- - -### Example 2 — 
Node.js: File Upload Tests - -```typescript -// tests/api/uploads.test.ts -import { describe, it, expect } from 'vitest' -import request from 'supertest' -import path from 'path' -import fs from 'fs' -import { createServer } from '@/test/helpers/server' -import { generateJWT } from '@/test/helpers/auth' -import { createTestUser } from '@/test/helpers/db' - -const app = createServer() - -describe('POST /api/upload', () => { - let validToken: string - - beforeAll(async () => { - const user = await createTestUser({ role: 'user' }) - validToken = generateJWT(user) - }) - - it('returns 401 without authentication', async () => { - const res = await request(app) - .post('/api/upload') - .attach('file', Buffer.from('test'), 'test.pdf') - expect(res.status).toBe(401) - }) - - it('returns 400 when no file attached', async () => { - const res = await request(app) - .post('/api/upload') - .set('Authorization', `Bearer ${validToken}`) - expect(res.status).toBe(400) - expect(res.body.error).toMatch(/file/i) - }) - - it('returns 400 for unsupported file type (exe)', async () => { - const res = await request(app) - .post('/api/upload') - .set('Authorization', `Bearer ${validToken}`) - .attach('file', Buffer.from('MZ fake exe'), { filename: 'virus.exe', contentType: 'application/octet-stream' }) - expect(res.status).toBe(400) - expect(res.body.error).toMatch(/type|format|allowed/i) - }) - - it('returns 413 for oversized file (>10MB)', async () => { - const largeBuf = Buffer.alloc(11 * 1024 * 1024) // 11MB - const res = await request(app) - .post('/api/upload') - .set('Authorization', `Bearer ${validToken}`) - .attach('file', largeBuf, { filename: 'large.pdf', contentType: 'application/pdf' }) - expect(res.status).toBe(413) - }) - - it('returns 400 for empty file (0 bytes)', async () => { - const res = await request(app) - .post('/api/upload') - .set('Authorization', `Bearer ${validToken}`) - .attach('file', Buffer.alloc(0), { filename: 'empty.pdf', contentType: 'application/pdf' }) 
- expect(res.status).toBe(400) - }) - - it('rejects MIME type spoofing (pdf extension but exe content)', async () => { - // Real malicious file: exe magic bytes but pdf extension - const fakeExe = Buffer.from('4D5A9000', 'hex') // MZ header - const res = await request(app) - .post('/api/upload') - .set('Authorization', `Bearer ${validToken}`) - .attach('file', fakeExe, { filename: 'document.pdf', contentType: 'application/pdf' }) - // Should detect magic bytes mismatch - expect([400, 415]).toContain(res.status) - }) - - it('accepts valid PDF file', async () => { - const pdfHeader = Buffer.from('%PDF-1.4 test content') - const res = await request(app) - .post('/api/upload') - .set('Authorization', `Bearer ${validToken}`) - .attach('file', pdfHeader, { filename: 'valid.pdf', contentType: 'application/pdf' }) - expect(res.status).toBe(200) - expect(res.body).toHaveProperty('url') - expect(res.body).toHaveProperty('id') - }) -}) -``` - ---- - -### Example 3 — Python: Pytest + httpx (FastAPI) - -```python -# tests/api/test_items.py -import pytest -import httpx -from datetime import datetime, timedelta -import jwt - -BASE_URL = "http://localhost:8000" -JWT_SECRET = "test-secret" # use test config, never production secret - - -def make_token(user_id: str, role: str = "user", expired: bool = False) -> str: - exp = datetime.utcnow() + (timedelta(hours=-1) if expired else timedelta(hours=1)) - return jwt.encode( - {"sub": user_id, "role": role, "exp": exp}, - JWT_SECRET, - algorithm="HS256", - ) - - -@pytest.fixture -def client(): - with httpx.Client(base_url=BASE_URL) as c: - yield c - - -@pytest.fixture -def valid_token(): - return make_token("user-123", role="user") - - -@pytest.fixture -def admin_token(): - return make_token("admin-456", role="admin") - - -@pytest.fixture -def expired_token(): - return make_token("user-123", expired=True) - - -class TestGetItem: - def test_returns_401_without_auth(self, client): - res = client.get("/api/items/1") - assert res.status_code 
== 401 - - def test_returns_401_with_invalid_token(self, client): - res = client.get("/api/items/1", headers={"Authorization": "Bearer garbage"}) - assert res.status_code == 401 - - def test_returns_401_with_expired_token(self, client, expired_token): - res = client.get("/api/items/1", headers={"Authorization": f"Bearer {expired_token}"}) - assert res.status_code == 401 - assert "expired" in res.json().get("detail", "").lower() - - def test_returns_404_for_nonexistent_item(self, client, valid_token): - res = client.get( - "/api/items/99999999", - headers={"Authorization": f"Bearer {valid_token}"}, - ) - assert res.status_code == 404 - - def test_returns_400_for_invalid_id_format(self, client, valid_token): - res = client.get( - "/api/items/not-a-number", - headers={"Authorization": f"Bearer {valid_token}"}, - ) - assert res.status_code in (400, 422) - - def test_returns_200_with_valid_auth(self, client, valid_token, test_item): - res = client.get( - f"/api/items/{test_item['id']}", - headers={"Authorization": f"Bearer {valid_token}"}, - ) - assert res.status_code == 200 - data = res.json() - assert data["id"] == test_item["id"] - assert "password" not in data - - -class TestCreateItem: - def test_returns_422_with_empty_body(self, client, admin_token): - res = client.post( - "/api/items", - json={}, - headers={"Authorization": f"Bearer {admin_token}"}, - ) - assert res.status_code == 422 - errors = res.json()["detail"] - assert len(errors) > 0 - - def test_returns_422_with_missing_required_field(self, client, admin_token): - res = client.post( - "/api/items", - json={"description": "no name field"}, - headers={"Authorization": f"Bearer {admin_token}"}, - ) - assert res.status_code == 422 - fields = [e["loc"][-1] for e in res.json()["detail"]] - assert "name" in fields - - def test_returns_422_with_wrong_type(self, client, admin_token): - res = client.post( - "/api/items", - json={"name": "test", "price": "not-a-number"}, - headers={"Authorization": f"Bearer 
{admin_token}"}, - ) - assert res.status_code == 422 - - @pytest.mark.parametrize("price", [-1, -0.01]) - def test_returns_422_for_negative_price(self, client, admin_token, price): - res = client.post( - "/api/items", - json={"name": "test", "price": price}, - headers={"Authorization": f"Bearer {admin_token}"}, - ) - assert res.status_code == 422 - - def test_returns_422_for_price_exceeding_max(self, client, admin_token): - res = client.post( - "/api/items", - json={"name": "test", "price": 1_000_001}, - headers={"Authorization": f"Bearer {admin_token}"}, - ) - assert res.status_code == 422 - - def test_creates_item_successfully(self, client, admin_token): - res = client.post( - "/api/items", - json={"name": "New Widget", "price": 9.99, "category": "tools"}, - headers={"Authorization": f"Bearer {admin_token}"}, - ) - assert res.status_code == 201 - data = res.json() - assert "id" in data - assert data["name"] == "New Widget" - - def test_returns_403_for_non_admin(self, client, valid_token): - res = client.post( - "/api/items", - json={"name": "test", "price": 1.0}, - headers={"Authorization": f"Bearer {valid_token}"}, - ) - assert res.status_code == 403 - - -class TestPagination: - def test_returns_paginated_response(self, client, valid_token): - res = client.get( - "/api/items?page=1&size=10", - headers={"Authorization": f"Bearer {valid_token}"}, - ) - assert res.status_code == 200 - data = res.json() - assert "items" in data - assert "total" in data - assert "page" in data - assert len(data["items"]) <= 10 - - def test_empty_result_for_out_of_range_page(self, client, valid_token): - res = client.get( - "/api/items?page=99999", - headers={"Authorization": f"Bearer {valid_token}"}, - ) - assert res.status_code == 200 - assert res.json()["items"] == [] - - def test_returns_422_for_page_zero(self, client, valid_token): - res = client.get( - "/api/items?page=0", - headers={"Authorization": f"Bearer {valid_token}"}, - ) - assert res.status_code == 422 - - def 
test_caps_page_size_at_maximum(self, client, valid_token): - res = client.get( - "/api/items?size=9999", - headers={"Authorization": f"Bearer {valid_token}"}, - ) - assert res.status_code == 200 - assert len(res.json()["items"]) <= 100 # max page size - - -class TestRateLimiting: - def test_rate_limit_after_burst(self, client, valid_token): - responses = [] - for _ in range(60): # exceed typical 50/min limit - res = client.get( - "/api/items", - headers={"Authorization": f"Bearer {valid_token}"}, - ) - responses.append(res.status_code) - if res.status_code == 429: - break - assert 429 in responses, "Rate limit was not triggered" - - def test_rate_limit_response_has_retry_after(self, client, valid_token): - for _ in range(60): - res = client.get("/api/items", headers={"Authorization": f"Bearer {valid_token}"}) - if res.status_code == 429: - assert "Retry-After" in res.headers or "retry_after" in res.json() - break -``` - ---- +→ See references/example-test-files.md for details ## Generating Tests from Route Scan diff --git a/docs/skills/engineering/changelog-generator.md b/docs/skills/engineering/changelog-generator.md index 677fc62..acda10d 100644 --- a/docs/skills/engineering/changelog-generator.md +++ b/docs/skills/engineering/changelog-generator.md @@ -10,6 +10,8 @@ description: "Changelog Generator - Claude Code skill from the Engineering - POW --- +# Changelog Generator + **Tier:** POWERFUL **Category:** Engineering **Domain:** Release Management / Documentation diff --git a/docs/skills/engineering/ci-cd-pipeline-builder.md b/docs/skills/engineering/ci-cd-pipeline-builder.md index e01f1b8..37b42ee 100644 --- a/docs/skills/engineering/ci-cd-pipeline-builder.md +++ b/docs/skills/engineering/ci-cd-pipeline-builder.md @@ -10,6 +10,8 @@ description: "CI/CD Pipeline Builder - Claude Code skill from the Engineering - --- +# CI/CD Pipeline Builder + **Tier:** POWERFUL **Category:** Engineering **Domain:** DevOps / Automation diff --git 
a/docs/skills/engineering/codebase-onboarding.md b/docs/skills/engineering/codebase-onboarding.md
index e763a85..2f8160a 100644
--- a/docs/skills/engineering/codebase-onboarding.md
+++ b/docs/skills/engineering/codebase-onboarding.md
@@ -10,6 +10,8 @@ description: "Codebase Onboarding - Claude Code skill from the Engineering - POW

 ---

+# Codebase Onboarding
+
 **Tier:** POWERFUL
 **Category:** Engineering
 **Domain:** Documentation / Developer Experience
@@ -272,7 +274,7 @@ await sendEmail({
   to: user.email,
   subject: 'Subject line',
   template: 'my-email',
-  props: { name: user.name },
+  props: { name: user.name },
 })
 ```
@@ -447,44 +449,7 @@ pnpm validate

 ---

 ## Output Formats
-
-### Notion Export
-
-```javascript
-// Use Notion API to create onboarding page
-const { Client } = require('@notionhq/client')
-const notion = new Client({ auth: process.env.NOTION_TOKEN })
-
-const blocks = markdownToNotionBlocks(onboardingMarkdown) // use notion-to-md
-await notion.pages.create({
-  parent: { page_id: ONBOARDING_PARENT_PAGE_ID },
-  properties: { title: { title: [{ text: { content: 'Engineer Onboarding — MyApp' } }] } },
-  children: blocks,
-})
-```
-
-### Confluence Export
-
-```bash
-# Using confluence-cli or REST API
-curl -X POST \
-  -H "Content-Type: application/json" \
-  -u "user@example.com:$CONFLUENCE_TOKEN" \
-  "https://yourorg.atlassian.net/wiki/rest/api/content" \
-  -d '{
-    "type": "page",
-    "title": "Codebase Onboarding",
-    "space": {"key": "ENG"},
-    "body": {
-      "storage": {
-        "value": "

Generated content...

", - "representation": "storage" - } - } - }' -``` - ---- +→ See references/output-format-templates.md for details ## Common Pitfalls diff --git a/docs/skills/engineering/database-designer.md b/docs/skills/engineering/database-designer.md index a57ed30..0169c84 100644 --- a/docs/skills/engineering/database-designer.md +++ b/docs/skills/engineering/database-designer.md @@ -10,6 +10,8 @@ description: "Database Designer - POWERFUL Tier Skill - Claude Code skill from t --- +# Database Designer - POWERFUL Tier Skill + ## Overview A comprehensive database design skill that provides expert-level analysis, optimization, and migration capabilities for modern database systems. This skill combines theoretical principles with practical tools to help architects and developers create scalable, performant, and maintainable database schemas. @@ -39,479 +41,7 @@ A comprehensive database design skill that provides expert-level analysis, optim - **Execution Planning**: Ordered migration steps with dependency resolution ## Database Design Principles - -### Normalization Forms - -#### First Normal Form (1NF) -- **Atomic Values**: Each column contains indivisible values -- **Unique Column Names**: No duplicate column names within a table -- **Uniform Data Types**: Each column contains the same type of data -- **Row Uniqueness**: No duplicate rows in the table - -**Example Violation:** -```sql --- BAD: Multiple phone numbers in one column -CREATE TABLE contacts ( - id INT PRIMARY KEY, - name VARCHAR(100), - phones VARCHAR(200) -- "123-456-7890, 098-765-4321" -); - --- GOOD: Separate table for phone numbers -CREATE TABLE contacts ( - id INT PRIMARY KEY, - name VARCHAR(100) -); - -CREATE TABLE contact_phones ( - id INT PRIMARY KEY, - contact_id INT REFERENCES contacts(id), - phone_number VARCHAR(20), - phone_type VARCHAR(10) -); -``` - -#### Second Normal Form (2NF) -- **1NF Compliance**: Must satisfy First Normal Form -- **Full Functional Dependency**: Non-key attributes depend on the 
entire primary key -- **Partial Dependency Elimination**: Remove attributes that depend on part of a composite key - -**Example Violation:** -```sql --- BAD: Student course table with partial dependencies -CREATE TABLE student_courses ( - student_id INT, - course_id INT, - student_name VARCHAR(100), -- Depends only on student_id - course_name VARCHAR(100), -- Depends only on course_id - grade CHAR(1), - PRIMARY KEY (student_id, course_id) -); - --- GOOD: Separate tables eliminate partial dependencies -CREATE TABLE students ( - id INT PRIMARY KEY, - name VARCHAR(100) -); - -CREATE TABLE courses ( - id INT PRIMARY KEY, - name VARCHAR(100) -); - -CREATE TABLE enrollments ( - student_id INT REFERENCES students(id), - course_id INT REFERENCES courses(id), - grade CHAR(1), - PRIMARY KEY (student_id, course_id) -); -``` - -#### Third Normal Form (3NF) -- **2NF Compliance**: Must satisfy Second Normal Form -- **Transitive Dependency Elimination**: Non-key attributes should not depend on other non-key attributes -- **Direct Dependency**: Non-key attributes depend directly on the primary key - -**Example Violation:** -```sql --- BAD: Employee table with transitive dependency -CREATE TABLE employees ( - id INT PRIMARY KEY, - name VARCHAR(100), - department_id INT, - department_name VARCHAR(100), -- Depends on department_id, not employee id - department_budget DECIMAL(10,2) -- Transitive dependency -); - --- GOOD: Separate department information -CREATE TABLE departments ( - id INT PRIMARY KEY, - name VARCHAR(100), - budget DECIMAL(10,2) -); - -CREATE TABLE employees ( - id INT PRIMARY KEY, - name VARCHAR(100), - department_id INT REFERENCES departments(id) -); -``` - -#### Boyce-Codd Normal Form (BCNF) -- **3NF Compliance**: Must satisfy Third Normal Form -- **Determinant Key Rule**: Every determinant must be a candidate key -- **Stricter 3NF**: Handles anomalies not covered by 3NF - -### Denormalization Strategies - -#### When to Denormalize -1. 
**Read-Heavy Workloads**: High query frequency with acceptable write trade-offs -2. **Performance Bottlenecks**: Join operations causing significant latency -3. **Aggregation Needs**: Frequent calculation of derived values -4. **Caching Requirements**: Pre-computed results for common queries - -#### Common Denormalization Patterns - -**Redundant Storage** -```sql --- Store calculated values to avoid expensive joins -CREATE TABLE orders ( - id INT PRIMARY KEY, - customer_id INT REFERENCES customers(id), - customer_name VARCHAR(100), -- Denormalized from customers table - order_total DECIMAL(10,2), -- Denormalized calculation - created_at TIMESTAMP -); -``` - -**Materialized Aggregates** -```sql --- Pre-computed summary tables -CREATE TABLE customer_statistics ( - customer_id INT PRIMARY KEY, - total_orders INT, - lifetime_value DECIMAL(12,2), - last_order_date DATE, - updated_at TIMESTAMP -); -``` - -## Index Optimization Strategies - -### B-Tree Indexes -- **Default Choice**: Best for range queries, sorting, and equality matches -- **Column Order**: Most selective columns first for composite indexes -- **Prefix Matching**: Supports leading column subset queries -- **Maintenance Cost**: Balanced tree structure with logarithmic operations - -### Hash Indexes -- **Equality Queries**: Optimal for exact match lookups -- **Memory Efficiency**: Constant-time access for single-value queries -- **Range Limitations**: Cannot support range or partial matches -- **Use Cases**: Primary keys, unique constraints, cache keys - -### Composite Indexes -```sql --- Query pattern determines optimal column order --- Query: WHERE status = 'active' AND created_date > '2023-01-01' ORDER BY priority DESC -CREATE INDEX idx_task_status_date_priority -ON tasks (status, created_date, priority DESC); - --- Query: WHERE user_id = 123 AND category IN ('A', 'B') AND date_field BETWEEN '...' AND '...' 
-CREATE INDEX idx_user_category_date -ON user_activities (user_id, category, date_field); -``` - -### Covering Indexes -```sql --- Include additional columns to avoid table lookups -CREATE INDEX idx_user_email_covering -ON users (email) -INCLUDE (first_name, last_name, status); - --- Query can be satisfied entirely from the index --- SELECT first_name, last_name, status FROM users WHERE email = 'user@example.com'; -``` - -### Partial Indexes -```sql --- Index only relevant subset of data -CREATE INDEX idx_active_users_email -ON users (email) -WHERE status = 'active'; - --- Index for recent orders only -CREATE INDEX idx_recent_orders_customer -ON orders (customer_id, created_at) -WHERE created_at > CURRENT_DATE - INTERVAL '30 days'; -``` - -## Query Analysis & Optimization - -### Query Patterns Recognition -1. **Equality Filters**: Single-column B-tree indexes -2. **Range Queries**: B-tree with proper column ordering -3. **Text Search**: Full-text indexes or trigram indexes -4. **Join Operations**: Foreign key indexes on both sides -5. **Sorting Requirements**: Indexes matching ORDER BY clauses - -### Index Selection Algorithm -``` -1. Identify WHERE clause columns -2. Determine most selective columns first -3. Consider JOIN conditions -4. Include ORDER BY columns if possible -5. Evaluate covering index opportunities -6. 
Check for existing overlapping indexes -``` - -## Data Modeling Patterns - -### Star Schema (Data Warehousing) -```sql --- Central fact table -CREATE TABLE sales_facts ( - sale_id BIGINT PRIMARY KEY, - product_id INT REFERENCES products(id), - customer_id INT REFERENCES customers(id), - date_id INT REFERENCES date_dimension(id), - store_id INT REFERENCES stores(id), - quantity INT, - unit_price DECIMAL(8,2), - total_amount DECIMAL(10,2) -); - --- Dimension tables -CREATE TABLE date_dimension ( - id INT PRIMARY KEY, - date_value DATE, - year INT, - quarter INT, - month INT, - day_of_week INT, - is_weekend BOOLEAN -); -``` - -### Snowflake Schema -```sql --- Normalized dimension tables -CREATE TABLE products ( - id INT PRIMARY KEY, - name VARCHAR(200), - category_id INT REFERENCES product_categories(id), - brand_id INT REFERENCES brands(id) -); - -CREATE TABLE product_categories ( - id INT PRIMARY KEY, - name VARCHAR(100), - parent_category_id INT REFERENCES product_categories(id) -); -``` - -### Document Model (JSON Storage) -```sql --- Flexible document storage with indexing -CREATE TABLE documents ( - id UUID PRIMARY KEY, - document_type VARCHAR(50), - data JSONB, - created_at TIMESTAMP DEFAULT NOW(), - updated_at TIMESTAMP DEFAULT NOW() -); - --- Index on JSON properties -CREATE INDEX idx_documents_user_id -ON documents USING GIN ((data->>'user_id')); - -CREATE INDEX idx_documents_status -ON documents ((data->>'status')) -WHERE document_type = 'order'; -``` - -### Graph Data Patterns -```sql --- Adjacency list for hierarchical data -CREATE TABLE categories ( - id INT PRIMARY KEY, - name VARCHAR(100), - parent_id INT REFERENCES categories(id), - level INT, - path VARCHAR(500) -- Materialized path: "/1/5/12/" -); - --- Many-to-many relationships -CREATE TABLE relationships ( - id UUID PRIMARY KEY, - from_entity_id UUID, - to_entity_id UUID, - relationship_type VARCHAR(50), - created_at TIMESTAMP, - INDEX (from_entity_id, relationship_type), - INDEX (to_entity_id, 
relationship_type) -); -``` - -## Migration Strategies - -### Zero-Downtime Migration (Expand-Contract Pattern) - -**Phase 1: Expand** -```sql --- Add new column without constraints -ALTER TABLE users ADD COLUMN new_email VARCHAR(255); - --- Backfill data in batches -UPDATE users SET new_email = email WHERE id BETWEEN 1 AND 1000; --- Continue in batches... - --- Add constraints after backfill -ALTER TABLE users ADD CONSTRAINT users_new_email_unique UNIQUE (new_email); -ALTER TABLE users ALTER COLUMN new_email SET NOT NULL; -``` - -**Phase 2: Contract** -```sql --- Update application to use new column --- Deploy application changes --- Verify new column is being used - --- Remove old column -ALTER TABLE users DROP COLUMN email; --- Rename new column -ALTER TABLE users RENAME COLUMN new_email TO email; -``` - -### Data Type Changes -```sql --- Safe string to integer conversion -ALTER TABLE products ADD COLUMN sku_number INTEGER; -UPDATE products SET sku_number = CAST(sku AS INTEGER) WHERE sku ~ '^[0-9]+$'; --- Validate conversion success before dropping old column -``` - -## Partitioning Strategies - -### Horizontal Partitioning (Sharding) -```sql --- Range partitioning by date -CREATE TABLE sales_2023 PARTITION OF sales -FOR VALUES FROM ('2023-01-01') TO ('2024-01-01'); - -CREATE TABLE sales_2024 PARTITION OF sales -FOR VALUES FROM ('2024-01-01') TO ('2025-01-01'); - --- Hash partitioning by user_id -CREATE TABLE user_data_0 PARTITION OF user_data -FOR VALUES WITH (MODULUS 4, REMAINDER 0); - -CREATE TABLE user_data_1 PARTITION OF user_data -FOR VALUES WITH (MODULUS 4, REMAINDER 1); -``` - -### Vertical Partitioning -```sql --- Separate frequently accessed columns -CREATE TABLE users_core ( - id INT PRIMARY KEY, - email VARCHAR(255), - status VARCHAR(20), - created_at TIMESTAMP -); - --- Less frequently accessed profile data -CREATE TABLE users_profile ( - user_id INT PRIMARY KEY REFERENCES users_core(id), - bio TEXT, - preferences JSONB, - last_login TIMESTAMP -); 
-``` - -## Connection Management - -### Connection Pooling -- **Pool Size**: CPU cores × 2 + effective spindle count -- **Connection Lifetime**: Rotate connections to prevent resource leaks -- **Timeout Settings**: Connection, idle, and query timeouts -- **Health Checks**: Regular connection validation - -### Read Replicas Strategy -```sql --- Write queries to primary -INSERT INTO users (email, name) VALUES ('user@example.com', 'John Doe'); - --- Read queries to replicas (with appropriate read preference) -SELECT * FROM users WHERE status = 'active'; -- Route to read replica - --- Consistent reads when required -SELECT * FROM users WHERE id = LAST_INSERT_ID(); -- Route to primary -``` - -## Caching Layers - -### Cache-Aside Pattern -```python -def get_user(user_id): - # Try cache first - user = cache.get(f"user:{user_id}") - if user is None: - # Cache miss - query database - user = db.query("SELECT * FROM users WHERE id = %s", user_id) - # Store in cache - cache.set(f"user:{user_id}", user, ttl=3600) - return user -``` - -### Write-Through Cache -- **Consistency**: Always keep cache and database in sync -- **Write Latency**: Higher due to dual writes -- **Data Safety**: No data loss on cache failures - -### Cache Invalidation Strategies -1. **TTL-Based**: Time-based expiration -2. **Event-Driven**: Invalidate on data changes -3. **Version-Based**: Use version numbers for consistency -4. 
**Tag-Based**: Group related cache entries - -## Database Selection Guide - -### SQL Databases -**PostgreSQL** -- **Strengths**: ACID compliance, complex queries, JSON support, extensibility -- **Use Cases**: OLTP applications, data warehousing, geospatial data -- **Scale**: Vertical scaling with read replicas - -**MySQL** -- **Strengths**: Performance, replication, wide ecosystem support -- **Use Cases**: Web applications, content management, e-commerce -- **Scale**: Horizontal scaling through sharding - -### NoSQL Databases - -**Document Stores (MongoDB, CouchDB)** -- **Strengths**: Flexible schema, horizontal scaling, developer productivity -- **Use Cases**: Content management, catalogs, user profiles -- **Trade-offs**: Eventual consistency, complex queries limitations - -**Key-Value Stores (Redis, DynamoDB)** -- **Strengths**: High performance, simple model, excellent caching -- **Use Cases**: Session storage, real-time analytics, gaming leaderboards -- **Trade-offs**: Limited query capabilities, data modeling constraints - -**Column-Family (Cassandra, HBase)** -- **Strengths**: Write-heavy workloads, linear scalability, fault tolerance -- **Use Cases**: Time-series data, IoT applications, messaging systems -- **Trade-offs**: Query flexibility, consistency model complexity - -**Graph Databases (Neo4j, Amazon Neptune)** -- **Strengths**: Relationship queries, pattern matching, recommendation engines -- **Use Cases**: Social networks, fraud detection, knowledge graphs -- **Trade-offs**: Specialized use cases, learning curve - -### NewSQL Databases -**Distributed SQL (CockroachDB, TiDB, Spanner)** -- **Strengths**: SQL compatibility with horizontal scaling -- **Use Cases**: Global applications requiring ACID guarantees -- **Trade-offs**: Complexity, latency for distributed transactions - -## Tools & Scripts - -### Schema Analyzer -- **Input**: SQL DDL files, JSON schema definitions -- **Analysis**: Normalization compliance, constraint validation, naming 
conventions -- **Output**: Analysis report, Mermaid ERD, improvement recommendations - -### Index Optimizer -- **Input**: Schema definition, query patterns -- **Analysis**: Missing indexes, redundancy detection, selectivity estimation -- **Output**: Index recommendations, CREATE INDEX statements, performance projections - -### Migration Generator -- **Input**: Current and target schemas -- **Analysis**: Schema differences, dependency resolution, risk assessment -- **Output**: Migration scripts, rollback plans, validation queries +→ See references/database-design-reference.md for details ## Best Practices @@ -540,4 +70,4 @@ def get_user(user_id): Effective database design requires balancing multiple competing concerns: performance, scalability, maintainability, and business requirements. This skill provides the tools and knowledge to make informed decisions throughout the database lifecycle, from initial schema design through production optimization and evolution. -The included tools automate common analysis and optimization tasks, while the comprehensive guides provide the theoretical foundation for making sound architectural decisions. Whether building a new system or optimizing an existing one, these resources provide expert-level guidance for creating robust, scalable database solutions. \ No newline at end of file +The included tools automate common analysis and optimization tasks, while the comprehensive guides provide the theoretical foundation for making sound architectural decisions. Whether building a new system or optimizing an existing one, these resources provide expert-level guidance for creating robust, scalable database solutions. 
diff --git a/docs/skills/engineering/database-schema-designer.md b/docs/skills/engineering/database-schema-designer.md index 5a4c30e..e1d58e2 100644 --- a/docs/skills/engineering/database-schema-designer.md +++ b/docs/skills/engineering/database-schema-designer.md @@ -10,6 +10,8 @@ description: "Database Schema Designer - Claude Code skill from the Engineering --- +# Database Schema Designer + **Tier:** POWERFUL **Category:** Engineering **Domain:** Data Architecture / Backend @@ -74,286 +76,7 @@ User 1──* AuditLog --- ## Full Schema Example (Task Management SaaS) - -### Prisma Schema - -```prisma -// schema.prisma -generator client { - provider = "prisma-client-js" -} - -datasource db { - provider = "postgresql" - url = env("DATABASE_URL") -} - -// ── Multi-tenancy ───────────────────────────────────────────────────────────── - -model Organization { - id String @id @default(cuid()) - name String - slug String @unique - plan Plan @default(FREE) - createdAt DateTime @default(now()) @map("created_at") - updatedAt DateTime @updatedAt @map("updated_at") - deletedAt DateTime? @map("deleted_at") - - users OrganizationMember[] - projects Project[] - auditLogs AuditLog[] - - @@map("organizations") -} - -model OrganizationMember { - id String @id @default(cuid()) - organizationId String @map("organization_id") - userId String @map("user_id") - role OrgRole @default(MEMBER) - joinedAt DateTime @default(now()) @map("joined_at") - - organization Organization @relation(fields: [organizationId], references: [id], onDelete: Cascade) - user User @relation(fields: [userId], references: [id], onDelete: Cascade) - - @@unique([organizationId, userId]) - @@index([userId]) - @@map("organization_members") -} - -model User { - id String @id @default(cuid()) - email String @unique - name String? - avatarUrl String? @map("avatar_url") - passwordHash String? @map("password_hash") - emailVerifiedAt DateTime? @map("email_verified_at") - lastLoginAt DateTime? 
@map("last_login_at") - createdAt DateTime @default(now()) @map("created_at") - updatedAt DateTime @updatedAt @map("updated_at") - deletedAt DateTime? @map("deleted_at") - - memberships OrganizationMember[] - ownedProjects Project[] @relation("ProjectOwner") - assignedTasks TaskAssignment[] - comments Comment[] - auditLogs AuditLog[] - - @@map("users") -} - -// ── Core entities ───────────────────────────────────────────────────────────── - -model Project { - id String @id @default(cuid()) - organizationId String @map("organization_id") - ownerId String @map("owner_id") - name String - description String? - status ProjectStatus @default(ACTIVE) - settings Json @default("{}") - createdAt DateTime @default(now()) @map("created_at") - updatedAt DateTime @updatedAt @map("updated_at") - deletedAt DateTime? @map("deleted_at") - - organization Organization @relation(fields: [organizationId], references: [id]) - owner User @relation("ProjectOwner", fields: [ownerId], references: [id]) - tasks Task[] - labels Label[] - - @@index([organizationId]) - @@index([organizationId, status]) - @@index([deletedAt]) - @@map("projects") -} - -model Task { - id String @id @default(cuid()) - projectId String @map("project_id") - title String - description String? - status TaskStatus @default(TODO) - priority Priority @default(MEDIUM) - dueDate DateTime? @map("due_date") - position Float @default(0) // For drag-and-drop ordering - version Int @default(1) // Optimistic locking - createdById String @map("created_by_id") - updatedById String @map("updated_by_id") - createdAt DateTime @default(now()) @map("created_at") - updatedAt DateTime @updatedAt @map("updated_at") - deletedAt DateTime? 
@map("deleted_at") - - project Project @relation(fields: [projectId], references: [id]) - assignments TaskAssignment[] - labels TaskLabel[] - comments Comment[] - attachments Attachment[] - - @@index([projectId]) - @@index([projectId, status]) - @@index([projectId, deletedAt]) - @@index([dueDate], where: { deletedAt: null }) // Partial index - @@map("tasks") -} - -// ── Polymorphic attachments ─────────────────────────────────────────────────── - -model Attachment { - id String @id @default(cuid()) - // Polymorphic association - entityType String @map("entity_type") // "task" | "comment" - entityId String @map("entity_id") - filename String - mimeType String @map("mime_type") - sizeBytes Int @map("size_bytes") - storageKey String @map("storage_key") // S3 key - uploadedById String @map("uploaded_by_id") - createdAt DateTime @default(now()) @map("created_at") - - // Only one concrete relation (task) — polymorphic handled at app level - task Task? @relation(fields: [entityId], references: [id], map: "attachment_task_fk") - - @@index([entityType, entityId]) - @@map("attachments") -} - -// ── Audit trail ─────────────────────────────────────────────────────────────── - -model AuditLog { - id String @id @default(cuid()) - organizationId String @map("organization_id") - userId String? @map("user_id") - action String // "task.created", "task.status_changed" - entityType String @map("entity_type") - entityId String @map("entity_id") - before Json? // Previous state - after Json? // New state - ipAddress String? @map("ip_address") - userAgent String? @map("user_agent") - createdAt DateTime @default(now()) @map("created_at") - - organization Organization @relation(fields: [organizationId], references: [id]) - user User? 
@relation(fields: [userId], references: [id]) - - @@index([organizationId, createdAt(sort: Desc)]) - @@index([entityType, entityId]) - @@index([userId]) - @@map("audit_logs") -} - -enum Plan { FREE STARTER GROWTH ENTERPRISE } -enum OrgRole { OWNER ADMIN MEMBER VIEWER } -enum ProjectStatus { ACTIVE ARCHIVED } -enum TaskStatus { TODO IN_PROGRESS IN_REVIEW DONE CANCELLED } -enum Priority { LOW MEDIUM HIGH CRITICAL } -``` - ---- - -### Drizzle Schema (TypeScript) - -```typescript -// db/schema.ts -import { - pgTable, text, timestamp, integer, boolean, - varchar, jsonb, real, pgEnum, uniqueIndex, index, -} from 'drizzle-orm/pg-core' -import { createId } from '@paralleldrive/cuid2' - -export const taskStatusEnum = pgEnum('task_status', [ - 'todo', 'in_progress', 'in_review', 'done', 'cancelled' -]) -export const priorityEnum = pgEnum('priority', ['low', 'medium', 'high', 'critical']) - -export const tasks = pgTable('tasks', { - id: text('id').primaryKey().$defaultFn(() => createId()), - projectId: text('project_id').notNull().references(() => projects.id), - title: varchar('title', { length: 500 }).notNull(), - description: text('description'), - status: taskStatusEnum('status').notNull().default('todo'), - priority: priorityEnum('priority').notNull().default('medium'), - dueDate: timestamp('due_date', { withTimezone: true }), - position: real('position').notNull().default(0), - version: integer('version').notNull().default(1), - createdById: text('created_by_id').notNull().references(() => users.id), - updatedById: text('updated_by_id').notNull().references(() => users.id), - createdAt: timestamp('created_at', { withTimezone: true }).notNull().defaultNow(), - updatedAt: timestamp('updated_at', { withTimezone: true }).notNull().defaultNow(), - deletedAt: timestamp('deleted_at', { withTimezone: true }), -}, (table) => ({ - projectIdx: index('tasks_project_id_idx').on(table.projectId), - projectStatusIdx: index('tasks_project_status_idx').on(table.projectId, table.status), 
-})) - -// Infer TypeScript types -export type Task = typeof tasks.$inferSelect -export type NewTask = typeof tasks.$inferInsert -``` - ---- - -### Alembic Migration (Python / SQLAlchemy) - -```python -# alembic/versions/20260301_create_tasks.py -"""Create tasks table - -Revision ID: a1b2c3d4e5f6 -Revises: previous_revision -Create Date: 2026-03-01 12:00:00 -""" - -from alembic import op -import sqlalchemy as sa -from sqlalchemy.dialects import postgresql - -revision = 'a1b2c3d4e5f6' -down_revision = 'previous_revision' - - -def upgrade() -> None: - # Create enums - task_status = postgresql.ENUM( - 'todo', 'in_progress', 'in_review', 'done', 'cancelled', - name='task_status' - ) - task_status.create(op.get_bind()) - - op.create_table( - 'tasks', - sa.Column('id', sa.Text(), primary_key=True), - sa.Column('project_id', sa.Text(), sa.ForeignKey('projects.id'), nullable=False), - sa.Column('title', sa.VARCHAR(500), nullable=False), - sa.Column('description', sa.Text()), - sa.Column('status', postgresql.ENUM('todo', 'in_progress', 'in_review', 'done', 'cancelled', name='task_status', create_type=False), nullable=False, server_default='todo'), - sa.Column('priority', sa.Text(), nullable=False, server_default='medium'), - sa.Column('due_date', sa.TIMESTAMP(timezone=True)), - sa.Column('position', sa.Float(), nullable=False, server_default='0'), - sa.Column('version', sa.Integer(), nullable=False, server_default='1'), - sa.Column('created_by_id', sa.Text(), sa.ForeignKey('users.id'), nullable=False), - sa.Column('updated_by_id', sa.Text(), sa.ForeignKey('users.id'), nullable=False), - sa.Column('created_at', sa.TIMESTAMP(timezone=True), nullable=False, server_default=sa.text('NOW()')), - sa.Column('updated_at', sa.TIMESTAMP(timezone=True), nullable=False, server_default=sa.text('NOW()')), - sa.Column('deleted_at', sa.TIMESTAMP(timezone=True)), - ) - - # Indexes - op.create_index('tasks_project_id_idx', 'tasks', ['project_id']) - op.create_index('tasks_project_status_idx', 
'tasks', ['project_id', 'status']) - # Partial index for active tasks only - op.create_index( - 'tasks_due_date_active_idx', - 'tasks', ['due_date'], - postgresql_where=sa.text('deleted_at IS NULL') - ) - - -def downgrade() -> None: - op.drop_table('tasks') - op.execute("DROP TYPE IF EXISTS task_status") -``` - ---- +→ See references/full-schema-examples.md for details ## Row-Level Security (RLS) Policies @@ -417,7 +140,7 @@ async function seed() { // Create org const [org] = await db.insert(organizations).values({ id: createId(), - name: 'Acme Corp', + name: "acme-corp", slug: 'acme', plan: 'growth', }).returning() @@ -426,7 +149,7 @@ async function seed() { const adminUser = await db.insert(users).values({ id: createId(), email: 'admin@acme.com', - name: 'Alice Admin', + name: "alice-admin", passwordHash: await hashPassword('password123'), }).returning().then(r => r[0]) @@ -435,7 +158,7 @@ async function seed() { id: createId(), organizationId: org.id, ownerId: adminUser.id, - name: faker.company.catchPhrase(), + name: "fakercompanycatchphrase", description: faker.lorem.paragraph(), status: 'active' as const, })) diff --git a/docs/skills/engineering/dependency-auditor.md b/docs/skills/engineering/dependency-auditor.md index d8d179b..984ed83 100644 --- a/docs/skills/engineering/dependency-auditor.md +++ b/docs/skills/engineering/dependency-auditor.md @@ -10,6 +10,8 @@ description: "Dependency Auditor - Claude Code skill from the Engineering - POWE --- +# Dependency Auditor + > **Skill Type:** POWERFUL > **Category:** Engineering > **Domain:** Dependency Management & Security diff --git a/docs/skills/engineering/env-secrets-manager.md b/docs/skills/engineering/env-secrets-manager.md index 59597e1..86bbae7 100644 --- a/docs/skills/engineering/env-secrets-manager.md +++ b/docs/skills/engineering/env-secrets-manager.md @@ -10,6 +10,8 @@ description: "Env & Secrets Manager - Claude Code skill from the Engineering - P --- +# Env & Secrets Manager + **Tier:** POWERFUL 
**Category:** Engineering **Domain:** Security / DevOps / Configuration Management @@ -201,361 +203,7 @@ git add .env.example --- ## Required Variable Validation Script - -```bash -#!/bin/bash -# scripts/validate-env.sh -# Run at app startup or in CI before deploy -# Exit 1 if any required var is missing or empty - -set -euo pipefail - -MISSING=() -WARNINGS=() - -# --- Define required vars by environment --- -ALWAYS_REQUIRED=( - APP_SECRET - APP_URL - DATABASE_URL - AUTH_JWT_SECRET - AUTH_REFRESH_SECRET -) - -PROD_REQUIRED=( - STRIPE_SECRET_KEY - STRIPE_WEBHOOK_SECRET - SENTRY_DSN -) - -# --- Check always-required vars --- -for var in "${ALWAYS_REQUIRED[@]}"; do - if [ -z "${!var:-}" ]; then - MISSING+=("$var") - fi -done - -# --- Check prod-only vars --- -if [ "${APP_ENV:-}" = "production" ] || [ "${NODE_ENV:-}" = "production" ]; then - for var in "${PROD_REQUIRED[@]}"; do - if [ -z "${!var:-}" ]; then - MISSING+=("$var (required in production)") - fi - done -fi - -# --- Validate format/length constraints --- -if [ -n "${AUTH_JWT_SECRET:-}" ] && [ ${#AUTH_JWT_SECRET} -lt 32 ]; then - WARNINGS+=("AUTH_JWT_SECRET is shorter than 32 chars — insecure") -fi - -if [ -n "${DATABASE_URL:-}" ]; then - if ! echo "$DATABASE_URL" | grep -qE "^(postgres|postgresql|mysql|mongodb|redis)://"; then - WARNINGS+=("DATABASE_URL doesn't look like a valid connection string") - fi -fi - -if [ -n "${APP_PORT:-}" ]; then - if ! [[ "$APP_PORT" =~ ^[0-9]+$ ]] || [ "$APP_PORT" -lt 1 ] || [ "$APP_PORT" -gt 65535 ]; then - WARNINGS+=("APP_PORT=$APP_PORT is not a valid port number") - fi -fi - -# --- Report --- -if [ ${#WARNINGS[@]} -gt 0 ]; then - echo "WARNINGS:" - for w in "${WARNINGS[@]}"; do - echo " ⚠️ $w" - done -fi - -if [ ${#MISSING[@]} -gt 0 ]; then - echo "" - echo "FATAL: Missing required environment variables:" - for var in "${MISSING[@]}"; do - echo " ❌ $var" - done - echo "" - echo "Copy .env.example to .env and fill in missing values." 
- exit 1 -fi - -echo "✅ All required environment variables are set" -``` - -Node.js equivalent: -```typescript -// src/config/validateEnv.ts -const required = [ - 'APP_SECRET', 'APP_URL', 'DATABASE_URL', - 'AUTH_JWT_SECRET', 'AUTH_REFRESH_SECRET', -] - -const missing = required.filter(key => !process.env[key]) - -if (missing.length > 0) { - console.error('FATAL: Missing required environment variables:', missing) - process.exit(1) -} - -if (process.env.AUTH_JWT_SECRET && process.env.AUTH_JWT_SECRET.length < 32) { - console.error('FATAL: AUTH_JWT_SECRET must be at least 32 characters') - process.exit(1) -} - -export const config = { - appSecret: process.env.APP_SECRET!, - appUrl: process.env.APP_URL!, - databaseUrl: process.env.DATABASE_URL!, - jwtSecret: process.env.AUTH_JWT_SECRET!, - refreshSecret: process.env.AUTH_REFRESH_SECRET!, - stripeKey: process.env.STRIPE_SECRET_KEY, // optional - port: parseInt(process.env.APP_PORT ?? '3000', 10), -} as const -``` - ---- - -## Secret Leak Detection - -### Scan Working Tree -```bash -#!/bin/bash -# scripts/scan-secrets.sh -# Scan staged files and working tree for common secret patterns - -FAIL=0 - -check() { - local label="$1" - local pattern="$2" - local matches - - matches=$(git diff --cached -U0 2>/dev/null | grep "^+" | grep -vE "^(\+\+\+|#|\/\/)" | \ - grep -E "$pattern" | grep -v ".env.example" | grep -v "test\|mock\|fixture\|fake" || true) - - if [ -n "$matches" ]; then - echo "SECRET DETECTED [$label]:" - echo "$matches" | head -5 - FAIL=1 - fi -} - -# AWS Access Keys -check "AWS Access Key" "AKIA[0-9A-Z]{16}" -check "AWS Secret Key" "aws_secret_access_key\s*=\s*['\"]?[A-Za-z0-9/+]{40}" - -# Stripe -check "Stripe Live Key" "sk_live_[0-9a-zA-Z]{24,}" -check "Stripe Test Key" "sk_test_[0-9a-zA-Z]{24,}" -check "Stripe Webhook" "whsec_[0-9a-zA-Z]{32,}" - -# JWT / Generic secrets -check "Hardcoded JWT" "eyJ[A-Za-z0-9_-]{20,}\.[A-Za-z0-9_-]{20,}" -check "Generic Secret" 
"(secret|password|passwd|api_key|apikey|token)\s*[:=]\s*['\"][^'\"]{12,}['\"]" - -# Private keys -check "Private Key Block" "-----BEGIN (RSA |EC |DSA |OPENSSH )?PRIVATE KEY-----" -check "PEM Certificate" "-----BEGIN CERTIFICATE-----" - -# Connection strings with credentials -check "DB Connection" "(postgres|mysql|mongodb)://[^:]+:[^@]+@" -check "Redis Auth" "redis://:[^@]+@\|rediss://:[^@]+@" - -# Google -check "Google API Key" "AIza[0-9A-Za-z_-]{35}" -check "Google OAuth" "[0-9]+-[0-9A-Za-z_]{32}\.apps\.googleusercontent\.com" - -# GitHub -check "GitHub Token" "gh[ps]_[A-Za-z0-9]{36,}" -check "GitHub Fine-grained" "github_pat_[A-Za-z0-9_]{82}" - -# Slack -check "Slack Token" "xox[baprs]-[0-9A-Za-z]{10,}" -check "Slack Webhook" "https://hooks\.slack\.com/services/[A-Z0-9]{9,}/[A-Z0-9]{9,}/[A-Za-z0-9]{24,}" - -# Twilio -check "Twilio SID" "AC[a-z0-9]{32}" -check "Twilio Token" "SK[a-z0-9]{32}" - -if [ $FAIL -eq 1 ]; then - echo "" - echo "BLOCKED: Secrets detected in staged changes." - echo "Remove secrets before committing. Use environment variables instead." - echo "If this is a false positive, add it to .secretsignore or use:" - echo " git commit --no-verify (only if you're 100% certain it's safe)" - exit 1 -fi - -echo "No secrets detected in staged changes." -``` - -### Scan Git History (post-incident) -```bash -#!/bin/bash -# scripts/scan-history.sh — scan entire git history for leaked secrets - -PATTERNS=( - "AKIA[0-9A-Z]{16}" - "sk_live_[0-9a-zA-Z]{24}" - "sk_test_[0-9a-zA-Z]{24}" - "-----BEGIN.*PRIVATE KEY-----" - "AIza[0-9A-Za-z_-]{35}" - "ghp_[A-Za-z0-9]{36}" - "xox[baprs]-[0-9A-Za-z]{10,}" -) - -for pattern in "${PATTERNS[@]}"; do - echo "Scanning for: $pattern" - git log --all -p --no-color 2>/dev/null | \ - grep -n "$pattern" | \ - grep "^+" | \ - grep -v "^+++" | \ - head -10 -done - -# Alternative: use truffleHog or gitleaks for comprehensive scanning -# gitleaks detect --source . --log-opts="--all" -# trufflehog git file://. 
--only-verified -``` - ---- - -## Pre-commit Hook Installation - -```bash -#!/bin/bash -# Install the pre-commit hook -HOOK_PATH=".git/hooks/pre-commit" - -cat > "$HOOK_PATH" << 'HOOK' -#!/bin/bash -# Pre-commit: scan for secrets before every commit - -SCRIPT="scripts/scan-secrets.sh" - -if [ -f "$SCRIPT" ]; then - bash "$SCRIPT" -else - # Inline fallback if script not present - if git diff --cached -U0 | grep "^+" | grep -qE "AKIA[0-9A-Z]{16}|sk_live_|-----BEGIN.*PRIVATE KEY"; then - echo "BLOCKED: Possible secret detected in staged changes." - exit 1 - fi -fi -HOOK - -chmod +x "$HOOK_PATH" -echo "Pre-commit hook installed at $HOOK_PATH" -``` - -Using `pre-commit` framework (recommended for teams): -```yaml -# .pre-commit-config.yaml -repos: - - repo: https://github.com/gitleaks/gitleaks - rev: v8.18.0 - hooks: - - id: gitleaks - - - repo: local - hooks: - - id: validate-env-example - name: Check .env.example is up to date - language: script - entry: bash scripts/check-env-example.sh - pass_filenames: false -``` - ---- - -## Credential Rotation Workflow - -When a secret is leaked or compromised: - -### Step 1 — Detect & Confirm -```bash -# Confirm which secret was exposed -git log --all -p --no-color | grep -A2 -B2 "AKIA\|sk_live_\|SECRET" - -# Check if secret is in any open PRs -gh pr list --state open | while read pr; do - gh pr diff $(echo $pr | awk '{print $1}') | grep -E "AKIA|sk_live_" && echo "Found in PR: $pr" -done -``` - -### Step 2 — Identify Exposure Window -```bash -# Find first commit that introduced the secret -git log --all -p --no-color -- "*.env" "*.json" "*.yaml" "*.ts" "*.py" | \ - grep -B 10 "THE_LEAKED_VALUE" | grep "^commit" | tail -1 - -# Get commit date -git show --format="%ci" COMMIT_HASH | head -1 - -# Check if secret appears in public repos (GitHub) -gh api search/code -X GET -f q="THE_LEAKED_VALUE" | jq '.total_count, .items[].html_url' -``` - -### Step 3 — Rotate Credential -Per service — rotate immediately: -- **AWS**: IAM console → 
delete access key → create new → update everywhere -- **Stripe**: Dashboard → Developers → API keys → Roll key -- **GitHub PAT**: Settings → Developer Settings → Personal access tokens → Revoke → Create new -- **DB password**: `ALTER USER app_user PASSWORD 'new-strong-password-here';` -- **JWT secret**: Rotate key (all existing sessions invalidated — users re-login) - -### Step 4 — Update All Environments -```bash -# Update secret manager (source of truth) -# Then redeploy to pull new values - -# Vault KV v2 -vault kv put secret/myapp/prod \ - STRIPE_SECRET_KEY="sk_live_NEW..." \ - APP_SECRET="new-secret-here" - -# AWS SSM -aws ssm put-parameter \ - --name "/myapp/prod/STRIPE_SECRET_KEY" \ - --value "sk_live_NEW..." \ - --type "SecureString" \ - --overwrite - -# 1Password -op item edit "MyApp Prod" \ - --field "STRIPE_SECRET_KEY[password]=sk_live_NEW..." - -# Doppler -doppler secrets set STRIPE_SECRET_KEY="sk_live_NEW..." --project myapp --config prod -``` - -### Step 5 — Remove from Git History -```bash -# WARNING: rewrites history — coordinate with team first -git filter-repo --path-glob "*.env" --invert-paths - -# Or remove specific string from all commits -git filter-repo --replace-text <(echo "LEAKED_VALUE==>REDACTED") - -# Force push all branches (requires team coordination + force push permissions) -git push origin --force --all - -# Notify all developers to re-clone -``` - -### Step 6 — Verify -```bash -# Confirm secret no longer in history -git log --all -p | grep "LEAKED_VALUE" | wc -l # should be 0 - -# Test new credentials work -curl -H "Authorization: Bearer $NEW_TOKEN" https://api.service.com/test - -# Monitor for unauthorized usage of old credential (check service audit logs) -``` - ---- +→ See references/validation-detection-rotation.md for details ## Secret Manager Integrations diff --git a/docs/skills/engineering/git-worktree-manager.md b/docs/skills/engineering/git-worktree-manager.md index 37152be..1939af1 100644 --- 
a/docs/skills/engineering/git-worktree-manager.md +++ b/docs/skills/engineering/git-worktree-manager.md @@ -10,6 +10,8 @@ description: "Git Worktree Manager - Claude Code skill from the Engineering - PO --- +# Git Worktree Manager + **Tier:** POWERFUL **Category:** Engineering **Domain:** Parallel Development & Branch Isolation diff --git a/docs/skills/engineering/mcp-server-builder.md b/docs/skills/engineering/mcp-server-builder.md index b8a3ac1..107c57c 100644 --- a/docs/skills/engineering/mcp-server-builder.md +++ b/docs/skills/engineering/mcp-server-builder.md @@ -10,6 +10,8 @@ description: "MCP Server Builder - Claude Code skill from the Engineering - POWE --- +# MCP Server Builder + **Tier:** POWERFUL **Category:** Engineering **Domain:** AI / API Integration diff --git a/docs/skills/engineering/migration-architect.md b/docs/skills/engineering/migration-architect.md index 3594082..2a99f88 100644 --- a/docs/skills/engineering/migration-architect.md +++ b/docs/skills/engineering/migration-architect.md @@ -10,6 +10,8 @@ description: "Migration Architect - Claude Code skill from the Engineering - POW --- +# Migration Architect + **Tier:** POWERFUL **Category:** Engineering - Migration Strategy **Purpose:** Zero-downtime migration planning, compatibility validation, and rollback strategy generation diff --git a/docs/skills/engineering/monorepo-navigator.md b/docs/skills/engineering/monorepo-navigator.md index 1a09482..518e288 100644 --- a/docs/skills/engineering/monorepo-navigator.md +++ b/docs/skills/engineering/monorepo-navigator.md @@ -10,6 +10,8 @@ description: "Monorepo Navigator - Claude Code skill from the Engineering - POWE --- +# Monorepo Navigator + **Tier:** POWERFUL **Category:** Engineering **Domain:** Monorepo Architecture / Build Systems @@ -65,519 +67,7 @@ Most modern setups: **pnpm workspaces + Turborepo + Changesets** --- ## Turborepo - -### turbo.json pipeline config - -```json -{ - "$schema": "https://turbo.build/schema.json", - "globalEnv": 
["NODE_ENV", "DATABASE_URL"], - "pipeline": { - "build": { - "dependsOn": ["^build"], // build deps first (topological order) - "outputs": [".next/**", "dist/**", "build/**"], - "env": ["NEXT_PUBLIC_API_URL"] - }, - "test": { - "dependsOn": ["^build"], // need built deps to test - "outputs": ["coverage/**"], - "cache": true - }, - "lint": { - "outputs": [], - "cache": true - }, - "dev": { - "cache": false, // never cache dev servers - "persistent": true // long-running process - }, - "type-check": { - "dependsOn": ["^build"], - "outputs": [] - } - } -} -``` - -### Key commands - -```bash -# Build everything (respects dependency order) -turbo run build - -# Build only affected packages (requires --filter) -turbo run build --filter=...[HEAD^1] # changed since last commit -turbo run build --filter=...[main] # changed vs main branch - -# Test only affected -turbo run test --filter=...[HEAD^1] - -# Run for a specific app and all its dependencies -turbo run build --filter=@myorg/web... - -# Run for a specific package only (no dependencies) -turbo run build --filter=@myorg/ui - -# Dry-run — see what would run without executing -turbo run build --dry-run - -# Enable remote caching (Vercel Remote Cache) -turbo login -turbo link -``` - -### Remote caching setup - -```bash -# .turbo/config.json (auto-created by turbo link) -{ - "teamid": "team_xxxx", - "apiurl": "https://vercel.com" -} - -# Self-hosted cache server (open-source alternative) -# Run ducktape/turborepo-remote-cache or Turborepo's official server -TURBO_API=http://your-cache-server.internal \ -TURBO_TOKEN=your-token \ -TURBO_TEAM=your-team \ -turbo run build -``` - ---- - -## Nx - -### Project graph and affected commands - -```bash -# Install -npx create-nx-workspace@latest my-monorepo - -# Visualize the project graph (opens browser) -nx graph - -# Show affected packages for the current branch -nx affected:graph - -# Run only affected tests -nx affected --target=test - -# Run only affected builds -nx affected 
--target=build - -# Run affected with base/head (for CI) -nx affected --target=test --base=main --head=HEAD -``` - -### nx.json configuration - -```json -{ - "$schema": "./node_modules/nx/schemas/nx-schema.json", - "targetDefaults": { - "build": { - "dependsOn": ["^build"], - "cache": true - }, - "test": { - "cache": true, - "inputs": ["default", "^production"] - } - }, - "namedInputs": { - "default": ["{projectRoot}/**/*", "sharedGlobals"], - "production": ["default", "!{projectRoot}/**/*.spec.ts", "!{projectRoot}/jest.config.*"], - "sharedGlobals": [] - }, - "parallel": 4, - "cacheDirectory": "/tmp/nx-cache" -} -``` - ---- - -## pnpm Workspaces - -### pnpm-workspace.yaml - -```yaml -packages: - - 'apps/*' - - 'packages/*' - - 'tools/*' -``` - -### workspace:* protocol for local packages - -```json -// apps/web/package.json -{ - "name": "@myorg/web", - "dependencies": { - "@myorg/ui": "workspace:*", // always use local version - "@myorg/utils": "workspace:^", // local, but respect semver on publish - "@myorg/types": "workspace:~" - } -} -``` - -### Useful pnpm workspace commands - -```bash -# Install all packages across workspace -pnpm install - -# Run script in a specific package -pnpm --filter @myorg/web dev - -# Run script in all packages -pnpm --filter "*" build - -# Run script in a package and all its dependencies -pnpm --filter @myorg/web... build - -# Add a dependency to a specific package -pnpm --filter @myorg/web add react - -# Add a shared dev dependency to root -pnpm add -D typescript -w - -# List workspace packages -pnpm ls --depth -1 -r -``` - ---- - -## Cross-Package Impact Analysis - -When a shared package changes, determine what's affected before you ship. 
- -```bash -# Using Turborepo — show affected packages -turbo run build --filter=...[HEAD^1] --dry-run 2>&1 | grep "Tasks to run" - -# Using Nx -nx affected:apps --base=main --head=HEAD # which apps are affected -nx affected:libs --base=main --head=HEAD # which libs are affected - -# Manual analysis with pnpm -# Find all packages that depend on @myorg/utils: -grep -r '"@myorg/utils"' packages/*/package.json apps/*/package.json - -# Using jq for structured output -for pkg in packages/*/package.json apps/*/package.json; do - name=$(jq -r '.name' "$pkg") - if jq -e '.dependencies["@myorg/utils"] // .devDependencies["@myorg/utils"]' "$pkg" > /dev/null 2>&1; then - echo "$name depends on @myorg/utils" - fi -done -``` - ---- - -## Dependency Graph Visualization - -Generate a Mermaid diagram from your workspace: - -```bash -# Generate dependency graph as Mermaid -cat > scripts/gen-dep-graph.js << 'EOF' -const { execSync } = require('child_process'); -const fs = require('fs'); - -// Parse pnpm workspace packages -const packages = JSON.parse( - execSync('pnpm ls --depth -1 -r --json').toString() -); - -let mermaid = 'graph TD\n'; -packages.forEach(pkg => { - const deps = Object.keys(pkg.dependencies || {}) - .filter(d => d.startsWith('@myorg/')); - deps.forEach(dep => { - const from = pkg.name.replace('@myorg/', ''); - const to = dep.replace('@myorg/', ''); - mermaid += ` ${from} --> ${to}\n`; - }); -}); - -fs.writeFileSync('docs/dep-graph.md', '```mermaid\n' + mermaid + '```\n'); -console.log('Written to docs/dep-graph.md'); -EOF -node scripts/gen-dep-graph.js -``` - -**Example output:** - -```mermaid -graph TD - web --> ui - web --> utils - web --> types - mobile --> ui - mobile --> utils - mobile --> types - admin --> ui - admin --> utils - api --> types - ui --> utils -``` - ---- - -## Claude Code Configuration (Workspace-Aware CLAUDE.md) - -Place a root CLAUDE.md + per-package CLAUDE.md files: - -```markdown -# /CLAUDE.md — Root (applies to all packages) - -## Monorepo 
Structure -- apps/web — Next.js customer-facing app -- apps/admin — Next.js internal admin -- apps/api — Express REST API -- packages/ui — Shared React component library -- packages/utils — Shared utilities (pure functions only) -- packages/types — Shared TypeScript types (no runtime code) - -## Build System -- pnpm workspaces + Turborepo -- Always use `pnpm --filter ` to scope commands -- Never run `npm install` or `yarn` — pnpm only -- Run `turbo run build --filter=...[HEAD^1]` before committing - -## Task Scoping Rules -- When modifying packages/ui: also run tests for apps/web and apps/admin (they depend on it) -- When modifying packages/types: run type-check across ALL packages -- When modifying apps/api: only need to test apps/api - -## Package Manager -pnpm — version pinned in packageManager field of root package.json -``` - -```markdown -# /packages/ui/CLAUDE.md — Package-specific - -## This Package -Shared React component library. Zero business logic. Pure UI only. - -## Rules -- All components must be exported from src/index.ts -- No direct API calls in components — accept data via props -- Every component needs a Storybook story in src/stories/ -- Use Tailwind for styling — no CSS modules or styled-components - -## Testing -- Component tests: `pnpm --filter @myorg/ui test` -- Visual regression: `pnpm --filter @myorg/ui test:storybook` - -## Publishing -- Version bumps via changesets only — never edit package.json version manually -- Run `pnpm changeset` from repo root after changes -``` - ---- - -## Migration: Multi-Repo → Monorepo - -```bash -# Step 1: Create monorepo scaffold -mkdir my-monorepo && cd my-monorepo -pnpm init -echo "packages:\n - 'apps/*'\n - 'packages/*'" > pnpm-workspace.yaml - -# Step 2: Move repos with git history preserved -mkdir -p apps packages - -# For each existing repo: -git clone https://github.com/myorg/web-app -cd web-app -git filter-repo --to-subdirectory-filter apps/web # rewrites history into subdir -cd .. 
-git remote add web-app ./web-app -git fetch web-app --tags -git merge web-app/main --allow-unrelated-histories - -# Step 3: Update package names to scoped -# In each package.json, change "name": "web" to "name": "@myorg/web" - -# Step 4: Replace cross-repo npm deps with workspace:* -# apps/web/package.json: "@myorg/ui": "1.2.3" → "@myorg/ui": "workspace:*" - -# Step 5: Add shared configs to root -cp apps/web/.eslintrc.js .eslintrc.base.js -# Update each package's config to extend root: -# { "extends": ["../../.eslintrc.base.js"] } - -# Step 6: Add Turborepo -pnpm add -D turbo -w -# Create turbo.json (see above) - -# Step 7: Unified CI (see CI section below) -# Step 8: Test everything -turbo run build test lint -``` - ---- - -## CI Patterns - -### GitHub Actions — Affected Only - -```yaml -# .github/workflows/ci.yml -name: CI - -on: - push: - branches: [main] - pull_request: - -jobs: - affected: - runs-on: ubuntu-latest - steps: - - uses: actions/checkout@v4 - with: - fetch-depth: 0 # full history needed for affected detection - - - uses: pnpm/action-setup@v3 - with: - version: 9 - - - uses: actions/setup-node@v4 - with: - node-version: 20 - cache: pnpm - - - run: pnpm install --frozen-lockfile - - # Turborepo remote cache - - uses: actions/cache@v4 - with: - path: .turbo - key: ${{ runner.os }}-turbo-${{ github.sha }} - restore-keys: ${{ runner.os }}-turbo- - - # Only test/build affected packages - - name: Build affected - run: turbo run build --filter=...[origin/main] - env: - TURBO_TOKEN: ${{ secrets.TURBO_TOKEN }} - TURBO_TEAM: ${{ vars.TURBO_TEAM }} - - - name: Test affected - run: turbo run test --filter=...[origin/main] - - - name: Lint affected - run: turbo run lint --filter=...[origin/main] -``` - -### GitLab CI — Parallel Stages - -```yaml -# .gitlab-ci.yml -stages: [install, build, test, publish] - -variables: - PNPM_CACHE_FOLDER: .pnpm-store - -cache: - key: pnpm-$CI_COMMIT_REF_SLUG - paths: [.pnpm-store/, .turbo/] - -install: - stage: install - script: 
- - pnpm install --frozen-lockfile - artifacts: - paths: [node_modules/, packages/*/node_modules/, apps/*/node_modules/] - expire_in: 1h - -build:affected: - stage: build - needs: [install] - script: - - turbo run build --filter=...[origin/main] - artifacts: - paths: [apps/*/dist/, apps/*/.next/, packages/*/dist/] - -test:affected: - stage: test - needs: [build:affected] - script: - - turbo run test --filter=...[origin/main] - coverage: '/Statements\s*:\s*(\d+\.?\d*)%/' - artifacts: - reports: - coverage_report: - coverage_format: cobertura - path: "**/coverage/cobertura-coverage.xml" -``` - ---- - -## Publishing with Changesets - -```bash -# Install changesets -pnpm add -D @changesets/cli -w -pnpm changeset init - -# After making changes, create a changeset -pnpm changeset -# Interactive: select packages, choose semver bump, write changelog entry - -# In CI — version packages + update changelogs -pnpm changeset version - -# Publish all changed packages -pnpm changeset publish - -# Pre-release channel (for alpha/beta) -pnpm changeset pre enter beta -pnpm changeset -pnpm changeset version # produces 1.2.0-beta.0 -pnpm changeset publish --tag beta -pnpm changeset pre exit # back to stable releases -``` - -### Automated publish workflow (GitHub Actions) - -```yaml -# .github/workflows/release.yml -name: Release - -on: - push: - branches: [main] - -jobs: - release: - runs-on: ubuntu-latest - steps: - - uses: actions/checkout@v4 - - uses: pnpm/action-setup@v3 - - uses: actions/setup-node@v4 - with: - node-version: 20 - registry-url: https://registry.npmjs.org - - - run: pnpm install --frozen-lockfile - - - name: Create Release PR or Publish - uses: changesets/action@v1 - with: - publish: pnpm changeset publish - version: pnpm changeset version - commit: "chore: release packages" - title: "chore: release packages" - env: - GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} - NODE_AUTH_TOKEN: ${{ secrets.NPM_TOKEN }} -``` - ---- +→ See references/monorepo-tooling-reference.md for 
details ## Common Pitfalls diff --git a/docs/skills/engineering/observability-designer.md b/docs/skills/engineering/observability-designer.md index d47c109..dd0c147 100644 --- a/docs/skills/engineering/observability-designer.md +++ b/docs/skills/engineering/observability-designer.md @@ -10,6 +10,8 @@ description: "Observability Designer (POWERFUL) - Claude Code skill from the Eng --- +# Observability Designer (POWERFUL) + **Category:** Engineering **Tier:** POWERFUL **Description:** Design comprehensive observability strategies for production systems including SLI/SLO frameworks, alerting optimization, and dashboard generation. diff --git a/docs/skills/engineering/performance-profiler.md b/docs/skills/engineering/performance-profiler.md index 74ffc8e..bfee822 100644 --- a/docs/skills/engineering/performance-profiler.md +++ b/docs/skills/engineering/performance-profiler.md @@ -10,6 +10,8 @@ description: "Performance Profiler - Claude Code skill from the Engineering - PO --- +# Performance Profiler + **Tier:** POWERFUL **Category:** Engineering **Domain:** Performance Engineering @@ -55,478 +57,7 @@ Systematic performance profiling for Node.js, Python, and Go applications. 
Ident --- ## Node.js Profiling - -### CPU Flamegraph - -```bash -# Method 1: clinic.js (best for development) -npm install -g clinic - -# CPU flamegraph -clinic flame -- node dist/server.js - -# Heap profiler -clinic heapprofiler -- node dist/server.js - -# Bubble chart (event loop blocking) -clinic bubbles -- node dist/server.js - -# Load with autocannon while profiling -autocannon -c 50 -d 30 http://localhost:3000/api/tasks & -clinic flame -- node dist/server.js -``` - -```bash -# Method 2: Node.js built-in profiler -node --prof dist/server.js -# After running some load: -node --prof-process isolate-*.log | head -100 -``` - -```bash -# Method 3: V8 CPU profiler via inspector -node --inspect dist/server.js -# Open Chrome DevTools → Performance → Record -``` - -### Heap Snapshot / Memory Leak Detection - -```javascript -// Add to your server for on-demand heap snapshots -import v8 from 'v8' -import fs from 'fs' - -// Endpoint: POST /debug/heap-snapshot (protect with auth!) -app.post('/debug/heap-snapshot', (req, res) => { - const filename = `heap-${Date.now()}.heapsnapshot` - const snapshot = v8.writeHeapSnapshot(filename) - res.json({ snapshot }) -}) -``` - -```bash -# Take snapshots over time and compare in Chrome DevTools -curl -X POST http://localhost:3000/debug/heap-snapshot -# Wait 5 minutes of load -curl -X POST http://localhost:3000/debug/heap-snapshot -# Open both snapshots in Chrome → Memory → Compare -``` - -### Detect Event Loop Blocking - -```javascript -// Add blocked-at to detect synchronous blocking -import blocked from 'blocked-at' - -blocked((time, stack) => { - console.warn(`Event loop blocked for ${time}ms`) - console.warn(stack.join('\n')) -}, { threshold: 100 }) // Alert if blocked > 100ms -``` - -### Node.js Memory Profiling Script - -```javascript -// scripts/memory-profile.mjs -// Run: node --experimental-vm-modules scripts/memory-profile.mjs - -import { createRequire } from 'module' -const require = createRequire(import.meta.url) - 
-function formatBytes(bytes) { - return (bytes / 1024 / 1024).toFixed(2) + ' MB' -} - -function measureMemory(label) { - const mem = process.memoryUsage() - console.log(`\n[${label}]`) - console.log(` RSS: ${formatBytes(mem.rss)}`) - console.log(` Heap Used: ${formatBytes(mem.heapUsed)}`) - console.log(` Heap Total:${formatBytes(mem.heapTotal)}`) - console.log(` External: ${formatBytes(mem.external)}`) - return mem -} - -const baseline = measureMemory('Baseline') - -// Simulate your operation -for (let i = 0; i < 1000; i++) { - // Replace with your actual operation - const result = await someOperation() -} - -const after = measureMemory('After 1000 operations') - -console.log(`\n[Delta]`) -console.log(` Heap Used: +${formatBytes(after.heapUsed - baseline.heapUsed)}`) - -// If heap keeps growing across GC cycles, you have a leak -global.gc?.() // Run with --expose-gc flag -const afterGC = measureMemory('After GC') -if (afterGC.heapUsed > baseline.heapUsed * 1.1) { - console.warn('⚠️ Possible memory leak detected (>10% growth after GC)') -} -``` - ---- - -## Python Profiling - -### CPU Profiling with py-spy - -```bash -# Install -pip install py-spy - -# Profile a running process (no code changes needed) -py-spy top --pid $(pgrep -f "uvicorn") - -# Generate flamegraph SVG -py-spy record -o flamegraph.svg --pid $(pgrep -f "uvicorn") --duration 30 - -# Profile from the start -py-spy record -o flamegraph.svg -- python -m uvicorn app.main:app - -# Open flamegraph.svg in browser — look for wide bars = hot code paths -``` - -### cProfile for function-level profiling - -```python -# scripts/profile_endpoint.py -import cProfile -import pstats -import io -from app.services.task_service import TaskService - -def run(): - service = TaskService() - for _ in range(100): - service.list_tasks(user_id="user_1", page=1, limit=20) - -profiler = cProfile.Profile() -profiler.enable() -run() -profiler.disable() - -# Print top 20 functions by cumulative time -stream = io.StringIO() -stats 
= pstats.Stats(profiler, stream=stream) -stats.sort_stats('cumulative') -stats.print_stats(20) -print(stream.getvalue()) -``` - -### Memory profiling with memory_profiler - -```python -# pip install memory-profiler -from memory_profiler import profile - -@profile -def my_function(): - # Function to profile - data = load_large_dataset() - result = process(data) - return result -``` - -```bash -# Run with line-by-line memory tracking -python -m memory_profiler scripts/profile_function.py - -# Output: -# Line # Mem usage Increment Line Contents -# ================================================ -# 10 45.3 MiB 45.3 MiB def my_function(): -# 11 78.1 MiB 32.8 MiB data = load_large_dataset() -# 12 156.2 MiB 78.1 MiB result = process(data) -``` - ---- - -## Go Profiling with pprof - -```go -// main.go — add pprof endpoints -import _ "net/http/pprof" -import "net/http" - -func main() { - // pprof endpoints at /debug/pprof/ - go func() { - log.Println(http.ListenAndServe(":6060", nil)) - }() - // ... 
rest of your app -} -``` - -```bash -# CPU profile (30s) -go tool pprof -http=:8080 http://localhost:6060/debug/pprof/profile?seconds=30 - -# Memory profile -go tool pprof -http=:8080 http://localhost:6060/debug/pprof/heap - -# Goroutine leak detection -curl http://localhost:6060/debug/pprof/goroutine?debug=1 - -# In pprof UI: "Flame Graph" view → find the tallest bars -``` - ---- - -## Bundle Size Analysis - -### Next.js Bundle Analyzer - -```bash -# Install -pnpm add -D @next/bundle-analyzer - -# next.config.js -const withBundleAnalyzer = require('@next/bundle-analyzer')({ - enabled: process.env.ANALYZE === 'true', -}) -module.exports = withBundleAnalyzer({}) - -# Run analyzer -ANALYZE=true pnpm build -# Opens browser with treemap of bundle -``` - -### What to look for - -```bash -# Find the largest chunks -pnpm build 2>&1 | grep -E "^\s+(λ|○|●)" | sort -k4 -rh | head -20 - -# Check if a specific package is too large -# Visit: https://bundlephobia.com/package/moment@2.29.4 -# moment: 67.9kB gzipped → replace with date-fns (13.8kB) or dayjs (6.9kB) - -# Find duplicate packages -pnpm dedupe --check - -# Visualize what's in a chunk -npx source-map-explorer .next/static/chunks/*.js -``` - -### Common bundle wins - -```typescript -// Before: import entire lodash -import _ from 'lodash' // 71kB - -// After: import only what you need -import debounce from 'lodash/debounce' // 2kB - -// Before: moment.js -import moment from 'moment' // 67kB - -// After: dayjs -import dayjs from 'dayjs' // 7kB - -// Before: static import (always in bundle) -import HeavyChart from '@/components/HeavyChart' - -// After: dynamic import (loaded on demand) -const HeavyChart = dynamic(() => import('@/components/HeavyChart'), { - loading: () => , -}) -``` - ---- - -## Database Query Optimization - -### Find slow queries - -```sql --- PostgreSQL: enable pg_stat_statements -CREATE EXTENSION IF NOT EXISTS pg_stat_statements; - --- Top 20 slowest queries -SELECT - round(mean_exec_time::numeric, 2) 
AS mean_ms, - calls, - round(total_exec_time::numeric, 2) AS total_ms, - round(stddev_exec_time::numeric, 2) AS stddev_ms, - left(query, 80) AS query -FROM pg_stat_statements -WHERE calls > 10 -ORDER BY mean_exec_time DESC -LIMIT 20; - --- Reset stats -SELECT pg_stat_statements_reset(); -``` - -```bash -# MySQL slow query log -mysql -e "SET GLOBAL slow_query_log = 'ON'; SET GLOBAL long_query_time = 0.1;" -tail -f /var/log/mysql/slow-query.log -``` - -### EXPLAIN ANALYZE - -```sql --- Always use EXPLAIN (ANALYZE, BUFFERS) for real timing -EXPLAIN (ANALYZE, BUFFERS, FORMAT TEXT) -SELECT t.*, u.name as assignee_name -FROM tasks t -LEFT JOIN users u ON u.id = t.assignee_id -WHERE t.project_id = 'proj_123' - AND t.deleted_at IS NULL -ORDER BY t.created_at DESC -LIMIT 20; - --- Look for: --- Seq Scan on large table → needs index --- Nested Loop with high rows → N+1, consider JOIN or batch --- Sort → can index handle the sort? --- Hash Join → fine for moderate sizes -``` - -### Detect N+1 Queries - -```typescript -// Add query logging in dev -import { db } from './client' - -// Drizzle: enable logging -const db = drizzle(pool, { logger: true }) - -// Or use a query counter middleware -let queryCount = 0 -db.$on('query', () => queryCount++) - -// In tests: -queryCount = 0 -const tasks = await getTasksWithAssignees(projectId) -expect(queryCount).toBe(1) // Fail if it's 21 (1 + 20 N+1s) -``` - -```python -# Django: detect N+1 with django-silk or nplusone -from nplusone.ext.django.middleware import NPlusOneMiddleware -MIDDLEWARE = ['nplusone.ext.django.middleware.NPlusOneMiddleware'] -NPLUSONE_RAISE = True # Raise exception on N+1 in tests -``` - -### Fix N+1 — Before/After - -```typescript -// Before: N+1 (1 query for tasks + N queries for assignees) -const tasks = await db.select().from(tasksTable) -for (const task of tasks) { - task.assignee = await db.select().from(usersTable) - .where(eq(usersTable.id, task.assigneeId)) - .then(r => r[0]) -} - -// After: 1 query with 
JOIN -const tasks = await db - .select({ - id: tasksTable.id, - title: tasksTable.title, - assigneeName: usersTable.name, - assigneeEmail: usersTable.email, - }) - .from(tasksTable) - .leftJoin(usersTable, eq(usersTable.id, tasksTable.assigneeId)) - .where(eq(tasksTable.projectId, projectId)) -``` - ---- - -## Load Testing with k6 - -```javascript -// tests/load/api-load-test.js -import http from 'k6/http' -import { check, sleep } from 'k6' -import { Rate, Trend } from 'k6/metrics' - -const errorRate = new Rate('errors') -const taskListDuration = new Trend('task_list_duration') - -export const options = { - stages: [ - { duration: '30s', target: 10 }, // Ramp up to 10 VUs - { duration: '1m', target: 50 }, // Ramp to 50 VUs - { duration: '2m', target: 50 }, // Sustain 50 VUs - { duration: '30s', target: 100 }, // Spike to 100 VUs - { duration: '1m', target: 50 }, // Back to 50 - { duration: '30s', target: 0 }, // Ramp down - ], - thresholds: { - http_req_duration: ['p(95)<500'], // 95% of requests < 500ms - http_req_duration: ['p(99)<1000'], // 99% < 1s - errors: ['rate<0.01'], // Error rate < 1% - task_list_duration: ['p(95)<200'], // Task list specifically < 200ms - }, -} - -const BASE_URL = __ENV.BASE_URL || 'http://localhost:3000' - -export function setup() { - // Get auth token once - const loginRes = http.post(`${BASE_URL}/api/auth/login`, JSON.stringify({ - email: 'loadtest@example.com', - password: 'loadtest123', - }), { headers: { 'Content-Type': 'application/json' } }) - - return { token: loginRes.json('token') } -} - -export default function(data) { - const headers = { - 'Authorization': `Bearer ${data.token}`, - 'Content-Type': 'application/json', - } - - // Scenario 1: List tasks - const start = Date.now() - const listRes = http.get(`${BASE_URL}/api/tasks?limit=20`, { headers }) - taskListDuration.add(Date.now() - start) - - check(listRes, { - 'list tasks: status 200': (r) => r.status === 200, - 'list tasks: has items': (r) => r.json('items') !== 
undefined, - }) || errorRate.add(1) - - sleep(0.5) - - // Scenario 2: Create task - const createRes = http.post( - `${BASE_URL}/api/tasks`, - JSON.stringify({ title: `Load test task ${Date.now()}`, priority: 'medium' }), - { headers } - ) - - check(createRes, { - 'create task: status 201': (r) => r.status === 201, - }) || errorRate.add(1) - - sleep(1) -} - -export function teardown(data) { - // Cleanup: delete load test tasks -} -``` - -```bash -# Run load test -k6 run tests/load/api-load-test.js \ - --env BASE_URL=https://staging.myapp.com - -# With Grafana output -k6 run --out influxdb=http://localhost:8086/k6 tests/load/api-load-test.js -``` - ---- +→ See references/profiling-recipes.md for details ## Before/After Measurement Template diff --git a/docs/skills/engineering/pr-review-expert.md b/docs/skills/engineering/pr-review-expert.md index bb3c05d..4382339 100644 --- a/docs/skills/engineering/pr-review-expert.md +++ b/docs/skills/engineering/pr-review-expert.md @@ -10,6 +10,8 @@ description: "PR Review Expert - Claude Code skill from the Engineering - POWERF --- +# PR Review Expert + **Tier:** POWERFUL **Category:** Engineering **Domain:** Code Review / Quality Assurance diff --git a/docs/skills/engineering/rag-architect.md b/docs/skills/engineering/rag-architect.md index 00f22e3..380204c 100644 --- a/docs/skills/engineering/rag-architect.md +++ b/docs/skills/engineering/rag-architect.md @@ -10,6 +10,8 @@ description: "RAG Architect - POWERFUL - Claude Code skill from the Engineering --- +# RAG Architect - POWERFUL + ## Overview The RAG (Retrieval-Augmented Generation) Architect skill provides comprehensive tools and knowledge for designing, implementing, and optimizing production-grade RAG pipelines. This skill covers the entire RAG ecosystem from document chunking strategies to evaluation frameworks, enabling you to build scalable, efficient, and accurate retrieval systems. 
diff --git a/docs/skills/engineering/release-manager.md b/docs/skills/engineering/release-manager.md index dc31520..6925130 100644 --- a/docs/skills/engineering/release-manager.md +++ b/docs/skills/engineering/release-manager.md @@ -10,6 +10,8 @@ description: "Release Manager - Claude Code skill from the Engineering - POWERFU --- +# Release Manager + **Tier:** POWERFUL **Category:** Engineering **Domain:** Software Release Management & DevOps diff --git a/docs/skills/engineering/runbook-generator.md b/docs/skills/engineering/runbook-generator.md index ecd51f7..6ee90fc 100644 --- a/docs/skills/engineering/runbook-generator.md +++ b/docs/skills/engineering/runbook-generator.md @@ -10,6 +10,8 @@ description: "Runbook Generator - Claude Code skill from the Engineering - POWER --- +# Runbook Generator + **Tier:** POWERFUL **Category:** Engineering **Domain:** DevOps / Site Reliability Engineering diff --git a/docs/skills/engineering/skill-security-auditor.md b/docs/skills/engineering/skill-security-auditor.md index c3a3330..f21e580 100644 --- a/docs/skills/engineering/skill-security-auditor.md +++ b/docs/skills/engineering/skill-security-auditor.md @@ -139,7 +139,7 @@ python3 scripts/skill_security_auditor.py https://github.com/user/skill-repo --s ```yaml # GitHub Actions step -- name: Audit Skill Security +- name: "audit-skill-security" run: | python3 skill-security-auditor/scripts/skill_security_auditor.py ./skills/new-skill/ --strict --json > audit.json if [ $? 
-ne 0 ]; then echo "Security audit failed"; exit 1; fi diff --git a/docs/skills/engineering/skill-tester.md b/docs/skills/engineering/skill-tester.md index f213bca..a1f02af 100644 --- a/docs/skills/engineering/skill-tester.md +++ b/docs/skills/engineering/skill-tester.md @@ -10,6 +10,8 @@ description: "Skill Tester - Claude Code skill from the Engineering - POWERFUL d --- +# Skill Tester + --- **Name**: skill-tester @@ -171,7 +173,7 @@ quality_scorer.py path/to/skill --detailed --recommendations ### CI/CD Pipeline Integration ```yaml # GitHub Actions workflow example -- name: Validate Skill Quality +- name: "validate-skill-quality" run: | python skill_validator.py engineering/${{ matrix.skill }} --json | tee validation.json python script_tester.py engineering/${{ matrix.skill }} | tee testing.json @@ -281,7 +283,7 @@ echo "Validation passed. Proceeding with commit." ### GitHub Actions Workflow ```yaml -name: Skill Quality Gate +name: "skill-quality-gate" on: pull_request: paths: ['engineering/**'] @@ -291,11 +293,11 @@ jobs: runs-on: ubuntu-latest steps: - uses: actions/checkout@v3 - - name: Setup Python + - name: "setup-python" uses: actions/setup-python@v4 with: python-version: '3.11' - - name: Validate Changed Skills + - name: "validate-changed-skills" run: | changed_skills=$(git diff --name-only ${{ github.event.before }} | grep -E '^engineering/[^/]+/' | cut -d'/' -f1-2 | sort -u) for skill in $changed_skills; do diff --git a/docs/skills/engineering/tech-debt-tracker.md b/docs/skills/engineering/tech-debt-tracker.md index 1cc92a7..697e52d 100644 --- a/docs/skills/engineering/tech-debt-tracker.md +++ b/docs/skills/engineering/tech-debt-tracker.md @@ -10,6 +10,8 @@ description: "Tech Debt Tracker - Claude Code skill from the Engineering - POWER --- +# Tech Debt Tracker + **Tier**: POWERFUL 🔥 **Category**: Engineering Process Automation **Expertise**: Code Quality, Technical Debt Management, Software Engineering @@ -31,489 +33,7 @@ This skill offers three 
interconnected tools that form a complete tech debt mana Together, these tools enable engineering teams to make data-driven decisions about tech debt, balancing new feature development with maintenance work. ## Technical Debt Classification Framework - -### 1. Code Debt -Code-level issues that make the codebase harder to understand, modify, and maintain. - -**Indicators:** -- Long functions (>50 lines for complex logic, >20 for simple operations) -- Deep nesting (>4 levels of indentation) -- High cyclomatic complexity (>10) -- Duplicate code patterns (>3 similar blocks) -- Missing or inadequate error handling -- Poor variable/function naming -- Magic numbers and hardcoded values -- Commented-out code blocks - -**Impact:** -- Increased debugging time -- Higher defect rates -- Slower feature development -- Knowledge silos (only original author understands the code) - -**Detection Methods:** -- AST parsing for structural analysis -- Pattern matching for common anti-patterns -- Complexity metrics calculation -- Duplicate code detection algorithms - -### 2. Architecture Debt -High-level design decisions that seemed reasonable at the time but now limit scalability or maintainability. - -**Indicators:** -- Monolithic components that should be modular -- Circular dependencies between modules -- Violation of separation of concerns -- Inconsistent data flow patterns -- Over-engineering or under-engineering for current scale -- Tightly coupled components -- Missing abstraction layers - -**Impact:** -- Difficult to scale individual components -- Cascading changes required for simple modifications -- Testing becomes complex and brittle -- Onboarding new team members takes longer - -**Detection Methods:** -- Dependency analysis -- Module coupling metrics -- Component size analysis -- Interface consistency checks - -### 3. Test Debt -Inadequate or missing test coverage, poor test quality, and testing infrastructure issues. 
- -**Indicators:** -- Low test coverage (<80% for critical paths) -- Missing unit tests for complex logic -- No integration tests for key workflows -- Flaky tests that pass/fail intermittently -- Slow test execution (>10 minutes for unit tests) -- Tests that don't test meaningful behavior -- Missing test data management strategy - -**Impact:** -- Fear of refactoring ("don't touch it, it works") -- Regression bugs in production -- Slow feedback cycles during development -- Difficulty validating complex business logic - -**Detection Methods:** -- Coverage report analysis -- Test execution time monitoring -- Test failure pattern analysis -- Test code quality assessment - -### 4. Documentation Debt -Missing, outdated, or poor-quality documentation that makes the system harder to understand and maintain. - -**Indicators:** -- Missing API documentation -- Outdated README files -- No architectural decision records (ADRs) -- Missing code comments for complex algorithms -- No onboarding documentation for new team members -- Inconsistent documentation formats -- Documentation that contradicts actual implementation - -**Impact:** -- Increased onboarding time for new team members -- Knowledge loss when team members leave -- Miscommunication between teams -- Repeated questions in team channels - -**Detection Methods:** -- Documentation coverage analysis -- Freshness checking (last modified dates) -- Link validation -- Comment density analysis - -### 5. Dependency Debt -Issues related to external libraries, frameworks, and system dependencies. 
- -**Indicators:** -- Outdated packages with known security vulnerabilities -- Dependencies with incompatible licenses -- Unused dependencies bloating the build -- Version conflicts between packages -- Deprecated APIs still in use -- Heavy dependencies for simple tasks -- Missing dependency pinning - -**Impact:** -- Security vulnerabilities -- Build instability -- Longer build times -- Legal compliance issues -- Difficulty upgrading core frameworks - -**Detection Methods:** -- Vulnerability scanning -- License compliance checking -- Usage analysis -- Version compatibility checking - -### 6. Infrastructure Debt -Operations and deployment-related technical debt. - -**Indicators:** -- Manual deployment processes -- Missing monitoring and alerting -- Inadequate logging -- No disaster recovery plan -- Inconsistent environments (dev/staging/prod) -- Missing CI/CD pipelines -- Infrastructure as code gaps - -**Impact:** -- Deployment risks and downtime -- Difficult troubleshooting -- Inconsistent behavior across environments -- Manual work that should be automated - -**Detection Methods:** -- Infrastructure audit checklists -- Configuration drift detection -- Monitoring coverage analysis -- Deployment process documentation review - -## Severity Scoring Framework - -Each piece of tech debt is scored on multiple dimensions to determine overall severity: - -### Impact Assessment (1-10 scale) - -**Development Velocity Impact** -- 1-2: Negligible impact on development speed -- 3-4: Minor slowdown, workarounds available -- 5-6: Moderate impact, affects some features -- 7-8: Significant slowdown, affects most work -- 9-10: Critical blocker, prevents new development - -**Quality Impact** -- 1-2: No impact on defect rates -- 3-4: Minor increase in minor bugs -- 5-6: Moderate increase in defects -- 7-8: Regular production issues -- 9-10: Critical reliability problems - -**Team Productivity Impact** -- 1-2: No impact on team morale or efficiency -- 3-4: Occasional frustration -- 5-6: 
Regular complaints from developers -- 7-8: Team actively avoiding the area -- 9-10: Causing developer turnover - -**Business Impact** -- 1-2: No customer-facing impact -- 3-4: Minor UX degradation -- 5-6: Moderate performance impact -- 7-8: Customer complaints or churn -- 9-10: Revenue-impacting issues - -### Effort Assessment - -**Size (Story Points or Hours)** -- XS (1-4 hours): Simple refactor or documentation update -- S (1-2 days): Minor architectural change -- M (3-5 days): Moderate refactoring effort -- L (1-2 weeks): Major component restructuring -- XL (3+ weeks): System-wide architectural changes - -**Risk Level** -- Low: Well-understood change with clear scope -- Medium: Some unknowns but manageable -- High: Significant unknowns, potential for scope creep - -**Skill Requirements** -- Junior: Can be handled by any team member -- Mid: Requires experienced developer -- Senior: Needs architectural expertise -- Expert: Requires deep system knowledge - -## Interest Rate Calculation - -Technical debt accrues "interest" - the additional cost of leaving it unfixed. This interest rate helps prioritize which debt to pay down first. 
- -### Interest Rate Formula - -``` -Interest Rate = (Impact Score × Frequency of Encounter) / Time Period -``` - -Where: -- **Impact Score**: Average severity score (1-10) -- **Frequency of Encounter**: How often developers interact with this code -- **Time Period**: Usually measured per sprint or month - -### Cost of Delay Calculation - -``` -Cost of Delay = Interest Rate × Time Until Fix × Team Size Multiplier -``` - -### Example Calculation - -**Scenario**: Legacy authentication module with poor error handling - -- Impact Score: 7 (causes regular production issues) -- Frequency: 15 encounters per sprint (3 developers × 5 times each) -- Team Size: 8 developers -- Current sprint: 1, planned fix: sprint 4 - -``` -Interest Rate = 7 × 15 = 105 points per sprint -Cost of Delay = 105 × 3 × 1.2 = 378 total cost points -``` - -This debt item should be prioritized over lower-cost items. - -## Debt Inventory Management - -### Data Structure - -Each debt item is tracked with the following attributes: - -```json -{ - "id": "DEBT-2024-001", - "title": "Legacy user authentication module", - "category": "code", - "subcategory": "error_handling", - "location": "src/auth/legacy_auth.py:45-120", - "description": "Authentication error handling uses generic exceptions", - "impact": { - "velocity": 7, - "quality": 8, - "productivity": 6, - "business": 5 - }, - "effort": { - "size": "M", - "risk": "medium", - "skill_required": "mid" - }, - "interest_rate": 105, - "cost_of_delay": 378, - "priority": "high", - "created_date": "2024-01-15", - "last_updated": "2024-01-20", - "assigned_to": null, - "status": "identified", - "tags": ["security", "user-experience", "maintainability"] -} -``` - -### Status Lifecycle - -1. **Identified** - Debt detected but not yet analyzed -2. **Analyzed** - Impact and effort assessed -3. **Prioritized** - Added to backlog with priority -4. **Planned** - Assigned to specific sprint/release -5. **In Progress** - Actively being worked on -6. 
**Review** - Implementation complete, under review -7. **Done** - Debt resolved and verified -8. **Won't Fix** - Consciously decided not to address - -## Prioritization Frameworks - -### 1. Cost-of-Delay vs Effort Matrix - -Plot debt items on a 2D matrix: -- X-axis: Effort (XS to XL) -- Y-axis: Cost of Delay (calculated value) - -**Priority Quadrants:** -- High Cost, Low Effort: **Immediate** (quick wins) -- High Cost, High Effort: **Planned** (major initiatives) -- Low Cost, Low Effort: **Opportunistic** (during related work) -- Low Cost, High Effort: **Backlog** (consider for future) - -### 2. Weighted Shortest Job First (WSJF) - -``` -WSJF Score = (Business Value + Time Criticality + Risk Reduction) / Effort -``` - -Where each component is scored 1-10: -- **Business Value**: Direct impact on customer value -- **Time Criticality**: How much value decreases over time -- **Risk Reduction**: How much risk is mitigated by fixing this debt - -### 3. Technical Debt Quadrant - -Based on Martin Fowler's framework: - -**Quadrant 1: Reckless & Deliberate** -- "We don't have time for design" -- Highest priority for remediation - -**Quadrant 2: Prudent & Deliberate** -- "We must ship now and deal with consequences" -- Schedule for near-term resolution - -**Quadrant 3: Reckless & Inadvertent** -- "What's layering?" -- Focus on education and process improvement - -**Quadrant 4: Prudent & Inadvertent** -- "Now we know how we should have done it" -- Normal part of learning, lowest priority - -## Refactoring Strategies - -### 1. Strangler Fig Pattern -Gradually replace old system by building new functionality around it. - -**When to use:** -- Large, monolithic systems -- High-risk changes to critical paths -- Long-term architectural migrations - -**Implementation:** -1. Identify boundaries for extraction -2. Create abstraction layer -3. Route new features to new implementation -4. Gradually migrate existing features -5. Remove old implementation - -### 2. 
Branch by Abstraction -Create abstraction layer to allow parallel implementations. - -**When to use:** -- Need to support old and new systems simultaneously -- High-risk changes with rollback requirements -- A/B testing infrastructure changes - -**Implementation:** -1. Create abstraction interface -2. Implement abstraction for current system -3. Replace direct calls with abstraction calls -4. Implement new version behind same abstraction -5. Switch implementations via configuration -6. Remove old implementation - -### 3. Feature Toggles -Use configuration flags to control code execution. - -**When to use:** -- Gradual rollout of refactored components -- Risk mitigation during large changes -- Experimental refactoring approaches - -**Implementation:** -1. Identify decision points in code -2. Add toggle checks at decision points -3. Implement both old and new paths -4. Test both paths thoroughly -5. Gradually move toggle to new implementation -6. Remove old path and toggle - -### 4. Parallel Run -Run old and new implementations simultaneously to verify correctness. - -**When to use:** -- Critical business logic changes -- Data processing pipeline changes -- Algorithm improvements - -**Implementation:** -1. Implement new version alongside old -2. Run both versions with same inputs -3. Compare outputs and log discrepancies -4. Investigate and fix discrepancies -5. Build confidence through parallel execution -6. Switch to new implementation -7. 
Remove old implementation - -## Sprint Allocation Recommendations - -### Debt-to-Feature Ratio - -Maintain healthy balance between new features and debt reduction: - -**Team Velocity < 70% of capacity:** -- 60% tech debt, 40% features -- Focus on removing major blockers - -**Team Velocity 70-85% of capacity:** -- 30% tech debt, 70% features -- Balanced maintenance approach - -**Team Velocity > 85% of capacity:** -- 15% tech debt, 85% features -- Opportunistic debt reduction only - -### Sprint Planning Integration - -**Story Point Allocation:** -- Reserve 20% of sprint capacity for tech debt -- Prioritize debt items with highest interest rates -- Include "debt tax" in feature estimates when working in high-debt areas - -**Debt Budget Tracking:** -- Track debt points completed per sprint -- Monitor debt interest rate trend -- Alert when debt accumulation exceeds team's paydown rate - -### Quarterly Planning - -**Debt Initiatives:** -- Identify 1-2 major debt themes per quarter -- Allocate dedicated sprints for large-scale refactoring -- Plan debt work around major feature releases - -**Success Metrics:** -- Debt interest rate reduction -- Developer velocity improvements -- Defect rate reduction -- Code review cycle time improvement - -## Stakeholder Reporting - -### Executive Dashboard - -**Key Metrics:** -- Overall tech debt health score (0-100) -- Debt trend direction (improving/declining) -- Cost of delayed fixes (in development days) -- High-risk debt items count - -**Monthly Report Structure:** -1. **Executive Summary** (3 bullet points) -2. **Health Score Trend** (6-month view) -3. **Top 3 Risk Items** (business impact focus) -4. **Investment Recommendation** (resource allocation) -5. 
**Success Stories** (debt reduced last month) - -### Engineering Team Dashboard - -**Daily Metrics:** -- New debt items identified -- Debt items resolved -- Interest rate by team/component -- Debt hotspots (most problematic areas) - -**Sprint Reviews:** -- Debt points completed vs. planned -- Velocity impact from debt work -- Newly discovered debt during feature work -- Team sentiment on code quality - -### Product Manager Reports - -**Feature Impact Analysis:** -- How debt affects feature development time -- Quality risk assessment for upcoming features -- Debt that blocks planned features -- Recommendations for feature sequence planning - -**Customer Impact Translation:** -- Debt that affects performance -- Debt that increases bug rates -- Debt that limits feature flexibility -- Investment required to maintain current quality +→ See references/debt-frameworks.md for details ## Implementation Roadmap @@ -577,4 +97,4 @@ Maintain healthy balance between new features and debt reduction: **Problem**: Building complex debt management systems that nobody uses. **Solution**: Start simple, iterate based on actual usage patterns. -Technical debt management is not just about writing better code - it's about creating sustainable development practices that balance short-term delivery pressure with long-term system health. Use these tools and frameworks to make informed decisions about when and how to invest in debt reduction. \ No newline at end of file +Technical debt management is not just about writing better code - it's about creating sustainable development practices that balance short-term delivery pressure with long-term system health. Use these tools and frameworks to make informed decisions about when and how to invest in debt reduction. 
diff --git a/docs/skills/finance/financial-analyst.md b/docs/skills/finance/financial-analyst.md index 9e31d4c..e3b27be 100644 --- a/docs/skills/finance/financial-analyst.md +++ b/docs/skills/finance/financial-analyst.md @@ -14,7 +14,7 @@ description: "Financial Analyst Skill - Claude Code skill from the Finance domai ## Overview -Production-ready financial analysis toolkit providing ratio analysis, DCF valuation, budget variance analysis, and rolling forecast construction. Designed for financial analysts with 3-6 years experience performing financial modeling, forecasting & budgeting, management reporting, business performance analysis, and investment analysis. +Production-ready financial analysis toolkit providing ratio analysis, DCF valuation, budget variance analysis, and rolling forecast construction. Designed for financial modeling, forecasting & budgeting, management reporting, business performance analysis, and investment analysis. ## 5-Phase Workflow @@ -26,8 +26,9 @@ Production-ready financial analysis toolkit providing ratio analysis, DCF valuat ### Phase 2: Data Analysis & Modeling - Collect and validate financial data (income statement, balance sheet, cash flow) +- **Validate input data completeness** before running ratio calculations (check for missing fields, nulls, or implausible values) - Calculate financial ratios across 5 categories (profitability, liquidity, leverage, efficiency, valuation) -- Build DCF models with WACC and terminal value calculations +- Build DCF models with WACC and terminal value calculations; **cross-check DCF outputs against sanity bounds** (e.g., implied multiples vs. 
comparables) - Construct budget variance analyses with favorable/unfavorable classification - Develop driver-based forecasts with scenario modeling @@ -125,6 +126,7 @@ python scripts/forecast_builder.py forecast_data.json --scenarios base,bull,bear | `references/financial-ratios-guide.md` | Ratio formulas, interpretation, industry benchmarks | | `references/valuation-methodology.md` | DCF methodology, WACC, terminal value, comps | | `references/forecasting-best-practices.md` | Driver-based forecasting, rolling forecasts, accuracy | +| `references/industry-adaptations.md` | Sector-specific metrics and considerations (SaaS, Retail, Manufacturing, Financial Services, Healthcare) | ## Templates @@ -134,38 +136,6 @@ python scripts/forecast_builder.py forecast_data.json --scenarios base,bull,bear | `assets/dcf_analysis_template.md` | DCF valuation analysis template | | `assets/forecast_report_template.md` | Revenue forecast report template | -## Industry Adaptations - -### SaaS -- Key metrics: MRR, ARR, CAC, LTV, Churn Rate, Net Revenue Retention -- Revenue recognition: subscription-based, deferred revenue tracking -- Unit economics: CAC payback period, LTV/CAC ratio -- Cohort analysis for retention and expansion revenue - -### Retail -- Key metrics: Same-store sales, Revenue per square foot, Inventory turnover -- Seasonal adjustment factors in forecasting -- Gross margin analysis by product category -- Working capital cycle optimization - -### Manufacturing -- Key metrics: Gross margin by product line, Capacity utilization, COGS breakdown -- Bill of materials cost analysis -- Absorption vs variable costing impact -- Capital expenditure planning and ROI - -### Financial Services -- Key metrics: Net Interest Margin, Efficiency Ratio, ROA, Tier 1 Capital -- Regulatory capital requirements -- Credit loss provisioning and reserves -- Fee income analysis and diversification - -### Healthcare -- Key metrics: Revenue per patient, Payer mix, Days in A/R, Operating margin -- 
Reimbursement rate analysis by payer -- Case mix index impact on revenue -- Compliance cost allocation - ## Key Metrics & Targets | Metric | Target | diff --git a/docs/skills/marketing-skill/analytics-tracking.md b/docs/skills/marketing-skill/analytics-tracking.md index 79fe98f..e6f2673 100644 --- a/docs/skills/marketing-skill/analytics-tracking.md +++ b/docs/skills/marketing-skill/analytics-tracking.md @@ -144,7 +144,7 @@ For any event not auto-collected, create it in GTM (preferred) or via gtag direc gtag('event', 'signup_completed', { method: 'email', user_id: 'usr_abc123', - plan_name: 'trial' + plan_name: 'trial' }); ``` @@ -207,7 +207,7 @@ window.dataLayer.push({ event: 'signup_completed', signup_method: 'email', user_id: userId, - plan_name: 'trial' + plan_name: 'trial' }); ``` @@ -217,7 +217,7 @@ GTM Tag: GA4 Event Parameters: signup_method: {{DLV - signup_method}} user_id: {{DLV - user_id}} - plan_name: {{DLV - plan_name}} + plan_name: {{DLV - plan_name}} Trigger: Custom Event - "signup_completed" ``` diff --git a/docs/skills/marketing-skill/app-store-optimization.md b/docs/skills/marketing-skill/app-store-optimization.md index 81865cb..510b31c 100644 --- a/docs/skills/marketing-skill/app-store-optimization.md +++ b/docs/skills/marketing-skill/app-store-optimization.md @@ -12,20 +12,6 @@ description: "App Store Optimization (ASO) - Claude Code skill from the Marketin # App Store Optimization (ASO) -ASO tools for researching keywords, optimizing metadata, analyzing competitors, and improving app store visibility on Apple App Store and Google Play Store.
- ---- - -## Table of Contents - -- [Keyword Research Workflow](#keyword-research-workflow) -- [Metadata Optimization Workflow](#metadata-optimization-workflow) -- [Competitor Analysis Workflow](#competitor-analysis-workflow) -- [App Launch Workflow](#app-launch-workflow) -- [A/B Testing Workflow](#ab-testing-workflow) -- [Before/After Examples](#beforeafter-examples) -- [Tools and References](#tools-and-references) - --- ## Keyword Research Workflow @@ -69,13 +55,13 @@ Discover and evaluate keywords that drive app store visibility. ### Keyword Placement Priority -| Location | Search Weight | Character Limit | -|----------|---------------|-----------------| -| App Title | Highest | 30 (iOS) / 50 (Android) | -| Subtitle (iOS) | High | 30 | -| Keyword Field (iOS) | High | 100 | -| Short Description (Android) | High | 80 | -| Full Description | Medium | 4,000 | +| Location | Search Weight | +|----------|---------------| +| App Title | Highest | +| Subtitle (iOS) | High | +| Keyword Field (iOS) | High | +| Short Description (Android) | High | +| Full Description | Medium | See: [references/keyword-research-guide.md](references/keyword-research-guide.md) @@ -448,35 +434,18 @@ Trusted by 500,000+ professionals. 
--- -## Platform Limitations +## Platform Notes -### Data Constraints +| Platform / Constraint | Behavior / Impact | +|-----------------------|-------------------| +| iOS keyword changes | Require app submission | +| iOS promotional text | Editable without an app update | +| Android metadata changes | Index in 1-2 hours | +| Android keyword field | None — use description instead | +| Keyword volume data | Estimates only; no official source | +| Competitor data | Public listings only | -| Constraint | Impact | -|------------|--------| -| No official keyword volume data | Estimates based on third-party tools | -| Competitor data limited to public info | Cannot see internal metrics | -| Review access limited to public reviews | No access to private feedback | -| Historical data unavailable for new apps | Cannot compare to past performance | - -### Platform Behavior - -| Platform | Behavior | -|----------|----------| -| iOS | Keyword changes require app submission | -| iOS | Promotional text editable without update | -| Android | Metadata changes index in 1-2 hours | -| Android | No separate keyword field (use description) | -| Both | Algorithm changes without notice | - -### When Not to Use This Skill - -| Scenario | Alternative | -|----------|-------------| -| Web apps | Use web SEO skills | -| Enterprise apps (not public) | Internal distribution tools | -| Beta/TestFlight only | Focus on feedback, not ASO | -| Paid advertising strategy | Use paid acquisition skills | +**When not to use this skill:** web apps (use web SEO), enterprise/internal apps, TestFlight-only betas, or paid advertising strategy. 
--- diff --git a/docs/skills/marketing-skill/brand-guidelines.md b/docs/skills/marketing-skill/brand-guidelines.md index e982aa3..ce2a719 100644 --- a/docs/skills/marketing-skill/brand-guidelines.md +++ b/docs/skills/marketing-skill/brand-guidelines.md @@ -28,265 +28,7 @@ When helping users: --- ## Anthropic Brand Identity - -### Overview - -Anthropic's brand identity is clean, precise, and intellectually grounded. It communicates trustworthiness and technical sophistication without feeling cold or corporate. - -### Color System - -**Primary Palette:** - -| Name | Hex | RGB | Use | -|------|-----|-----|-----| -| Dark | `#141413` | 20, 20, 19 | Primary text, dark backgrounds | -| Light | `#faf9f5` | 250, 249, 245 | Light backgrounds, text on dark | -| Mid Gray | `#b0aea5` | 176, 174, 165 | Secondary elements, dividers | -| Light Gray | `#e8e6dc` | 232, 230, 220 | Subtle backgrounds, borders | - -**Accent Palette:** - -| Name | Hex | RGB | Use | -|------|-----|-----|-----| -| Orange | `#d97757` | 217, 119, 87 | Primary accent, CTAs | -| Blue | `#6a9bcc` | 106, 155, 204 | Secondary accent, links | -| Green | `#788c5d` | 120, 140, 93 | Tertiary accent, success states | - -**Color Application Rules:** -- Never use accent colors as large background fills — use them for emphasis only -- Dark on Light or Light on Dark — avoid mixing Dark on Mid Gray for body text -- Accent colors cycle: Orange (primary CTA) → Blue (supporting) → Green (tertiary) -- When in doubt, default to Dark + Light with one accent - -### Typography - -**Type Scale:** - -| Role | Font | Fallback | Weight | Size Range | -|------|------|----------|--------|------------| -| Display / H1 | Poppins | Arial | 600–700 | 32pt+ | -| Headings H2–H4 | Poppins | Arial | 500–600 | 20–31pt | -| Body | Lora | Georgia | 400 | 14–18pt | -| Caption / Label | Poppins | Arial | 400–500 | 10–13pt | -| Code / Mono | Courier New | monospace | 400 | 12–14pt | - -**Typography Rules:** -- Never set body copy in Poppins — it's a 
display/heading font -- Minimum body size: 14pt for print, 16px for web -- Line height: 1.5–1.6 for body, 1.1–1.2 for headings -- Letter spacing: -0.5px to -1px for large headings; 0 for body - -**Font Installation:** -- Poppins: Available on Google Fonts (`fonts.google.com/specimen/Poppins`) -- Lora: Available on Google Fonts (`fonts.google.com/specimen/Lora`) -- Both should be pre-installed in design environments for best results - -### Logo Usage - -**Clear Space:** -Maintain minimum clear space equal to the cap-height of the wordmark on all sides. No other elements should intrude on this zone. - -**Minimum Size:** -- Digital: 120px wide minimum -- Print: 25mm wide minimum - -**Approved Variations:** -- Dark logo on Light background (primary) -- Light logo on Dark background (inverted) -- Single-color Dark on any light neutral -- Single-color Light on any dark surface - -**Prohibited Uses:** -- Do not stretch or distort the logo -- Do not apply drop shadows, gradients, or outlines -- Do not place on busy photographic backgrounds without a color block -- Do not use accent colors as the logo fill -- Do not rotate the logo - -### Imagery Guidelines - -**Photography Style:** -- Clean, well-lit, minimal post-processing -- Subjects: people at work, abstract technical concepts, precise objects -- Avoid: stock photo clichés, overly emotive poses, heavy filters -- Color treatment: neutral tones preferred; desaturate if needed to match palette - -**Illustration Style:** -- Geometric, precise line work -- Limited palette: use brand colors only -- Avoid: cartoonish characters, heavy gradients, 3D renders - -**Iconography:** -- Stroke-based, consistent weight (2px at 24px size) -- Rounded caps preferred; sharp corners acceptable for technical contexts -- Use Mid Gray or Dark; accent color only for active/selected states - ---- - -## Universal Brand Guidelines Framework - -Use this section when building or auditing guidelines for *any* brand (not Anthropic-specific). 
- -### 1. Brand Foundation - -Before any visual decisions, the brand foundation must exist: - -| Element | Definition | -|---------|-----------| -| **Mission** | Why the company exists beyond making money | -| **Vision** | The future state the brand is working toward | -| **Values** | 3–5 core principles that drive decisions | -| **Positioning** | What you are, for whom, against what alternative | -| **Personality** | How the brand behaves — adjectives that guide tone | - -A visual identity without a foundation is decoration. The foundation drives every downstream decision. - ---- - -### 2. Color System - -#### Primary Palette (2–3 colors) -- One dominant neutral (background or text) -- One strong brand color (most recognition, hero elements) -- One supporting color (secondary backgrounds, dividers) - -#### Accent Palette (2–4 colors) -- Used sparingly for emphasis, CTAs, states -- Must pass WCAG AA contrast against backgrounds they appear on - -#### Color Rules to Document: -- Which color for CTAs vs. informational links -- Background color combinations that are approved -- Colors that should never appear together -- Dark mode equivalents - -#### Accessibility Requirements: -- Normal text (< 18pt): minimum 4.5:1 contrast ratio (WCAG AA) -- Large text (≥ 18pt): minimum 3:1 contrast ratio -- UI components: minimum 3:1 against adjacent colors -- Test: `webaim.org/resources/contrastchecker` - ---- - -### 3. 
Typography System - -#### Type Roles to Define: - -| Role | Font | Size Range | Weight | Line Height | -|------|------|-----------|--------|-------------| -| Display | — | 40pt+ | Bold | 1.1 | -| H1 | — | 28–40pt | SemiBold | 1.15 | -| H2 | — | 22–28pt | SemiBold | 1.2 | -| H3 | — | 18–22pt | Medium | 1.25 | -| Body | — | 15–18pt | Regular | 1.5–1.6 | -| Small / Caption | — | 12–14pt | Regular | 1.4 | -| Label / UI | — | 11–13pt | Medium | 1.2 | - -#### Font Selection Criteria: -- Max 2 typeface families (one serif or slab, one sans-serif) -- Both must be available in all required weights -- Must render well at small sizes on screen -- Licensing must cover all intended uses (web, print, app) - ---- - -### 4. Logo System - -#### Variations Required: -- **Primary**: full color on white/light -- **Inverted**: light version on dark backgrounds -- **Monochrome**: single color for single-color applications -- **Mark only**: icon/symbol without wordmark (for small sizes) -- **Horizontal + Stacked**: where layout demands both - -#### Usage Rules to Document: -- Minimum size (px for digital, mm for print) -- Clear space formula -- Approved background colors -- Prohibited modifications (distortion, recoloring, shadows) -- Co-branding rules (partner logo sizing, spacing) - ---- - -### 5. Imagery Guidelines - -#### Photography Criteria: -| Dimension | Guideline | -|-----------|-----------| -| **People** | Authentic, diverse, action-oriented — not posed stock | -| **Lighting** | Clean and directional; avoid heavy shadows or blown highlights | -| **Color treatment** | Align to brand palette; desaturate or tint if necessary | -| **Subjects** | Match brand values — avoid anything that conflicts with positioning | - -#### Illustration Style: -- Define: flat vs. 3D, line vs. filled, abstract vs. 
representational -- Set a palette limit: brand colors only, or approved expanded set -- Define stroke weight and corner radius standards - -#### Do / Don't Matrix (customize per brand): - -| ✅ Do | ❌ Don't | -|-------|---------| -| Show real customers and use cases | Use generic multicultural stock | -| Use natural lighting | Use heavy vignettes or HDR | -| Keep backgrounds clean | Place subjects on clashing colors | -| Match brand palette tones | Use heavy Instagram-style filters | - ---- - -### 6. Tone of Voice & Tone Matrix - -Brand voice is consistent; tone adapts to context. - -#### Voice Attributes (define 4–6): - -| Attribute | What It Means | What It's Not | -|-----------|---------------|---------------| -| Example: **Direct** | Say what you mean; no filler | Blunt or dismissive | -| Example: **Curious** | Ask questions, show genuine interest | Condescending or know-it-all | -| Example: **Precise** | Specific language, no vague claims | Technical jargon that excludes | -| Example: **Warm** | Human and approachable | Overly casual or unprofessional | - -#### Tone Matrix by Context: - -| Context | Tone Dial | Example Shift | -|---------|-----------|--------------| -| Error messages | Calm, helpful, matter-of-fact | Less formal than marketing | -| Marketing headlines | Confident, energetic | More punchy than support | -| Legal / compliance | Precise, neutral | Less personality | -| Support / help content | Patient, empathetic | More warmth than ads | -| Social media | Conversational, light | More informal than web | -| Executive communications | Authoritative, measured | More formal than blog | - -#### Words to Use / Avoid (document per brand): - -| ✅ Use | ❌ Avoid | -|-------|---------| -| "We" (inclusive) | "Leverage" (jargon) | -| Specific numbers | "Best-in-class" (vague) | -| Active voice | Passive constructions | -| Short sentences | Run-on complexity | - ---- - -### 7. 
Application Examples - -#### Digital -- **Web**: Primary palette for backgrounds; accent for CTAs; Poppins/brand heading font for H1–H3 -- **Email**: Inline styles only; web-safe font fallbacks always specified; logo as linked image -- **Social**: Platform-specific safe zones; brand colors dominant; minimal text on images - -#### Print -- Always use CMYK values for print production (never RGB or hex) -- Bleed: 3mm on all sides; keep critical content 5mm from trim -- Proof against Pantone reference before bulk print runs - -#### Presentations -- Cover slide: brand dark + brand light with single accent -- Body slides: white backgrounds with brand accent headers -- No custom fonts in share files — embed or substitute - ---- +→ See references/brand-identity-and-framework.md for details ## Quick Audit Checklist diff --git a/docs/skills/marketing-skill/campaign-analytics.md b/docs/skills/marketing-skill/campaign-analytics.md index 22e2f1e..6535610 100644 --- a/docs/skills/marketing-skill/campaign-analytics.md +++ b/docs/skills/marketing-skill/campaign-analytics.md @@ -16,30 +16,6 @@ Production-grade campaign performance analysis with multi-touch attribution mode --- -## Table of Contents - -- [Capabilities](#capabilities) -- [Input Requirements](#input-requirements) -- [Output Formats](#output-formats) -- [How to Use](#how-to-use) -- [Scripts](#scripts) -- [Reference Guides](#reference-guides) -- [Best Practices](#best-practices) -- [Limitations](#limitations) - ---- - -## Capabilities - -- **Multi-Touch Attribution**: Five attribution models (first-touch, last-touch, linear, time-decay, position-based) with configurable parameters -- **Funnel Conversion Analysis**: Stage-by-stage conversion rates, drop-off identification, bottleneck detection, and segment comparison -- **Campaign ROI Calculation**: ROI, ROAS, CPA, CPL, CAC metrics with industry benchmarking and underperformance flagging -- **A/B Test Support**: Templates for structured A/B test documentation and 
analysis -- **Channel Comparison**: Cross-channel performance comparison with normalized metrics -- **Executive Reporting**: Ready-to-use templates for campaign performance reports - ---- - ## Input Requirements All scripts accept a JSON file as positional input argument. See `assets/sample_campaign_data.json` for complete examples. @@ -93,6 +69,16 @@ All scripts accept a JSON file as positional input argument. See `assets/sample_ } ``` +### Input Validation + +Before running scripts, verify your JSON is valid and matches the expected schema. Common errors: + +- **Missing required keys** (e.g., `journeys`, `funnel.stages`, `campaigns`) → script raises a descriptive `KeyError` +- **Mismatched array lengths** in funnel data (`stages` and `counts` must be the same length) → raises `ValueError` +- **Non-numeric monetary values** in ROI data → raises `TypeError` + +Use `python -m json.tool your_file.json` to validate JSON syntax before passing it to any script. + --- ## Output Formats @@ -104,6 +90,25 @@ All scripts support two output formats via the `--format` flag: --- +## Typical Analysis Workflow + +For a complete campaign review, run the three scripts in sequence: + +```bash +# Step 1 — Attribution: understand which channels drive conversions +python scripts/attribution_analyzer.py campaign_data.json --model time-decay + +# Step 2 — Funnel: identify where prospects drop off on the path to conversion +python scripts/funnel_analyzer.py funnel_data.json + +# Step 3 — ROI: calculate profitability and benchmark against industry standards +python scripts/campaign_roi_calculator.py campaign_data.json +``` + +Use attribution results to identify top-performing channels, then focus funnel analysis on those channels' segments, and finally validate ROI metrics to prioritize budget reallocation. + +--- + ## How to Use ### Attribution Analysis @@ -194,10 +199,10 @@ Calculates comprehensive ROI metrics with industry benchmarking: ## Best Practices -1. 
**Use multiple attribution models** -- No single model tells the full story. Compare at least 3 models to triangulate channel value. +1. **Use multiple attribution models** -- Compare at least 3 models to triangulate channel value; no single model tells the full story. 2. **Set appropriate lookback windows** -- Match your time-decay half-life to your average sales cycle length. -3. **Segment your funnels** -- Always compare segments (channel, cohort, geography) to identify what drives best performance. -4. **Benchmark against your own history first** -- Industry benchmarks provide context, but your own historical data is the most relevant comparison. +3. **Segment your funnels** -- Compare segments (channel, cohort, geography) to identify performance drivers. +4. **Benchmark against your own history first** -- Industry benchmarks provide context, but historical data is the most relevant comparison. 5. **Run ROI analysis at regular intervals** -- Weekly for active campaigns, monthly for strategic review. 6. **Include all costs** -- Factor in creative, tooling, and labor costs alongside media spend for accurate ROI. 7. **Document A/B tests rigorously** -- Use the provided template to ensure statistical validity and clear decision criteria. @@ -206,34 +211,12 @@ Calculates comprehensive ROI metrics with industry benchmarking: ## Limitations -- **No statistical significance testing** -- A/B test analysis requires external tools for p-value calculations. Scripts provide descriptive metrics only. -- **Standard library only** -- No advanced statistical or data processing libraries. Suitable for most campaign sizes but not optimized for datasets exceeding 100K journeys. -- **Offline analysis** -- Scripts analyze static JSON snapshots. No real-time data connections or API integrations. -- **Single-currency** -- All monetary values assumed to be in the same currency. No currency conversion support. 
-- **Simplified time-decay** -- Uses exponential decay based on configurable half-life. Does not account for weekday/weekend or seasonal patterns. -- **No cross-device tracking** -- Attribution operates on provided journey data as-is. Cross-device identity resolution must be handled upstream. - -## Proactive Triggers - -- **Attribution model not set** → Last-click attribution misses 60%+ of the journey. Use multi-touch. -- **No baseline metrics documented** → Can't measure improvement without baselines. -- **Data discrepancy between tools** → GA4 and ad platform numbers rarely match. Document the gap. -- **Vanity metrics dominating reports** → Pageviews don't matter. Focus on conversion metrics. - -## Output Artifacts - -| When you ask for... | You get... | -|---------------------|------------| -| "Campaign report" | Cross-channel performance report with attribution analysis | -| "Channel comparison" | Channel-by-channel ROI with budget reallocation recommendations | -| "What's working?" | Top 5 performers + bottom 5 drains with specific actions | - -## Communication - -All output passes quality verification: -- Self-verify: source attribution, assumption audit, confidence scoring -- Output format: Bottom Line → What (with confidence) → Why → How to Act -- Results only. Every finding tagged: 🟢 verified, 🟡 medium, 🔴 assumed. +- **No statistical significance testing** -- Scripts provide descriptive metrics only; p-value calculations require external tools. +- **Standard library only** -- No advanced statistical libraries. Suitable for most campaign sizes but not optimized for datasets exceeding 100K journeys. +- **Offline analysis** -- Scripts analyze static JSON snapshots; no real-time data connections or API integrations. +- **Single-currency** -- All monetary values assumed to be in the same currency; no currency conversion support. 
+- **Simplified time-decay** -- Exponential decay based on configurable half-life; does not account for weekday/weekend or seasonal patterns. +- **No cross-device tracking** -- Attribution operates on provided journey data as-is; cross-device identity resolution must be handled upstream. ## Related Skills diff --git a/docs/skills/marketing-skill/content-strategy.md b/docs/skills/marketing-skill/content-strategy.md index a52a658..889a198 100644 --- a/docs/skills/marketing-skill/content-strategy.md +++ b/docs/skills/marketing-skill/content-strategy.md @@ -45,281 +45,7 @@ Gather this context (ask if not provided): --- ## Searchable vs Shareable - -Every piece of content must be searchable, shareable, or both. Prioritize in that order—search traffic is the foundation. - -**Searchable content** captures existing demand. Optimized for people actively looking for answers. - -**Shareable content** creates demand. Spreads ideas and gets people talking. - -### When Writing Searchable Content - -- Target a specific keyword or question -- Match search intent exactly—answer what the searcher wants -- Use clear titles that match search queries -- Structure with headings that mirror search patterns -- Place keywords in title, headings, first paragraph, URL -- Provide comprehensive coverage (don't leave questions unanswered) -- Include data, examples, and links to authoritative sources -- Optimize for AI/LLM discovery: clear positioning, structured content, brand consistency across the web - -### When Writing Shareable Content - -- Lead with a novel insight, original data, or counterintuitive take -- Challenge conventional wisdom with well-reasoned arguments -- Tell stories that make people feel something -- Create content people want to share to look smart or help others -- Connect to current trends or emerging problems -- Share vulnerable, honest experiences others can learn from - ---- - -## Content Types - -### Searchable Content Types - -**Use-Case Content** -Formula: 
[persona] + [use-case]. Targets long-tail keywords. -- "Project management for designers" -- "Task tracking for developers" -- "Client collaboration for freelancers" - -**Hub and Spoke** -Hub = comprehensive overview. Spokes = related subtopics. -``` -/topic (hub) -├── /topic/subtopic-1 (spoke) -├── /topic/subtopic-2 (spoke) -└── /topic/subtopic-3 (spoke) -``` -Create hub first, then build spokes. Interlink strategically. - -**Note:** Most content works fine under `/blog`. Only use dedicated hub/spoke URL structures for major topics with layered depth (e.g., Atlassian's `/agile` guide). For typical blog posts, `/blog/post-title` is sufficient. - -**Template Libraries** -High-intent keywords + product adoption. -- Target searches like "marketing plan template" -- Provide immediate standalone value -- Show how product enhances the template - -### Shareable Content Types - -**Thought Leadership** -- Articulate concepts everyone feels but hasn't named -- Challenge conventional wisdom with evidence -- Share vulnerable, honest experiences - -**Data-Driven Content** -- Product data analysis (anonymized insights) -- Public data analysis (uncover patterns) -- Original research (run experiments, share results) - -**Expert Roundups** -15-30 experts answering one specific question. Built-in distribution. - -**Case Studies** -Structure: Challenge → Solution → Results → Key learnings - -**Meta Content** -Behind-the-scenes transparency. "How We Got Our First $5k MRR," "Why We Chose Debt Over VC." - -For programmatic content at scale, see **programmatic-seo** skill. - ---- - -## Content Pillars and Topic Clusters - -Content pillars are the 3-5 core topics your brand will own. Each pillar spawns a cluster of related content. - -Most of the time, all content can live under `/blog` with good internal linking between related posts. 
Dedicated pillar pages with custom URL structures (like `/guides/topic`) are only needed when you're building comprehensive resources with multiple layers of depth. - -### How to Identify Pillars - -1. **Product-led**: What problems does your product solve? -2. **Audience-led**: What does your ICP need to learn? -3. **Search-led**: What topics have volume in your space? -4. **Competitor-led**: What are competitors ranking for? - -### Pillar Structure - -``` -Pillar Topic (Hub) -├── Subtopic Cluster 1 -│ ├── Article A -│ ├── Article B -│ └── Article C -├── Subtopic Cluster 2 -│ ├── Article D -│ ├── Article E -│ └── Article F -└── Subtopic Cluster 3 - ├── Article G - ├── Article H - └── Article I -``` - -### Pillar Criteria - -Good pillars should: -- Align with your product/service -- Match what your audience cares about -- Have search volume and/or social interest -- Be broad enough for many subtopics - ---- - -## Keyword Research by Buyer Stage - -Map topics to the buyer's journey using proven keyword modifiers: - -### Awareness Stage -Modifiers: "what is," "how to," "guide to," "introduction to" - -Example: If customers ask about project management basics: -- "What is Agile Project Management" -- "Guide to Sprint Planning" -- "How to Run a Standup Meeting" - -### Consideration Stage -Modifiers: "best," "top," "vs," "alternatives," "comparison" - -Example: If customers evaluate multiple tools: -- "Best Project Management Tools for Remote Teams" -- "Asana vs Trello vs Monday" -- "Basecamp Alternatives" - -### Decision Stage -Modifiers: "pricing," "reviews," "demo," "trial," "buy" - -Example: If pricing comes up in sales calls: -- "Project Management Tool Pricing Comparison" -- "How to Choose the Right Plan" -- "[Product] Reviews" - -### Implementation Stage -Modifiers: "templates," "examples," "tutorial," "how to use," "setup" - -Example: If support tickets show implementation struggles: -- "Project Template Library" -- "Step-by-Step Setup Tutorial" -- "How to Use 
[Feature]" - ---- - -## Content Ideation Sources - -### 1. Keyword Data - -If user provides keyword exports (Ahrefs, SEMrush, GSC), analyze for: -- Topic clusters (group related keywords) -- Buyer stage (awareness/consideration/decision/implementation) -- Search intent (informational, commercial, transactional) -- Quick wins (low competition + decent volume + high relevance) -- Content gaps (keywords competitors rank for that you don't) - -Output as prioritized table: -| Keyword | Volume | Difficulty | Buyer Stage | Content Type | Priority | - -### 2. Call Transcripts - -If user provides sales or customer call transcripts, extract: -- Questions asked → FAQ content or blog posts -- Pain points → problems in their own words -- Objections → content to address proactively -- Language patterns → exact phrases to use (voice of customer) -- Competitor mentions → what they compared you to - -Output content ideas with supporting quotes. - -### 3. Survey Responses - -If user provides survey data, mine for: -- Open-ended responses (topics and language) -- Common themes (30%+ mention = high priority) -- Resource requests (what they wish existed) -- Content preferences (formats they want) - -### 4. Forum Research - -Use web search to find content ideas: - -**Reddit:** `site:reddit.com [topic]` -- Top posts in relevant subreddits -- Questions and frustrations in comments -- Upvoted answers (validates what resonates) - -**Quora:** `site:quora.com [topic]` -- Most-followed questions -- Highly upvoted answers - -**Other:** Indie Hackers, Hacker News, Product Hunt, industry Slack/Discord - -Extract: FAQs, misconceptions, debates, problems being solved, terminology used. - -### 5. 
Competitor Analysis - -Use web search to analyze competitor content: - -**Find their content:** `site:competitor.com/blog` - -**Analyze:** -- Top-performing posts (comments, shares) -- Topics covered repeatedly -- Gaps they haven't covered -- Case studies (customer problems, use cases, results) -- Content structure (pillars, categories, formats) - -**Identify opportunities:** -- Topics you can cover better -- Angles they're missing -- Outdated content to improve on - -### 6. Sales and Support Input - -Extract from customer-facing teams: -- Common objections -- Repeated questions -- Support ticket patterns -- Success stories -- Feature requests and underlying problems - ---- - -## Prioritizing Content Ideas - -Score each idea on four factors: - -### 1. Customer Impact (40%) -- How frequently did this topic come up in research? -- What percentage of customers face this challenge? -- How emotionally charged was this pain point? -- What's the potential LTV of customers with this need? - -### 2. Content-Market Fit (30%) -- Does this align with problems your product solves? -- Can you offer unique insights from customer research? -- Do you have customer stories to support this? -- Will this naturally lead to product interest? - -### 3. Search Potential (20%) -- What's the monthly search volume? -- How competitive is this topic? -- Are there related long-tail opportunities? -- Is search interest growing or declining? - -### 4. Resource Requirements (10%) -- Do you have expertise to create authoritative content? -- What additional research is needed? -- What assets (graphics, data, examples) will you need? 
- -### Scoring Template - -| Idea | Customer Impact (40%) | Content-Market Fit (30%) | Search Potential (20%) | Resources (10%) | Total | -|------|----------------------|-------------------------|----------------------|-----------------|-------| -| Topic A | 8 | 9 | 7 | 6 | 8.0 | -| Topic B | 6 | 7 | 9 | 8 | 7.1 | - ---- +→ See references/content-strategy-reference.md for details ## Output Format diff --git a/docs/skills/marketing-skill/email-sequence.md b/docs/skills/marketing-skill/email-sequence.md index 338515b..a221ca4 100644 --- a/docs/skills/marketing-skill/email-sequence.md +++ b/docs/skills/marketing-skill/email-sequence.md @@ -45,212 +45,7 @@ Before creating a sequence, understand: --- ## Core Principles - -### 1. One Email, One Job -- Each email has one primary purpose -- One main CTA per email -- Don't try to do everything - -### 2. Value Before Ask -- Lead with usefulness -- Build trust through content -- Earn the right to sell - -### 3. Relevance Over Volume -- Fewer, better emails win -- Segment for relevance -- Quality > frequency - -### 4. Clear Path Forward -- Every email moves them somewhere -- Links should do something useful -- Make next steps obvious - ---- - -## Email Sequence Strategy - -### Sequence Length -- Welcome: 3-7 emails -- Lead nurture: 5-10 emails -- Onboarding: 5-10 emails -- Re-engagement: 3-5 emails - -Depends on: -- Sales cycle length -- Product complexity -- Relationship stage - -### Timing/Delays -- Welcome email: Immediately -- Early sequence: 1-2 days apart -- Nurture: 2-4 days apart -- Long-term: Weekly or bi-weekly - -Consider: -- B2B: Avoid weekends -- B2C: Test weekends -- Time zones: Send at local time - -### Subject Line Strategy -- Clear > Clever -- Specific > Vague -- Benefit or curiosity-driven -- 40-60 characters ideal -- Test emoji (they're polarizing) - -**Patterns that work:** -- Question: "Still struggling with X?" 
-- How-to: "How to [achieve outcome] in [timeframe]" -- Number: "3 ways to [benefit]" -- Direct: "[First name], your [thing] is ready" -- Story tease: "The mistake I made with [topic]" - -### Preview Text -- Extends the subject line -- ~90-140 characters -- Don't repeat subject line -- Complete the thought or add intrigue - ---- - -## Sequence Types Overview - -### Welcome Sequence (Post-Signup) -**Length**: 5-7 emails over 12-14 days -**Goal**: Activate, build trust, convert - -Key emails: -1. Welcome + deliver promised value (immediate) -2. Quick win (day 1-2) -3. Story/Why (day 3-4) -4. Social proof (day 5-6) -5. Overcome objection (day 7-8) -6. Core feature highlight (day 9-11) -7. Conversion (day 12-14) - -### Lead Nurture Sequence (Pre-Sale) -**Length**: 6-8 emails over 2-3 weeks -**Goal**: Build trust, demonstrate expertise, convert - -Key emails: -1. Deliver lead magnet + intro (immediate) -2. Expand on topic (day 2-3) -3. Problem deep-dive (day 4-5) -4. Solution framework (day 6-8) -5. Case study (day 9-11) -6. Differentiation (day 12-14) -7. Objection handler (day 15-18) -8. Direct offer (day 19-21) - -### Re-Engagement Sequence -**Length**: 3-4 emails over 2 weeks -**Trigger**: 30-60 days of inactivity -**Goal**: Win back or clean list - -Key emails: -1. Check-in (genuine concern) -2. Value reminder (what's new) -3. Incentive (special offer) -4. Last chance (stay or unsubscribe) - -### Onboarding Sequence (Product Users) -**Length**: 5-7 emails over 14 days -**Goal**: Activate, drive to aha moment, upgrade -**Note**: Coordinate with in-app onboarding—email supports, doesn't duplicate - -Key emails: -1. Welcome + first step (immediate) -2. Getting started help (day 1) -3. Feature highlight (day 2-3) -4. Success story (day 4-5) -5. Check-in (day 7) -6. Advanced tip (day 10-12) -7. 
Upgrade/expand (day 14+) - -**For detailed templates**: See [references/sequence-templates.md](references/sequence-templates.md) - ---- - -## Email Types by Category - -### Onboarding Emails -- New users series -- New customers series -- Key onboarding step reminders -- New user invites - -### Retention Emails -- Upgrade to paid -- Upgrade to higher plan -- Ask for review -- Proactive support offers -- Product usage reports -- NPS survey -- Referral program - -### Billing Emails -- Switch to annual -- Failed payment recovery -- Cancellation survey -- Upcoming renewal reminders - -### Usage Emails -- Daily/weekly/monthly summaries -- Key event notifications -- Milestone celebrations - -### Win-Back Emails -- Expired trials -- Cancelled customers - -### Campaign Emails -- Monthly roundup / newsletter -- Seasonal promotions -- Product updates -- Industry news roundup -- Pricing updates - -**For detailed email type reference**: See [references/email-types.md](references/email-types.md) - ---- - -## Email Copy Guidelines - -### Structure -1. **Hook**: First line grabs attention -2. **Context**: Why this matters to them -3. **Value**: The useful content -4. **CTA**: What to do next -5. **Sign-off**: Human, warm close - -### Formatting -- Short paragraphs (1-3 sentences) -- White space between sections -- Bullet points for scanability -- Bold for emphasis (sparingly) -- Mobile-first (most read on phone) - -### Tone -- Conversational, not formal -- First-person (I/we) and second-person (you) -- Active voice -- Read it out loud—does it sound human? 
- -### Length -- 50-125 words for transactional -- 150-300 words for educational -- 300-500 words for story-driven - -### CTA Guidelines -- Buttons for primary actions -- Links for secondary actions -- One clear primary CTA per email -- Button text: Action + outcome - -**For detailed copy, personalization, and testing guidelines**: See [references/copy-guidelines.md](references/copy-guidelines.md) - ---- +→ See references/email-sequence-playbook.md for details ## Output Format diff --git a/docs/skills/marketing-skill/form-cro.md b/docs/skills/marketing-skill/form-cro.md index de6b6ad..3149692 100644 --- a/docs/skills/marketing-skill/form-cro.md +++ b/docs/skills/marketing-skill/form-cro.md @@ -44,274 +44,7 @@ Before providing recommendations, identify: --- ## Core Principles - -### 1. Every Field Has a Cost -Each field reduces completion rate. Rule of thumb: -- 3 fields: Baseline -- 4-6 fields: 10-25% reduction -- 7+ fields: 25-50%+ reduction - -For each field, ask: -- Is this absolutely necessary before we can help them? -- Can we get this information another way? -- Can we ask this later? - -### 2. Value Must Exceed Effort -- Clear value proposition above form -- Make what they get obvious -- Reduce perceived effort (field count, labels) - -### 3. Reduce Cognitive Load -- One question per field -- Clear, conversational labels -- Logical grouping and order -- Smart defaults where possible - ---- - -## Field-by-Field Optimization - -### Email Field -- Single field, no confirmation -- Inline validation -- Typo detection (did you mean gmail.com?) -- Proper mobile keyboard - -### Name Fields -- Single "Name" vs. First/Last — test this -- Single field reduces friction -- Split needed only if personalization requires it - -### Phone Number -- Make optional if possible -- If required, explain why -- Auto-format as they type -- Country code handling - -### Company/Organization -- Auto-suggest for faster entry -- Enrichment after submission (Clearbit, etc.) 
-- Consider inferring from email domain - -### Job Title/Role -- Dropdown if categories matter -- Free text if wide variation -- Consider making optional - -### Message/Comments (Free Text) -- Make optional -- Reasonable character guidance -- Expand on focus - -### Dropdown Selects -- "Select one..." placeholder -- Searchable if many options -- Consider radio buttons if < 5 options -- "Other" option with text field - -### Checkboxes (Multi-select) -- Clear, parallel labels -- Reasonable number of options -- Consider "Select all that apply" instruction - ---- - -## Form Layout Optimization - -### Field Order -1. Start with easiest fields (name, email) -2. Build commitment before asking more -3. Sensitive fields last (phone, company size) -4. Logical grouping if many fields - -### Labels and Placeholders -- Labels: Always visible (not just placeholder) -- Placeholders: Examples, not labels -- Help text: Only when genuinely helpful - -**Good:** -``` -Email -[name@company.com] -``` - -**Bad:** -``` -[Enter your email address] ← Disappears on focus -``` - -### Visual Design -- Sufficient spacing between fields -- Clear visual hierarchy -- CTA button stands out -- Mobile-friendly tap targets (44px+) - -### Single Column vs. Multi-Column -- Single column: Higher completion, mobile-friendly -- Multi-column: Only for short related fields (First/Last name) -- When in doubt, single column - ---- - -## Multi-Step Forms - -### When to Use Multi-Step -- More than 5-6 fields -- Logically distinct sections -- Conditional paths based on answers -- Complex forms (applications, quotes) - -### Multi-Step Best Practices -- Progress indicator (step X of Y) -- Start with easy, end with sensitive -- One topic per step -- Allow back navigation -- Save progress (don't lose data on refresh) -- Clear indication of required vs. optional - -### Progressive Commitment Pattern -1. Low-friction start (just email) -2. More detail (name, company) -3. Qualifying questions -4. 
Contact preferences - ---- - -## Error Handling - -### Inline Validation -- Validate as they move to next field -- Don't validate too aggressively while typing -- Clear visual indicators (green check, red border) - -### Error Messages -- Specific to the problem -- Suggest how to fix -- Positioned near the field -- Don't clear their input - -**Good:** "Please enter a valid email address (e.g., name@company.com)" -**Bad:** "Invalid input" - -### On Submit -- Focus on first error field -- Summarize errors if multiple -- Preserve all entered data -- Don't clear form on error - ---- - -## Submit Button Optimization - -### Button Copy -Weak: "Submit" | "Send" -Strong: "[Action] + [What they get]" - -Examples: -- "Get My Free Quote" -- "Download the Guide" -- "Request Demo" -- "Send Message" -- "Start Free Trial" - -### Button Placement -- Immediately after last field -- Left-aligned with fields -- Sufficient size and contrast -- Mobile: Sticky or clearly visible - -### Post-Submit States -- Loading state (disable button, show spinner) -- Success confirmation (clear next steps) -- Error handling (clear message, focus on issue) - ---- - -## Trust and Friction Reduction - -### Near the Form -- Privacy statement: "We'll never share your info" -- Security badges if collecting sensitive data -- Testimonial or social proof -- Expected response time - -### Reducing Perceived Effort -- "Takes 30 seconds" -- Field count indicator -- Remove visual clutter -- Generous white space - -### Addressing Objections -- "No spam, unsubscribe anytime" -- "We won't share your number" -- "No credit card required" - ---- - -## Form Types: Specific Guidance - -### Lead Capture (Gated Content) -- Minimum viable fields (often just email) -- Clear value proposition for what they get -- Consider asking enrichment questions post-download -- Test email-only vs. 
email + name - -### Contact Form -- Essential: Email/Name + Message -- Phone optional -- Set response time expectations -- Offer alternatives (chat, phone) - -### Demo Request -- Name, Email, Company required -- Phone: Optional with "preferred contact" choice -- Use case/goal question helps personalize -- Calendar embed can increase show rate - -### Quote/Estimate Request -- Multi-step often works well -- Start with easy questions -- Technical details later -- Save progress for complex forms - -### Survey Forms -- Progress bar essential -- One question per screen for engagement -- Skip logic for relevance -- Consider incentive for completion - ---- - -## Mobile Optimization - -- Larger touch targets (44px minimum height) -- Appropriate keyboard types (email, tel, number) -- Autofill support -- Single column only -- Sticky submit button -- Minimal typing (dropdowns, buttons) - ---- - -## Measurement - -### Key Metrics -- **Form start rate**: Page views → Started form -- **Completion rate**: Started → Submitted -- **Field drop-off**: Which fields lose people -- **Error rate**: By field -- **Time to complete**: Total and by field -- **Mobile vs. desktop**: Completion by device - -### What to Track -- Form views -- First field focus -- Each field completion -- Errors by field -- Submit attempts -- Successful submissions - ---- +→ See references/form-cro-playbook.md for details ## Output Format diff --git a/docs/skills/marketing-skill/launch-strategy.md b/docs/skills/marketing-skill/launch-strategy.md index d234f31..59a8d91 100644 --- a/docs/skills/marketing-skill/launch-strategy.md +++ b/docs/skills/marketing-skill/launch-strategy.md @@ -22,320 +22,7 @@ If `.claude/product-marketing-context.md` exists, read it before asking question --- ## Core Philosophy - -The best companies don't just launch once—they launch again and again. Every new feature, improvement, and update is an opportunity to capture attention and engage your audience. 
- -A strong launch isn't about a single moment. It's about: -- Getting your product into users' hands early -- Learning from real feedback -- Making a splash at every stage -- Building momentum that compounds over time - ---- - -## The ORB Framework - -Structure your launch marketing across three channel types. Everything should ultimately lead back to owned channels. - -### Owned Channels -You own the channel (though not the audience). Direct access without algorithms or platform rules. - -**Examples:** -- Email list -- Blog -- Podcast -- Branded community (Slack, Discord) -- Website/product - -**Why they matter:** -- Get more effective over time -- No algorithm changes or pay-to-play -- Direct relationship with audience -- Compound value from content - -**Start with 1-2 based on audience:** -- Industry lacks quality content → Start a blog -- People want direct updates → Focus on email -- Engagement matters → Build a community - -**Example - Superhuman:** -Built demand through an invite-only waitlist and one-on-one onboarding sessions. Every new user got a 30-minute live demo. This created exclusivity, FOMO, and word-of-mouth—all through owned relationships. Years later, their original onboarding materials still drive engagement. - -### Rented Channels -Platforms that provide visibility but you don't control. Algorithms shift, rules change, pay-to-play increases. - -**Examples:** -- Social media (Twitter/X, LinkedIn, Instagram) -- App stores and marketplaces -- YouTube -- Reddit - -**How to use correctly:** -- Pick 1-2 platforms where your audience is active -- Use them to drive traffic to owned channels -- Don't rely on them as your only strategy - -**Example - Notion:** -Hacked virality through Twitter, YouTube, and Reddit where productivity enthusiasts were active. Encouraged community to share templates and workflows. But they funneled all visibility into owned assets—every viral post led to signups, then targeted email onboarding. 
- -**Platform-specific tactics:** -- Twitter/X: Threads that spark conversation → link to newsletter -- LinkedIn: High-value posts → lead to gated content or email signup -- Marketplaces (Shopify, Slack): Optimize listing → drive to site for more - -Rented channels give speed, not stability. Capture momentum by bringing users into your owned ecosystem. - -### Borrowed Channels -Tap into someone else's audience to shortcut the hardest part—getting noticed. - -**Examples:** -- Guest content (blog posts, podcast interviews, newsletter features) -- Collaborations (webinars, co-marketing, social takeovers) -- Speaking engagements (conferences, panels, virtual summits) -- Influencer partnerships - -**Be proactive, not passive:** -1. List industry leaders your audience follows -2. Pitch win-win collaborations -3. Use tools like SparkToro or Listen Notes to find audience overlap -4. Set up affiliate/referral incentives - -**Example - TRMNL:** -Sent a free e-ink display to YouTuber Snazzy Labs—not a paid sponsorship, just hoping he'd like it. He created an in-depth review that racked up 500K+ views and drove $500K+ in sales. They also set up an affiliate program for ongoing promotion. - -Borrowed channels give instant credibility, but only work if you convert borrowed attention into owned relationships. - ---- - -## Five-Phase Launch Approach - -Launching isn't a one-day event. It's a phased process that builds momentum. - -### Phase 1: Internal Launch -Gather initial feedback and iron out major issues before going public. - -**Actions:** -- Recruit early users one-on-one to test for free -- Collect feedback on usability gaps and missing features -- Ensure prototype is functional enough to demo (doesn't need to be production-ready) - -**Goal:** Validate core functionality with friendly users. - -### Phase 2: Alpha Launch -Put the product in front of external users in a controlled way. 
- -**Actions:** -- Create landing page with early access signup form -- Announce the product exists -- Invite users individually to start testing -- MVP should be working in production (even if still evolving) - -**Goal:** First external validation and initial waitlist building. - -### Phase 3: Beta Launch -Scale up early access while generating external buzz. - -**Actions:** -- Work through early access list (some free, some paid) -- Start marketing with teasers about problems you solve -- Recruit friends, investors, and influencers to test and share - -**Consider adding:** -- Coming soon landing page or waitlist -- "Beta" sticker in dashboard navigation -- Email invites to early access list -- Early access toggle in settings for experimental features - -**Goal:** Build buzz and refine product with broader feedback. - -### Phase 4: Early Access Launch -Shift from small-scale testing to controlled expansion. - -**Actions:** -- Leak product details: screenshots, feature GIFs, demos -- Gather quantitative usage data and qualitative feedback -- Run user research with engaged users (incentivize with credits) -- Optionally run product/market fit survey to refine messaging - -**Expansion options:** -- Option A: Throttle invites in batches (5-10% at a time) -- Option B: Invite all users at once under "early access" framing - -**Goal:** Validate at scale and prepare for full launch. - -### Phase 5: Full Launch -Open the floodgates. - -**Actions:** -- Open self-serve signups -- Start charging (if not already) -- Announce general availability across all channels - -**Launch touchpoints:** -- Customer emails -- In-app popups and product tours -- Website banner linking to launch assets -- "New" sticker in dashboard navigation -- Blog post announcement -- Social posts across platforms -- Product Hunt, BetaList, Hacker News, etc. - -**Goal:** Maximum visibility and conversion to paying users. 
- ---- - -## Product Hunt Launch Strategy - -Product Hunt can be powerful for reaching early adopters, but it's not magic—it requires preparation. - -### Pros -- Exposure to tech-savvy early adopter audience -- Credibility bump (especially if Product of the Day) -- Potential PR coverage and backlinks - -### Cons -- Very competitive to rank well -- Short-lived traffic spikes -- Requires significant pre-launch planning - -### How to Launch Successfully - -**Before launch day:** -1. Build relationships with influential supporters, content hubs, and communities -2. Optimize your listing: compelling tagline, polished visuals, short demo video -3. Study successful launches to identify what worked -4. Engage in relevant communities—provide value before pitching -5. Prepare your team for all-day engagement - -**On launch day:** -1. Treat it as an all-day event -2. Respond to every comment in real-time -3. Answer questions and spark discussions -4. Encourage your existing audience to engage -5. Direct traffic back to your site to capture signups - -**After launch day:** -1. Follow up with everyone who engaged -2. Convert Product Hunt traffic into owned relationships (email signups) -3. Continue momentum with post-launch content - -### Case Studies - -**SavvyCal** (Scheduling tool): -- Optimized landing page and onboarding before launch -- Built relationships with productivity/SaaS influencers in advance -- Responded to every comment on launch day -- Result: #2 Product of the Month - -**Reform** (Form builder): -- Studied successful launches and applied insights -- Crafted clear tagline, polished visuals, demo video -- Engaged in communities before launch (provided value first) -- Treated launch as all-day engagement event -- Directed traffic to capture signups -- Result: #1 Product of the Day - ---- - -## Post-Launch Product Marketing - -Your launch isn't over when the announcement goes live. Now comes adoption and retention work. 
- -### Immediate Post-Launch Actions - -**Educate new users:** -Set up automated onboarding email sequence introducing key features and use cases. - -**Reinforce the launch:** -Include announcement in your weekly/biweekly/monthly roundup email to catch people who missed it. - -**Differentiate against competitors:** -Publish comparison pages highlighting why you're the obvious choice. - -**Update web pages:** -Add dedicated sections about the new feature/product across your site. - -**Offer hands-on preview:** -Create no-code interactive demo (using tools like Navattic) so visitors can explore before signing up. - -### Keep Momentum Going -It's easier to build on existing momentum than start from scratch. Every touchpoint reinforces the launch. - ---- - -## Ongoing Launch Strategy - -Don't rely on a single launch event. Regular updates and feature rollouts sustain engagement. - -### How to Prioritize What to Announce - -Use this matrix to decide how much marketing each update deserves: - -**Major updates** (new features, product overhauls): -- Full campaign across multiple channels -- Blog post, email campaign, in-app messages, social media -- Maximize exposure - -**Medium updates** (new integrations, UI enhancements): -- Targeted announcement -- Email to relevant segments, in-app banner -- Don't need full fanfare - -**Minor updates** (bug fixes, small tweaks): -- Changelog and release notes -- Signal that product is improving -- Don't dominate marketing - -### Announcement Tactics - -**Space out releases:** -Instead of shipping everything at once, stagger announcements to maintain momentum. - -**Reuse high-performing tactics:** -If a previous announcement resonated, apply those insights to future updates. - -**Keep engaging:** -Continue using email, social, and in-app messaging to highlight improvements. - -**Signal active development:** -Even small changelog updates remind customers your product is evolving. 
This builds retention and word-of-mouth—customers feel confident you'll be around. - ---- - -## Launch Checklist - -### Pre-Launch -- [ ] Landing page with clear value proposition -- [ ] Email capture / waitlist signup -- [ ] Early access list built -- [ ] Owned channels established (email, blog, community) -- [ ] Rented channel presence (social profiles optimized) -- [ ] Borrowed channel opportunities identified (podcasts, influencers) -- [ ] Product Hunt listing prepared (if using) -- [ ] Launch assets created (screenshots, demo video, GIFs) -- [ ] Onboarding flow ready -- [ ] Analytics/tracking in place - -### Launch Day -- [ ] Announcement email to list -- [ ] Blog post published -- [ ] Social posts scheduled and posted -- [ ] Product Hunt listing live (if using) -- [ ] In-app announcement for existing users -- [ ] Website banner/notification active -- [ ] Team ready to engage and respond -- [ ] Monitor for issues and feedback - -### Post-Launch -- [ ] Onboarding email sequence active -- [ ] Follow-up with engaged prospects -- [ ] Roundup email includes announcement -- [ ] Comparison pages published -- [ ] Interactive demo created -- [ ] Gather and act on feedback -- [ ] Plan next launch moment - ---- +→ See references/launch-frameworks-and-checklists.md for details ## Task-Specific Questions diff --git a/docs/skills/marketing-skill/marketing-demand-acquisition.md b/docs/skills/marketing-skill/marketing-demand-acquisition.md index d3e4f80..71591cb 100644 --- a/docs/skills/marketing-skill/marketing-demand-acquisition.md +++ b/docs/skills/marketing-skill/marketing-demand-acquisition.md @@ -16,7 +16,6 @@ Acquisition playbook for Series A+ startups scaling internationally (EU/US/Canad ## Table of Contents -- [Role Coverage](#role-coverage) - [Core KPIs](#core-kpis) - [Demand Generation Framework](#demand-generation-framework) - [Paid Media Channels](#paid-media-channels) @@ -28,17 +27,6 @@ Acquisition playbook for Series A+ startups scaling internationally 
(EU/US/Canad --- -## Role Coverage - -| Role | Focus Areas | -|------|-------------| -| Demand Generation Manager | Multi-channel campaigns, pipeline generation | -| Paid Media Marketer | Paid search/social/display optimization | -| SEO Manager | Organic acquisition, technical SEO | -| Partnerships Manager | Co-marketing, channel partnerships | - ---- - ## Core KPIs **Demand Gen:** MQL/SQL volume, cost per opportunity, marketing-sourced pipeline $, MQL→SQL rate @@ -301,21 +289,6 @@ Required: - **CAC exceeding LTV** → Demand gen is unprofitable. Optimize or cut channels. - **No nurture for non-ready leads** → 80% of leads aren't ready to buy. Nurture converts them later. -## Output Artifacts - -| When you ask for... | You get... | -|---------------------|------------| -| "Demand gen plan" | Multi-channel acquisition strategy with budget allocation | -| "Pipeline analysis" | Funnel conversion rates with bottleneck identification | -| "Channel strategy" | Channel selection matrix based on audience and budget | - -## Communication - -All output passes quality verification: -- Self-verify: source attribution, assumption audit, confidence scoring -- Output format: Bottom Line → What (with confidence) → Why → How to Act -- Results only. Every finding tagged: 🟢 verified, 🟡 medium, 🔴 assumed. - ## Related Skills - **paid-ads**: For executing paid acquisition campaigns. 
diff --git a/docs/skills/marketing-skill/marketing-strategy-pmm.md b/docs/skills/marketing-skill/marketing-strategy-pmm.md index d3bea30..e8fcc87 100644 --- a/docs/skills/marketing-skill/marketing-strategy-pmm.md +++ b/docs/skills/marketing-skill/marketing-strategy-pmm.md @@ -53,20 +53,11 @@ Define ideal customer profile for targeting: ### Buyer Personas -**Economic Buyer** (signs contract): -- Title: VP, Director, Head of [Department] -- Goals: ROI, team productivity, cost reduction -- Messaging: Business outcomes, ROI, case studies - -**Technical Buyer** (evaluates product): -- Title: Engineer, Architect, Tech Lead -- Goals: Technical fit, easy integration -- Messaging: Architecture, security, documentation - -**User/Champion** (advocates internally): -- Title: Manager, Team Lead, Power User -- Goals: Makes job easier, quick wins -- Messaging: UX, ease of use, time savings +| Persona | Title | Goals | Messaging | +|---------|-------|-------|-----------| +| Economic Buyer | VP, Director, Head of [Department] | ROI, team productivity, cost reduction | Business outcomes, ROI, case studies | +| Technical Buyer | Engineer, Architect, Tech Lead | Technical fit, easy integration | Architecture, security, documentation | +| User/Champion | Manager, Team Lead, Power User | Makes job easier, quick wins | UX, ease of use, time savings | ### ICP Validation Checklist diff --git a/docs/skills/marketing-skill/popup-cro.md b/docs/skills/marketing-skill/popup-cro.md index 5d90ca8..904b2b1 100644 --- a/docs/skills/marketing-skill/popup-cro.md +++ b/docs/skills/marketing-skill/popup-cro.md @@ -44,264 +44,7 @@ Before providing recommendations, understand: --- ## Core Principles - -### 1. Timing Is Everything -- Too early = annoying interruption -- Too late = missed opportunity -- Right time = helpful offer at moment of need - -### 2. Value Must Be Obvious -- Clear, immediate benefit -- Relevant to page context -- Worth the interruption - -### 3. 
Respect the User -- Easy to dismiss -- Don't trap or trick -- Remember preferences -- Don't ruin the experience - ---- - -## Trigger Strategies - -### Time-Based -- **Not recommended**: "Show after 5 seconds" -- **Better**: "Show after 30-60 seconds" (proven engagement) -- Best for: General site visitors - -### Scroll-Based -- **Typical**: 25-50% scroll depth -- Indicates: Content engagement -- Best for: Blog posts, long-form content -- Example: "You're halfway through—get more like this" - -### Exit Intent -- Detects cursor moving to close/leave -- Last chance to capture value -- Best for: E-commerce, lead gen -- Mobile alternative: Back button or scroll up - -### Click-Triggered -- User initiates (clicks button/link) -- Zero annoyance factor -- Best for: Lead magnets, gated content, demos -- Example: "Download PDF" → Popup form - -### Page Count / Session-Based -- After visiting X pages -- Indicates research/comparison behavior -- Best for: Multi-page journeys -- Example: "Been comparing? Here's a summary..." 
- -### Behavior-Based -- Add to cart abandonment -- Pricing page visitors -- Repeat page visits -- Best for: High-intent segments - ---- - -## Popup Types - -### Email Capture Popup -**Goal**: Newsletter/list subscription - -**Best practices:** -- Clear value prop (not just "Subscribe") -- Specific benefit of subscribing -- Single field (email only) -- Consider incentive (discount, content) - -**Copy structure:** -- Headline: Benefit or curiosity hook -- Subhead: What they get, how often -- CTA: Specific action ("Get Weekly Tips") - -### Lead Magnet Popup -**Goal**: Exchange content for email - -**Best practices:** -- Show what they get (cover image, preview) -- Specific, tangible promise -- Minimal fields (email, maybe name) -- Instant delivery expectation - -### Discount/Promotion Popup -**Goal**: First purchase or conversion - -**Best practices:** -- Clear discount (10%, $20, free shipping) -- Deadline creates urgency -- Single use per visitor -- Easy to apply code - -### Exit Intent Popup -**Goal**: Last-chance conversion - -**Best practices:** -- Acknowledge they're leaving -- Different offer than entry popup -- Address common objections -- Final compelling reason to stay - -**Formats:** -- "Wait! Before you go..." -- "Forget something?" -- "Get 10% off your first order" -- "Questions? Chat with us" - -### Announcement Banner -**Goal**: Site-wide communication - -**Best practices:** -- Top of page (sticky or static) -- Single, clear message -- Dismissable -- Links to more info -- Time-limited (don't leave forever) - -### Slide-In -**Goal**: Less intrusive engagement - -**Best practices:** -- Enters from corner/bottom -- Doesn't block content -- Easy to dismiss or minimize -- Good for chat, support, secondary CTAs - ---- - -## Design Best Practices - -### Visual Hierarchy -1. Headline (largest, first seen) -2. Value prop/offer (clear benefit) -3. Form/CTA (obvious action) -4. 
Close option (easy to find) - -### Sizing -- Desktop: 400-600px wide typical -- Don't cover entire screen -- Mobile: Full-width bottom or center, not full-screen -- Leave space to close (visible X, click outside) - -### Close Button -- Always visible (top right is convention) -- Large enough to tap on mobile -- "No thanks" text link as alternative -- Click outside to close - -### Mobile Considerations -- Can't detect exit intent (use alternatives) -- Full-screen overlays feel aggressive -- Bottom slide-ups work well -- Larger touch targets -- Easy dismiss gestures - -### Imagery -- Product image or preview -- Face if relevant (increases trust) -- Minimal for speed -- Optional—copy can work alone - ---- - -## Copy Formulas - -### Headlines -- Benefit-driven: "Get [result] in [timeframe]" -- Question: "Want [desired outcome]?" -- Command: "Don't miss [thing]" -- Social proof: "Join [X] people who..." -- Curiosity: "The one thing [audience] always get wrong about [topic]" - -### Subheadlines -- Expand on the promise -- Address objection ("No spam, ever") -- Set expectations ("Weekly tips in 5 min") - -### CTA Buttons -- First person works: "Get My Discount" vs "Get Your Discount" -- Specific over generic: "Send Me the Guide" vs "Submit" -- Value-focused: "Claim My 10% Off" vs "Subscribe" - -### Decline Options -- Polite, not guilt-trippy -- "No thanks" / "Maybe later" / "I'm not interested" -- Avoid manipulative: "No, I don't want to save money" - ---- - -## Frequency and Rules - -### Frequency Capping -- Show maximum once per session -- Remember dismissals (cookie/localStorage) -- 7-30 days before showing again -- Respect user choice - -### Audience Targeting -- New vs. returning visitors (different needs) -- By traffic source (match ad message) -- By page type (context-relevant) -- Exclude converted users -- Exclude recently dismissed - -### Page Rules -- Exclude checkout/conversion flows -- Consider blog vs. 
product pages -- Match offer to page context - ---- - -## Compliance and Accessibility - -### GDPR/Privacy -- Clear consent language -- Link to privacy policy -- Don't pre-check opt-ins -- Honor unsubscribe/preferences - -### Accessibility -- Keyboard navigable (Tab, Enter, Esc) -- Focus trap while open -- Screen reader compatible -- Sufficient color contrast -- Don't rely on color alone - -### Google Guidelines -- Intrusive interstitials hurt SEO -- Mobile especially sensitive -- Allow: Cookie notices, age verification, reasonable banners -- Avoid: Full-screen before content on mobile - ---- - -## Measurement - -### Key Metrics -- **Impression rate**: Visitors who see popup -- **Conversion rate**: Impressions → Submissions -- **Close rate**: How many dismiss immediately -- **Engagement rate**: Interaction before close -- **Time to close**: How long before dismissing - -### What to Track -- Popup views -- Form focus -- Submission attempts -- Successful submissions -- Close button clicks -- Outside clicks -- Escape key - -### Benchmarks -- Email popup: 2-5% conversion typical -- Exit intent: 3-10% conversion -- Click-triggered: Higher (10%+, self-selected) - ---- +→ See references/popup-cro-playbook.md for details ## Output Format diff --git a/docs/skills/marketing-skill/prompt-engineer-toolkit.md b/docs/skills/marketing-skill/prompt-engineer-toolkit.md index 2efb499..d619dd7 100644 --- a/docs/skills/marketing-skill/prompt-engineer-toolkit.md +++ b/docs/skills/marketing-skill/prompt-engineer-toolkit.md @@ -12,13 +12,9 @@ description: "Prompt Engineer Toolkit - Claude Code skill from the Marketing dom # Prompt Engineer Toolkit -**Tier:** POWERFUL -**Category:** Marketing Skill / AI Operations -**Domain:** Prompt Engineering, LLM Optimization, AI Workflows - ## Overview -Use this skill to move prompts from ad-hoc drafts to production assets with repeatable testing, versioning, and regression safety. It emphasizes measurable quality over intuition. 
+Use this skill to move prompts from ad-hoc drafts to production assets with repeatable testing, versioning, and regression safety. It emphasizes measurable quality over intuition. Apply it when launching a new LLM feature that needs reliable outputs, when prompt quality degrades after model or instruction changes, when multiple team members edit prompts and need history/diffs, when you need evidence-based prompt choice for production rollout, or when you want consistent prompt governance across environments. ## Core Capabilities @@ -29,14 +25,6 @@ Use this skill to move prompts from ad-hoc drafts to production assets with repe - Reusable prompt templates and selection guidance - Regression-friendly workflows for model/prompt updates -## When to Use - -- You are launching a new LLM feature and need reliable outputs -- Prompt quality degrades after model or instruction changes -- Multiple team members edit prompts and need history/diffs -- You need evidence-based prompt choice for production rollout -- You want consistent prompt governance across environments - ## Key Workflows ### 1. Run Prompt A/B Test @@ -98,22 +86,24 @@ python3 scripts/prompt_versioner.py changelog --name support_classifier - Manages prompt history (`add`, `list`, `diff`, `changelog`) - Stores metadata and content snapshots locally -## Common Pitfalls +## Pitfalls, Best Practices & Review Checklist -1. Picking prompts by anecdotal single-case outputs -2. Changing prompt + model simultaneously without control group -3. Missing forbidden-content checks in evaluation criteria -4. Editing prompts without version metadata or rationale -5. Failing to diff semantic changes before deploy +**Avoid these mistakes:** +1. Picking prompts from single-case outputs — use a realistic, edge-case-rich test suite. +2. Changing prompt and model simultaneously — always isolate variables. +3. Missing `must_not_contain` (forbidden-content) checks in evaluation criteria. +4. 
Editing prompts without version metadata, author, or change rationale. +5. Skipping semantic diffs before deploying a new prompt version. +6. Optimizing one benchmark while harming edge cases — track the full suite. +7. Model swap without rerunning the baseline A/B suite. -## Best Practices - -1. Keep test cases realistic and edge-case rich. -2. Always include negative checks (`must_not_contain`). -3. Store prompt versions with author and change reason. -4. Run A/B tests before and after major model upgrades. -5. Separate reusable templates from production prompt instances. -6. Maintain a small golden regression suite for every critical prompt. +**Before promoting any prompt, confirm:** +- [ ] Task intent is explicit and unambiguous. +- [ ] Output schema/format is explicit. +- [ ] Safety and exclusion constraints are explicit. +- [ ] No contradictory instructions. +- [ ] No unnecessary verbosity tokens. +- [ ] A/B score improves and violation count stays at zero. ## References @@ -147,47 +137,3 @@ This enables deterministic grading across prompt variants. 3. Run A/B suite against same cases. 4. Promote only if winner improves average and keeps violation count at zero. 5. Track post-release feedback and feed new failure cases back into test suite. - -## Prompt Review Checklist - -1. Task intent is explicit and unambiguous. -2. Output schema/format is explicit. -3. Safety and exclusion constraints are explicit. -4. Prompt avoids contradictory instructions. -5. Prompt avoids unnecessary verbosity tokens. - -## Common Operational Risks - -- Evaluating with too few test cases (false confidence) -- Optimizing for one benchmark while harming edge cases -- Missing audit trail for prompt edits in multi-author teams -- Model swap without rerunning baseline A/B suite - -## Proactive Triggers - -- **AI output sounds generic** → Prompts lack brand voice context. Include voice guidelines. -- **Inconsistent output quality** → Prompts too vague. 
Add specific examples and constraints. -- **No quality checks on AI content** → AI output needs human review. Never publish without editing. -- **Same prompt style for all tasks** → Different tasks need different prompt structures. - -## Output Artifacts - -| When you ask for... | You get... | -|---------------------|------------| -| "Improve my prompts" | Prompt audit with specific rewrites for better output | -| "Prompt templates" | Task-specific prompt templates for marketing use cases | -| "AI content workflow" | End-to-end AI-assisted content production workflow | - -## Communication - -All output passes quality verification: -- Self-verify: source attribution, assumption audit, confidence scoring -- Output format: Bottom Line → What (with confidence) → Why → How to Act -- Results only. Every finding tagged: 🟢 verified, 🟡 medium, 🔴 assumed. - -## Related Skills - -- **content-production**: For the full content pipeline. Prompt engineering supports AI-assisted writing. -- **ad-creative**: For generating ad variations using prompt techniques. -- **content-humanizer**: For refining AI-generated output to sound natural. -- **marketing-context**: Provides brand context that improves prompt outputs. diff --git a/docs/skills/marketing-skill/seo-audit.md b/docs/skills/marketing-skill/seo-audit.md index 5ad8d50..58bc9e5 100644 --- a/docs/skills/marketing-skill/seo-audit.md +++ b/docs/skills/marketing-skill/seo-audit.md @@ -39,292 +39,7 @@ Before auditing, understand: --- ## Audit Framework - -### Priority Order -1. **Crawlability & Indexation** (can Google find and index it?) -2. **Technical Foundations** (is the site fast and functional?) -3. **On-Page Optimization** (is content optimized?) -4. **Content Quality** (does it deserve to rank?) -5. **Authority & Links** (does it have credibility?) 
- ---- - -## Technical SEO Audit - -### Crawlability - -**Robots.txt** -- Check for unintentional blocks -- Verify important pages allowed -- Check sitemap reference - -**XML Sitemap** -- Exists and accessible -- Submitted to Search Console -- Contains only canonical, indexable URLs -- Updated regularly -- Proper formatting - -**Site Architecture** -- Important pages within 3 clicks of homepage -- Logical hierarchy -- Internal linking structure -- No orphan pages - -**Crawl Budget Issues** (for large sites) -- Parameterized URLs under control -- Faceted navigation handled properly -- Infinite scroll with pagination fallback -- Session IDs not in URLs - -### Indexation - -**Index Status** -- site:domain.com check -- Search Console coverage report -- Compare indexed vs. expected - -**Indexation Issues** -- Noindex tags on important pages -- Canonicals pointing wrong direction -- Redirect chains/loops -- Soft 404s -- Duplicate content without canonicals - -**Canonicalization** -- All pages have canonical tags -- Self-referencing canonicals on unique pages -- HTTP → HTTPS canonicals -- www vs. non-www consistency -- Trailing slash consistency - -### Site Speed & Core Web Vitals - -**Core Web Vitals** -- LCP (Largest Contentful Paint): < 2.5s -- INP (Interaction to Next Paint): < 200ms -- CLS (Cumulative Layout Shift): < 0.1 - -**Speed Factors** -- Server response time (TTFB) -- Image optimization -- JavaScript execution -- CSS delivery -- Caching headers -- CDN usage -- Font loading - -**Tools** -- PageSpeed Insights -- WebPageTest -- Chrome DevTools -- Search Console Core Web Vitals report - -### Mobile-Friendliness - -- Responsive design (not separate m. 
site) -- Tap target sizes -- Viewport configured -- No horizontal scroll -- Same content as desktop -- Mobile-first indexing readiness - -### Security & HTTPS - -- HTTPS across entire site -- Valid SSL certificate -- No mixed content -- HTTP → HTTPS redirects -- HSTS header (bonus) - -### URL Structure - -- Readable, descriptive URLs -- Keywords in URLs where natural -- Consistent structure -- No unnecessary parameters -- Lowercase and hyphen-separated - ---- - -## On-Page SEO Audit - -### Title Tags - -**Check for:** -- Unique titles for each page -- Primary keyword near beginning -- 50-60 characters (visible in SERP) -- Compelling and click-worthy -- Brand name placement (end, usually) - -**Common issues:** -- Duplicate titles -- Too long (truncated) -- Too short (wasted opportunity) -- Keyword stuffing -- Missing entirely - -### Meta Descriptions - -**Check for:** -- Unique descriptions per page -- 150-160 characters -- Includes primary keyword -- Clear value proposition -- Call to action - -**Common issues:** -- Duplicate descriptions -- Auto-generated garbage -- Too long/short -- No compelling reason to click - -### Heading Structure - -**Check for:** -- One H1 per page -- H1 contains primary keyword -- Logical hierarchy (H1 → H2 → H3) -- Headings describe content -- Not just for styling - -**Common issues:** -- Multiple H1s -- Skip levels (H1 → H3) -- Headings used for styling only -- No H1 on page - -### Content Optimization - -**Primary Page Content** -- Keyword in first 100 words -- Related keywords naturally used -- Sufficient depth/length for topic -- Answers search intent -- Better than competitors - -**Thin Content Issues** -- Pages with little unique content -- Tag/category pages with no value -- Doorway pages -- Duplicate or near-duplicate content - -### Image Optimization - -**Check for:** -- Descriptive file names -- Alt text on all images -- Alt text describes image -- Compressed file sizes -- Modern formats (WebP) -- Lazy loading implemented -- 
Responsive images - -### Internal Linking - -**Check for:** -- Important pages well-linked -- Descriptive anchor text -- Logical link relationships -- No broken internal links -- Reasonable link count per page - -**Common issues:** -- Orphan pages (no internal links) -- Over-optimized anchor text -- Important pages buried -- Excessive footer/sidebar links - -### Keyword Targeting - -**Per Page** -- Clear primary keyword target -- Title, H1, URL aligned -- Content satisfies search intent -- Not competing with other pages (cannibalization) - -**Site-Wide** -- Keyword mapping document -- No major gaps in coverage -- No keyword cannibalization -- Logical topical clusters - ---- - -## Content Quality Assessment - -### E-E-A-T Signals - -**Experience** -- First-hand experience demonstrated -- Original insights/data -- Real examples and case studies - -**Expertise** -- Author credentials visible -- Accurate, detailed information -- Properly sourced claims - -**Authoritativeness** -- Recognized in the space -- Cited by others -- Industry credentials - -**Trustworthiness** -- Accurate information -- Transparent about business -- Contact information available -- Privacy policy, terms -- Secure site (HTTPS) - -### Content Depth - -- Comprehensive coverage of topic -- Answers follow-up questions -- Better than top-ranking competitors -- Updated and current - -### User Engagement Signals - -- Time on page -- Bounce rate in context -- Pages per session -- Return visits - ---- - -## Common Issues by Site Type - -### SaaS/Product Sites -- Product pages lack content depth -- Blog not integrated with product pages -- Missing comparison/alternative pages -- Feature pages thin on content -- No glossary/educational content - -### E-commerce -- Thin category pages -- Duplicate product descriptions -- Missing product schema -- Faceted navigation creating duplicates -- Out-of-stock pages mishandled - -### Content/Blog Sites -- Outdated content not refreshed -- Keyword cannibalization -- 
No topical clustering -- Poor internal linking -- Missing author pages - -### Local Business -- Inconsistent NAP -- Missing local schema -- No Google Business Profile optimization -- Missing location pages -- No local content - ---- +→ See references/seo-audit-reference.md for details ## Output Format diff --git a/docs/skills/marketing-skill/signup-flow-cro.md b/docs/skills/marketing-skill/signup-flow-cro.md index 181194a..a25dc6b 100644 --- a/docs/skills/marketing-skill/signup-flow-cro.md +++ b/docs/skills/marketing-skill/signup-flow-cro.md @@ -42,183 +42,7 @@ Before providing recommendations, understand: --- ## Core Principles - -### 1. Minimize Required Fields -Every field reduces conversion. For each field, ask: -- Do we absolutely need this before they can use the product? -- Can we collect this later through progressive profiling? -- Can we infer this from other data? - -**Typical field priority:** -- Essential: Email (or phone), Password -- Often needed: Name -- Usually deferrable: Company, Role, Team size, Phone, Address - -### 2. Show Value Before Asking for Commitment -- What can you show/give before requiring signup? -- Can they experience the product before creating an account? -- Reverse the order: value first, signup second - -### 3. Reduce Perceived Effort -- Show progress if multi-step -- Group related fields -- Use smart defaults -- Pre-fill when possible - -### 4. 
Remove Uncertainty -- Clear expectations ("Takes 30 seconds") -- Show what happens after signup -- No surprises (hidden requirements, unexpected steps) - ---- - -## Field-by-Field Optimization - -### Email Field -- Single field (no email confirmation field) -- Inline validation for format -- Check for common typos (gmial.com → gmail.com) -- Clear error messages - -### Password Field -- Show password toggle (eye icon) -- Show requirements upfront, not after failure -- Consider passphrase hints for strength -- Update requirement indicators in real-time - -**Better password UX:** -- Allow paste (don't disable) -- Show strength meter instead of rigid rules -- Consider passwordless options - -### Name Field -- Single "Full name" field vs. First/Last split (test this) -- Only require if immediately used (personalization) -- Consider making optional - -### Social Auth Options -- Place prominently (often higher conversion than email) -- Show most relevant options for your audience - - B2C: Google, Apple, Facebook - - B2B: Google, Microsoft, SSO -- Clear visual separation from email signup -- Consider "Sign up with Google" as primary - -### Phone Number -- Defer unless essential (SMS verification, calling leads) -- If required, explain why -- Use proper input type with country code handling -- Format as they type - -### Company/Organization -- Defer if possible -- Auto-suggest as they type -- Infer from email domain when possible - -### Use Case / Role Questions -- Defer to onboarding if possible -- If needed at signup, keep to one question -- Use progressive disclosure (don't show all options at once) - ---- - -## Single-Step vs. 
Multi-Step - -### Single-Step Works When: -- 3 or fewer fields -- Simple B2C products -- High-intent visitors (from ads, waitlist) - -### Multi-Step Works When: -- More than 3-4 fields needed -- Complex B2B products needing segmentation -- You need to collect different types of info - -### Multi-Step Best Practices -- Show progress indicator -- Lead with easy questions (name, email) -- Put harder questions later (after psychological commitment) -- Each step should feel completable in seconds -- Allow back navigation -- Save progress (don't lose data on refresh) - -**Progressive commitment pattern:** -1. Email only (lowest barrier) -2. Password + name -3. Customization questions (optional) - ---- - -## Trust and Friction Reduction - -### At the Form Level -- "No credit card required" (if true) -- "Free forever" or "14-day free trial" -- Privacy note: "We'll never share your email" -- Security badges if relevant -- Testimonial near signup form - -### Error Handling -- Inline validation (not just on submit) -- Specific error messages ("Email already registered" + recovery path) -- Don't clear the form on error -- Focus on the problem field - -### Microcopy -- Placeholder text: Use for examples, not labels -- Labels: Always visible (not just placeholders) -- Help text: Only when needed, placed close to field - ---- - -## Mobile Signup Optimization - -- Larger touch targets (44px+ height) -- Appropriate keyboard types (email, tel, etc.) 
-- Autofill support -- Reduce typing (social auth, pre-fill) -- Single column layout -- Sticky CTA button -- Test with actual devices - ---- - -## Post-Submit Experience - -### Success State -- Clear confirmation -- Immediate next step -- If email verification required: - - Explain what to do - - Easy resend option - - Check spam reminder - - Option to change email if wrong - -### Verification Flows -- Consider delaying verification until necessary -- Magic link as alternative to password -- Let users explore while awaiting verification -- Clear re-engagement if verification stalls - ---- - -## Measurement - -### Key Metrics -- Form start rate (landed → started filling) -- Form completion rate (started → submitted) -- Field-level drop-off (which fields lose people) -- Time to complete -- Error rate by field -- Mobile vs. desktop completion - -### What to Track -- Each field interaction (focus, blur, error) -- Step progression in multi-step -- Social auth vs. email signup ratio -- Time between steps - ---- +→ See references/signup-cro-playbook.md for details ## Output Format diff --git a/docs/skills/product-team/competitive-teardown.md b/docs/skills/product-team/competitive-teardown.md index 9fc8aa3..728e4cf 100644 --- a/docs/skills/product-team/competitive-teardown.md +++ b/docs/skills/product-team/competitive-teardown.md @@ -10,31 +10,14 @@ description: "Competitive Teardown - Claude Code skill from the Product domain." --- +# Competitive Teardown + **Tier:** POWERFUL **Category:** Product Team **Domain:** Competitive Intelligence, Product Strategy, Market Analysis --- -## Overview - -Run a structured competitive analysis on any product or company. Synthesizes data from pricing pages, app store reviews, job postings, SEO signals, and social media into actionable insights: feature matrices, SWOT, positioning maps, UX audits, and a stakeholder presentation template. 
- ---- - -## Core Capabilities - -- Feature comparison matrix (scored 1-5 across 12 dimensions) -- Pricing model analysis (per-seat, usage-based, flat rate) -- SWOT analysis -- Positioning map (2x2 matrix) -- UX audit (onboarding, key workflows, mobile) -- Content strategy gap analysis -- Action item roadmap (quick wins / medium-term / strategic) -- Stakeholder presentation template - ---- - ## When to Use - Before a product strategy or roadmap session @@ -45,38 +28,28 @@ Run a structured competitive analysis on any product or company. Synthesizes dat --- +## Teardown Workflow + +Follow these steps in sequence to produce a complete teardown: + +1. **Define competitors** — List 2–4 competitors to analyze. Confirm which is the primary focus. +2. **Collect data** — Use `DATA_COLLECTION.md` to gather raw signals from at least 3 sources per competitor (website, reviews, job postings, SEO, social). + _Validation checkpoint: Before proceeding, confirm you have pricing data, at least 20 reviews, and job posting counts for each competitor._ +3. **Score using rubric** — Apply the 12-dimension rubric below to produce a numeric scorecard for each competitor and your own product. + _Validation checkpoint: Every dimension should have a score and at least one supporting evidence note._ +4. **Generate outputs** — Populate the templates in `TEMPLATES.md` (Feature Matrix, Pricing Analysis, SWOT, Positioning Map, UX Audit). +5. **Build action plan** — Translate findings into the Action Items template (quick wins / medium-term / strategic). +6. **Package for stakeholders** — Assemble the Stakeholder Presentation using outputs from steps 3–5. + +--- + ## Data Collection Guide +> Full executable scripts for each source are in `DATA_COLLECTION.md`. Summaries of what to capture are below. + ### 1. 
Website Analysis -```bash -# Scrape pricing page structure -curl -s "https://competitor.com/pricing" | \ - python3 -c " -import sys -from html.parser import HTMLParser - -class TextExtractor(HTMLParser): - def __init__(self): - super().__init__() - self.text = [] - def handle_data(self, data): - if data.strip(): - self.text.append(data.strip()) - -p = TextExtractor() -p.feed(sys.stdin.read()) -print('\n'.join(p.text[:200])) -" - -# Check changelog / release notes -curl -s "https://competitor.com/changelog" | grep -i "added\|new\|launched\|improved" - -# Feature list from sitemap -curl -s "https://competitor.com/sitemap.xml" | grep -oP '(?<=)[^<]+' | head -50 -``` - -Key things to capture from the website: +Key things to capture: - Pricing tiers and price points - Feature lists per tier - Primary CTA and messaging @@ -86,51 +59,21 @@ Key things to capture from the website: ### 2. App Store Reviews -```bash -# iOS reviews via RSS -curl "https://itunes.apple.com/rss/customerreviews/id=[APP_ID]/sortBy=mostRecent/json" | \ - python3 -c " -import sys, json -data = json.load(sys.stdin) -entries = data.get('feed', {}).get('entry', []) -for e in entries[1:]: # skip first (app metadata) - rating = e.get('im:rating', {}).get('label', '?') - title = e.get('title', {}).get('label', '') - content = e.get('content', {}).get('label', '') - print(f'[{rating}] {title}: {content[:200]}') -" - -# Google Play via scraping (use playwright or a reviews API) -# Categorize reviews into: praise / feature requests / bugs / UX complaints -``` - Review sentiment categories: - **Praise** → what users love (defend / strengthen these) - **Feature requests** → unmet needs (opportunity gaps) - **Bugs** → quality signals - **UX complaints** → friction points you can beat them on -### 3. 
Job Postings (Team Size & Tech Stack Signals) - -```python -# Search LinkedIn / Greenhouse / Lever / Workable -import requests - -# Example: scrape Greenhouse job board -def get_jobs(company_token): - r = requests.get(f"https://boards-api.greenhouse.io/v1/boards/{company_token}/jobs") - return r.json().get('jobs', []) - -jobs = get_jobs("competitor-name") -departments = {} -for job in jobs: - dept = job.get('departments', [{}])[0].get('name', 'Unknown') - departments[dept] = departments.get(dept, 0) + 1 - -print("Team breakdown by open roles:") -for dept, count in sorted(departments.items(), key=lambda x: -x[1]): - print(f" {dept}: {count} open roles") +**Sample App Store query (iTunes Search API):** ``` +GET https://itunes.apple.com/search?term=&entity=software&limit=1 +# Extract trackId, then: +GET https://itunes.apple.com/rss/customerreviews/id=/sortBy=mostRecent/json?l=en&limit=50 +``` +Parse `entry[].content.label` for review text and `entry[].im:rating.label` for star rating. + +### 3. Job Postings (Team Size & Tech Stack Signals) Signals from job postings: - **Engineering volume** → scaling vs. consolidating @@ -141,21 +84,6 @@ Signals from job postings: ### 4. SEO Analysis -```bash -# Organic keyword gap (using Ahrefs/Semrush API or free alternatives) -# Ubersuggest, SpyFu, or SimilarWeb free tiers - -# Quick domain overview via Moz free API -curl "https://moz.com/api/free/v2/url-metrics?targets[]=competitor.com" \ - -H "x-moz-token: YOUR_TOKEN" - -# Check their blog topics (sitemap) -curl "https://competitor.com/sitemap-posts.xml" | \ - grep -oP '(?<=)[^<]+' | \ - sed 's|.*/||' | \ - tr '-' ' ' -``` - SEO signals to capture: - Top 20 organic keywords (intent: informational / navigational / commercial) - Domain Authority / backlink count @@ -164,18 +92,7 @@ SEO signals to capture: ### 5. 
Social Media Sentiment -```bash -# Twitter/X search (via API v2) -curl "https://api.twitter.com/2/tweets/search/recent?query=%40competitor+OR+%22competitor+name%22&max_results=100" \ - -H "Authorization: Bearer $TWITTER_BEARER_TOKEN" | \ - python3 -c " -import sys, json -data = json.load(sys.stdin) -tweets = data.get('data', []) -for t in tweets: - print(t['text'][:150]) -" -``` +Capture recent mentions via Twitter/X API v2, Reddit, or LinkedIn. Look for recurring praise, complaints, and feature requests. See `DATA_COLLECTION.md` for API query examples. --- @@ -196,253 +113,61 @@ for t in tweets: | 11 | **Community** | None | Forum / Slack | Active, vibrant community | | 12 | **Innovation** | No recent releases | Quarterly | Frequent, meaningful | ---- +**Example completed row** (Competitor: Acme Corp, Dimension 3 – UX): -## Feature Comparison Matrix Template +| Dimension | Acme Corp Score | Evidence | +|-----------|----------------|---------| +| UX | 2 | App Store reviews cite "confusing navigation" (38 mentions); onboarding requires 7 steps before TTFV; no onboarding wizard; CC required at signup. | -```markdown -## Feature Comparison Matrix - -| Feature | [YOUR PRODUCT] | [COMPETITOR A] | [COMPETITOR B] | [COMPETITOR C] | -|---------|---------------|----------------|----------------|----------------| -| **Core Features** | | | | | -| [Feature 1] | 5 | 4 | 3 | 2 | -| [Feature 2] | 3 | 5 | 4 | 3 | -| [Feature 3] | 4 | 3 | 5 | 1 | -| **Pricing** | | | | | -| Free tier | Yes | No | Limited | Yes | -| Starting price | $X/mo | $Y/mo | $Z/mo | $W/mo | -| Enterprise | Custom | Custom | No | Custom | -| **Platform** | | | | | -| Web app | 5 | 5 | 4 | 3 | -| Mobile iOS | 4 | 3 | 5 | 2 | -| Mobile Android | 4 | 3 | 4 | 2 | -| API | 5 | 4 | 3 | 1 | -| **TOTAL SCORE** | **XX/60** | **XX/60** | **XX/60** | **XX/60** | - -### Score Legend: 5=Best-in-class, 4=Strong, 3=Average, 2=Below average, 1=Weak/Missing -``` +Apply this pattern to all 12 dimensions for each competitor. 
--- -## Pricing Analysis Template +## Templates -```markdown -## Pricing Analysis +> Full template markdown is in `TEMPLATES.md`. Abbreviated reference below. -### Model Comparison -| Competitor | Model | Entry | Mid | Enterprise | Free Trial | -|-----------|-------|-------|-----|------------|------------| -| [Yours] | Per-seat | $X | $Y | Custom | 14 days | -| [Comp A] | Usage-based | $X | $Y | Custom | 30 days | -| [Comp B] | Flat rate | $X | - | Custom | No | -| [Comp C] | Freemium | $0 | $Y | Custom | Freemium | +### Feature Comparison Matrix -### Pricing Intelligence -- **Price leader:** [Competitor] at $X/mo for comparable features -- **Value leader:** [Competitor] - most features per dollar -- **Premium positioning:** [Competitor] - 2x market price, targets enterprise -- **Our position:** [Describe where you sit and why] +Rows: core features, pricing tiers, platform capabilities (web, iOS, Android, API). +Columns: your product + up to 3 competitors. +Score each cell 1–5. Sum to get total out of 60. +**Score legend:** 5=Best-in-class, 4=Strong, 3=Average, 2=Below average, 1=Weak/Missing -### Pricing Opportunity -- [e.g., "No competitor offers usage-based pricing — opportunity for SMBs"] -- [e.g., "All competitors charge per seat — flat rate could disrupt"] -- [e.g., "Freemium tier could capture top-of-funnel the others miss"] -``` +### Pricing Analysis ---- +Capture per competitor: model type (per-seat / usage-based / flat rate / freemium), entry/mid/enterprise price points, free trial length. +Summarize: price leader, value leader, premium positioning, your position, and 2–3 pricing opportunity bullets. -## SWOT Analysis Template +### SWOT Analysis -```markdown -## SWOT Analysis: [COMPETITOR NAME] +For each competitor: 3–5 bullets per quadrant (Strengths, Weaknesses, Opportunities for us, Threats to us). Anchor every bullet to a data signal (review quote, job posting count, pricing page, etc.). 
-### Strengths -- [e.g., "3x more integrations than any competitor"] -- [e.g., "Strong brand recognition in enterprise segment"] -- [e.g., "Best-in-class mobile UX (4.8 App Store rating)"] +### Positioning Map -### Weaknesses -- [e.g., "No free tier — losing top-of-funnel to freemium players"] -- [e.g., "Pricing complexity confuses buyers (3 pages of pricing)"] -- [e.g., "App store reviews cite slow support response"] +2x2 axes (e.g., Simple ↔ Complex / Low Value ↔ High Value). Place each competitor and your product. Bubble size = market share or funding. See `TEMPLATES.md` for ASCII and editable versions. -### Opportunities (for US) -- [e.g., "They have no presence in DACH — our opening"] -- [e.g., "Their API is limited — power users frustrated"] -- [e.g., "Recent layoffs in engineering suggest slower roadmap"] +### UX Audit Checklist -### Threats (to Us) -- [e.g., "Well-funded — can undercut pricing for 12+ months"] -- [e.g., "Strong channel partner network we don't have"] -- [e.g., "Announced AI feature launching Q2 — may close our gap"] -``` +Onboarding: TTFV (minutes), steps to activation, CC-required, onboarding wizard quality. +Key workflows: steps, friction points, comparative score (yours vs. theirs). +Mobile: iOS/Android ratings, feature parity, top complaint and praise. +Navigation: global search, keyboard shortcuts, in-app help. 
---- +### Action Items -## Positioning Map +| Horizon | Effort | Examples | +|---------|--------|---------| +| Quick wins (0–4 wks) | Low | Add review badges, publish comparison landing page | +| Medium-term (1–3 mo) | Moderate | Launch free tier, improve onboarding TTFV, add top-requested integration | +| Strategic (3–12 mo) | High | Enter new market, build API v2, achieve SOC2 Type II | -``` - HIGH VALUE - | - [COMP A] | [YOURS] - (feature-rich, | (balanced, - expensive) | mid-price) - | -COMPLEX ────────────┼──────────────── SIMPLE - | - [COMP B] | [COMP C] - (complex, | (simple, - cheap) | cheap) - | - LOW VALUE +### Stakeholder Presentation (7 slides) -Axes: X = Complexity (Simple ↔ Complex) - Y = Value delivered (Low ↔ High) - -Bubble size = market share or funding -``` - ---- - -## UX Audit Checklist - -```markdown -## UX Audit: [COMPETITOR] - -### Onboarding Flow -- [ ] Time to first value (TTFV): _____ minutes -- [ ] Steps to activation: _____ -- [ ] Email verification required? Yes / No -- [ ] Credit card required for trial? Yes / No -- [ ] Onboarding checklist / wizard? Yes / No -- [ ] Empty state quality: 1-5 ___ - -### Key Workflows -| Workflow | Steps | Friction Points | Our Score | Their Score | -|----------|-------|-----------------|-----------|-------------| -| [Core action 1] | X | [notes] | X/5 | X/5 | -| [Core action 2] | X | [notes] | X/5 | X/5 | -| [Core action 3] | X | [notes] | X/5 | X/5 | - -### Mobile Experience -- iOS rating: _____ / 5 ([X] reviews) -- Android rating: _____ / 5 ([X] reviews) -- Mobile feature parity: Full / Partial / Web-only -- Top mobile complaint: _____ -- Top mobile praise: _____ - -### Navigation & IA -- [ ] Global search available? -- [ ] Keyboard shortcuts? -- [ ] Breadcrumbs / clear navigation? -- [ ] Help / docs accessible in-app? 
-``` - ---- - -## Action Items Template - -```markdown -## Action Items from Competitive Teardown - -### Quick Wins (0-4 weeks, low effort, high impact) -- [ ] [e.g., "Add G2/Capterra badges — competitor displays these prominently"] -- [ ] [e.g., "Publish integration page — competitor's ranks for '[product] integrations'"] -- [ ] [e.g., "Add comparison landing page targeting '[competitor] alternative' keyword"] - -### Medium-Term (1-3 months, moderate effort) -- [ ] [e.g., "Launch free tier to capture top-of-funnel competitor is missing"] -- [ ] [e.g., "Improve onboarding — competitor's TTFV is 4min vs our 12min"] -- [ ] [e.g., "Build [integration] — #1 request in competitor app store reviews"] - -### Strategic (3-12 months, high effort) -- [ ] [e.g., "Enter DACH market — competitor has no German localization"] -- [ ] [e.g., "Build API v2 — power users leaving competitor for API limitations"] -- [ ] [e.g., "Achieve SOC2 Type II — competitor uses this as primary enterprise objection handler"] -``` - ---- - -## Stakeholder Presentation Template - -```markdown -# [COMPETITOR NAME] Teardown -## Competitive Intelligence Report — [DATE] - ---- - -### Executive Summary (1 slide) -- Overall threat level: LOW / MEDIUM / HIGH / CRITICAL -- Their biggest strength vs. us: [1 sentence] -- Our biggest opportunity vs. them: [1 sentence] -- Recommended priority action: [1 sentence] - ---- - -### Market Position (1 slide) -[Insert 2x2 positioning map] - ---- - -### Feature Scorecard (1 slide) -[Insert 12-dimension radar chart or table] -Overall: [COMPETITOR] = XX/60 | [YOURS] = XX/60 - ---- - -### Pricing Analysis (1 slide) -[Insert pricing comparison table] -Key insight: [1-2 sentences] - ---- - -### UX Highlights (1 slide) -What they do better: [3 bullets] -Where we beat them: [3 bullets] - ---- - -### Voice of Customer (1 slide) -Top 3 complaints about [COMPETITOR] from reviews: -1. [Quote or paraphrase] -2. [Quote or paraphrase] -3. 
[Quote or paraphrase] - ---- - -### Our Action Plan (1 slide) -Quick wins: [2-3 bullets] -Medium-term: [2-3 bullets] -Strategic: [1-2 bullets] - ---- - -### Appendix -- Raw feature matrix -- Full review analysis -- Job posting breakdown -- SEO keyword comparison -``` - ---- - -## Common Pitfalls - -1. **Recency bias** - Pricing pages change; always date-stamp your data -2. **Feature theater** - A competitor may list a feature that barely works; check reviews -3. **Vanity metrics** - "10,000 integrations" via Zapier != 10,000 native integrations -4. **Ignoring momentum** - A weaker competitor growing 3x YoY is a bigger threat than a stronger one shrinking -5. **Only comparing features** - Brand perception and community often matter more than features -6. **Single-source analysis** - Website alone misses the real user experience; always add reviews - ---- - -## Best Practices - -- Run teardowns quarterly; competitors move fast -- Assign a DRI (directly responsible individual) for each major competitor -- Build a "battle card" 1-pager per competitor for sales to use -- Track competitor job postings monthly as a leading indicator of product direction -- Screenshot pricing pages — they change and you want the history -- Include a "what we copied from them" section internally — intellectual honesty builds better products +1. **Executive Summary** — Threat level (LOW/MEDIUM/HIGH/CRITICAL), top strength, top opportunity, recommended action +2. **Market Position** — 2x2 positioning map +3. **Feature Scorecard** — 12-dimension radar or table, total scores +4. **Pricing Analysis** — Comparison table + key insight +5. **UX Highlights** — What they do better (3 bullets) vs. where we win (3 bullets) +6. **Voice of Customer** — Top 3 review complaints (quoted or paraphrased) +7. 
**Our Action Plan** — Quick wins, medium-term, strategic priorities; Appendix with raw data diff --git a/docs/skills/product-team/landing-page-generator.md b/docs/skills/product-team/landing-page-generator.md index d7b3348..13ff672 100644 --- a/docs/skills/product-team/landing-page-generator.md +++ b/docs/skills/product-team/landing-page-generator.md @@ -10,13 +10,7 @@ description: "Landing Page Generator - Claude Code skill from the Product domain --- -**Tier:** POWERFUL -**Category:** Product Team -**Domain:** Marketing / Conversion Rate Optimization - ---- - -## Overview +# Landing Page Generator Generate high-converting landing pages from a product description. Output complete Next.js/React components with multiple section variants, proven copy frameworks, SEO optimization, and performance-first patterns. Not lorem ipsum — actual copy that converts. @@ -38,12 +32,16 @@ Generate high-converting landing pages from a product description. Output comple --- -## When to Use +## Generation Workflow -- Launching a new product or feature -- Creating a dedicated campaign or promo page -- A/B testing landing page variants -- Replacing a static page with a conversion-optimized one +Follow these steps in order for every landing page request: + +1. **Gather inputs** — collect product name, tagline, audience, pain point, key benefit, pricing tiers, design style, and copy framework using the trigger format below. Ask only for missing fields. +2. **Select design style** — map the user's choice (or infer from context) to one of the four Tailwind class sets in the Design Style Reference. +3. **Apply copy framework** — write all headline and body copy using the chosen framework (PAS / AIDA / BAB) before generating components. +4. **Generate sections in order** — Hero → Features → Pricing → FAQ → Testimonials → CTA → Footer. Skip sections not relevant to the product. +5. **Validate against SEO checklist** — run through every item in the SEO Checklist before outputting final code. 
Fix any gaps inline. +6. **Output final components** — deliver complete, copy-paste-ready TSX files with all Tailwind classes, SEO meta, and structured data included. --- @@ -64,77 +62,37 @@ Copy framework: PAS | AIDA | BAB ## Design Style Reference -### Dark SaaS -```css -/* Tailwind classes */ -bg-gray-950 text-white -accent: violet-500, violet-400 -cards: bg-gray-900 border border-gray-800 -CTA button: bg-violet-600 hover:bg-violet-500 -``` +| Style | Background | Accent | Cards | CTA Button | +|---|---|---|---|---| +| **Dark SaaS** | `bg-gray-950 text-white` | `violet-500/400` | `bg-gray-900 border border-gray-800` | `bg-violet-600 hover:bg-violet-500` | +| **Clean Minimal** | `bg-white text-gray-900` | `blue-600` | `bg-gray-50 border border-gray-200 rounded-2xl` | `bg-blue-600 hover:bg-blue-700` | +| **Bold Startup** | `bg-white text-gray-900` | `orange-500` | `shadow-xl rounded-3xl` | `bg-orange-500 hover:bg-orange-600 text-white` | +| **Enterprise** | `bg-slate-50 text-slate-900` | `slate-700` | `bg-white border border-slate-200 shadow-sm` | `bg-slate-900 hover:bg-slate-800 text-white` | -### Clean Minimal -```css -bg-white text-gray-900 -accent: blue-600 -cards: bg-gray-50 border border-gray-200 rounded-2xl -CTA button: bg-blue-600 hover:bg-blue-700 -``` - -### Bold Startup -```css -bg-white text-gray-900 -accent: orange-500 -headings: font-black tracking-tight -cards: shadow-xl rounded-3xl -CTA button: bg-orange-500 hover:bg-orange-600 text-white -``` - -### Enterprise -```css -bg-slate-50 text-slate-900 -accent: slate-700 -cards: bg-white border border-slate-200 shadow-sm -CTA button: bg-slate-900 hover:bg-slate-800 text-white -``` +> **Bold Startup** headings: add `font-black tracking-tight` to all `
<h1>`/`<h2>
` elements. --- ## Copy Frameworks -### PAS (Problem → Agitate → Solution) -``` -HERO HEADLINE: [Painful state they're in] -SUBHEAD: [Agitate: what happens if they don't fix it] -CTA: [Solution: what you offer] +**PAS (Problem → Agitate → Solution)** +- H1: Painful state they're in +- Sub: What happens if they don't fix it +- CTA: What you offer +- *Example — H1:* "Your team wastes 3 hours a day on manual reporting" / *Sub:* "Every hour spent on spreadsheets is an hour not closing deals. Your competitors are already automated." / *CTA:* "Automate your reports in 10 minutes →" -Example: -H1: "Your team wastes 3 hours a day on manual reporting" -Sub: "Every hour spent on spreadsheets is an hour not closing deals. - Your competitors are already automated." -CTA: "Automate your reports in 10 minutes →" -``` +**AIDA (Attention → Interest → Desire → Action)** +- H1: Bold attention-grabbing statement → Sub: Interesting fact or benefit → Features: Desire-building proof points → CTA: Clear action -### AIDA (Attention → Interest → Desire → Action) -``` -H1: [Bold attention-grabbing statement] -Sub: [Interesting fact or benefit] -Features: [Desire-building proof points] -CTA: [Clear action] -``` - -### BAB (Before → After → Bridge) -``` -H1: "[Before state] → [After state]" -Sub: "Here's how [product] bridges the gap" -Features: [Bridge: how it works] -``` +**BAB (Before → After → Bridge)** +- H1: "[Before state] → [After state]" → Sub: "Here's how [product] bridges the gap" → Features: How it works (the bridge) --- -## Hero Variants +## Representative Component: Hero (Centered Gradient — Dark SaaS) + +Use this as the structural template for all hero variants. Swap layout classes, gradient direction, and image placement for split, video-bg, and minimal variants. 
-### Variant 1: Centered Gradient (Dark SaaS) ```tsx export function HeroCentered() { return ( @@ -171,193 +129,27 @@ export function HeroCentered() { } ``` -### Variant 2: Split (Image + Copy) -```tsx -export function HeroSplit() { - return ( -
-
-

- Stop losing customers to slow support -

-

- Respond to every ticket in under 2 minutes with AI-powered triage, - smart routing, and one-click replies. -

-
- - -
-
- ✓ 14-day trial - ✓ No credit card - ✓ Cancel anytime -
-
-
- Product screenshot -
-
- ) -} -``` - --- -## Feature Section: Alternating +## Other Section Patterns -```tsx -const features = [ - { - title: "Real-time error tracking", - description: "Catch exceptions the moment they happen. Stack traces, user context, and breadcrumbs — all in one place.", - image: "/features/errors.png", - badge: "Core", - }, - { - title: "One-click rollback", - description: "Bad deploy? Roll back to any previous version in under 30 seconds without touching your terminal.", - image: "/features/rollback.png", - badge: "New", - }, -] +### Feature Section (Alternating) -export function FeaturesAlternating() { - return ( -
-
- {features.map((feature, i) => ( -
-
- - {feature.badge} - -

{feature.title}

-

{feature.description}

-
-
- {feature.title} -
-
- ))} -
-
- ) -} -``` +Map over a `features` array with `{ title, description, image, badge }`. Toggle layout direction with `i % 2 === 1 ? "lg:flex-row-reverse" : ""`. Use `` with explicit `width`/`height` and `rounded-2xl shadow-xl`. Wrap in `
` with `max-w-6xl` container. ---- +### Pricing Table -## Pricing Section +Map over a `plans` array with `{ name, price, description, features[], cta, highlighted }`. Highlighted plan gets `border-2 border-violet-500 bg-violet-950/50 ring-4 ring-violet-500/20`; others get `border border-gray-800 bg-gray-900`. Render `null` price as "Custom". Use `` icon per feature row. Layout: `grid gap-8 lg:grid-cols-3`. -```tsx -const plans = [ - { - name: "Starter", - price: 0, - description: "For solo developers", - features: ["5 projects", "10k events/month", "7-day retention", "Email support"], - cta: "Get started free", - highlighted: false, - }, - { - name: "Pro", - price: 49, - description: "For growing teams", - features: ["Unlimited projects", "1M events/month", "90-day retention", "Priority support", "Custom alerts", "SSO"], - cta: "Start free trial", - highlighted: true, - }, - { - name: "Enterprise", - price: null, - description: "For large organizations", - features: ["Everything in Pro", "Unlimited events", "SLA guarantee", "Dedicated support", "Custom contracts", "SAML/SCIM"], - cta: "Contact sales", - highlighted: false, - }, -] +### FAQ with Schema Markup -export function Pricing() { - return ( -
-
-

Simple, predictable pricing

-

Start free. Scale as you grow.

-
- {plans.map((plan) => ( -
- {plan.highlighted && ( -
Most popular
- )} -

{plan.name}

-

{plan.description}

-
- {plan.price !== null ? ( - ${plan.price}/mo - ) : ( - Custom - )} -
-
    - {plan.features.map((f) => ( -
  • - - {f} -
  • - ))} -
- -
- ))} -
-
-
- ) -} -``` +Inject `FAQPage` JSON-LD via `