Merge pull request #496 from alirezarezvani/dev

Release: dev → main — 28 new skills, 7 community PRs, full sync
This commit is contained in:
Alireza Rezvani
2026-04-07 12:16:36 +02:00
committed by GitHub
21 changed files with 1602 additions and 53 deletions

View File

@@ -4,11 +4,11 @@
"name": "Alireza Rezvani", "name": "Alireza Rezvani",
"url": "https://alirezarezvani.com" "url": "https://alirezarezvani.com"
}, },
"description": "248 production-ready skill packages for Claude AI across 9 domains: marketing (45), engineering (42+37), C-level advisory (34), regulatory/QMS (14), product (15), project management (9), business growth (5), and finance (4). Includes 332 Python tools, 460 reference documents, 23 agents, and 22 slash commands.", "description": "233 production-ready skill packages for Claude AI across 9 domains: marketing (44), engineering (43+37), C-level advisory (34), regulatory/QMS (14), product (15), project management (9), business growth (5), and finance (4). Includes 305 Python tools, 424 reference documents, 25 agents, and 22 slash commands.",
"homepage": "https://github.com/alirezarezvani/claude-skills", "homepage": "https://github.com/alirezarezvani/claude-skills",
"repository": "https://github.com/alirezarezvani/claude-skills", "repository": "https://github.com/alirezarezvani/claude-skills",
"metadata": { "metadata": {
"description": "248 production-ready skill packages across 9 domains with 332 Python tools, 460 reference documents, 23 agents, and 22 slash commands. Compatible with Claude Code, Codex CLI, Gemini CLI, and OpenClaw.", "description": "233 production-ready skill packages across 9 domains with 305 Python tools, 424 reference documents, 25 agents, and 22 slash commands. Compatible with Claude Code, Codex CLI, Gemini CLI, and OpenClaw.",
"version": "2.2.0" "version": "2.2.0"
}, },
"plugins": [ "plugins": [
@@ -59,7 +59,7 @@
{ {
"name": "engineering-advanced-skills", "name": "engineering-advanced-skills",
"source": "./engineering", "source": "./engineering",
"description": "42 advanced engineering skills: agent designer, agent workflow designer, AgentHub, RAG architect, database designer, focused-fix, browser-automation, spec-driven-workflow, secrets-vault-manager, sql-database-assistant, migration architect, observability designer, dependency auditor, release manager, API reviewer, CI/CD pipeline builder, MCP server builder, skill security auditor, performance profiler, Helm chart builder, Terraform patterns, self-eval, llm-cost-optimizer, prompt-governance, behuman, code-tour, demo-video, data-quality-auditor, and more.", "description": "43 advanced engineering skills: agent designer, agent workflow designer, AgentHub, RAG architect, database designer, focused-fix, browser-automation, spec-driven-workflow, secrets-vault-manager, sql-database-assistant, migration architect, observability designer, dependency auditor, release manager, API reviewer, CI/CD pipeline builder, MCP server builder, skill security auditor, performance profiler, Helm chart builder, Terraform patterns, self-eval, llm-cost-optimizer, prompt-governance, behuman, code-tour, demo-video, data-quality-auditor, statistical-analyst, and more.",
"version": "2.2.0", "version": "2.2.0",
"author": { "author": {
"name": "Alireza Rezvani" "name": "Alireza Rezvani"
@@ -598,6 +598,23 @@
"data-audit" "data-audit"
], ],
"category": "development" "category": "development"
},
{
"name": "statistical-analyst",
"source": "./engineering/statistical-analyst",
"description": "Hypothesis testing, A/B experiment analysis, sample size calculation, and confidence intervals. 3 stdlib-only Python tools: Z-test/t-test/chi-square with effect sizes, sample size calculator with power tradeoffs, and Wilson score confidence intervals.",
"version": "2.2.0",
"author": {
"name": "Alireza Rezvani"
},
"keywords": [
"statistics",
"hypothesis-testing",
"ab-testing",
"sample-size",
"confidence-interval"
],
"category": "development"
} }
] ]
} }

View File

@@ -3,7 +3,7 @@
"name": "claude-code-skills", "name": "claude-code-skills",
"description": "Production-ready skill packages for AI agents - Marketing, Engineering, Product, C-Level, PM, and RA/QM", "description": "Production-ready skill packages for AI agents - Marketing, Engineering, Product, C-Level, PM, and RA/QM",
"repository": "https://github.com/alirezarezvani/claude-skills", "repository": "https://github.com/alirezarezvani/claude-skills",
"total_skills": 193, "total_skills": 194,
"skills": [ "skills": [
{ {
"name": "contract-and-proposal-writer", "name": "contract-and-proposal-writer",
@@ -1025,6 +1025,12 @@
"category": "product", "category": "product",
"description": "Generates complete, production-ready SaaS project boilerplate including authentication, database schemas, billing integration, API routes, and a working dashboard using Next.js 14+ App Router, TypeScript, Tailwind CSS, shadcn/ui, Drizzle ORM, and Stripe. Use when the user wants to create a new SaaS app, start a subscription-based web project, scaffold a Next.js application, or mentions terms like starter template, boilerplate, new project, or wiring up auth and payments." "description": "Generates complete, production-ready SaaS project boilerplate including authentication, database schemas, billing integration, API routes, and a working dashboard using Next.js 14+ App Router, TypeScript, Tailwind CSS, shadcn/ui, Drizzle ORM, and Stripe. Use when the user wants to create a new SaaS app, start a subscription-based web project, scaffold a Next.js application, or mentions terms like starter template, boilerplate, new project, or wiring up auth and payments."
}, },
{
"name": "spec-to-repo",
"source": "../../product-team/spec-to-repo",
"category": "product",
"description": "Use when the user says 'build me an app', 'create a project from this spec', 'scaffold a new repo', 'generate a starter', 'turn this idea into code', 'bootstrap a project', 'I have requirements and need a codebase', or provides a natural-language project specification and expects a complete, runnable repository. Stack-agnostic: Next.js, FastAPI, Rails, Go, Rust, Flutter, and more."
},
{ {
"name": "ui-design-system", "name": "ui-design-system",
"source": "../../product-team/ui-design-system", "source": "../../product-team/ui-design-system",
@@ -1196,7 +1202,7 @@
"description": "Marketing, content, and demand generation skills" "description": "Marketing, content, and demand generation skills"
}, },
"product": { "product": {
"count": 14, "count": 15,
"source": "../../product-team", "source": "../../product-team",
"description": "Product management and design skills" "description": "Product management and design skills"
}, },

1
.codex/skills/spec-to-repo Symbolic link
View File

@@ -0,0 +1 @@
../../product-team/spec-to-repo

View File

@@ -1,7 +1,7 @@
{ {
"version": "1.0.0", "version": "1.0.0",
"name": "gemini-cli-skills", "name": "gemini-cli-skills",
"total_skills": 280, "total_skills": 282,
"skills": [ "skills": [
{ {
"name": "README", "name": "README",
@@ -958,6 +958,11 @@
"category": "engineering-advanced", "category": "engineering-advanced",
"description": "Use when the user asks to write SQL queries, optimize database performance, generate migrations, explore database schemas, or work with ORMs like Prisma, Drizzle, TypeORM, or SQLAlchemy." "description": "Use when the user asks to write SQL queries, optimize database performance, generate migrations, explore database schemas, or work with ORMs like Prisma, Drizzle, TypeORM, or SQLAlchemy."
}, },
{
"name": "statistical-analyst",
"category": "engineering-advanced",
"description": "Run hypothesis tests, analyze A/B experiment results, calculate sample sizes, and interpret statistical significance with effect sizes. Use when you need to validate whether observed differences are real, size an experiment correctly before launch, or interpret test results with confidence."
},
{ {
"name": "tech-debt-tracker", "name": "tech-debt-tracker",
"category": "engineering-advanced", "category": "engineering-advanced",
@@ -1278,6 +1283,11 @@
"category": "product", "category": "product",
"description": "Generates complete, production-ready SaaS project boilerplate including authentication, database schemas, billing integration, API routes, and a working dashboard using Next.js 14+ App Router, TypeScript, Tailwind CSS, shadcn/ui, Drizzle ORM, and Stripe. Use when the user wants to create a new SaaS app, start a subscription-based web project, scaffold a Next.js application, or mentions terms like starter template, boilerplate, new project, or wiring up auth and payments." "description": "Generates complete, production-ready SaaS project boilerplate including authentication, database schemas, billing integration, API routes, and a working dashboard using Next.js 14+ App Router, TypeScript, Tailwind CSS, shadcn/ui, Drizzle ORM, and Stripe. Use when the user wants to create a new SaaS app, start a subscription-based web project, scaffold a Next.js application, or mentions terms like starter template, boilerplate, new project, or wiring up auth and payments."
}, },
{
"name": "spec-to-repo",
"category": "product",
"description": "Use when the user says 'build me an app', 'create a project from this spec', 'scaffold a new repo', 'generate a starter', 'turn this idea into code', 'bootstrap a project', 'I have requirements and need a codebase', or provides a natural-language project specification and expects a complete, runnable repository. Stack-agnostic: Next.js, FastAPI, Rails, Go, Rust, Flutter, and more."
},
{ {
"name": "ui-design-system", "name": "ui-design-system",
"category": "product", "category": "product",
@@ -1426,7 +1436,7 @@
"description": "Engineering resources" "description": "Engineering resources"
}, },
"engineering-advanced": { "engineering-advanced": {
"count": 56, "count": 57,
"description": "Engineering-advanced resources" "description": "Engineering-advanced resources"
}, },
"finance": { "finance": {
@@ -1438,7 +1448,7 @@
"description": "Marketing resources" "description": "Marketing resources"
}, },
"product": { "product": {
"count": 15, "count": 16,
"description": "Product resources" "description": "Product resources"
}, },
"project-management": { "project-management": {

View File

@@ -0,0 +1 @@
../../../product-team/spec-to-repo/SKILL.md

View File

@@ -0,0 +1 @@
../../../engineering/statistical-analyst/SKILL.md

View File

@@ -6,7 +6,7 @@ This file provides guidance to Claude Code (claude.ai/code) when working with co
This is a **comprehensive skills library** for Claude AI and Claude Code - reusable, production-ready skill packages that bundle domain expertise, best practices, analysis tools, and strategic frameworks. The repository provides modular skills that teams can download and use directly in their workflows. This is a **comprehensive skills library** for Claude AI and Claude Code - reusable, production-ready skill packages that bundle domain expertise, best practices, analysis tools, and strategic frameworks. The repository provides modular skills that teams can download and use directly in their workflows.
**Current Scope:** 248 production-ready skills across 9 domains with 332 Python automation tools, 460 reference guides, 23 agents, and 22 slash commands. **Current Scope:** 233 production-ready skills across 9 domains with 305 Python automation tools, 424 reference guides, 25 agents, and 22 slash commands.
**Key Distinction**: This is NOT a traditional application. It's a library of skill packages meant to be extracted and deployed by users into their own Claude workflows. **Key Distinction**: This is NOT a traditional application. It's a library of skill packages meant to be extracted and deployed by users into their own Claude workflows.
@@ -36,12 +36,12 @@ This repository uses **modular documentation**. For domain-specific guidance, se
``` ```
claude-code-skills/ claude-code-skills/
├── .claude-plugin/ # Plugin registry (marketplace.json) ├── .claude-plugin/ # Plugin registry (marketplace.json)
├── agents/ # 23 agents across all domains ├── agents/ # 25 agents across all domains
├── commands/ # 22 slash commands (changelog, tdd, saas-health, prd, code-to-prd, plugin-audit, sprint-plan, etc.) ├── commands/ # 22 slash commands (changelog, tdd, saas-health, prd, code-to-prd, plugin-audit, sprint-plan, etc.)
├── engineering-team/ # 37 core engineering skills + Playwright Pro + Self-Improving Agent + Security Suite ├── engineering-team/ # 37 core engineering skills + Playwright Pro + Self-Improving Agent + Security Suite
├── engineering/ # 42 POWERFUL-tier advanced skills (incl. AgentHub, self-eval) ├── engineering/ # 43 POWERFUL-tier advanced skills (incl. AgentHub, self-eval)
├── product-team/ # 15 product skills + Python tools ├── product-team/ # 15 product skills + Python tools
├── marketing-skill/ # 45 marketing skills (7 pods) + Python tools ├── marketing-skill/ # 44 marketing skills (7 pods) + Python tools
├── c-level-advisor/ # 34 C-level advisory skills (10 roles + orchestration) ├── c-level-advisor/ # 34 C-level advisory skills (10 roles + orchestration)
├── project-management/ # 9 PM skills + Atlassian MCP ├── project-management/ # 9 PM skills + Atlassian MCP
├── ra-qm-team/ # 14 RA/QM compliance skills ├── ra-qm-team/ # 14 RA/QM compliance skills
@@ -130,7 +130,7 @@ See [standards/git/git-workflow-standards.md](standards/git/git-workflow-standar
- **Security skills suite** — 6 new engineering-team skills: adversarial-reviewer, ai-security, cloud-security, incident-response, red-team, threat-detection (5 Python tools, 4 reference guides) - **Security skills suite** — 6 new engineering-team skills: adversarial-reviewer, ai-security, cloud-security, incident-response, red-team, threat-detection (5 Python tools, 4 reference guides)
- **Self-eval skill** — Honest AI work quality evaluation with two-axis scoring, score inflation detection, and session persistence - **Self-eval skill** — Honest AI work quality evaluation with two-axis scoring, score inflation detection, and session persistence
- **Snowflake development** — Data warehouse development, SQL optimization, and data pipeline patterns - **Snowflake development** — Data warehouse development, SQL optimization, and data pipeline patterns
- 248 total skills across 9 domains, 332 Python tools, 460 references, 23 agents, 22 commands - 233 total skills across 9 domains, 305 Python tools, 424 references, 25 agents, 22 commands
- MkDocs docs site expanded to 269 generated pages (301 HTML pages) - MkDocs docs site expanded to 269 generated pages (301 HTML pages)
**v2.1.2 (2026-03-10):** **v2.1.2 (2026-03-10):**
@@ -153,9 +153,9 @@ See [standards/git/git-workflow-standards.md](standards/git/git-workflow-standar
## Roadmap ## Roadmap
**Phase 1-3 Complete:** 248 production-ready skills deployed across 9 domains **Phase 1-3 Complete:** 233 production-ready skills deployed across 9 domains
- Engineering Core (37), Engineering POWERFUL (42), Product (15), Marketing (45), PM (9), C-Level (34), RA/QM (14), Business & Growth (5), Finance (4) - Engineering Core (37), Engineering POWERFUL (43), Product (15), Marketing (44), PM (9), C-Level (34), RA/QM (14), Business & Growth (5), Finance (4)
- 332 Python automation tools, 460 reference guides, 23 agents, 22 commands - 305 Python automation tools, 424 reference guides, 25 agents, 22 commands
- Complete enterprise coverage from engineering through regulatory compliance, sales, customer success, and finance - Complete enterprise coverage from engineering through regulatory compliance, sales, customer success, and finance
- MkDocs Material docs site with 269+ indexed pages for SEO - MkDocs Material docs site with 269+ indexed pages for SEO
@@ -208,4 +208,4 @@ This repository publishes skills to **ClawHub** (clawhub.com) as the distributio
**Last Updated:** March 31, 2026 **Last Updated:** March 31, 2026
**Version:** v2.2.0 **Version:** v2.2.0
**Status:** 248 skills deployed across 9 domains, 28 marketplace plugins, docs site live **Status:** 233 skills deployed across 9 domains, 28 marketplace plugins, docs site live

View File

@@ -1,14 +1,14 @@
# Claude Code Skills & Plugins — Agent Skills for Every Coding Tool # Claude Code Skills & Plugins — Agent Skills for Every Coding Tool
**248 production-ready Claude Code skills, plugins, and agent skills for 11 AI coding tools.** **233 production-ready Claude Code skills, plugins, and agent skills for 11 AI coding tools.**
The most comprehensive open-source library of Claude Code skills and agent plugins — also works with OpenAI Codex, Gemini CLI, Cursor, and 7 more coding agents. Reusable expertise packages covering engineering, DevOps, marketing, compliance, C-level advisory, and more. The most comprehensive open-source library of Claude Code skills and agent plugins — also works with OpenAI Codex, Gemini CLI, Cursor, and 7 more coding agents. Reusable expertise packages covering engineering, DevOps, marketing, compliance, C-level advisory, and more.
**Works with:** Claude Code · OpenAI Codex · Gemini CLI · OpenClaw · Cursor · Aider · Windsurf · Kilo Code · OpenCode · Augment · Antigravity **Works with:** Claude Code · OpenAI Codex · Gemini CLI · OpenClaw · Cursor · Aider · Windsurf · Kilo Code · OpenCode · Augment · Antigravity
[![License: MIT](https://img.shields.io/badge/License-MIT-yellow?style=for-the-badge)](https://opensource.org/licenses/MIT) [![License: MIT](https://img.shields.io/badge/License-MIT-yellow?style=for-the-badge)](https://opensource.org/licenses/MIT)
[![Skills](https://img.shields.io/badge/Skills-248-brightgreen?style=for-the-badge)](#skills-overview) [![Skills](https://img.shields.io/badge/Skills-233-brightgreen?style=for-the-badge)](#skills-overview)
[![Agents](https://img.shields.io/badge/Agents-23-blue?style=for-the-badge)](#agents) [![Agents](https://img.shields.io/badge/Agents-25-blue?style=for-the-badge)](#agents)
[![Personas](https://img.shields.io/badge/Personas-3-purple?style=for-the-badge)](#personas) [![Personas](https://img.shields.io/badge/Personas-3-purple?style=for-the-badge)](#personas)
[![Commands](https://img.shields.io/badge/Commands-22-orange?style=for-the-badge)](#commands) [![Commands](https://img.shields.io/badge/Commands-22-orange?style=for-the-badge)](#commands)
[![Stars](https://img.shields.io/github/stars/alirezarezvani/claude-skills?style=for-the-badge)](https://github.com/alirezarezvani/claude-skills/stargazers) [![Stars](https://img.shields.io/github/stars/alirezarezvani/claude-skills?style=for-the-badge)](https://github.com/alirezarezvani/claude-skills/stargazers)
@@ -23,10 +23,10 @@ The most comprehensive open-source library of Claude Code skills and agent plugi
Claude Code skills (also called agent skills or coding agent plugins) are modular instruction packages that give AI coding agents domain expertise they don't have out of the box. Each skill includes: Claude Code skills (also called agent skills or coding agent plugins) are modular instruction packages that give AI coding agents domain expertise they don't have out of the box. Each skill includes:
- **SKILL.md** — structured instructions, workflows, and decision frameworks - **SKILL.md** — structured instructions, workflows, and decision frameworks
- **Python tools** — 332 CLI scripts (all stdlib-only, zero pip installs) - **Python tools** — 305 CLI scripts (all stdlib-only, zero pip installs)
- **Reference docs** — templates, checklists, and domain-specific knowledge - **Reference docs** — templates, checklists, and domain-specific knowledge
**One repo, eleven platforms.** Works natively as Claude Code plugins, Codex agent skills, Gemini CLI skills, and converts to 8 more tools via `scripts/convert.sh`. All 332 Python tools run anywhere Python runs. **One repo, eleven platforms.** Works natively as Claude Code plugins, Codex agent skills, Gemini CLI skills, and converts to 8 more tools via `scripts/convert.sh`. All 305 Python tools run anywhere Python runs.
### Skills vs Agents vs Personas ### Skills vs Agents vs Personas
@@ -145,16 +145,16 @@ Run `./scripts/convert.sh --tool all` to generate tool-specific outputs locally.
## Skills Overview ## Skills Overview
**248 skills across 9 domains:** **233 skills across 9 domains:**
| Domain | Skills | Highlights | Details | | Domain | Skills | Highlights | Details |
|--------|--------|------------|---------| |--------|--------|------------|---------|
| **🔧 Engineering — Core** | 37 | Architecture, frontend, backend, fullstack, QA, DevOps, SecOps, AI/ML, data, Playwright, self-improving agent, security suite (6), a11y audit | [engineering-team/](engineering-team/) | | **🔧 Engineering — Core** | 37 | Architecture, frontend, backend, fullstack, QA, DevOps, SecOps, AI/ML, data, Playwright, self-improving agent, security suite (6), a11y audit | [engineering-team/](engineering-team/) |
| **🎭 Playwright Pro** | 9+3 | Test generation, flaky fix, Cypress/Selenium migration, TestRail, BrowserStack, 55 templates | [engineering-team/playwright-pro](engineering-team/playwright-pro/) | | **🎭 Playwright Pro** | 9+3 | Test generation, flaky fix, Cypress/Selenium migration, TestRail, BrowserStack, 55 templates | [engineering-team/playwright-pro](engineering-team/playwright-pro/) |
| **🧠 Self-Improving Agent** | 5+2 | Auto-memory curation, pattern promotion, skill extraction, memory health | [engineering-team/self-improving-agent](engineering-team/self-improving-agent/) | | **🧠 Self-Improving Agent** | 5+2 | Auto-memory curation, pattern promotion, skill extraction, memory health | [engineering-team/self-improving-agent](engineering-team/self-improving-agent/) |
| **⚡ Engineering — POWERFUL** | 42 | Agent designer, RAG architect, database designer, CI/CD builder, security auditor, MCP builder, AgentHub, Helm charts, Terraform, self-eval | [engineering/](engineering/) | | **⚡ Engineering — POWERFUL** | 43 | Agent designer, RAG architect, database designer, CI/CD builder, security auditor, MCP builder, AgentHub, Helm charts, Terraform, self-eval | [engineering/](engineering/) |
| **🎯 Product** | 15 | Product manager, agile PO, strategist, UX researcher, UI design, landing pages, SaaS scaffolder, analytics, experiment designer, discovery, roadmap communicator, code-to-prd | [product-team/](product-team/) | | **🎯 Product** | 15 | Product manager, agile PO, strategist, UX researcher, UI design, landing pages, SaaS scaffolder, analytics, experiment designer, discovery, roadmap communicator, code-to-prd | [product-team/](product-team/) |
| **📣 Marketing** | 45 | 7 pods: Content (8), SEO (5), CRO (6), Channels (6), Growth (4), Intelligence (4), Sales (2) + context foundation + orchestration router. 32 Python tools. | [marketing-skill/](marketing-skill/) | | **📣 Marketing** | 44 | 7 pods: Content (8), SEO (5), CRO (6), Channels (6), Growth (4), Intelligence (4), Sales (2) + context foundation + orchestration router. 32 Python tools. | [marketing-skill/](marketing-skill/) |
| **📋 Project Management** | 9 | Senior PM, scrum master, Jira, Confluence, Atlassian admin, templates | [project-management/](project-management/) | | **📋 Project Management** | 9 | Senior PM, scrum master, Jira, Confluence, Atlassian admin, templates | [project-management/](project-management/) |
| **🏥 Regulatory & QM** | 14 | ISO 13485, MDR 2017/745, FDA, ISO 27001, GDPR, CAPA, risk management | [ra-qm-team/](ra-qm-team/) | | **🏥 Regulatory & QM** | 14 | ISO 13485, MDR 2017/745, FDA, ISO 27001, GDPR, CAPA, risk management | [ra-qm-team/](ra-qm-team/) |
| **💼 C-Level Advisory** | 34 | Full C-suite (10 roles) + orchestration + board meetings + culture & collaboration | [c-level-advisor/](c-level-advisor/) | | **💼 C-Level Advisory** | 34 | Full C-suite (10 roles) + orchestration + board meetings + culture & collaboration | [c-level-advisor/](c-level-advisor/) |
@@ -296,7 +296,7 @@ for MDR Annex II compliance gaps.
## Python Analysis Tools ## Python Analysis Tools
332 CLI tools ship with the skills (all verified, stdlib-only): 305 CLI tools ship with the skills (all verified, stdlib-only):
```bash ```bash
# SaaS health check # SaaS health check
@@ -342,7 +342,7 @@ Yes. Skills work natively with 11 tools: Claude Code, OpenAI Codex, Gemini CLI,
No. We follow semantic versioning and maintain backward compatibility within patch releases. Existing script arguments, plugin source paths, and SKILL.md structures are never changed in patch versions. See the [CHANGELOG](CHANGELOG.md) for details on each release. No. We follow semantic versioning and maintain backward compatibility within patch releases. Existing script arguments, plugin source paths, and SKILL.md structures are never changed in patch versions. See the [CHANGELOG](CHANGELOG.md) for details on each release.
**Are the Python tools dependency-free?** **Are the Python tools dependency-free?**
Yes. All 332 Python CLI tools use the standard library only — zero pip installs required. Every script is verified to run with `--help`. Yes. All 305 Python CLI tools use the standard library only — zero pip installs required. Every script is verified to run with `--help`.
**How do I create my own Claude Code skill?** **How do I create my own Claude Code skill?**
Each skill is a folder with a `SKILL.md` (frontmatter + instructions), optional `scripts/`, `references/`, and `assets/`. See the [Skills & Agents Factory](https://github.com/alirezarezvani/claude-code-skills-agents-factory) for a step-by-step guide. Each skill is a folder with a `SKILL.md` (frontmatter + instructions), optional `scripts/`, `references/`, and `assets/`. See the [Skills & Agents Factory](https://github.com/alirezarezvani/claude-code-skills-agents-factory) for a step-by-step guide.

View File

@@ -1,6 +1,6 @@
--- ---
title: Install Agent Skills — Codex, Gemini CLI, OpenClaw Setup title: Install Agent Skills — Codex, Gemini CLI, OpenClaw Setup
description: "How to install 248 Claude Code skills and agent plugins for 11 AI coding tools. Step-by-step setup for Claude Code, OpenAI Codex, Gemini CLI, OpenClaw, Cursor, Aider, Windsurf, and more." description: "How to install 233 Claude Code skills and agent plugins for 11 AI coding tools. Step-by-step setup for Claude Code, OpenAI Codex, Gemini CLI, OpenClaw, Cursor, Aider, Windsurf, and more."
--- ---
# Getting Started # Getting Started
@@ -141,9 +141,9 @@ Choose your platform and follow the steps:
| Bundle | Install Command | Skills | | Bundle | Install Command | Skills |
|--------|----------------|--------| |--------|----------------|--------|
| **Engineering Core** | `/plugin install engineering-skills@claude-code-skills` | 37 | | **Engineering Core** | `/plugin install engineering-skills@claude-code-skills` | 37 |
| **Engineering POWERFUL** | `/plugin install engineering-advanced-skills@claude-code-skills` | 42 | | **Engineering POWERFUL** | `/plugin install engineering-advanced-skills@claude-code-skills` | 43 |
| **Product** | `/plugin install product-skills@claude-code-skills` | 15 | | **Product** | `/plugin install product-skills@claude-code-skills` | 15 |
| **Marketing** | `/plugin install marketing-skills@claude-code-skills` | 45 | | **Marketing** | `/plugin install marketing-skills@claude-code-skills` | 44 |
| **Regulatory & Quality** | `/plugin install ra-qm-skills@claude-code-skills` | 14 | | **Regulatory & Quality** | `/plugin install ra-qm-skills@claude-code-skills` | 14 |
| **Project Management** | `/plugin install pm-skills@claude-code-skills` | 9 | | **Project Management** | `/plugin install pm-skills@claude-code-skills` | 9 |
| **C-Level Advisory** | `/plugin install c-level-skills@claude-code-skills` | 34 | | **C-Level Advisory** | `/plugin install c-level-skills@claude-code-skills` | 34 |
@@ -182,7 +182,7 @@ AI-augmented development. Optimize for SEO.
## Python Tools ## Python Tools
All 332 tools use the standard library only — zero pip installs, all verified. All 305 tools use the standard library only — zero pip installs, all verified.
```bash ```bash
# Security audit a skill before installing # Security audit a skill before installing
@@ -254,7 +254,7 @@ See the [Skills & Agents Factory](https://github.com/alirezarezvani/claude-code-
Yes. Run `./scripts/gemini-install.sh` to set up skills for Gemini CLI. A sync script (`scripts/sync-gemini-skills.py`) generates the skills index automatically. Yes. Run `./scripts/gemini-install.sh` to set up skills for Gemini CLI. A sync script (`scripts/sync-gemini-skills.py`) generates the skills index automatically.
??? question "Does this work with Cursor, Windsurf, Aider, or other tools?" ??? question "Does this work with Cursor, Windsurf, Aider, or other tools?"
Yes. All 248 skills can be converted to native formats for Cursor, Aider, Kilo Code, Windsurf, OpenCode, Augment, and Antigravity. Run `./scripts/convert.sh --tool all` and then install with `./scripts/install.sh --tool <name>`. See [Multi-Tool Integrations](integrations.md) for details. Yes. All 233 skills can be converted to native formats for Cursor, Aider, Kilo Code, Windsurf, OpenCode, Augment, and Antigravity. Run `./scripts/convert.sh --tool all` and then install with `./scripts/install.sh --tool <name>`. See [Multi-Tool Integrations](integrations.md) for details.
??? question "Can I use Agent Skills in ChatGPT?" ??? question "Can I use Agent Skills in ChatGPT?"
Yes. We have [6 Custom GPTs](custom-gpts.md) that bring Agent Skills directly into ChatGPT — no installation needed. Just click and start chatting. Yes. We have [6 Custom GPTs](custom-gpts.md) that bring Agent Skills directly into ChatGPT — no installation needed. Just click and start chatting.

View File

@@ -1,6 +1,6 @@
--- ---
title: 248 Agent Skills for Codex, Gemini CLI & OpenClaw title: 233 Agent Skills for Codex, Gemini CLI & OpenClaw
description: "248 production-ready Claude Code skills and agent plugins for 11 AI coding tools. Engineering, product, marketing, compliance, and finance agent skills for Claude Code, OpenAI Codex, Gemini CLI, Cursor, and OpenClaw." description: "233 production-ready Claude Code skills and agent plugins for 11 AI coding tools. Engineering, product, marketing, compliance, and finance agent skills for Claude Code, OpenAI Codex, Gemini CLI, Cursor, and OpenClaw."
hide: hide:
- toc - toc
- edit - edit
@@ -14,7 +14,7 @@ hide:
# Agent Skills # Agent Skills
248 production-ready skills, 23 agents, 3 personas, and an orchestration protocol for AI coding tools. 233 production-ready skills, 25 agents, 3 personas, and an orchestration protocol for AI coding tools.
{ .hero-subtitle } { .hero-subtitle }
[Get Started](getting-started.md){ .md-button .md-button--primary } [Get Started](getting-started.md){ .md-button .md-button--primary }
@@ -49,7 +49,7 @@ hide:
<div class="grid cards" markdown> <div class="grid cards" markdown>
- :material-toolbox:{ .lg .middle } **248 Skills** - :material-toolbox:{ .lg .middle } **233 Skills**
--- ---
@@ -57,7 +57,7 @@ hide:
[:octicons-arrow-right-24: Browse skills](skills/) [:octicons-arrow-right-24: Browse skills](skills/)
- :material-robot:{ .lg .middle } **23 Agents** - :material-robot:{ .lg .middle } **25 Agents**
--- ---
@@ -81,7 +81,7 @@ hide:
[:octicons-arrow-right-24: Learn patterns](orchestration.md) [:octicons-arrow-right-24: Learn patterns](orchestration.md)
- :material-language-python:{ .lg .middle } **332 Python Tools** - :material-language-python:{ .lg .middle } **305 Python Tools**
--- ---
@@ -143,7 +143,7 @@ hide:
Agent designer, RAG architect, database designer, CI/CD builder, MCP server builder, security auditor, tech debt tracker Agent designer, RAG architect, database designer, CI/CD builder, MCP server builder, security auditor, tech debt tracker
[:octicons-arrow-right-24: 42 skills](skills/engineering/) [:octicons-arrow-right-24: 43 skills](skills/engineering/)
- :material-bullseye-arrow:{ .lg .middle } **Product** - :material-bullseye-arrow:{ .lg .middle } **Product**
@@ -151,7 +151,7 @@ hide:
Product manager, agile PO, strategist, UX researcher, UI design system, landing pages, SaaS scaffolder, analytics, experiment designer Product manager, agile PO, strategist, UX researcher, UI design system, landing pages, SaaS scaffolder, analytics, experiment designer
[:octicons-arrow-right-24: 14 skills](skills/product-team/) [:octicons-arrow-right-24: 15 skills](skills/product-team/)
- :material-bullhorn:{ .lg .middle } **Marketing** - :material-bullhorn:{ .lg .middle } **Marketing**
@@ -159,7 +159,7 @@ hide:
Content, SEO, CRO, channels, growth, intelligence, sales — 7 specialist pods with 32 Python tools Content, SEO, CRO, channels, growth, intelligence, sales — 7 specialist pods with 32 Python tools
[:octicons-arrow-right-24: 45 skills](skills/marketing-skill/) [:octicons-arrow-right-24: 44 skills](skills/marketing-skill/)
- :material-clipboard-check:{ .lg .middle } **Project Management** - :material-clipboard-check:{ .lg .middle } **Project Management**
@@ -175,7 +175,7 @@ hide:
Full C-suite (10 roles), orchestration, board meetings, culture frameworks, strategic alignment Full C-suite (10 roles), orchestration, board meetings, culture frameworks, strategic alignment
[:octicons-arrow-right-24: 28 skills](skills/c-level-advisor/) [:octicons-arrow-right-24: 34 skills](skills/c-level-advisor/)
- :material-shield-check:{ .lg .middle } **Regulatory & Quality** - :material-shield-check:{ .lg .middle } **Regulatory & Quality**
@@ -191,7 +191,7 @@ hide:
Customer success, sales engineer, revenue operations, contracts & proposals Customer success, sales engineer, revenue operations, contracts & proposals
[:octicons-arrow-right-24: 4 skills](skills/business-growth/) [:octicons-arrow-right-24: 5 skills](skills/business-growth/)
- :material-currency-usd:{ .lg .middle } **Finance** - :material-currency-usd:{ .lg .middle } **Finance**

View File

@@ -1,13 +1,13 @@
--- ---
title: "Engineering - POWERFUL Skills — Agent Skills & Codex Plugins" title: "Engineering - POWERFUL Skills — Agent Skills & Codex Plugins"
description: "55 engineering - powerful skills — advanced agent-native skill and Claude Code plugin for AI agent design, infrastructure, and automation. Works with Claude Code, Codex CLI, Gemini CLI, and OpenClaw." description: "56 engineering - powerful skills — advanced agent-native skill and Claude Code plugin for AI agent design, infrastructure, and automation. Works with Claude Code, Codex CLI, Gemini CLI, and OpenClaw."
--- ---
<div class="domain-header" markdown> <div class="domain-header" markdown>
# :material-rocket-launch: Engineering - POWERFUL # :material-rocket-launch: Engineering - POWERFUL
<p class="domain-count">55 skills in this domain</p> <p class="domain-count">56 skills in this domain</p>
</div> </div>
@@ -263,6 +263,12 @@ description: "55 engineering - powerful skills — advanced agent-native skill a
The operational companion to database design. While database-designer focuses on schema architecture and database-sch... The operational companion to database design. While database-designer focuses on schema architecture and database-sch...
- **[Statistical Analyst](statistical-analyst.md)**
---
python3 scripts/hypothesis_tester.py --test ztest \
- **[Tech Debt Tracker](tech-debt-tracker.md)** - **[Tech Debt Tracker](tech-debt-tracker.md)**
--- ---

View File

@@ -0,0 +1,258 @@
---
title: "Statistical Analyst — Agent Skill for Codex & OpenClaw"
description: "Run hypothesis tests, analyze A/B experiment results, calculate sample sizes, and interpret statistical significance with effect sizes. Agent skill for Claude Code, Codex CLI, Gemini CLI, OpenClaw."
---
# Statistical Analyst
<div class="page-meta" markdown>
<span class="meta-badge">:material-rocket-launch: Engineering - POWERFUL</span>
<span class="meta-badge">:material-identifier: `statistical-analyst`</span>
<span class="meta-badge">:material-github: <a href="https://github.com/alirezarezvani/claude-skills/tree/main/engineering/statistical-analyst/SKILL.md">Source</a></span>
</div>
<div class="install-banner" markdown>
<span class="install-label">Install:</span> <code>claude /plugin install engineering-advanced-skills</code>
</div>
You are an expert statistician and data scientist. Your goal is to help teams make decisions grounded in statistical evidence — not gut feel. You distinguish signal from noise, size experiments correctly before they start, and interpret results with full context: significance, effect size, power, and practical impact.
You treat "statistically significant" and "practically significant" as separate questions and always answer both.
---
## Entry Points
### Mode 1 — Analyze Experiment Results (A/B Test)
Use when an experiment has already run and you have result data.
1. **Clarify** — Confirm metric type (conversion rate, mean, count), sample sizes, and observed values
2. **Choose test** — Proportions → Z-test; Continuous means → t-test; Categorical → Chi-square
3. **Run** — Execute `hypothesis_tester.py` with appropriate method
4. **Interpret** — Report p-value, confidence interval, effect size (Cohen's d / Cohen's h / Cramér's V)
5. **Decide** — Ship / hold / extend using the decision framework below
### Mode 2 — Size an Experiment (Pre-Launch)
Use before launching a test to ensure it will be conclusive.
1. **Define** — Baseline rate, minimum detectable effect (MDE), significance level (α), power (1−β)
2. **Calculate** — Run `sample_size_calculator.py` to get required N per variant
3. **Sanity-check** — Confirm traffic volume can deliver N within acceptable time window
4. **Document** — Lock the stopping rule before launch to prevent p-hacking
### Mode 3 — Interpret Existing Numbers
Use when someone shares a result and asks "is this significant?" or "what does this mean?"
1. Ask for: sample sizes, observed values, baseline, and what decision depends on the result
2. Run the appropriate test
3. Report using the Bottom Line → What → Why → How to Act structure
4. Flag any validity threats (peeking, multiple comparisons, SUTVA violations)
---
## Tools
### `scripts/hypothesis_tester.py`
Run Z-test (proportions), two-sample t-test (means), or Chi-square test (categorical). Returns p-value, confidence interval, effect size, and a plain-English verdict.
```bash
# Z-test for two proportions (A/B conversion rates)
python3 scripts/hypothesis_tester.py --test ztest \
--control-n 5000 --control-x 250 \
--treatment-n 5000 --treatment-x 310
# Two-sample t-test (comparing means, e.g. revenue per user)
python3 scripts/hypothesis_tester.py --test ttest \
--control-mean 42.3 --control-std 18.1 --control-n 800 \
--treatment-mean 46.1 --treatment-std 19.4 --treatment-n 820
# Chi-square test (multi-category outcomes)
python3 scripts/hypothesis_tester.py --test chi2 \
--observed "120,80,50" --expected "100,100,50"
# Output JSON for downstream use
python3 scripts/hypothesis_tester.py --test ztest \
--control-n 5000 --control-x 250 \
--treatment-n 5000 --treatment-x 310 \
--format json
```
### `scripts/sample_size_calculator.py`
Calculate required sample size per variant before launching an experiment.
```bash
# Proportion test (conversion rate experiment)
python3 scripts/sample_size_calculator.py --test proportion \
--baseline 0.05 --mde 0.20 --alpha 0.05 --power 0.80
# Mean test (continuous metric experiment)
python3 scripts/sample_size_calculator.py --test mean \
--baseline-mean 42.3 --baseline-std 18.1 --mde 0.10 \
--alpha 0.05 --power 0.80
# Show tradeoff table across power levels
python3 scripts/sample_size_calculator.py --test proportion \
--baseline 0.05 --mde 0.20 --table
# Output JSON
python3 scripts/sample_size_calculator.py --test proportion \
--baseline 0.05 --mde 0.20 --format json
```
### `scripts/confidence_interval.py`
Compute confidence intervals for a proportion or mean. Use for reporting observed metrics with uncertainty bounds.
```bash
# CI for a proportion
python3 scripts/confidence_interval.py --type proportion \
--n 1200 --x 96
# CI for a mean
python3 scripts/confidence_interval.py --type mean \
--n 800 --mean 42.3 --std 18.1
# Custom confidence level
python3 scripts/confidence_interval.py --type proportion \
--n 1200 --x 96 --confidence 0.99
# Output JSON
python3 scripts/confidence_interval.py --type proportion \
--n 1200 --x 96 --format json
```
---
## Test Selection Guide
| Scenario | Metric | Test |
|---|---|---|
| A/B conversion rate (clicked/not) | Proportion | Z-test for two proportions |
| A/B revenue, load time, session length | Continuous mean | Two-sample t-test (Welch's) |
| A/B/C/n multi-variant with categories | Categorical counts | Chi-square |
| Single sample vs. known value | Mean vs. constant | One-sample t-test |
| Non-normal data, small n | Rank-based | Use Mann-Whitney U (flag for human) |
**When NOT to use these tools:**
- n < 30 per group without checking normality
- Metrics with heavy tails (e.g. revenue with whales) — consider log transform or trimmed mean first
- Sequential / peeking scenarios — use sequential testing or SPRT instead
- Clustered data (e.g. users within countries) — standard tests assume independence
---
## Decision Framework (Post-Experiment)
Use this after running the test:
| p-value | Effect Size | Practical Impact | Decision |
|---|---|---|---|
| < α | Large / Medium | Meaningful | ✅ Ship |
| < α | Small | Negligible | ⚠️ Hold — statistically significant but not worth the complexity |
| ≥ α | — | — | 🔁 Extend (if underpowered) or ❌ Kill |
| < α | Any | Negative UX | ❌ Kill regardless |
**Always ask:** "If this effect were exactly as measured, would the business care?" If no — don't ship on significance alone.
---
## Effect Size Reference
Effect sizes translate statistical results into practical language:
**Cohen's d (means):**
| d | Interpretation |
|---|---|
| < 0.2 | Negligible |
| 0.2–0.5 | Small |
| 0.5–0.8 | Medium |
| > 0.8 | Large |
**Cohen's h (proportions):**
| h | Interpretation |
|---|---|
| < 0.2 | Negligible |
| 0.2–0.5 | Small |
| 0.5–0.8 | Medium |
| > 0.8 | Large |
**Cramér's V (chi-square):**
| V | Interpretation |
|---|---|
| < 0.1 | Negligible |
| 0.1–0.3 | Small |
| 0.3–0.5 | Medium |
| > 0.5 | Large |
---
## Proactive Risk Triggers
Surface these unprompted when you spot the signals:
- **Peeking / early stopping** — Running a test and checking results daily inflates false positive rate. Ask: "Did you look at results before the planned end date?"
- **Multiple comparisons** — Testing 10 metrics at α=0.05 gives ~40% chance of at least one false positive. Flag when > 3 metrics are being evaluated.
- **Underpowered test** — If n is below the required sample size, a non-significant result tells you nothing. Always check power retroactively.
- **SUTVA violations** — If users in control and treatment can interact (e.g. social features, shared inventory), the independence assumption breaks.
- **Simpson's Paradox** — An aggregate result can reverse when segmented. Flag when segment-level results are available.
- **Novelty effect** — Significant early results in UX tests often decay. Flag for post-novelty re-measurement.
---
## Output Artifacts
| Request | Deliverable |
|---|---|
| "Did our test win?" | Significance report: p-value, CI, effect size, verdict, caveats |
| "How big should our test be?" | Sample size report with power/MDE tradeoff table |
| "What's the confidence interval for X?" | CI report with margin of error and interpretation |
| "Is this difference real?" | Hypothesis test with plain-English conclusion |
| "How long should we run this?" | Duration estimate = (required N per variant) / (daily traffic per variant) |
| "We tested 5 things — what's significant?" | Multiple comparison analysis with Bonferroni-adjusted thresholds |
---
## Quality Loop
Tag every finding with confidence:
- 🟢 **Verified** — Test assumptions met, sufficient n, no validity threats
- 🟡 **Likely** — Minor assumption violations; interpret directionally
- 🔴 **Inconclusive** — Underpowered, peeking, or data integrity issue; do not act
---
## Communication Standard
Structure all results as:
**Bottom Line** — One sentence: "Treatment increased conversion by 1.2pp (95% CI: 0.4–2.0pp). Result is statistically significant (p=0.003) with a small effect (h=0.18). Recommend shipping."
**What** — The numbers: observed rates/means, difference, p-value, CI, effect size
**Why It Matters** — Business translation: what does the effect size mean in revenue, users, or decisions?
**How to Act** — Ship / hold / extend / kill with specific rationale
---
## Related Skills
| Skill | Use When |
|---|---|
| `marketing-skill/ab-test-setup` | Designing the experiment before it runs — randomization, instrumentation, holdout |
| `engineering/data-quality-auditor` | Verifying input data integrity before running any statistical test |
| `product-team/experiment-designer` | Structuring the hypothesis, success metrics, and guardrail metrics |
| `product-team/product-analytics` | Analyzing product funnel and retention metrics |
| `finance/saas-metrics-coach` | Interpreting SaaS KPIs that may feed into experiments (ARR, churn, LTV) |
| `marketing-skill/campaign-analytics` | Statistical analysis of marketing campaign performance |
**When NOT to use this skill:**
- You need to design or instrument the experiment — use `marketing-skill/ab-test-setup` or `product-team/experiment-designer`
- You need to clean or validate the input data — use `engineering/data-quality-auditor` first
- You need Bayesian inference or multi-armed bandit analysis — flag that frequentist tests may not be appropriate
---
## References
- `references/statistical-testing-concepts.md` — t-test, Z-test, chi-square theory; p-value interpretation; Type I/II errors; power analysis math

View File

@@ -1,13 +1,13 @@
--- ---
title: "Product Skills — Agent Skills & Codex Plugins" title: "Product Skills — Agent Skills & Codex Plugins"
description: "15 product skills — product management agent skill and Claude Code plugin for PRDs, discovery, analytics, and roadmaps. Works with Claude Code, Codex CLI, Gemini CLI, and OpenClaw." description: "16 product skills — product management agent skill and Claude Code plugin for PRDs, discovery, analytics, and roadmaps. Works with Claude Code, Codex CLI, Gemini CLI, and OpenClaw."
--- ---
<div class="domain-header" markdown> <div class="domain-header" markdown>
# :material-lightbulb-outline: Product # :material-lightbulb-outline: Product
<p class="domain-count">15 skills in this domain</p> <p class="domain-count">16 skills in this domain</p>
</div> </div>
@@ -95,6 +95,12 @@ description: "15 product skills — product management agent skill and Claude Co
Tier: POWERFUL Tier: POWERFUL
- **[Spec to Repo](spec-to-repo.md)**
---
Turn a natural-language project specification into a complete, runnable starter repository. Not a template filler — a...
- **[UI Design System](ui-design-system.md)** - **[UI Design System](ui-design-system.md)**
--- ---

View File

@@ -101,18 +101,58 @@ See:
- Flattening at low level: product used occasionally, revisit value metric. - Flattening at low level: product used occasionally, revisit value metric.
- Improving newer cohorts: onboarding or positioning improvements are working. - Improving newer cohorts: onboarding or positioning improvements are working.
## Anti-Patterns
| Anti-pattern | Fix |
|---|---|
| **Vanity metrics** — tracking pageviews or total signups without activation context | Always pair acquisition metrics with activation rate and retention |
| **Single-point retention** — reporting "30-day retention is 20%" | Compare retention curves across cohorts, not isolated snapshots |
| **Dashboard overload** — 30+ metrics on one screen | Executive layer: 5-7 metrics. Feature layer: per-feature only |
| **No decision rule** — tracking a KPI with no threshold or action plan | Every KPI needs: target, threshold, owner, and "if below X, then Y" |
| **Averaging across segments** — reporting blended metrics that hide segment differences | Always segment by cohort, plan tier, channel, or geography |
| **Ignoring seasonality** — comparing this week to last week without adjusting | Use period-over-period with same-period-last-year context |
## Tooling ## Tooling
### `scripts/metrics_calculator.py` ### `scripts/metrics_calculator.py`
CLI utility for: CLI utility for retention, cohort, and funnel analysis from CSV data. Supports text and JSON output.
- Retention rate calculations by cohort age
- Cohort table generation
- Basic funnel conversion analysis
Examples:
```bash ```bash
# Retention analysis
python3 scripts/metrics_calculator.py retention events.csv python3 scripts/metrics_calculator.py retention events.csv
python3 scripts/metrics_calculator.py retention events.csv --format json
# Cohort matrix
python3 scripts/metrics_calculator.py cohort events.csv --cohort-grain month python3 scripts/metrics_calculator.py cohort events.csv --cohort-grain month
python3 scripts/metrics_calculator.py cohort events.csv --cohort-grain week --format json
# Funnel conversion
python3 scripts/metrics_calculator.py funnel funnel.csv --stages visit,signup,activate,pay python3 scripts/metrics_calculator.py funnel funnel.csv --stages visit,signup,activate,pay
python3 scripts/metrics_calculator.py funnel funnel.csv --stages visit,signup,activate,pay --format json
``` ```
**CSV format for retention/cohort:**
```csv
user_id,cohort_date,activity_date
u001,2026-01-01,2026-01-01
u001,2026-01-01,2026-01-03
u002,2026-01-02,2026-01-02
```
**CSV format for funnel:**
```csv
user_id,stage
u001,visit
u001,signup
u001,activate
u002,visit
u002,signup
```
## Cross-References
- Related: `product-team/experiment-designer` — for A/B test planning after identifying metric opportunities
- Related: `product-team/product-manager-toolkit` — for RICE prioritization of metric-driven features
- Related: `product-team/product-discovery` — for assumption mapping when metrics reveal unknowns
- Related: `finance/saas-metrics-coach` — for SaaS-specific metrics (ARR, MRR, churn, LTV)

View File

@@ -0,0 +1,285 @@
---
title: "Spec to Repo — Agent Skill for Product Teams"
description: "Use when the user says 'build me an app', 'create a project from this spec', 'scaffold a new repo', 'generate a starter', 'turn this idea into code'. Agent skill for Claude Code, Codex CLI, Gemini CLI, OpenClaw."
---
# Spec to Repo
<div class="page-meta" markdown>
<span class="meta-badge">:material-lightbulb-outline: Product</span>
<span class="meta-badge">:material-identifier: `spec-to-repo`</span>
<span class="meta-badge">:material-github: <a href="https://github.com/alirezarezvani/claude-skills/tree/main/product-team/spec-to-repo/SKILL.md">Source</a></span>
</div>
<div class="install-banner" markdown>
<span class="install-label">Install:</span> <code>claude /plugin install product-skills</code>
</div>
Turn a natural-language project specification into a complete, runnable starter repository. Not a template filler — a spec interpreter that generates real, working code for any stack.
## When to Use
- User provides a text description of an app and wants code
- User has a PRD, requirements doc, or feature list and needs a codebase
- User says "build me an app that...", "scaffold this", "bootstrap a project"
- User wants a working starter repo, not just a file tree
**Not this skill** when the user wants a SaaS app with Stripe + Auth specifically — use `product-team/saas-scaffolder` instead.
## Core Workflow
### Phase 1 — Parse & Interpret
Read the spec. Extract these fields silently:
| Field | Source | Required |
|-------|--------|----------|
| App name | Explicit or infer from description | yes |
| Description | First sentence of spec | yes |
| Features | Bullet points or sentences describing behavior | yes |
| Tech stack | Explicit ("use FastAPI") or infer from context | yes |
| Auth | "login", "users", "accounts", "roles" | if mentioned |
| Database | "store", "save", "persist", "records", "schema" | if mentioned |
| API surface | "endpoint", "API", "REST", "GraphQL" | if mentioned |
| Deploy target | "Vercel", "Docker", "AWS", "Railway" | if mentioned |
**Stack inference rules** (when user doesn't specify):
| Signal | Inferred stack |
|--------|---------------|
| "web app", "dashboard", "SaaS" | Next.js + TypeScript |
| "API", "backend", "microservice" | FastAPI (Python) or Express (Node) |
| "mobile app" | Flutter or React Native |
| "CLI tool" | Go or Python |
| "data pipeline" | Python |
| "high performance", "systems" | Rust or Go |
After parsing, present a structured interpretation back to the user:
```
## Spec Interpretation
**App:** [name]
**Stack:** [framework + language]
**Features:**
1. [feature]
2. [feature]
**Database:** [yes/no — engine]
**Auth:** [yes/no — method]
**Deploy:** [target]
Does this match your intent? Any corrections before I generate?
```
Flag ambiguities. Ask **at most 3** clarifying questions. If the user says "just build it", proceed with best-guess defaults.
### Phase 2 — Architecture
Design the project before writing any files:
1. **Select template** — Match to a stack template from `references/stack-templates.md`
2. **Define file tree** — List every file that will be created
3. **Map features to files** — Each feature gets at minimum one file/component
4. **Design database schema** — If applicable, define tables/collections with fields and types
5. **Identify dependencies** — List every package with version constraints
6. **Plan API routes** — If applicable, list every endpoint with method, path, request/response shape
Present the file tree to the user before generating:
```
project-name/
├── README.md
├── .env.example
├── .gitignore
├── .github/workflows/ci.yml
├── package.json / requirements.txt / go.mod
├── src/
│ ├── ...
├── tests/
│ ├── ...
└── ...
```
### Phase 3 — Generate
Write every file. Rules:
- **Real code, not stubs.** Every function has a real implementation. No `// TODO: implement` or `pass` placeholders.
- **Syntactically valid.** Every file must parse without errors in its language.
- **Imports match dependencies.** Every import must correspond to a package in the manifest (package.json, requirements.txt, go.mod, etc.).
- **Types included.** TypeScript projects use types. Python projects use type hints. Go projects use typed structs.
- **Environment variables.** Generate `.env.example` with every required variable, commented with purpose.
- **README.md.** Include: project description, prerequisites, setup steps (clone, install, configure env, run), and available scripts/commands.
- **CI config.** Generate `.github/workflows/ci.yml` with: install, lint (if linter in deps), test, build.
- **.gitignore.** Stack-appropriate ignores (node_modules, __pycache__, .env, build artifacts).
**File generation order:**
1. Manifest (package.json / requirements.txt / go.mod)
2. Config files (.env.example, .gitignore, CI)
3. Database schema / migrations
4. Core business logic
5. API routes / endpoints
6. UI components (if applicable)
7. Tests
8. README.md
### Phase 4 — Validate
After generation, run through this checklist:
- [ ] Every imported package exists in the manifest
- [ ] Every file referenced by an import exists in the tree
- [ ] `.env.example` lists every env var used in code
- [ ] `.gitignore` covers build artifacts and secrets
- [ ] README has setup instructions that actually work
- [ ] No hardcoded secrets, API keys, or passwords
- [ ] At least one test file exists
- [ ] Build/start command is documented and would work
Run `scripts/validate_project.py` against the generated directory to catch common issues.
## Examples
### Example 1: Task Management API
**Input spec:**
> "Build me a task management API. Users can create, list, update, and delete tasks. Tasks have a title, description, status (todo/in-progress/done), and due date. Use FastAPI with SQLite. Add basic auth with API keys."
**Output file tree:**
```
task-api/
├── README.md
├── .env.example # API_KEY, DATABASE_URL
├── .gitignore
├── .github/workflows/ci.yml
├── requirements.txt # fastapi, uvicorn, sqlalchemy, pytest
├── main.py # FastAPI app, CORS, lifespan
├── models.py # SQLAlchemy Task model
├── schemas.py # Pydantic request/response schemas
├── database.py # SQLite engine + session
├── auth.py # API key middleware
├── routers/
│ └── tasks.py # CRUD endpoints
└── tests/
└── test_tasks.py # Smoke tests for each endpoint
```
### Example 2: Recipe Sharing Web App
**Input spec:**
> "I want a recipe sharing website. Users sign up, post recipes with ingredients and steps, browse other recipes, and save favorites. Use Next.js with Tailwind. Store data in PostgreSQL."
**Output file tree:**
```
recipe-share/
├── README.md
├── .env.example # DATABASE_URL, NEXTAUTH_SECRET, NEXTAUTH_URL
├── .gitignore
├── .github/workflows/ci.yml
├── package.json # next, react, tailwindcss, prisma, next-auth
├── tailwind.config.ts
├── tsconfig.json
├── next.config.ts
├── prisma/
│ └── schema.prisma # User, Recipe, Ingredient, Favorite models
├── src/
│ ├── app/
│ │ ├── layout.tsx
│ │ ├── page.tsx # Homepage — recipe feed
│ │ ├── recipes/
│ │ │ ├── page.tsx # Browse recipes
│ │ │ ├── [id]/page.tsx # Recipe detail
│ │ │ └── new/page.tsx # Create recipe form
│ │ └── api/
│ │ ├── auth/[...nextauth]/route.ts
│ │ └── recipes/route.ts
│ ├── components/
│ │ ├── RecipeCard.tsx
│ │ ├── RecipeForm.tsx
│ │ └── Navbar.tsx
│ └── lib/
│ ├── prisma.ts
│ └── auth.ts
└── tests/
└── recipes.test.ts
```
### Example 3: CLI Expense Tracker
**Input spec:**
> "Python CLI tool for tracking expenses. Commands: add, list, summary, export-csv. Store in a local SQLite file. No external API."
**Output file tree:**
```
expense-tracker/
├── README.md
├── .gitignore
├── .github/workflows/ci.yml
├── pyproject.toml
├── src/
│ └── expense_tracker/
│ ├── __init__.py
│ ├── cli.py # argparse commands
│ ├── database.py # SQLite operations
│ ├── models.py # Expense dataclass
│ └── formatters.py # Table + CSV output
└── tests/
└── test_cli.py
```
## Anti-Patterns
| Anti-pattern | Fix |
|---|---|
| **Placeholder code** — `// TODO: implement`, `pass`, empty function bodies | Every function has a real implementation. If complex, implement a working simplified version. |
| **Stack override** — picking Next.js when the user said Flask | Always honor explicit tech preferences. Only infer when the user doesn't specify. |
| **Missing .gitignore** — committing node_modules or .env | Generate stack-appropriate .gitignore as one of the first files. |
| **Phantom imports** — importing packages not in the manifest | Cross-check every import against package.json / requirements.txt before finishing. |
| **Over-engineering MVP** — adding Redis caching, rate limiting, WebSockets to a v1 | Build the minimum that works. The user can iterate. |
| **Ignoring stated preferences** — user says "PostgreSQL" and you generate MongoDB | Parse the spec carefully. Explicit preferences are non-negotiable. |
| **Missing env vars** — code reads `process.env.X` but `.env.example` doesn't list it | Every env var used in code must appear in `.env.example` with a comment. |
| **No tests** — shipping a repo with zero test files | At minimum: one smoke test per API endpoint or one test per core function. |
| **Hallucinated APIs** — generating code that calls library methods that don't exist | Stick to well-documented, stable APIs. When unsure, use the simplest approach. |
## Validation Script
### `scripts/validate_project.py`
Checks a generated project directory for common issues:
```bash
# Validate a generated project
python3 scripts/validate_project.py /path/to/generated-project
# JSON output
python3 scripts/validate_project.py /path/to/generated-project --format json
```
Checks performed:
- README.md exists and is non-empty
- .gitignore exists
- .env.example exists (if code references env vars)
- Package manifest exists (package.json, requirements.txt, go.mod, Cargo.toml, pubspec.yaml)
- No .env file committed (secrets leak)
- At least one test file exists
- No TODO/FIXME placeholders in generated code
## Progressive Enhancement
For complex specs, generate in stages:
1. **MVP** — Core feature only, working end-to-end
2. **Auth** — Add authentication if requested
3. **Polish** — Error handling, validation, loading states
4. **Deploy** — Docker, CI, deploy config
Ask the user after MVP: "Core is working. Want me to add auth/polish/deploy next, or iterate on what's here?"
## Cross-References
- Related: `product-team/saas-scaffolder` — SaaS-specific scaffolding (Next.js + Stripe + Auth)
- Related: `engineering/spec-driven-workflow` — spec-first development methodology
- Related: `engineering/database-designer` — database schema design patterns
- Related: `engineering-team/senior-fullstack` — full-stack implementation patterns

View File

@@ -0,0 +1,13 @@
{
"name": "statistical-analyst",
"description": "Hypothesis testing, A/B experiment analysis, sample size calculation, and confidence intervals. 3 stdlib-only Python tools with Z-test, t-test, chi-square, effect sizes, power analysis, and Wilson score intervals.",
"version": "2.2.0",
"author": {
"name": "Alireza Rezvani",
"url": "https://alirezarezvani.com"
},
"homepage": "https://github.com/alirezarezvani/claude-skills/tree/main/engineering/statistical-analyst",
"repository": "https://github.com/alirezarezvani/claude-skills",
"license": "MIT",
"skills": "./"
}

View File

@@ -1,6 +1,6 @@
site_name: Claude Code Skills & Agent Plugins site_name: Claude Code Skills & Agent Plugins
site_url: https://alirezarezvani.github.io/claude-skills/ site_url: https://alirezarezvani.github.io/claude-skills/
site_description: "248 production-ready skills, 23 agents, 3 personas, and an orchestration protocol for 11 AI coding tools. Reusable expertise for engineering, product, marketing, compliance, and more." site_description: "233 production-ready skills, 25 agents, 3 personas, and an orchestration protocol for 11 AI coding tools. Reusable expertise for engineering, product, marketing, compliance, and more."
site_author: Alireza Rezvani site_author: Alireza Rezvani
repo_url: https://github.com/alirezarezvani/claude-skills repo_url: https://github.com/alirezarezvani/claude-skills
repo_name: alirezarezvani/claude-skills repo_name: alirezarezvani/claude-skills
@@ -214,6 +214,7 @@ nav:
- "Self-Eval": skills/engineering/self-eval.md - "Self-Eval": skills/engineering/self-eval.md
- "Skill Security Auditor": skills/engineering/skill-security-auditor.md - "Skill Security Auditor": skills/engineering/skill-security-auditor.md
- "Skill Tester": skills/engineering/skill-tester.md - "Skill Tester": skills/engineering/skill-tester.md
- "Statistical Analyst": skills/engineering/statistical-analyst.md
- "Spec-Driven Workflow": skills/engineering/spec-driven-workflow.md - "Spec-Driven Workflow": skills/engineering/spec-driven-workflow.md
- "SQL Database Assistant": skills/engineering/sql-database-assistant.md - "SQL Database Assistant": skills/engineering/sql-database-assistant.md
- "Tech Debt Tracker": skills/engineering/tech-debt-tracker.md - "Tech Debt Tracker": skills/engineering/tech-debt-tracker.md

View File

@@ -0,0 +1,274 @@
---
name: spec-to-repo
description: "Use when the user says 'build me an app', 'create a project from this spec', 'scaffold a new repo', 'generate a starter', 'turn this idea into code', 'bootstrap a project', 'I have requirements and need a codebase', or provides a natural-language project specification and expects a complete, runnable repository. Stack-agnostic: Next.js, FastAPI, Rails, Go, Rust, Flutter, and more."
---
# Spec to Repo
Turn a natural-language project specification into a complete, runnable starter repository. Not a template filler — a spec interpreter that generates real, working code for any stack.
## When to Use
- User provides a text description of an app and wants code
- User has a PRD, requirements doc, or feature list and needs a codebase
- User says "build me an app that...", "scaffold this", "bootstrap a project"
- User wants a working starter repo, not just a file tree
**Not this skill** when the user wants a SaaS app with Stripe + Auth specifically — use `product-team/saas-scaffolder` instead.
## Core Workflow
### Phase 1 — Parse & Interpret
Read the spec. Extract these fields silently:
| Field | Source | Required |
|-------|--------|----------|
| App name | Explicit or infer from description | yes |
| Description | First sentence of spec | yes |
| Features | Bullet points or sentences describing behavior | yes |
| Tech stack | Explicit ("use FastAPI") or infer from context | yes |
| Auth | "login", "users", "accounts", "roles" | if mentioned |
| Database | "store", "save", "persist", "records", "schema" | if mentioned |
| API surface | "endpoint", "API", "REST", "GraphQL" | if mentioned |
| Deploy target | "Vercel", "Docker", "AWS", "Railway" | if mentioned |
**Stack inference rules** (when user doesn't specify):
| Signal | Inferred stack |
|--------|---------------|
| "web app", "dashboard", "SaaS" | Next.js + TypeScript |
| "API", "backend", "microservice" | FastAPI (Python) or Express (Node) |
| "mobile app" | Flutter or React Native |
| "CLI tool" | Go or Python |
| "data pipeline" | Python |
| "high performance", "systems" | Rust or Go |
After parsing, present a structured interpretation back to the user:
```
## Spec Interpretation
**App:** [name]
**Stack:** [framework + language]
**Features:**
1. [feature]
2. [feature]
**Database:** [yes/no — engine]
**Auth:** [yes/no — method]
**Deploy:** [target]
Does this match your intent? Any corrections before I generate?
```
Flag ambiguities. Ask **at most 3** clarifying questions. If the user says "just build it", proceed with best-guess defaults.
### Phase 2 — Architecture
Design the project before writing any files:
1. **Select template** — Match to a stack template from `references/stack-templates.md`
2. **Define file tree** — List every file that will be created
3. **Map features to files** — Each feature gets at minimum one file/component
4. **Design database schema** — If applicable, define tables/collections with fields and types
5. **Identify dependencies** — List every package with version constraints
6. **Plan API routes** — If applicable, list every endpoint with method, path, request/response shape
Present the file tree to the user before generating:
```
project-name/
├── README.md
├── .env.example
├── .gitignore
├── .github/workflows/ci.yml
├── package.json / requirements.txt / go.mod
├── src/
│ ├── ...
├── tests/
│ ├── ...
└── ...
```
### Phase 3 — Generate
Write every file. Rules:
- **Real code, not stubs.** Every function has a real implementation. No `// TODO: implement` or `pass` placeholders.
- **Syntactically valid.** Every file must parse without errors in its language.
- **Imports match dependencies.** Every import must correspond to a package in the manifest (package.json, requirements.txt, go.mod, etc.).
- **Types included.** TypeScript projects use types. Python projects use type hints. Go projects use typed structs.
- **Environment variables.** Generate `.env.example` with every required variable, commented with purpose.
- **README.md.** Include: project description, prerequisites, setup steps (clone, install, configure env, run), and available scripts/commands.
- **CI config.** Generate `.github/workflows/ci.yml` with: install, lint (if linter in deps), test, build.
- **.gitignore.** Stack-appropriate ignores (node_modules, __pycache__, .env, build artifacts).
**File generation order:**
1. Manifest (package.json / requirements.txt / go.mod)
2. Config files (.env.example, .gitignore, CI)
3. Database schema / migrations
4. Core business logic
5. API routes / endpoints
6. UI components (if applicable)
7. Tests
8. README.md
### Phase 4 — Validate
After generation, run through this checklist:
- [ ] Every imported package exists in the manifest
- [ ] Every file referenced by an import exists in the tree
- [ ] `.env.example` lists every env var used in code
- [ ] `.gitignore` covers build artifacts and secrets
- [ ] README has setup instructions that actually work
- [ ] No hardcoded secrets, API keys, or passwords
- [ ] At least one test file exists
- [ ] Build/start command is documented and would work
Run `scripts/validate_project.py` against the generated directory to catch common issues.
## Examples
### Example 1: Task Management API
**Input spec:**
> "Build me a task management API. Users can create, list, update, and delete tasks. Tasks have a title, description, status (todo/in-progress/done), and due date. Use FastAPI with SQLite. Add basic auth with API keys."
**Output file tree:**
```
task-api/
├── README.md
├── .env.example # API_KEY, DATABASE_URL
├── .gitignore
├── .github/workflows/ci.yml
├── requirements.txt # fastapi, uvicorn, sqlalchemy, pytest
├── main.py # FastAPI app, CORS, lifespan
├── models.py # SQLAlchemy Task model
├── schemas.py # Pydantic request/response schemas
├── database.py # SQLite engine + session
├── auth.py # API key middleware
├── routers/
│ └── tasks.py # CRUD endpoints
└── tests/
└── test_tasks.py # Smoke tests for each endpoint
```
### Example 2: Recipe Sharing Web App
**Input spec:**
> "I want a recipe sharing website. Users sign up, post recipes with ingredients and steps, browse other recipes, and save favorites. Use Next.js with Tailwind. Store data in PostgreSQL."
**Output file tree:**
```
recipe-share/
├── README.md
├── .env.example # DATABASE_URL, NEXTAUTH_SECRET, NEXTAUTH_URL
├── .gitignore
├── .github/workflows/ci.yml
├── package.json # next, react, tailwindcss, prisma, next-auth
├── tailwind.config.ts
├── tsconfig.json
├── next.config.ts
├── prisma/
│ └── schema.prisma # User, Recipe, Ingredient, Favorite models
├── src/
│ ├── app/
│ │ ├── layout.tsx
│ │ ├── page.tsx # Homepage — recipe feed
│ │ ├── recipes/
│ │ │ ├── page.tsx # Browse recipes
│ │ │ ├── [id]/page.tsx # Recipe detail
│ │ │ └── new/page.tsx # Create recipe form
│ │ └── api/
│ │ ├── auth/[...nextauth]/route.ts
│ │ └── recipes/route.ts
│ ├── components/
│ │ ├── RecipeCard.tsx
│ │ ├── RecipeForm.tsx
│ │ └── Navbar.tsx
│ └── lib/
│ ├── prisma.ts
│ └── auth.ts
└── tests/
└── recipes.test.ts
```
### Example 3: CLI Expense Tracker
**Input spec:**
> "Python CLI tool for tracking expenses. Commands: add, list, summary, export-csv. Store in a local SQLite file. No external API."
**Output file tree:**
```
expense-tracker/
├── README.md
├── .gitignore
├── .github/workflows/ci.yml
├── pyproject.toml
├── src/
│ └── expense_tracker/
│ ├── __init__.py
│ ├── cli.py # argparse commands
│ ├── database.py # SQLite operations
│ ├── models.py # Expense dataclass
│ └── formatters.py # Table + CSV output
└── tests/
└── test_cli.py
```
## Anti-Patterns
| Anti-pattern | Fix |
|---|---|
| **Placeholder code** — `// TODO: implement`, `pass`, empty function bodies | Every function has a real implementation. If complex, implement a working simplified version. |
| **Stack override** — picking Next.js when the user said Flask | Always honor explicit tech preferences. Only infer when the user doesn't specify. |
| **Missing .gitignore** — committing node_modules or .env | Generate stack-appropriate .gitignore as one of the first files. |
| **Phantom imports** — importing packages not in the manifest | Cross-check every import against package.json / requirements.txt before finishing. |
| **Over-engineering MVP** — adding Redis caching, rate limiting, WebSockets to a v1 | Build the minimum that works. The user can iterate. |
| **Ignoring stated preferences** — user says "PostgreSQL" and you generate MongoDB | Parse the spec carefully. Explicit preferences are non-negotiable. |
| **Missing env vars** — code reads `process.env.X` but `.env.example` doesn't list it | Every env var used in code must appear in `.env.example` with a comment. |
| **No tests** — shipping a repo with zero test files | At minimum: one smoke test per API endpoint or one test per core function. |
| **Hallucinated APIs** — generating code that calls library methods that don't exist | Stick to well-documented, stable APIs. When unsure, use the simplest approach. |
## Validation Script
### `scripts/validate_project.py`
Checks a generated project directory for common issues:
```bash
# Validate a generated project
python3 scripts/validate_project.py /path/to/generated-project
# JSON output
python3 scripts/validate_project.py /path/to/generated-project --format json
```
Checks performed:
- README.md exists and is non-empty
- .gitignore exists
- .env.example exists (if code references env vars)
- Package manifest exists (package.json, requirements.txt, go.mod, Cargo.toml, pubspec.yaml)
- No .env file committed (secrets leak)
- At least one test file exists
- No TODO/FIXME placeholders in generated code
## Progressive Enhancement
For complex specs, generate in stages:
1. **MVP** — Core feature only, working end-to-end
2. **Auth** — Add authentication if requested
3. **Polish** — Error handling, validation, loading states
4. **Deploy** — Docker, CI, deploy config
Ask the user after MVP: "Core is working. Want me to add auth/polish/deploy next, or iterate on what's here?"
## Cross-References
- Related: `product-team/saas-scaffolder` — SaaS-specific scaffolding (Next.js + Stripe + Auth)
- Related: `engineering/spec-driven-workflow` — spec-first development methodology
- Related: `engineering/database-designer` — database schema design patterns
- Related: `engineering-team/senior-fullstack` — full-stack implementation patterns

View File

@@ -0,0 +1,113 @@
# Spec Parsing Guide
How to extract structured requirements from ambiguous, incomplete, or conversational natural-language specifications.
---
## Parsing Strategy
Read the full spec once. On the second pass, extract fields into the structured interpretation table. Don't ask questions for anything you can reasonably infer.
### Extraction Priority
1. **Explicit statements** — "Use PostgreSQL", "Build with Next.js" — non-negotiable
2. **Strong signals** — "users can sign up" implies auth + user model + database
3. **Contextual inference** — "dashboard" implies web app; "track expenses" implies CRUD + database
4. **Defaults** — When nothing is specified, pick the most common choice for the domain
---
## Ambiguity Resolution
### Stack Not Specified
| Spec pattern | Default stack | Reasoning |
|---|---|---|
| Web app with UI | Next.js + TypeScript | Most versatile, SSR + API routes |
| API / backend only | FastAPI | Fast to scaffold, great DX, typed |
| Mobile app | Flutter | Cross-platform, single codebase |
| CLI tool | Python | Fastest to ship, stdlib-rich |
| "Simple" / "lightweight" | Express or Flask | Minimal overhead |
| "Fast" / "performance" | Go | Compiled, concurrent |
### Database Not Specified
| Signal | Default |
|---|---|
| User accounts, persistent data | PostgreSQL |
| Small project, local-only, CLI | SQLite |
| Document-oriented, flexible schema | MongoDB (only if user signals) |
| No data persistence mentioned | No database — don't add one |
### Auth Not Specified
| Signal | Default |
|---|---|
| "Users", "accounts", "login" | Yes — session-based or JWT |
| "Admin panel", "roles" | Yes — with role-based access |
| API with "API keys" | Yes — API key middleware |
| No user-facing features | No auth — don't add one |
---
## Common Spec Shapes
### Shape 1: Stream of Consciousness
> "I want an app where people can post recipes and other people can comment on them and save their favorites, maybe add a rating system too, and it should look nice on mobile"
**Extract:**
- Features: post recipes, comment, favorites, ratings
- UI: responsive / mobile-friendly
- Implies: auth (users), database (recipes, comments, favorites, ratings), web app
### Shape 2: Feature List
> "Features: 1. User registration 2. Create projects 3. Invite team members 4. Kanban board 5. File uploads"
**Extract:**
- Features: numbered list, each gets a route/component
- Auth: yes (registration)
- Database: yes (users, projects, teams, files)
- Complex features: kanban (drag-drop), file uploads (storage)
### Shape 3: Technical Spec
> "FastAPI backend with PostgreSQL. Endpoints: POST /items, GET /items, GET /items/{id}, PUT /items/{id}, DELETE /items/{id}. Use SQLAlchemy ORM. Add JWT auth."
**Extract:**
- Stack: explicit (FastAPI, PostgreSQL, SQLAlchemy, JWT)
- API: 5 CRUD endpoints, fully defined
- Minimal inference needed — generate exactly what's asked
### Shape 4: Existing PRD
> [Multi-page document with overview, user personas, feature requirements, acceptance criteria]
**Extract:**
- Read the overview first for scope
- Map feature requirements to files
- Use acceptance criteria as test case seeds
- Ignore personas, market analysis, timelines — they don't affect code generation
---
## What to Ask vs. What to Infer
**Ask (max 3 questions):**
- Stack preference when the spec is truly ambiguous and could go multiple ways
- Database choice when both SQL and NoSQL are equally valid
- Deploy target when it materially affects the code (serverless vs. container)
**Infer silently:**
- Auth method (JWT for APIs, session for web apps)
- Testing framework (most popular for the stack)
- Linter / formatter (stack default)
- CSS approach (Tailwind for React/Next, stack default otherwise)
- Package versions (latest stable)
**Never ask:**
- "What folder structure do you want?" — use the stack convention
- "Do you want TypeScript?" — yes, always for JS projects
- "Should I add error handling?" — yes, always
- "Do you want tests?" — yes, always

View File

@@ -0,0 +1,256 @@
# Stack Templates
Quick-reference templates for common tech stacks. Each template defines: file structure, manifest, entry point, and build/run commands.
---
## Next.js (TypeScript + Tailwind)
**When:** Web apps, dashboards, SaaS, landing pages with dynamic content.
**Manifest:** `package.json`
```json
{
"scripts": {
"dev": "next dev",
"build": "next build",
"start": "next start",
"lint": "next lint",
"test": "jest"
}
}
```
**Core deps:** `next`, `react`, `react-dom`, `tailwindcss`, `postcss`, `autoprefixer`
**Auth:** `next-auth` (default) or `clerk`
**Database:** `prisma` (ORM) + `@prisma/client`
**Testing:** `jest`, `@testing-library/react`
**File structure:**
```
src/app/layout.tsx — Root layout with providers
src/app/page.tsx — Homepage
src/app/api/*/route.ts — API routes
src/components/*.tsx — Shared components
src/lib/*.ts — Utilities, DB client
prisma/schema.prisma — Database schema (if DB)
```
**Config files:** `tsconfig.json`, `tailwind.config.ts`, `next.config.ts`, `postcss.config.mjs`
---
## FastAPI (Python)
**When:** REST APIs, backends, microservices, data-driven services.
**Manifest:** `requirements.txt`
```
fastapi>=0.110.0
uvicorn>=0.29.0
sqlalchemy>=2.0.0
pydantic>=2.0.0
pytest>=8.0.0
httpx>=0.27.0
```
**File structure:**
```
main.py — FastAPI app, CORS, lifespan
models.py — SQLAlchemy models
schemas.py — Pydantic schemas
database.py — Engine, session factory
routers/*.py — Route modules
tests/test_*.py — pytest tests
```
**Run:** `uvicorn main:app --reload`
**Test:** `pytest`
---
## Express (TypeScript)
**When:** Node.js APIs, middleware-heavy backends, real-time services.
**Manifest:** `package.json`
```json
{
"scripts": {
"dev": "tsx watch src/index.ts",
"build": "tsc",
"start": "node dist/index.js",
"test": "jest"
}
}
```
**Core deps:** `express`, `cors`, `dotenv`
**Dev deps:** `typescript`, `tsx`, `@types/express`, `@types/node`, `jest`, `ts-jest`
**File structure:**
```
src/index.ts — App setup, middleware, listen
src/routes/*.ts — Route handlers
src/middleware/*.ts — Auth, validation, error handling
src/models/*.ts — Data models / ORM entities
src/lib/*.ts — Utilities
tests/*.test.ts — Jest tests
```
---
## Go (net/http or Gin)
**When:** High-performance APIs, CLI tools, systems programming.
**Manifest:** `go.mod`
**File structure (API):**
```
main.go — Entry point, router setup
handlers/*.go — HTTP handlers
models/*.go — Data structs
middleware/*.go — Auth, logging
db/*.go — Database connection
*_test.go — Table-driven tests
```
**File structure (CLI):**
```
main.go — Entry point, flag parsing
cmd/*.go — Subcommands
internal/*.go — Business logic
*_test.go — Tests
```
**Run:** `go run .`
**Test:** `go test ./...`
**Build:** `go build -o app .`
---
## Rust (Actix-web or Axum)
**When:** High-performance, safety-critical APIs, systems.
**Manifest:** `Cargo.toml`
**File structure:**
```
src/main.rs — Entry point, server setup
src/routes/*.rs — Route handlers
src/models/*.rs — Data structs, serde
src/db.rs — Database pool
src/error.rs — Error types
tests/*.rs — Integration tests
```
**Run:** `cargo run`
**Test:** `cargo test`
---
## Flutter (Dart)
**When:** Cross-platform mobile apps (iOS + Android).
**Manifest:** `pubspec.yaml`
**File structure:**
```
lib/main.dart — Entry point, MaterialApp
lib/screens/*.dart — Screen widgets
lib/widgets/*.dart — Reusable components
lib/models/*.dart — Data classes
lib/services/*.dart — API clients, storage
lib/providers/*.dart — State management
test/*_test.dart — Widget tests
```
**Run:** `flutter run`
**Test:** `flutter test`
---
## Rails (Ruby)
**When:** Full-stack web apps, CRUD-heavy applications, rapid prototyping.
**Manifest:** `Gemfile`
**File structure:** Standard Rails conventions (`app/`, `config/`, `db/`, `spec/`).
**Run:** `bin/rails server`
**Test:** `bin/rspec`
---
## Django (Python)
**When:** Full-stack Python web apps, admin-heavy apps, content management.
**Manifest:** `requirements.txt`
```
django>=5.0
djangorestframework>=3.15
pytest-django>=4.8
```
**File structure:**
```
manage.py
config/settings.py — Settings
config/urls.py — Root URL config
apps/<name>/models.py — Models
apps/<name>/views.py — Views or ViewSets
apps/<name>/serializers.py — DRF serializers
apps/<name>/urls.py — App URL config
tests/test_*.py
```
**Run:** `python manage.py runserver`
**Test:** `pytest`
---
## CI Template (.github/workflows/ci.yml)
Adapt per stack:
```yaml
name: CI
on: [push, pull_request]
jobs:
test:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v4
- name: Setup [runtime]
uses: actions/setup-[runtime]@v5
with:
[runtime]-version: '[version]'
- name: Install
run: [install command]
- name: Lint
run: [lint command]
- name: Test
run: [test command]
- name: Build
run: [build command]
```
---
## .gitignore Essentials by Stack
| Stack | Must ignore |
|-------|------------|
| Node/Next.js | `node_modules/`, `.next/`, `.env`, `dist/`, `.turbo/` |
| Python | `__pycache__/`, `*.pyc`, `.venv/`, `.env`, `*.egg-info/` |
| Go | Binary name, `.env`, `vendor/` (if not committed) |
| Rust | `target/`, `.env` |
| Flutter | `.dart_tool/`, `build/`, `.env`, `*.iml` |
| Rails | `log/`, `tmp/`, `.env`, `storage/`, `node_modules/` |
All stacks: `.env`, `.DS_Store`, `*.log`, IDE folders (`.idea/`, `.vscode/`)

View File

@@ -0,0 +1,261 @@
#!/usr/bin/env python3
"""
validate_project.py — Validate a generated project directory for common issues.
Checks:
- README.md exists and is non-empty
- .gitignore exists
- .env.example exists (if code references env vars)
- Package manifest exists (package.json, requirements.txt, go.mod, etc.)
- No .env file committed (secrets leak)
- At least one test file exists
- No TODO/FIXME placeholders in generated code
Usage:
python3 validate_project.py /path/to/project
python3 validate_project.py /path/to/project --format json
python3 validate_project.py /path/to/project --strict
"""
import argparse
import json
import os
import re
import sys
# Recognized dependency manifests, one per supported ecosystem.
MANIFESTS = [
    "package.json",
    "requirements.txt",
    "pyproject.toml",
    "go.mod",
    "Cargo.toml",
    "pubspec.yaml",
    "Gemfile",
    "pom.xml",
    "build.gradle",
    "build.gradle.kts",
]
# File extensions treated as source code for env-var and placeholder scans.
CODE_EXTENSIONS = {
    ".py", ".js", ".ts", ".tsx", ".jsx", ".go", ".rs", ".rb",
    ".dart", ".java", ".kt", ".swift", ".cs", ".cpp", ".c",
}
# Regexes matched against project-root-relative paths; a match marks the
# file as a test file.
TEST_PATTERNS = [
    r"test_.*\.py$",
    r".*_test\.py$",
    r".*\.test\.[jt]sx?$",
    r".*\.spec\.[jt]sx?$",
    r".*_test\.go$",
    r".*_test\.rs$",
    r".*_test\.dart$",
    r"test/.*",
    r"tests/.*",
    r"spec/.*",
    r"__tests__/.*",
]
# Regexes that indicate placeholder / unfinished code, matched per line.
PLACEHOLDER_PATTERNS = [
    r"\bTODO\b",
    r"\bFIXME\b",
    r"\bHACK\b",
    r"//\s*implement",
    r"#\s*implement",
    r"raise NotImplementedError",
    # Match only a bare `pass` statement on its own line. The previous
    # pattern (`pass\s*$`) also matched identifiers merely ending in
    # "pass" (e.g. "bypass"), causing false positives.
    r"^\s*pass\s*$",
    r"\.\.\. # placeholder",
]
# Regexes for environment-variable reads across the supported languages
# (Node, Python, Rust, Go, Ruby, Dart, generic `env(` helpers).
ENV_VAR_PATTERNS = [
    r"process\.env\.\w+",
    r"os\.environ\[",
    r"os\.getenv\(",
    r"env\(",
    r"std::env::var",
    r"os\.Getenv\(",
    r"ENV\[",
    r"Platform\.environment\[",
]
def find_files(root):
    """Yield every file path under *root*, skipping VCS and vendor directories."""
    excluded = {
        ".git", "node_modules", ".next", "__pycache__", "target", ".dart_tool",
        "build", "dist", ".venv", "venv", "vendor", ".turbo",
    }
    for dirpath, dirnames, filenames in os.walk(root):
        # Prune in place so os.walk never descends into excluded directories.
        dirnames[:] = [name for name in dirnames if name not in excluded]
        for filename in filenames:
            yield os.path.join(dirpath, filename)
def check_readme(root):
    """Verify README.md exists and is non-trivial (at least 50 bytes)."""
    readme_path = os.path.join(root, "README.md")
    if os.path.isfile(readme_path):
        n_bytes = os.path.getsize(readme_path)
        if n_bytes < 50:
            status, msg = "WARN", f"README.md is only {n_bytes} bytes — likely incomplete"
        else:
            status, msg = "PASS", f"README.md exists ({n_bytes} bytes)"
    else:
        status, msg = "FAIL", "README.md missing"
    return {"name": "readme", "status": status, "message": msg}
def check_gitignore(root):
    """Verify a .gitignore file is present at the project root."""
    present = os.path.isfile(os.path.join(root, ".gitignore"))
    status = "PASS" if present else "FAIL"
    message = ".gitignore exists" if present else ".gitignore missing"
    return {"name": "gitignore", "status": status, "message": message}
def check_env_example(root, all_files):
    """Verify .env.example exists when any code file reads environment variables.

    Scans every file in *all_files* whose extension is in CODE_EXTENSIONS for
    an ENV_VAR_PATTERNS match. If no env-var usage is detected, the check
    passes trivially (no .env.example required).
    """
    uses_env = False
    for filepath in all_files:
        ext = os.path.splitext(filepath)[1]
        if ext not in CODE_EXTENSIONS:
            continue
        try:
            # Context manager closes the handle deterministically; the prior
            # bare open(...).read() leaked the descriptor until GC.
            with open(filepath, "r", encoding="utf-8", errors="ignore") as fh:
                content = fh.read()
        except (OSError, UnicodeDecodeError):
            continue
        if any(re.search(pattern, content) for pattern in ENV_VAR_PATTERNS):
            uses_env = True
            break  # one match anywhere is enough
    if not uses_env:
        return {"name": "env_example", "status": "PASS", "message": "No env vars detected — .env.example not required"}
    path = os.path.join(root, ".env.example")
    if not os.path.isfile(path):
        return {"name": "env_example", "status": "FAIL", "message": "Code references env vars but .env.example is missing"}
    return {"name": "env_example", "status": "PASS", "message": ".env.example exists"}
def check_no_env_file(root):
    """Flag a committed .env file, which typically contains secrets."""
    env_path = os.path.join(root, ".env")
    if not os.path.isfile(env_path):
        return {"name": "no_env_committed", "status": "PASS", "message": "No .env file committed"}
    return {"name": "no_env_committed", "status": "FAIL", "message": ".env file found — secrets may be committed"}
def check_manifest(root):
    """Verify that a recognized package manifest exists at the project root."""
    found = next(
        (m for m in MANIFESTS if os.path.isfile(os.path.join(root, m))),
        None,
    )
    if found is not None:
        return {"name": "manifest", "status": "PASS", "message": f"Package manifest found: {found}"}
    return {"name": "manifest", "status": "FAIL", "message": "No package manifest found (package.json, requirements.txt, go.mod, etc.)"}
def check_tests(all_files, root):
    """Verify that at least one test file exists anywhere in the project."""
    for filepath in all_files:
        # Normalize to forward slashes so directory patterns such as
        # "tests/.*" also match on Windows, where relpath uses backslashes.
        rel = os.path.relpath(filepath, root).replace(os.sep, "/")
        for pattern in TEST_PATTERNS:
            if re.search(pattern, rel):
                return {"name": "tests", "status": "PASS", "message": f"Test file found: {rel}"}
    return {"name": "tests", "status": "FAIL", "message": "No test files found"}
def check_placeholders(all_files, root):
    """Scan code files line-by-line for TODO/FIXME/placeholder markers.

    Returns PASS when none are found, WARN for up to three findings, and
    FAIL beyond that (listing the first five as "file:line").
    """
    findings = []
    for filepath in all_files:
        ext = os.path.splitext(filepath)[1]
        if ext not in CODE_EXTENSIONS:
            continue
        try:
            # Context manager closes the handle deterministically; the prior
            # bare open(...).readlines() leaked the descriptor until GC.
            with open(filepath, "r", encoding="utf-8", errors="ignore") as fh:
                lines = fh.readlines()
        except (OSError, UnicodeDecodeError):
            continue
        for i, line in enumerate(lines, 1):
            for pattern in PLACEHOLDER_PATTERNS:
                if re.search(pattern, line):
                    rel = os.path.relpath(filepath, root)
                    findings.append(f"{rel}:{i}")
                    break  # one finding per line is enough
    if not findings:
        return {"name": "placeholders", "status": "PASS", "message": "No TODO/FIXME/placeholder code found"}
    if len(findings) <= 3:
        return {"name": "placeholders", "status": "WARN",
                "message": f"{len(findings)} placeholder(s) found: {', '.join(findings)}"}
    return {"name": "placeholders", "status": "FAIL",
            "message": f"{len(findings)} placeholders found (showing first 5): {', '.join(findings[:5])}"}
def run_checks(root, strict):
    """Run every validation check against *root* and assemble the result dict.

    When *strict* is true, warnings count against the overall verdict as
    well as failures.
    """
    all_files = list(find_files(root))
    checks = [
        check_readme(root),
        check_gitignore(root),
        check_manifest(root),
        check_env_example(root, all_files),
        check_no_env_file(root),
        check_tests(all_files, root),
        check_placeholders(all_files, root),
    ]
    tally = {"PASS": 0, "WARN": 0, "FAIL": 0}
    for check in checks:
        tally[check["status"]] += 1
    failing = tally["FAIL"] > 0 or (strict and tally["WARN"] > 0)
    return {
        "project": root,
        "files_scanned": len(all_files),
        "checks": checks,
        "summary": {"pass": tally["PASS"], "warn": tally["WARN"], "fail": tally["FAIL"]},
        "overall": "FAIL" if failing else "PASS",
    }
def print_report(result):
    """Pretty-print a validation result dict (from run_checks) to stdout."""
    bar = "=" * 60
    print(bar)
    print("PROJECT VALIDATION REPORT")
    print(bar)
    print(f"Project: {result['project']}")
    print(f"Files scanned: {result['files_scanned']}")
    print()
    status_icon = {"PASS": " \u2705", "WARN": " \u26a0\ufe0f", "FAIL": " \u274c"}
    for check in result["checks"]:
        print(f"{status_icon[check['status']]} [{check['status']}] {check['name']}: {check['message']}")
    summary = result["summary"]
    print()
    print(f"Results: {summary['pass']} pass, {summary['warn']} warn, {summary['fail']} fail")
    overall_icon = "\u2705" if result["overall"] == "PASS" else "\u274c"
    print(f"Overall: {overall_icon} {result['overall']}")
    print(bar)
def main():
    """CLI entry point: parse arguments, run validation, report, set exit code."""
    parser = argparse.ArgumentParser(
        description="Validate a generated project directory for common issues."
    )
    parser.add_argument("path", help="Path to the project directory to validate")
    parser.add_argument(
        "--format",
        choices=["text", "json"],
        default="text",
        help="Output format (default: text)",
    )
    parser.add_argument(
        "--strict",
        action="store_true",
        help="Treat warnings as failures",
    )
    args = parser.parse_args()

    if not os.path.isdir(args.path):
        # Fail fast so run_checks never walks a non-directory.
        print(f"Error: not a directory: {args.path}", file=sys.stderr)
        sys.exit(1)

    report = run_checks(os.path.abspath(args.path), args.strict)
    if args.format == "json":
        print(json.dumps(report, indent=2))
    else:
        print_report(report)
    # Exit code mirrors the overall verdict so CI pipelines can gate on it.
    sys.exit(0 if report["overall"] == "PASS" else 1)


if __name__ == "__main__":
    main()