"],
+ "js": ["content/content.js"]
+ }],
+ "background": {
+ "service_worker": "background/service-worker.js"
+ },
+ "options_page": "options/options.html"
+}
+```
+
+### Communication Pattern
+```
+Popup ←→ Background (Service Worker) ←→ Content Script
+ ↓
+ chrome.storage
+```
+```
+
+### Content Scripts
+
+Code that runs on web pages
+
+**When to use**: When modifying or reading page content
+
+```javascript
+## Content Scripts
+
+### Basic Content Script
+```javascript
+// content.js - Runs on every matched page
+
+// Wait for page to load
+document.addEventListener('DOMContentLoaded', () => {
+ // Modify the page
+ const element = document.querySelector('.target');
+ if (element) {
+ element.style.backgroundColor = 'yellow';
+ }
+});
+
+// Listen for messages from popup/background
+chrome.runtime.onMessage.addListener((message, sender, sendResponse) => {
+ if (message.action === 'getData') {
+ const data = document.querySelector('.data')?.textContent;
+ sendResponse({ data });
+ }
+ return true; // Keep channel open for async
+});
+```
+
+### Injecting UI
+```javascript
+// Create floating UI on page
+function injectUI() {
+ const container = document.createElement('div');
+ container.id = 'my-extension-ui';
+ container.innerHTML = `
+    <div style="position: fixed; bottom: 20px; right: 20px; z-index: 9999;
+                background: #fff; border: 1px solid #ccc; padding: 8px;">
+      <span>My Extension</span>
+      <button id="my-extension-btn">Open</button>
+    </div>
+ `;
+ document.body.appendChild(container);
+
+ document.getElementById('my-extension-btn').addEventListener('click', () => {
+ // Handle click
+ });
+}
+
+injectUI();
+```
+
+### Permissions for Content Scripts
+```json
+{
+ "content_scripts": [{
+ "matches": ["https://specific-site.com/*"],
+ "js": ["content.js"],
+ "run_at": "document_end"
+ }]
+}
+```
+```
+
+### Storage and State
+
+Persisting extension data
+
+**When to use**: When saving user settings or data
+
+```javascript
+## Storage and State
+
+### Chrome Storage API
+```javascript
+// Save data
+chrome.storage.local.set({ key: 'value' }, () => {
+ console.log('Saved');
+});
+
+// Get data
+chrome.storage.local.get(['key'], (result) => {
+ console.log(result.key);
+});
+
+// Sync storage (syncs across devices)
+chrome.storage.sync.set({ setting: true });
+
+// Watch for changes
+chrome.storage.onChanged.addListener((changes, area) => {
+ if (changes.key) {
+ console.log('key changed:', changes.key.newValue);
+ }
+});
+```
+
+### Storage Limits
+| Type | Limit |
+|------|-------|
+| local | 5MB |
+| sync | 100KB total, 8KB per item |
+
+### Async/Await Pattern
+```javascript
+// Modern async wrapper
+async function getStorage(keys) {
+ return new Promise((resolve) => {
+ chrome.storage.local.get(keys, resolve);
+ });
+}
+
+async function setStorage(data) {
+ return new Promise((resolve) => {
+ chrome.storage.local.set(data, resolve);
+ });
+}
+
+// Usage
+const { settings } = await getStorage(['settings']);
+await setStorage({ settings: { ...settings, theme: 'dark' } });
+```
+```
+
+## Anti-Patterns
+
+### ❌ Requesting All Permissions
+
+**Why bad**: Users won't install.
+Store may reject.
+Security risk.
+Bad reviews.
+
+**Instead**: Request minimum needed.
+Use optional permissions.
+Explain why in description.
+Request at time of use.
+
+### ❌ Heavy Background Processing
+
+**Why bad**: MV3 terminates idle workers.
+Battery drain.
+Browser slows down.
+Users uninstall.
+
+**Instead**: Keep background minimal.
+Use alarms for periodic tasks.
+Offload to content scripts.
+Cache aggressively.
+
+### ❌ Breaking on Updates
+
+**Why bad**: Selectors change.
+APIs change.
+Angry users.
+Bad reviews.
+
+**Instead**: Use stable selectors.
+Add error handling.
+Monitor for breakage.
+Update quickly when broken.
+
+## Related Skills
+
+Works well with: `frontend`, `micro-saas-launcher`, `personal-tool-builder`
diff --git a/skills/bullmq-specialist/SKILL.md b/skills/bullmq-specialist/SKILL.md
new file mode 100644
index 00000000..f1d86d42
--- /dev/null
+++ b/skills/bullmq-specialist/SKILL.md
@@ -0,0 +1,57 @@
+---
+name: bullmq-specialist
+description: "BullMQ expert for Redis-backed job queues, background processing, and reliable async execution in Node.js/TypeScript applications. Use when: bullmq, bull queue, redis queue, background job, job queue."
+source: vibeship-spawner-skills (Apache 2.0)
+---
+
+# BullMQ Specialist
+
+You are a BullMQ expert who has processed billions of jobs in production.
+You understand that queues are the backbone of scalable applications - they
+decouple services, smooth traffic spikes, and enable reliable async processing.
+
+You've debugged stuck jobs at 3am, optimized worker concurrency for maximum
+throughput, and designed job flows that handle complex multi-step processes.
+You know that most queue problems are actually Redis problems or application
+design problems.
+
+Your core philosophy: most queue failures are design failures—keep payloads
+small, handle retries explicitly, and watch your Redis.
+
+## Capabilities
+
+- bullmq-queues
+- job-scheduling
+- delayed-jobs
+- repeatable-jobs
+- job-priorities
+- rate-limiting-jobs
+- job-events
+- worker-patterns
+- flow-producers
+- job-dependencies
+
+## Patterns
+
+### Basic Queue Setup
+
+Production-ready BullMQ queue with proper configuration
+
+### Delayed and Scheduled Jobs
+
+Jobs that run at specific times or after delays
+
+### Job Flows and Dependencies
+
+Complex multi-step job processing with parent-child relationships
+
+## Anti-Patterns
+
+### ❌ Giant Job Payloads
+
+### ❌ No Dead Letter Queue
+
+### ❌ Infinite Concurrency
+
+## Related Skills
+
+Works well with: `redis-specialist`, `backend`, `nextjs-app-router`, `email-systems`, `ai-workflow-automation`, `performance-hunter`
diff --git a/skills/clerk-auth/SKILL.md b/skills/clerk-auth/SKILL.md
new file mode 100644
index 00000000..ff54840a
--- /dev/null
+++ b/skills/clerk-auth/SKILL.md
@@ -0,0 +1,56 @@
+---
+name: clerk-auth
+description: "Expert patterns for Clerk auth implementation, middleware, organizations, webhooks, and user sync. Use when: adding authentication, clerk auth, user authentication, sign in, sign up."
+source: vibeship-spawner-skills (Apache 2.0)
+---
+
+# Clerk Authentication
+
+## Patterns
+
+### Next.js App Router Setup
+
+Complete Clerk setup for Next.js 14/15 App Router.
+
+Includes ClerkProvider, environment variables, and basic
+sign-in/sign-up components.
+
+Key components:
+- ClerkProvider: Wraps app for auth context
+- `<SignIn />`, `<SignUp />`: Pre-built auth forms
+- `<UserButton />`: User menu with session management
+
+
+### Middleware Route Protection
+
+Protect routes using clerkMiddleware and createRouteMatcher.
+
+Best practices:
+- Single middleware.ts file at project root
+- Use createRouteMatcher for route groups
+- auth.protect() for explicit protection
+- Centralize all auth logic in middleware
+
+
+### Server Component Authentication
+
+Access auth state in Server Components using auth() and currentUser().
+
+Key functions:
+- auth(): Returns userId, sessionId, orgId, claims
+- currentUser(): Returns full User object
+- Both require clerkMiddleware to be configured
+
+
+## ⚠️ Sharp Edges
+
+| Issue | Severity | Solution |
+|-------|----------|----------|
+| Issue | critical | See docs |
+| Issue | high | See docs |
+| Issue | high | See docs |
+| Issue | high | See docs |
+| Issue | medium | See docs |
+| Issue | medium | See docs |
+| Issue | medium | See docs |
+| Issue | medium | See docs |
diff --git a/skills/computer-use-agents/SKILL.md b/skills/computer-use-agents/SKILL.md
new file mode 100644
index 00000000..ab0daa87
--- /dev/null
+++ b/skills/computer-use-agents/SKILL.md
@@ -0,0 +1,315 @@
+---
+name: computer-use-agents
+description: "Build AI agents that interact with computers like humans do - viewing screens, moving cursors, clicking buttons, and typing text. Covers Anthropic's Computer Use, OpenAI's Operator/CUA, and open-source alternatives. Critical focus on sandboxing, security, and handling the unique challenges of vision-based control. Use when: computer use, desktop automation agent, screen control AI, vision-based agent, GUI automation."
+source: vibeship-spawner-skills (Apache 2.0)
+---
+
+# Computer Use Agents
+
+## Patterns
+
+### Perception-Reasoning-Action Loop
+
+The fundamental architecture of computer use agents: observe screen,
+reason about next action, execute action, repeat. This loop integrates
+vision models with action execution through an iterative pipeline.
+
+Key components:
+1. PERCEPTION: Screenshot captures current screen state
+2. REASONING: Vision-language model analyzes and plans
+3. ACTION: Execute mouse/keyboard operations
+4. FEEDBACK: Observe result, continue or correct
+
+Critical insight: Vision agents are completely still during "thinking"
+phase (1-5 seconds), creating a detectable pause pattern.
+
+
+**When to use**: Building any computer use agent from scratch; integrating vision models with desktop control; understanding agent behavior patterns.
+
+```python
+from anthropic import Anthropic
+from PIL import Image
+import base64
+import pyautogui
+import time
+
+class ComputerUseAgent:
+ """
+ Perception-Reasoning-Action loop implementation.
+ Based on Anthropic Computer Use patterns.
+ """
+
+ def __init__(self, client: Anthropic, model: str = "claude-sonnet-4-20250514"):
+ self.client = client
+ self.model = model
+ self.max_steps = 50 # Prevent runaway loops
+ self.action_delay = 0.5 # Seconds between actions
+
+ def capture_screenshot(self) -> str:
+ """Capture screen and return base64 encoded image."""
+ screenshot = pyautogui.screenshot()
+ # Resize for token efficiency (1280x800 is good balance)
+ screenshot = screenshot.resize((1280, 800), Image.LANCZOS)
+
+ import io
+ buffer = io.BytesIO()
+ screenshot.save(buffer, format="PNG")
+ return base64.b64encode(buffer.getvalue()).decode()
+
+ def execute_action(self, action: dict) -> dict:
+ """Execute mouse/keyboard action on the computer."""
+ action_type = action.get("type")
+
+ if action_type == "click":
+ x, y = action["x"], action["y"]
+ button = action.get("button", "left")
+ pyautogui.click(x, y, button=button)
+ return {"success": True, "action": f"clicked at ({x}, {y})"}
+
+ elif action_type == "type":
+ text = action["text"]
+ pyautogui.typewrite(text, interval=0.02)
+ return {"success": True, "action": f"typed {len(text)} chars"}
+
+ elif action_type == "key":
+ key = action["key"]
+ pyautogui.press(key)
+ return {"success": True, "action": f"pressed {key}"}
+
+ elif action_type == "scroll":
+ direction = action.get("direction", "down")
+ amount = action.get("amount", 3)
+ scroll = -amount if direction == "down" else amount
+ pyautogui.scroll(scroll)
+ return {"success": True, "action": f"scrolled {dir
+```
+
+### Sandboxed Environment Pattern
+
+Computer use agents MUST run in isolated, sandboxed environments.
+Never give agents direct access to your main system - the security
+risks are too high. Use Docker containers with virtual desktops.
+
+Key isolation requirements:
+1. NETWORK: Restrict to necessary endpoints only
+2. FILESYSTEM: Read-only or scoped to temp directories
+3. CREDENTIALS: No access to host credentials
+4. SYSCALLS: Filter dangerous system calls
+5. RESOURCES: Limit CPU, memory, time
+
+The goal is "blast radius minimization" - if the agent goes wrong,
+damage is contained to the sandbox.
+
+
+**When to use**: Deploying any computer use agent; testing agent behavior safely; running untrusted automation tasks.
+
+```python
+# Dockerfile for sandboxed computer use environment
+# Based on Anthropic's reference implementation pattern
+
+FROM ubuntu:22.04
+
+# Install desktop environment
+RUN apt-get update && apt-get install -y \
+ xvfb \
+ x11vnc \
+ fluxbox \
+ xterm \
+ firefox \
+ python3 \
+ python3-pip \
+ supervisor
+
+# Security: Create non-root user
+RUN useradd -m -s /bin/bash agent && \
+ mkdir -p /home/agent/.vnc
+
+# Install Python dependencies
+COPY requirements.txt /tmp/
+RUN pip3 install -r /tmp/requirements.txt
+
+# Security: Drop capabilities
+RUN apt-get install -y --no-install-recommends libcap2-bin && \
+ setcap -r /usr/bin/python3 || true
+
+# Copy agent code
+COPY --chown=agent:agent . /app
+WORKDIR /app
+
+# Supervisor config for virtual display + VNC
+COPY supervisord.conf /etc/supervisor/conf.d/
+
+# Expose VNC port only (not desktop directly)
+EXPOSE 5900
+
+# Run as non-root
+USER agent
+
+CMD ["/usr/bin/supervisord", "-c", "/etc/supervisor/conf.d/supervisord.conf"]
+
+---
+
+# docker-compose.yml with security constraints
+version: '3.8'
+
+services:
+ computer-use-agent:
+ build: .
+ ports:
+ - "5900:5900" # VNC for observation
+ - "8080:8080" # API for control
+
+ # Security constraints
+ security_opt:
+ - no-new-privileges:true
+ - seccomp:seccomp-profile.json
+
+ # Resource limits
+ deploy:
+ resources:
+ limits:
+ cpus: '2'
+ memory: 4G
+ reservations:
+ cpus: '0.5'
+ memory: 1G
+
+ # Network isolation
+ networks:
+ - agent-network
+
+ # No access to host filesystem
+ volumes:
+ - agent-tmp:/tmp
+
+ # Read-only root filesystem
+ read_only: true
+ tmpfs:
+ - /run
+ - /var/run
+
+ # Environment
+ environment:
+ - DISPLAY=:99
+ - NO_PROXY=localhost
+
+networks:
+ agent-network:
+ driver: bridge
+ internal: true # No internet by default
+
+volumes:
+ agent-tmp:
+
+---
+
+# Python wrapper with additional runtime sandboxing
+import subprocess
+import os
+from dataclasses im
+```
+
+### Anthropic Computer Use Implementation
+
+Official implementation pattern using Claude's computer use capability.
+Claude 3.5 Sonnet was the first frontier model to offer computer use.
+Claude Opus 4.5 is now the "best model in the world for computer use."
+
+Key capabilities:
+- screenshot: Capture current screen state
+- mouse: Click, move, drag operations
+- keyboard: Type text, press keys
+- bash: Run shell commands
+- text_editor: View and edit files
+
+Tool versions:
+- computer_20251124 (Opus 4.5): Adds zoom action for detailed inspection
+- computer_20250124 (All other models): Standard capabilities
+
+Critical limitation: "Some UI elements (like dropdowns and scrollbars)
+might be tricky for Claude to manipulate" - Anthropic docs
+
+
+**When to use**: Building production computer use agents; need highest quality vision understanding; full desktop control (not just browser).
+
+```python
+from anthropic import Anthropic
+from anthropic.types.beta import (
+ BetaToolComputerUse20241022,
+ BetaToolBash20241022,
+ BetaToolTextEditor20241022,
+)
+import subprocess
+import base64
+from PIL import Image
+import io
+
+class AnthropicComputerUse:
+ """
+ Official Anthropic Computer Use implementation.
+
+ Requires:
+ - Docker container with virtual display
+ - VNC for viewing agent actions
+ - Proper tool implementations
+ """
+
+ def __init__(self):
+ self.client = Anthropic()
+ self.model = "claude-sonnet-4-20250514" # Best for computer use
+ self.screen_size = (1280, 800)
+
+ def get_tools(self) -> list:
+ """Define computer use tools."""
+ return [
+ BetaToolComputerUse20241022(
+ type="computer_20241022",
+ name="computer",
+ display_width_px=self.screen_size[0],
+ display_height_px=self.screen_size[1],
+ ),
+ BetaToolBash20241022(
+ type="bash_20241022",
+ name="bash",
+ ),
+ BetaToolTextEditor20241022(
+ type="text_editor_20241022",
+ name="str_replace_editor",
+ ),
+ ]
+
+ def execute_tool(self, name: str, input: dict) -> dict:
+ """Execute a tool and return result."""
+
+ if name == "computer":
+ return self._handle_computer_action(input)
+ elif name == "bash":
+ return self._handle_bash(input)
+ elif name == "str_replace_editor":
+ return self._handle_editor(input)
+ else:
+ return {"error": f"Unknown tool: {name}"}
+
+ def _handle_computer_action(self, input: dict) -> dict:
+ """Handle computer control actions."""
+ action = input.get("action")
+
+ if action == "screenshot":
+ # Capture via xdotool/scrot
+ subprocess.run(["scrot", "/tmp/screenshot.png"])
+
+ with open("/tmp/screenshot.png", "rb") as f:
+
+```
+
+## ⚠️ Sharp Edges
+
+| Issue | Severity | Solution |
+|-------|----------|----------|
+| Issue | critical | Defense in depth - no single solution works |
+| Issue | medium | Add human-like variance to actions |
+| Issue | high | Use keyboard alternatives when possible |
+| Issue | medium | Accept the tradeoff |
+| Issue | high | Implement context management |
+| Issue | high | Monitor and limit costs |
+| Issue | critical | ALWAYS use sandboxing |
diff --git a/skills/context-window-management/SKILL.md b/skills/context-window-management/SKILL.md
new file mode 100644
index 00000000..d60f4ae0
--- /dev/null
+++ b/skills/context-window-management/SKILL.md
@@ -0,0 +1,53 @@
+---
+name: context-window-management
+description: "Strategies for managing LLM context windows including summarization, trimming, routing, and avoiding context rot. Use when: context window, token limit, context management, context engineering, long context."
+source: vibeship-spawner-skills (Apache 2.0)
+---
+
+# Context Window Management
+
+You're a context engineering specialist who has optimized LLM applications handling
+millions of conversations. You've seen systems hit token limits, suffer context rot,
+and lose critical information mid-dialogue.
+
+You understand that context is a finite resource with diminishing returns. More tokens
+doesn't mean better results—the art is in curating the right information. You know
+the serial position effect, the lost-in-the-middle problem, and when to summarize
+versus when to retrieve.
+
+Your core principle: context is a finite resource—curate the right
+information rather than maximizing tokens.
+
+## Capabilities
+
+- context-engineering
+- context-summarization
+- context-trimming
+- context-routing
+- token-counting
+- context-prioritization
+
+## Patterns
+
+### Tiered Context Strategy
+
+Different strategies based on context size
+
+### Serial Position Optimization
+
+Place important content at start and end
+
+### Intelligent Summarization
+
+Summarize by importance, not just recency
+
+## Anti-Patterns
+
+### ❌ Naive Truncation
+
+### ❌ Ignoring Token Costs
+
+### ❌ One-Size-Fits-All
+
+## Related Skills
+
+Works well with: `rag-implementation`, `conversation-memory`, `prompt-caching`, `llm-npc-dialogue`
diff --git a/skills/conversation-memory/SKILL.md b/skills/conversation-memory/SKILL.md
new file mode 100644
index 00000000..e5f926e1
--- /dev/null
+++ b/skills/conversation-memory/SKILL.md
@@ -0,0 +1,61 @@
+---
+name: conversation-memory
+description: "Persistent memory systems for LLM conversations including short-term, long-term, and entity-based memory. Use when: conversation memory, remember, memory persistence, long-term memory, chat history."
+source: vibeship-spawner-skills (Apache 2.0)
+---
+
+# Conversation Memory
+
+You're a memory systems specialist who has built AI assistants that remember
+users across months of interactions. You've implemented systems that know when
+to remember, when to forget, and how to surface relevant memories.
+
+You understand that memory is not just storage—it's about retrieval, relevance,
+and context. You've seen systems that remember everything (and overwhelm context)
+and systems that forget too much (frustrating users).
+
+Your core principles:
+1. Memory types differ—short-term, long-term, and entity-based memory each
+   serve different purposes.
+
+## Capabilities
+
+- short-term-memory
+- long-term-memory
+- entity-memory
+- memory-persistence
+- memory-retrieval
+- memory-consolidation
+
+## Patterns
+
+### Tiered Memory System
+
+Different memory tiers for different purposes
+
+### Entity Memory
+
+Store and update facts about entities
+
+### Memory-Aware Prompting
+
+Include relevant memories in prompts
+
+## Anti-Patterns
+
+### ❌ Remember Everything
+
+### ❌ No Memory Retrieval
+
+### ❌ Single Memory Store
+
+## ⚠️ Sharp Edges
+
+| Issue | Severity | Solution |
+|-------|----------|----------|
+| Memory store grows unbounded, system slows | high | Implement memory lifecycle management |
+| Retrieved memories not relevant to current query | high | Intelligent memory retrieval |
+| Memories from one user accessible to another | critical | Strict user isolation in memory |
+
+## Related Skills
+
+Works well with: `context-window-management`, `rag-implementation`, `prompt-caching`, `llm-npc-dialogue`
diff --git a/skills/crewai/SKILL.md b/skills/crewai/SKILL.md
new file mode 100644
index 00000000..33d67aab
--- /dev/null
+++ b/skills/crewai/SKILL.md
@@ -0,0 +1,243 @@
+---
+name: crewai
+description: "Expert in CrewAI - the leading role-based multi-agent framework used by 60% of Fortune 500 companies. Covers agent design with roles and goals, task definition, crew orchestration, process types (sequential, hierarchical, parallel), memory systems, and flows for complex workflows. Essential for building collaborative AI agent teams. Use when: crewai, multi-agent team, agent roles, crew of agents, role-based agents."
+source: vibeship-spawner-skills (Apache 2.0)
+---
+
+# CrewAI
+
+**Role**: CrewAI Multi-Agent Architect
+
+You are an expert in designing collaborative AI agent teams with CrewAI. You think
+in terms of roles, responsibilities, and delegation. You design clear agent personas
+with specific expertise, create well-defined tasks with expected outputs, and
+orchestrate crews for optimal collaboration. You know when to use sequential vs
+hierarchical processes.
+
+## Capabilities
+
+- Agent definitions (role, goal, backstory)
+- Task design and dependencies
+- Crew orchestration
+- Process types (sequential, hierarchical)
+- Memory configuration
+- Tool integration
+- Flows for complex workflows
+
+## Requirements
+
+- Python 3.10+
+- crewai package
+- LLM API access
+
+## Patterns
+
+### Basic Crew with YAML Config
+
+Define agents and tasks in YAML (recommended)
+
+**When to use**: Any CrewAI project
+
+```python
+# config/agents.yaml
+researcher:
+ role: "Senior Research Analyst"
+ goal: "Find comprehensive, accurate information on {topic}"
+ backstory: |
+ You are an expert researcher with years of experience
+ in gathering and analyzing information. You're known
+ for your thorough and accurate research.
+ tools:
+ - SerperDevTool
+ - WebsiteSearchTool
+ verbose: true
+
+writer:
+ role: "Content Writer"
+ goal: "Create engaging, well-structured content"
+ backstory: |
+ You are a skilled writer who transforms research
+ into compelling narratives. You focus on clarity
+ and engagement.
+ verbose: true
+
+# config/tasks.yaml
+research_task:
+ description: |
+ Research the topic: {topic}
+
+ Focus on:
+ 1. Key facts and statistics
+ 2. Recent developments
+ 3. Expert opinions
+ 4. Contrarian viewpoints
+
+ Be thorough and cite sources.
+ agent: researcher
+ expected_output: |
+ A comprehensive research report with:
+ - Executive summary
+ - Key findings (bulleted)
+ - Sources cited
+
+writing_task:
+ description: |
+ Using the research provided, write an article about {topic}.
+
+ Requirements:
+ - 800-1000 words
+ - Engaging introduction
+ - Clear structure with headers
+ - Actionable conclusion
+ agent: writer
+ expected_output: "A polished article ready for publication"
+ context:
+ - research_task # Uses output from research
+
+# crew.py
+from crewai import Agent, Task, Crew, Process
+from crewai.project import CrewBase, agent, task, crew
+
+@CrewBase
+class ContentCrew:
+ agents_config = 'config/agents.yaml'
+ tasks_config = 'config/tasks.yaml'
+
+ @agent
+ def researcher(self) -> Agent:
+ return Agent(config=self.agents_config['researcher'])
+
+ @agent
+ def writer(self) -> Agent:
+ return Agent(config=self.agents_config['writer'])
+
+ @task
+ def research_task(self) -> Task:
+ return Task(config=self.tasks_config['research_task'])
+
+ @task
+ def writing_task(self) -> Task:
+ return Task(config
+```
+
+### Hierarchical Process
+
+Manager agent delegates to workers
+
+**When to use**: Complex tasks needing coordination
+
+```python
+from crewai import Crew, Process
+
+# Define specialized agents
+researcher = Agent(
+ role="Research Specialist",
+ goal="Find accurate information",
+ backstory="Expert researcher..."
+)
+
+analyst = Agent(
+ role="Data Analyst",
+ goal="Analyze and interpret data",
+ backstory="Expert analyst..."
+)
+
+writer = Agent(
+ role="Content Writer",
+ goal="Create engaging content",
+ backstory="Expert writer..."
+)
+
+# Hierarchical crew - manager coordinates
+crew = Crew(
+ agents=[researcher, analyst, writer],
+ tasks=[research_task, analysis_task, writing_task],
+ process=Process.hierarchical,
+ manager_llm=ChatOpenAI(model="gpt-4o"), # Manager model
+ verbose=True
+)
+
+# Manager decides:
+# - Which agent handles which task
+# - When to delegate
+# - How to combine results
+
+result = crew.kickoff()
+```
+
+### Planning Feature
+
+Generate execution plan before running
+
+**When to use**: Complex workflows needing structure
+
+```python
+from crewai import Crew, Process
+
+# Enable planning
+crew = Crew(
+ agents=[researcher, writer, reviewer],
+ tasks=[research, write, review],
+ process=Process.sequential,
+ planning=True, # Enable planning
+ planning_llm=ChatOpenAI(model="gpt-4o") # Planner model
+)
+
+# With planning enabled:
+# 1. CrewAI generates step-by-step plan
+# 2. Plan is injected into each task
+# 3. Agents see overall structure
+# 4. More consistent results
+
+result = crew.kickoff()
+
+# Access the plan
+print(crew.plan)
+```
+
+## Anti-Patterns
+
+### ❌ Vague Agent Roles
+
+**Why bad**: Agent doesn't know its specialty.
+Overlapping responsibilities.
+Poor task delegation.
+
+**Instead**: Be specific:
+- "Senior React Developer" not "Developer"
+- "Financial Analyst specializing in crypto" not "Analyst"
+Include specific skills in backstory.
+
+### ❌ Missing Expected Outputs
+
+**Why bad**: Agent doesn't know done criteria.
+Inconsistent outputs.
+Hard to chain tasks.
+
+**Instead**: Always specify expected_output:
+expected_output: |
+ A JSON object with:
+ - summary: string (100 words max)
+ - key_points: list of strings
+ - confidence: float 0-1
+
+### ❌ Too Many Agents
+
+**Why bad**: Coordination overhead.
+Inconsistent communication.
+Slower execution.
+
+**Instead**: 3-5 agents with clear roles.
+One agent can handle multiple related tasks.
+Use tools instead of agents for simple actions.
+
+## Limitations
+
+- Python-only
+- Best for structured workflows
+- Can be verbose for simple cases
+- Flows are newer feature
+
+## Related Skills
+
+Works well with: `langgraph`, `autonomous-agents`, `langfuse`, `structured-output`
diff --git a/skills/discord-bot-architect/SKILL.md b/skills/discord-bot-architect/SKILL.md
new file mode 100644
index 00000000..cd823a15
--- /dev/null
+++ b/skills/discord-bot-architect/SKILL.md
@@ -0,0 +1,277 @@
+---
+name: discord-bot-architect
+description: "Specialized skill for building production-ready Discord bots. Covers Discord.js (JavaScript) and Pycord (Python), gateway intents, slash commands, interactive components, rate limiting, and sharding."
+source: vibeship-spawner-skills (Apache 2.0)
+---
+
+# Discord Bot Architect
+
+## Patterns
+
+### Discord.js v14 Foundation
+
+Modern Discord bot setup with Discord.js v14 and slash commands
+
+**When to use**: Building Discord bots with JavaScript/TypeScript; need full gateway connection with events; building bots with complex interactions.
+
+```javascript
+```javascript
+// src/index.js
+const { Client, Collection, GatewayIntentBits, Events } = require('discord.js');
+const fs = require('node:fs');
+const path = require('node:path');
+require('dotenv').config();
+
+// Create client with minimal required intents
+const client = new Client({
+ intents: [
+ GatewayIntentBits.Guilds,
+ // Add only what you need:
+ // GatewayIntentBits.GuildMessages,
+ // GatewayIntentBits.MessageContent, // PRIVILEGED - avoid if possible
+ ]
+});
+
+// Load commands
+client.commands = new Collection();
+const commandsPath = path.join(__dirname, 'commands');
+const commandFiles = fs.readdirSync(commandsPath).filter(f => f.endsWith('.js'));
+
+for (const file of commandFiles) {
+ const filePath = path.join(commandsPath, file);
+ const command = require(filePath);
+ if ('data' in command && 'execute' in command) {
+ client.commands.set(command.data.name, command);
+ }
+}
+
+// Load events
+const eventsPath = path.join(__dirname, 'events');
+const eventFiles = fs.readdirSync(eventsPath).filter(f => f.endsWith('.js'));
+
+for (const file of eventFiles) {
+ const filePath = path.join(eventsPath, file);
+ const event = require(filePath);
+ if (event.once) {
+ client.once(event.name, (...args) => event.execute(...args));
+ } else {
+ client.on(event.name, (...args) => event.execute(...args));
+ }
+}
+
+client.login(process.env.DISCORD_TOKEN);
+```
+
+```javascript
+// src/commands/ping.js
+const { SlashCommandBuilder } = require('discord.js');
+
+module.exports = {
+ data: new SlashCommandBuilder()
+ .setName('ping')
+ .setDescription('Replies with Pong!'),
+
+ async execute(interaction) {
+ const sent = await interaction.reply({
+ content: 'Pinging...',
+ fetchReply: true
+ });
+
+ const latency = sent.createdTimestamp - interaction.createdTimestamp;
+ await interaction.editReply(`Pong! Latency: ${latency}ms`);
+ }
+};
+```
+
+```javascript
+// src/events/interactionCreate.js
+const { Events } = require('discord.js');
+
+module.exports = {
+ name: Event
+```
+
+### Pycord Bot Foundation
+
+Discord bot with Pycord (Python) and application commands
+
+**When to use**: Building Discord bots with Python; prefer async/await patterns; need good slash command support.
+
+```python
+```python
+# main.py
+import os
+import discord
+from discord.ext import commands
+from dotenv import load_dotenv
+
+load_dotenv()
+
+# Configure intents - only enable what you need
+intents = discord.Intents.default()
+# intents.message_content = True # PRIVILEGED - avoid if possible
+# intents.members = True # PRIVILEGED
+
+bot = commands.Bot(
+ command_prefix="!", # Legacy, prefer slash commands
+ intents=intents
+)
+
+@bot.event
+async def on_ready():
+ print(f"Logged in as {bot.user}")
+ # Sync commands (do this carefully - see sharp edges)
+ # await bot.sync_commands()
+
+# Slash command
+@bot.slash_command(name="ping", description="Check bot latency")
+async def ping(ctx: discord.ApplicationContext):
+ latency = round(bot.latency * 1000)
+ await ctx.respond(f"Pong! Latency: {latency}ms")
+
+# Slash command with options
+@bot.slash_command(name="greet", description="Greet a user")
+async def greet(
+ ctx: discord.ApplicationContext,
+ user: discord.Option(discord.Member, "User to greet"),
+ message: discord.Option(str, "Custom message", required=False)
+):
+ msg = message or "Hello!"
+ await ctx.respond(f"{user.mention}, {msg}")
+
+# Load cogs
+for filename in os.listdir("./cogs"):
+ if filename.endswith(".py"):
+ bot.load_extension(f"cogs.{filename[:-3]}")
+
+bot.run(os.environ["DISCORD_TOKEN"])
+```
+
+```python
+# cogs/general.py
+import discord
+from discord.ext import commands
+
+class General(commands.Cog):
+ def __init__(self, bot):
+ self.bot = bot
+
+ @commands.slash_command(name="info", description="Bot information")
+ async def info(self, ctx: discord.ApplicationContext):
+ embed = discord.Embed(
+ title="Bot Info",
+ description="A helpful Discord bot",
+ color=discord.Color.blue()
+ )
+ embed.add_field(name="Servers", value=len(self.bot.guilds))
+ embed.add_field(name="Latency", value=f"{round(self.bot.latency * 1000)}ms")
+ await ctx.respond(embed=embed)
+
+ @commands.Cog.
+```
+
+### Interactive Components Pattern
+
+Using buttons, select menus, and modals for rich UX
+
+**When to use**: Need interactive user interfaces; collecting user input beyond slash command options; building menus, confirmations, or forms.
+
+```javascript
+```javascript
+// Discord.js - Buttons and Select Menus
+const {
+ SlashCommandBuilder,
+ ActionRowBuilder,
+ ButtonBuilder,
+ ButtonStyle,
+ StringSelectMenuBuilder,
+ ModalBuilder,
+ TextInputBuilder,
+ TextInputStyle
+} = require('discord.js');
+
+module.exports = {
+ data: new SlashCommandBuilder()
+ .setName('menu')
+ .setDescription('Shows an interactive menu'),
+
+ async execute(interaction) {
+ // Button row
+ const buttonRow = new ActionRowBuilder()
+ .addComponents(
+ new ButtonBuilder()
+ .setCustomId('confirm')
+ .setLabel('Confirm')
+ .setStyle(ButtonStyle.Primary),
+ new ButtonBuilder()
+ .setCustomId('cancel')
+ .setLabel('Cancel')
+ .setStyle(ButtonStyle.Danger),
+ new ButtonBuilder()
+ .setLabel('Documentation')
+ .setURL('https://discord.js.org')
+ .setStyle(ButtonStyle.Link) // Link buttons don't emit events
+ );
+
+ // Select menu row (one per row, takes all 5 slots)
+ const selectRow = new ActionRowBuilder()
+ .addComponents(
+ new StringSelectMenuBuilder()
+ .setCustomId('select-role')
+ .setPlaceholder('Select a role')
+ .setMinValues(1)
+ .setMaxValues(3)
+ .addOptions([
+ { label: 'Developer', value: 'dev', emoji: '💻' },
+ { label: 'Designer', value: 'design', emoji: '🎨' },
+ { label: 'Community', value: 'community', emoji: '🎉' }
+ ])
+ );
+
+ await interaction.reply({
+ content: 'Choose an option:',
+ components: [buttonRow, selectRow]
+ });
+
+ // Collect responses
+ const collector = interaction.channel.createMessageComponentCollector({
+ filter: i => i.user.id === interaction.user.id,
+ time: 60_000 // 60 seconds timeout
+ });
+
+ collector.on('collect', async i => {
+ if (i.customId === 'confirm') {
+ await i.update({ content: 'Confirmed!', components: [] });
+ collector.stop();
+ } else if (i.custo
+```
+
+## Anti-Patterns
+
+### ❌ Message Content for Commands
+
+**Why bad**: Message Content Intent is privileged and deprecated for bot commands.
+Slash commands are the intended approach.
+
+### ❌ Syncing Commands on Every Start
+
+**Why bad**: Command registration is rate limited. Global commands take up to 1 hour
+to propagate. Syncing on every start wastes API calls and can hit limits.
+
+### ❌ Blocking the Event Loop
+
+**Why bad**: Discord gateway requires regular heartbeats. Blocking operations
+cause missed heartbeats and disconnections.
+
+## ⚠️ Sharp Edges
+
+| Issue | Severity | Solution |
+|-------|----------|----------|
+| Issue | critical | Acknowledge immediately, process later |
+| Issue | critical | Enable the intent in the Developer Portal first |
+| Issue | high | Use a separate deploy script (not on startup) |
+| Issue | critical | Never hardcode tokens |
+| Issue | high | Generate the correct invite URL |
+| Issue | medium | Use guild commands during development |
+| Issue | medium | Never block the event loop |
+| Issue | medium | Show the modal immediately |
diff --git a/skills/email-systems/SKILL.md b/skills/email-systems/SKILL.md
new file mode 100644
index 00000000..76b34463
--- /dev/null
+++ b/skills/email-systems/SKILL.md
@@ -0,0 +1,54 @@
+---
+name: email-systems
+description: "Email has the highest ROI of any marketing channel. $36 for every $1 spent. Yet most startups treat it as an afterthought - bulk blasts, no personalization, landing in spam folders. This skill covers transactional email that works, marketing automation that converts, deliverability that reaches inboxes, and the infrastructure decisions that scale. Use when: email, transactional email, deliverability, SPF, DKIM, DMARC, bounce handling, unsubscribe, email marketing."
+source: vibeship-spawner-skills (Apache 2.0)
+---
+
+# Email Systems
+
+You are an email systems engineer who has maintained 99.9% deliverability
+across millions of emails. You've debugged SPF/DKIM/DMARC, dealt with
+blacklists, and optimized for inbox placement. You know that email is the
+highest ROI channel when done right, and a spam folder nightmare when done
+wrong. You treat deliverability as infrastructure, not an afterthought.
+
+## Patterns
+
+### Transactional Email Queue
+
+Queue all transactional emails with retry logic and monitoring
+
+### Email Event Tracking
+
+Track delivery, opens, clicks, bounces, and complaints
+
+### Template Versioning
+
+Version email templates for rollback and A/B testing
+
+## Anti-Patterns
+
+### ❌ HTML email soup
+
+**Why bad**: Email clients render differently. Outlook breaks everything.
+
+### ❌ No plain text fallback
+
+**Why bad**: Some clients strip HTML. Accessibility issues. Spam signal.
+
+### ❌ Huge image emails
+
+**Why bad**: Images blocked by default. Spam trigger. Slow loading.
+
+## ⚠️ Sharp Edges
+
+| Issue | Severity | Solution |
+|-------|----------|----------|
+| Missing SPF, DKIM, or DMARC records | critical | # Required DNS records: |
+| Using shared IP for transactional email | high | # Transactional email strategy: |
+| Not processing bounce notifications | high | # Bounce handling requirements: |
+| Missing or hidden unsubscribe link | critical | # Unsubscribe requirements: |
+| Sending HTML without plain text alternative | medium | # Always send multipart: |
+| Sending high volume from new IP immediately | high | # IP warm-up schedule: |
+| Emailing people who did not opt in | critical | # Permission requirements: |
+| Emails that are mostly or entirely images | medium | # Balance images and text: |
diff --git a/skills/file-uploads/SKILL.md b/skills/file-uploads/SKILL.md
new file mode 100644
index 00000000..2665e203
--- /dev/null
+++ b/skills/file-uploads/SKILL.md
@@ -0,0 +1,22 @@
+---
+name: file-uploads
+description: "Expert at handling file uploads and cloud storage. Covers S3, Cloudflare R2, presigned URLs, multipart uploads, and image optimization. Knows how to handle large files without blocking. Use when: file upload, S3, R2, presigned URL, multipart."
+source: vibeship-spawner-skills (Apache 2.0)
+---
+
+# File Uploads & Storage
+
+**Role**: File Upload Specialist
+
+Careful about security and performance. Never trusts file
+extensions. Knows that large uploads need special handling.
+Prefers presigned URLs over server proxying.
+
+## ⚠️ Sharp Edges
+
+| Issue | Severity | Solution |
+|-------|----------|----------|
+| Trusting client-provided file type | critical | # CHECK MAGIC BYTES |
+| No upload size restrictions | high | # SET SIZE LIMITS |
+| User-controlled filename allows path traversal | critical | # SANITIZE FILENAMES |
+| Presigned URL shared or cached incorrectly | medium | # CONTROL PRESIGNED URL DISTRIBUTION |
diff --git a/skills/firebase/SKILL.md b/skills/firebase/SKILL.md
new file mode 100644
index 00000000..4c18e980
--- /dev/null
+++ b/skills/firebase/SKILL.md
@@ -0,0 +1,56 @@
+---
+name: firebase
+description: "Firebase gives you a complete backend in minutes - auth, database, storage, functions, hosting. But the ease of setup hides real complexity. Security rules are your last line of defense, and they're often wrong. Firestore queries are limited, and you learn this after you've designed your data model. This skill covers Firebase Authentication, Firestore, Realtime Database, Cloud Functions, Cloud Storage, and Firebase Hosting. Key insight: Firebase is optimized for read-heavy, denormalized data. Design your data model around your queries, not around normalization."
+source: vibeship-spawner-skills (Apache 2.0)
+---
+
+# Firebase
+
+You're a developer who has shipped dozens of Firebase projects. You've seen the
+"easy" path lead to security breaches, runaway costs, and impossible migrations.
+You know Firebase is powerful, but you also know its sharp edges.
+
+Your hard-won lessons: The team that skipped security rules got pwned. The team
+that designed Firestore like SQL couldn't query their data. The team that
+attached listeners to large collections got a $10k bill. You've learned from
+all of them.
+
+You advocate for Firebase when it fits the use case, and you steer teams away
+from it when it doesn't.
+
+## Capabilities
+
+- firebase-auth
+- firestore
+- firebase-realtime-database
+- firebase-cloud-functions
+- firebase-storage
+- firebase-hosting
+- firebase-security-rules
+- firebase-admin-sdk
+- firebase-emulators
+
+## Patterns
+
+### Modular SDK Import
+
+Import only what you need for smaller bundles
+
+### Security Rules Design
+
+Secure your data with proper rules from day one
+
+### Data Modeling for Queries
+
+Design Firestore data structure around query patterns
+
+## Anti-Patterns
+
+### ❌ No Security Rules
+
+### ❌ Client-Side Admin Operations
+
+### ❌ Listener on Large Collections
+
+## Related Skills
+
+Works well with: `nextjs-app-router`, `react-patterns`, `authentication-oauth`, `stripe`
diff --git a/skills/gcp-cloud-run/SKILL.md b/skills/gcp-cloud-run/SKILL.md
new file mode 100644
index 00000000..9c2f491f
--- /dev/null
+++ b/skills/gcp-cloud-run/SKILL.md
@@ -0,0 +1,288 @@
+---
+name: gcp-cloud-run
+description: "Specialized skill for building production-ready serverless applications on GCP. Covers Cloud Run services (containerized), Cloud Run Functions (event-driven), cold start optimization, and event-driven architecture with Pub/Sub."
+source: vibeship-spawner-skills (Apache 2.0)
+---
+
+# GCP Cloud Run
+
+## Patterns
+
+### Cloud Run Service Pattern
+
+Containerized web service on Cloud Run
+
+**When to use**: Web applications and APIs; need any runtime or library; complex services with multiple endpoints; stateless containerized workloads
+
+```javascript
+```dockerfile
+# Dockerfile - Multi-stage build for smaller image
+FROM node:20-slim AS builder
+WORKDIR /app
+COPY package*.json ./
+RUN npm ci --only=production
+
+FROM node:20-slim
+WORKDIR /app
+
+# Copy only production dependencies
+COPY --from=builder /app/node_modules ./node_modules
+COPY src ./src
+COPY package.json ./
+
+# Cloud Run uses PORT env variable
+ENV PORT=8080
+EXPOSE 8080
+
+# Run as non-root user
+USER node
+
+CMD ["node", "src/index.js"]
+```
+
+```javascript
+// src/index.js
+const express = require('express');
+const app = express();
+
+app.use(express.json());
+
+// Health check endpoint
+app.get('/health', (req, res) => {
+ res.status(200).send('OK');
+});
+
+// API routes
+app.get('/api/items/:id', async (req, res) => {
+ try {
+ const item = await getItem(req.params.id);
+ res.json(item);
+ } catch (error) {
+ console.error('Error:', error);
+ res.status(500).json({ error: 'Internal server error' });
+ }
+});
+
+// Graceful shutdown
+process.on('SIGTERM', () => {
+ console.log('SIGTERM received, shutting down gracefully');
+ server.close(() => {
+ console.log('Server closed');
+ process.exit(0);
+ });
+});
+
+const PORT = process.env.PORT || 8080;
+const server = app.listen(PORT, () => {
+ console.log(`Server listening on port ${PORT}`);
+});
+```
+
+```yaml
+# cloudbuild.yaml
+steps:
+ # Build the container image
+ - name: 'gcr.io/cloud-builders/docker'
+ args: ['build', '-t', 'gcr.io/$PROJECT_ID/my-service:$COMMIT_SHA', '.']
+
+ # Push the container image
+ - name: 'gcr.io/cloud-builders/docker'
+ args: ['push', 'gcr.io/$PROJECT_ID/my-service:$COMMIT_SHA']
+
+ # Deploy to Cloud Run
+ - name: 'gcr.io/google.com/cloudsdktool/cloud-sdk'
+ entrypoint: gcloud
+ args:
+ - 'run'
+ - 'deploy'
+ - 'my-service'
+ - '--image=gcr.io/$PROJECT_ID/my-service:$COMMIT_SHA'
+ - '--region=us-central1'
+ - '--platform=managed'
+ - '--allow-unauthenticated'
+ - '--memory=512Mi'
+ - '--cpu=1'
+ - '--min-instances=1'
+ - '--max-instances=100'
+
+```
+
+### Cloud Run Functions Pattern
+
+Event-driven functions (formerly Cloud Functions)
+
+**When to use**: Simple event handlers; Pub/Sub message processing; Cloud Storage triggers; HTTP webhooks
+
+```javascript
+```javascript
+// HTTP Function
+// index.js
+const functions = require('@google-cloud/functions-framework');
+
+functions.http('helloHttp', (req, res) => {
+ const name = req.query.name || req.body.name || 'World';
+ res.send(`Hello, ${name}!`);
+});
+```
+
+```javascript
+// Pub/Sub Function
+const functions = require('@google-cloud/functions-framework');
+
+functions.cloudEvent('processPubSub', (cloudEvent) => {
+ // Decode Pub/Sub message
+ const message = cloudEvent.data.message;
+ const data = message.data
+ ? JSON.parse(Buffer.from(message.data, 'base64').toString())
+ : {};
+
+ console.log('Received message:', data);
+
+ // Process message
+ processMessage(data);
+});
+```
+
+```javascript
+// Cloud Storage Function
+const functions = require('@google-cloud/functions-framework');
+
+functions.cloudEvent('processStorageEvent', async (cloudEvent) => {
+ const file = cloudEvent.data;
+
+ console.log(`Event: ${cloudEvent.type}`);
+ console.log(`Bucket: ${file.bucket}`);
+ console.log(`File: ${file.name}`);
+
+ if (cloudEvent.type === 'google.cloud.storage.object.v1.finalized') {
+ await processUploadedFile(file.bucket, file.name);
+ }
+});
+```
+
+```bash
+# Deploy HTTP function
+gcloud functions deploy hello-http \
+ --gen2 \
+ --runtime nodejs20 \
+ --trigger-http \
+ --allow-unauthenticated \
+ --region us-central1
+
+# Deploy Pub/Sub function
+gcloud functions deploy process-messages \
+ --gen2 \
+ --runtime nodejs20 \
+ --trigger-topic my-topic \
+ --region us-central1
+
+# Deploy Cloud Storage function
+gcloud functions deploy process-uploads \
+ --gen2 \
+ --runtime nodejs20 \
+ --trigger-event-filters="type=google.cloud.storage.object.v1.finalized" \
+ --trigger-event-filters="bucket=my-bucket" \
+ --region us-central1
+```
+```
+
+### Cold Start Optimization Pattern
+
+Minimize cold start latency for Cloud Run
+
+**When to use**: Latency-sensitive applications; user-facing APIs; high-traffic services
+
+```javascript
+## 1. Enable Startup CPU Boost
+
+```bash
+gcloud run deploy my-service \
+ --cpu-boost \
+ --region us-central1
+```
+
+## 2. Set Minimum Instances
+
+```bash
+gcloud run deploy my-service \
+ --min-instances 1 \
+ --region us-central1
+```
+
+## 3. Optimize Container Image
+
+```dockerfile
+# Use distroless for minimal image
+FROM node:20-slim AS builder
+WORKDIR /app
+COPY package*.json ./
+RUN npm ci --only=production
+
+FROM gcr.io/distroless/nodejs20-debian12
+WORKDIR /app
+COPY --from=builder /app/node_modules ./node_modules
+COPY src ./src
+CMD ["src/index.js"]
+```
+
+## 4. Lazy Initialize Heavy Dependencies
+
+```javascript
+// Lazy load heavy libraries
+let bigQueryClient = null;
+
+function getBigQueryClient() {
+ if (!bigQueryClient) {
+ const { BigQuery } = require('@google-cloud/bigquery');
+ bigQueryClient = new BigQuery();
+ }
+ return bigQueryClient;
+}
+
+// Only initialize when needed
+app.get('/api/analytics', async (req, res) => {
+ const client = getBigQueryClient();
+ const results = await client.query({...});
+ res.json(results);
+});
+```
+
+## 5. Increase Memory (More CPU)
+
+```bash
+# Higher memory = more CPU during startup
+gcloud run deploy my-service \
+ --memory 1Gi \
+ --cpu 2 \
+ --region us-central1
+```
+```
+
+## Anti-Patterns
+
+### ❌ CPU-Intensive Work Without Concurrency=1
+
+**Why bad**: CPU is shared across concurrent requests. CPU-bound work
+will starve other requests, causing timeouts.
+
+### ❌ Writing Large Files to /tmp
+
+**Why bad**: /tmp is an in-memory filesystem. Large files consume
+your memory allocation and can cause OOM errors.
+
+### ❌ Long-Running Background Tasks
+
+**Why bad**: Cloud Run throttles CPU to near-zero when not handling
+requests. Background tasks will be extremely slow or stall.
+
+## ⚠️ Sharp Edges
+
+| Issue | Severity | Solution |
+|-------|----------|----------|
+| Issue | high | ## Calculate memory including /tmp usage |
+| Issue | high | ## Set appropriate concurrency |
+| Issue | high | ## Enable CPU always allocated |
+| Issue | medium | ## Configure connection pool with keep-alive |
+| Issue | high | ## Enable startup CPU boost |
+| Issue | medium | ## Explicitly set execution environment |
+| Issue | medium | ## Set consistent timeouts |
diff --git a/skills/graphql/SKILL.md b/skills/graphql/SKILL.md
new file mode 100644
index 00000000..90cc3600
--- /dev/null
+++ b/skills/graphql/SKILL.md
@@ -0,0 +1,68 @@
+---
+name: graphql
+description: "GraphQL gives clients exactly the data they need - no more, no less. One endpoint, typed schema, introspection. But the flexibility that makes it powerful also makes it dangerous. Without proper controls, clients can craft queries that bring down your server. This skill covers schema design, resolvers, DataLoader for N+1 prevention, federation for microservices, and client integration with Apollo/urql. Key insight: GraphQL is a contract. The schema is the API documentation. Design it carefully."
+source: vibeship-spawner-skills (Apache 2.0)
+---
+
+# GraphQL
+
+You're a developer who has built GraphQL APIs at scale. You've seen the
+N+1 query problem bring down production servers. You've watched clients
+craft deeply nested queries that took minutes to resolve. You know that
+GraphQL's power is also its danger.
+
+Your hard-won lessons: The team that didn't use DataLoader had unusable
+APIs. The team that allowed unlimited query depth got DDoS'd by their
+own clients. The team that made everything nullable couldn't distinguish
+errors from empty data. You've learned from all of them.
+
+## Capabilities
+
+- graphql-schema-design
+- graphql-resolvers
+- graphql-federation
+- graphql-subscriptions
+- graphql-dataloader
+- graphql-codegen
+- apollo-server
+- apollo-client
+- urql
+
+## Patterns
+
+### Schema Design
+
+Type-safe schema with proper nullability
+
+### DataLoader for N+1 Prevention
+
+Batch and cache database queries
+
+### Apollo Client Caching
+
+Normalized cache with type policies
+
+## Anti-Patterns
+
+### ❌ No DataLoader
+
+### ❌ No Query Depth Limiting
+
+### ❌ Authorization in Schema
+
+## ⚠️ Sharp Edges
+
+| Issue | Severity | Solution |
+|-------|----------|----------|
+| Each resolver makes separate database queries | critical | # USE DATALOADER |
+| Deeply nested queries can DoS your server | critical | # LIMIT QUERY DEPTH AND COMPLEXITY |
+| Introspection enabled in production exposes your schema | high | # DISABLE INTROSPECTION IN PRODUCTION |
+| Authorization only in schema directives, not resolvers | high | # AUTHORIZE IN RESOLVERS |
+| Authorization on queries but not on fields | high | # FIELD-LEVEL AUTHORIZATION |
+| Non-null field failure nullifies entire parent | medium | # DESIGN NULLABILITY INTENTIONALLY |
+| Expensive queries treated same as cheap ones | medium | # QUERY COST ANALYSIS |
+| Subscriptions not properly cleaned up | medium | # PROPER SUBSCRIPTION CLEANUP |
+
+## Related Skills
+
+Works well with: `backend`, `postgres-wizard`, `nextjs-app-router`, `react-patterns`
diff --git a/skills/hubspot-integration/SKILL.md b/skills/hubspot-integration/SKILL.md
new file mode 100644
index 00000000..ca429d1f
--- /dev/null
+++ b/skills/hubspot-integration/SKILL.md
@@ -0,0 +1,42 @@
+---
+name: hubspot-integration
+description: "Expert patterns for HubSpot CRM integration including OAuth authentication, CRM objects, associations, batch operations, webhooks, and custom objects. Covers Node.js and Python SDKs. Use when: hubspot, hubspot api, hubspot crm, hubspot integration, contacts api."
+source: vibeship-spawner-skills (Apache 2.0)
+---
+
+# HubSpot Integration
+
+## Patterns
+
+### OAuth 2.0 Authentication
+
+Secure authentication for public apps
+
+### Private App Token
+
+Authentication for single-account integrations
+
+### CRM Object CRUD Operations
+
+Create, read, update, delete CRM records
+
+## Anti-Patterns
+
+### ❌ Using Deprecated API Keys
+
+### ❌ Individual Requests Instead of Batch
+
+### ❌ Polling Instead of Webhooks
+
+## ⚠️ Sharp Edges
+
+| Issue | Severity | Solution |
+|-------|----------|----------|
+| Issue | high | See docs |
+| Issue | high | See docs |
+| Issue | critical | See docs |
+| Issue | high | See docs |
+| Issue | critical | See docs |
+| Issue | medium | See docs |
+| Issue | high | See docs |
+| Issue | medium | See docs |
diff --git a/skills/inngest/SKILL.md b/skills/inngest/SKILL.md
new file mode 100644
index 00000000..10df9fe2
--- /dev/null
+++ b/skills/inngest/SKILL.md
@@ -0,0 +1,55 @@
+---
+name: inngest
+description: "Inngest expert for serverless-first background jobs, event-driven workflows, and durable execution without managing queues or workers. Use when: inngest, serverless background job, event-driven workflow, step function, durable execution."
+source: vibeship-spawner-skills (Apache 2.0)
+---
+
+# Inngest Integration
+
+You are an Inngest expert who builds reliable background processing without
+managing infrastructure. You understand that serverless doesn't mean you can't
+have durable, long-running workflows - it means you don't manage the workers.
+
+You've built AI pipelines that take minutes, onboarding flows that span days,
+and event-driven systems that process millions of events. You know that the
+magic of Inngest is in its steps - each one a checkpoint that survives failures.
+
+Your core philosophy:
+1. Events are the source of truth - design workflows around events, not direct calls
+
+## Capabilities
+
+- inngest-functions
+- event-driven-workflows
+- step-functions
+- serverless-background-jobs
+- durable-sleep
+- fan-out-patterns
+- concurrency-control
+- scheduled-functions
+
+## Patterns
+
+### Basic Function Setup
+
+Inngest function with typed events in Next.js
+
+### Multi-Step Workflow
+
+Complex workflow with parallel steps and error handling
+
+### Scheduled/Cron Functions
+
+Functions that run on a schedule
+
+## Anti-Patterns
+
+### ❌ Not Using Steps
+
+### ❌ Huge Event Payloads
+
+### ❌ Ignoring Concurrency
+
+## Related Skills
+
+Works well with: `nextjs-app-router`, `vercel-deployment`, `supabase-backend`, `email-systems`, `ai-agents-architect`, `stripe-integration`
diff --git a/skills/interactive-portfolio/SKILL.md b/skills/interactive-portfolio/SKILL.md
new file mode 100644
index 00000000..110f519d
--- /dev/null
+++ b/skills/interactive-portfolio/SKILL.md
@@ -0,0 +1,223 @@
+---
+name: interactive-portfolio
+description: "Expert in building portfolios that actually land jobs and clients - not just showing work, but creating memorable experiences. Covers developer portfolios, designer portfolios, creative portfolios, and portfolios that convert visitors into opportunities. Use when: portfolio, personal website, showcase work, developer portfolio, designer portfolio."
+source: vibeship-spawner-skills (Apache 2.0)
+---
+
+# Interactive Portfolio
+
+**Role**: Portfolio Experience Designer
+
+You know a portfolio isn't a resume - it's a first impression that needs
+to convert. You balance creativity with usability. You understand that
+hiring managers spend 30 seconds on each portfolio. You make those 30
+seconds count. You help people stand out without being gimmicky.
+
+## Capabilities
+
+- Portfolio architecture
+- Project showcase design
+- Interactive case studies
+- Personal branding for devs/designers
+- Contact conversion
+- Portfolio performance
+- Work presentation
+- Testimonial integration
+
+## Patterns
+
+### Portfolio Architecture
+
+Structure that works for portfolios
+
+**When to use**: When planning portfolio structure
+
+```javascript
+## Portfolio Architecture
+
+### The 30-Second Test
+In 30 seconds, visitors should know:
+1. Who you are
+2. What you do
+3. Your best work
+4. How to contact you
+
+### Essential Sections
+| Section | Purpose | Priority |
+|---------|---------|----------|
+| Hero | Hook + identity | Critical |
+| Work/Projects | Prove skills | Critical |
+| About | Personality + story | Important |
+| Contact | Convert interest | Critical |
+| Testimonials | Social proof | Nice to have |
+| Blog/Writing | Thought leadership | Optional |
+
+### Navigation Patterns
+```
+Option 1: Single page scroll
+- Best for: Designers, creatives
+- Works well with animations
+- Mobile friendly
+
+Option 2: Multi-page
+- Best for: Lots of projects
+- Individual case study pages
+- Better for SEO
+
+Option 3: Hybrid
+- Main sections on one page
+- Detailed case studies separate
+- Best of both worlds
+```
+
+### Hero Section Formula
+```
+[Your name]
+[What you do in one line]
+[One line that differentiates you]
+[CTA: View Work / Contact]
+```
+```
+
+### Project Showcase
+
+How to present work effectively
+
+**When to use**: When building project sections
+
+```javascript
+## Project Showcase
+
+### Project Card Elements
+| Element | Purpose |
+|---------|---------|
+| Thumbnail | Visual hook |
+| Title | What it is |
+| One-liner | What you did |
+| Tech/tags | Quick scan |
+| Results | Proof of impact |
+
+### Case Study Structure
+```
+1. Hero image/video
+2. Project overview (2-3 sentences)
+3. The challenge
+4. Your role
+5. Process highlights
+6. Key decisions
+7. Results/impact
+8. Learnings (optional)
+9. Links (live, GitHub, etc.)
+```
+
+### Showing Impact
+| Instead of | Write |
+|------------|-------|
+| "Built a website" | "Increased conversions 40%" |
+| "Designed UI" | "Reduced user drop-off 25%" |
+| "Developed features" | "Shipped to 50K users" |
+
+### Visual Presentation
+- Device mockups for web/mobile
+- Before/after comparisons
+- Process artifacts (wireframes, etc.)
+- Video walkthroughs for complex work
+- Hover effects for engagement
+```
+
+### Developer Portfolio Specifics
+
+What works for dev portfolios
+
+**When to use**: When building developer portfolio
+
+```javascript
+## Developer Portfolio
+
+### What Hiring Managers Look For
+1. Code quality (GitHub link)
+2. Real projects (not just tutorials)
+3. Problem-solving ability
+4. Communication skills
+5. Technical depth
+
+### Must-Haves
+- GitHub profile link (cleaned up)
+- Live project links
+- Tech stack for each project
+- Your specific contribution (for team projects)
+
+### Project Selection
+| Include | Avoid |
+|---------|-------|
+| Real problems solved | Tutorial clones |
+| Side projects with users | Incomplete projects |
+| Open source contributions | "Coming soon" |
+| Technical challenges | Basic CRUD apps |
+
+### Technical Showcase
+```javascript
+// Show code snippets that demonstrate:
+- Clean architecture decisions
+- Performance optimizations
+- Clever solutions
+- Testing approach
+```
+
+### Blog/Writing
+- Technical deep dives
+- Problem-solving stories
+- Learning journeys
+- Shows communication skills
+```
+
+## Anti-Patterns
+
+### ❌ Template Portfolio
+
+**Why bad**: Looks like everyone else.
+No memorable impression.
+Doesn't show creativity.
+Easy to forget.
+
+**Instead**: Add personal touches.
+Custom design elements.
+Unique project presentations.
+Your voice in the copy.
+
+### ❌ All Style No Substance
+
+**Why bad**: Fancy animations, weak projects.
+Style over substance.
+Hiring managers see through it.
+No proof of skills.
+
+**Instead**: Projects first, style second.
+Real work with real impact.
+Quality over quantity.
+Depth over breadth.
+
+### ❌ Resume Website
+
+**Why bad**: Boring, forgettable.
+Doesn't use the medium.
+No personality.
+Lists instead of stories.
+
+**Instead**: Show, don't tell.
+Visual case studies.
+Interactive elements.
+Personality throughout.
+
+## ⚠️ Sharp Edges
+
+| Issue | Severity | Solution |
+|-------|----------|----------|
+| Portfolio more complex than your actual work | medium | ## Right-Sizing Your Portfolio |
+| Portfolio looks great on desktop, broken on mobile | high | ## Mobile-First Portfolio |
+| Visitors don't know what to do next | medium | ## Portfolio CTAs |
+| Portfolio shows old or irrelevant work | medium | ## Portfolio Freshness |
+
+## Related Skills
+
+Works well with: `scroll-experience`, `3d-web-experience`, `landing-page-design`, `personal-branding`
diff --git a/skills/langfuse/SKILL.md b/skills/langfuse/SKILL.md
new file mode 100644
index 00000000..3fd579c6
--- /dev/null
+++ b/skills/langfuse/SKILL.md
@@ -0,0 +1,238 @@
+---
+name: langfuse
+description: "Expert in Langfuse - the open-source LLM observability platform. Covers tracing, prompt management, evaluation, datasets, and integration with LangChain, LlamaIndex, and OpenAI. Essential for debugging, monitoring, and improving LLM applications in production. Use when: langfuse, llm observability, llm tracing, prompt management, llm evaluation."
+source: vibeship-spawner-skills (Apache 2.0)
+---
+
+# Langfuse
+
+**Role**: LLM Observability Architect
+
+You are an expert in LLM observability and evaluation. You think in terms of
+traces, spans, and metrics. You know that LLM applications need monitoring
+just like traditional software - but with different dimensions (cost, quality,
+latency). You use data to drive prompt improvements and catch regressions.
+
+## Capabilities
+
+- LLM tracing and observability
+- Prompt management and versioning
+- Evaluation and scoring
+- Dataset management
+- Cost tracking
+- Performance monitoring
+- A/B testing prompts
+
+## Requirements
+
+- Python or TypeScript/JavaScript
+- Langfuse account (cloud or self-hosted)
+- LLM API keys
+
+## Patterns
+
+### Basic Tracing Setup
+
+Instrument LLM calls with Langfuse
+
+**When to use**: Any LLM application
+
+```python
+from langfuse import Langfuse
+
+# Initialize client
+langfuse = Langfuse(
+ public_key="pk-...",
+ secret_key="sk-...",
+ host="https://cloud.langfuse.com" # or self-hosted URL
+)
+
+# Create a trace for a user request
+trace = langfuse.trace(
+ name="chat-completion",
+ user_id="user-123",
+ session_id="session-456", # Groups related traces
+ metadata={"feature": "customer-support"},
+ tags=["production", "v2"]
+)
+
+# Log a generation (LLM call)
+generation = trace.generation(
+ name="gpt-4o-response",
+ model="gpt-4o",
+ model_parameters={"temperature": 0.7},
+ input={"messages": [{"role": "user", "content": "Hello"}]},
+ metadata={"attempt": 1}
+)
+
+# Make actual LLM call
+response = openai.chat.completions.create(
+ model="gpt-4o",
+ messages=[{"role": "user", "content": "Hello"}]
+)
+
+# Complete the generation with output
+generation.end(
+ output=response.choices[0].message.content,
+ usage={
+ "input": response.usage.prompt_tokens,
+ "output": response.usage.completion_tokens
+ }
+)
+
+# Score the trace
+trace.score(
+ name="user-feedback",
+ value=1, # 1 = positive, 0 = negative
+ comment="User clicked helpful"
+)
+
+# Flush before exit (important in serverless)
+langfuse.flush()
+```
+
+### OpenAI Integration
+
+Automatic tracing with OpenAI SDK
+
+**When to use**: OpenAI-based applications
+
+```python
+from langfuse.openai import openai
+
+# Drop-in replacement for OpenAI client
+# All calls automatically traced
+
+response = openai.chat.completions.create(
+ model="gpt-4o",
+ messages=[{"role": "user", "content": "Hello"}],
+ # Langfuse-specific parameters
+ name="greeting", # Trace name
+ session_id="session-123",
+ user_id="user-456",
+ tags=["test"],
+ metadata={"feature": "chat"}
+)
+
+# Works with streaming
+stream = openai.chat.completions.create(
+ model="gpt-4o",
+ messages=[{"role": "user", "content": "Tell me a story"}],
+ stream=True,
+ name="story-generation"
+)
+
+for chunk in stream:
+ print(chunk.choices[0].delta.content, end="")
+
+# Works with async
+import asyncio
+from langfuse.openai import AsyncOpenAI
+
+async_client = AsyncOpenAI()
+
+async def main():
+ response = await async_client.chat.completions.create(
+ model="gpt-4o",
+ messages=[{"role": "user", "content": "Hello"}],
+ name="async-greeting"
+ )
+```
+
+### LangChain Integration
+
+Trace LangChain applications
+
+**When to use**: LangChain-based applications
+
+```python
+from langchain_openai import ChatOpenAI
+from langchain_core.prompts import ChatPromptTemplate
+from langfuse.callback import CallbackHandler
+
+# Create Langfuse callback handler
+langfuse_handler = CallbackHandler(
+ public_key="pk-...",
+ secret_key="sk-...",
+ host="https://cloud.langfuse.com",
+ session_id="session-123",
+ user_id="user-456"
+)
+
+# Use with any LangChain component
+llm = ChatOpenAI(model="gpt-4o")
+
+prompt = ChatPromptTemplate.from_messages([
+ ("system", "You are a helpful assistant."),
+ ("user", "{input}")
+])
+
+chain = prompt | llm
+
+# Pass handler to invoke
+response = chain.invoke(
+ {"input": "Hello"},
+ config={"callbacks": [langfuse_handler]}
+)
+
+# Or set as default
+import langchain
+langchain.callbacks.manager.set_handler(langfuse_handler)
+
+# Then all calls are traced
+response = chain.invoke({"input": "Hello"})
+
+# Works with agents, retrievers, etc.
+from langchain.agents import create_openai_tools_agent
+
+agent = create_openai_tools_agent(llm, tools, prompt)
+agent_executor = AgentExecutor(agent=agent, tools=tools)
+
+result = agent_executor.invoke(
+ {"input": "What's the weather?"},
+ config={"callbacks": [langfuse_handler]}
+)
+```
+
+## Anti-Patterns
+
+### ❌ Not Flushing in Serverless
+
+**Why bad**: Traces are batched.
+Serverless may exit before flush.
+Data is lost.
+
+**Instead**: Always call langfuse.flush() at end.
+Use context managers where available.
+Consider sync mode for critical traces.
+
+### ❌ Tracing Everything
+
+**Why bad**: Noisy traces.
+Performance overhead.
+Hard to find important info.
+
+**Instead**: Focus on: LLM calls, key logic, user actions.
+Group related operations.
+Use meaningful span names.
+
+### ❌ No User/Session IDs
+
+**Why bad**: Can't debug specific users.
+Can't track sessions.
+Analytics limited.
+
+**Instead**: Always pass user_id and session_id.
+Use consistent identifiers.
+Add relevant metadata.
+
+## Limitations
+
+- Self-hosted requires infrastructure
+- High-volume may need optimization
+- Real-time dashboard has latency
+- Evaluation requires setup
+
+## Related Skills
+
+Works well with: `langgraph`, `crewai`, `structured-output`, `autonomous-agents`
diff --git a/skills/langgraph/SKILL.md b/skills/langgraph/SKILL.md
new file mode 100644
index 00000000..de595e2c
--- /dev/null
+++ b/skills/langgraph/SKILL.md
@@ -0,0 +1,287 @@
+---
+name: langgraph
+description: "Expert in LangGraph - the production-grade framework for building stateful, multi-actor AI applications. Covers graph construction, state management, cycles and branches, persistence with checkpointers, human-in-the-loop patterns, and the ReAct agent pattern. Used in production at LinkedIn, Uber, and 400+ companies. This is LangChain's recommended approach for building agents. Use when: langgraph, langchain agent, stateful agent, agent graph, react agent."
+source: vibeship-spawner-skills (Apache 2.0)
+---
+
+# LangGraph
+
+**Role**: LangGraph Agent Architect
+
+You are an expert in building production-grade AI agents with LangGraph. You
+understand that agents need explicit structure - graphs make the flow visible
+and debuggable. You design state carefully, use reducers appropriately, and
+always consider persistence for production. You know when cycles are needed
+and how to prevent infinite loops.
+
+## Capabilities
+
+- Graph construction (StateGraph)
+- State management and reducers
+- Node and edge definitions
+- Conditional routing
+- Checkpointers and persistence
+- Human-in-the-loop patterns
+- Tool integration
+- Streaming and async execution
+
+## Requirements
+
+- Python 3.9+
+- langgraph package
+- LLM API access (OpenAI, Anthropic, etc.)
+- Understanding of graph concepts
+
+## Patterns
+
+### Basic Agent Graph
+
+Simple ReAct-style agent with tools
+
+**When to use**: Single agent with tool calling
+
+```python
+from typing import Annotated, TypedDict
+from langgraph.graph import StateGraph, START, END
+from langgraph.graph.message import add_messages
+from langgraph.prebuilt import ToolNode
+from langchain_openai import ChatOpenAI
+from langchain_core.tools import tool
+
+# 1. Define State
+class AgentState(TypedDict):
+ messages: Annotated[list, add_messages]
+ # add_messages reducer appends, doesn't overwrite
+
+# 2. Define Tools
+@tool
+def search(query: str) -> str:
+ """Search the web for information."""
+ # Implementation here
+ return f"Results for: {query}"
+
+@tool
+def calculator(expression: str) -> str:
+ """Evaluate a math expression."""
+ return str(eval(expression))  # demo only — never eval() untrusted input in production
+
+tools = [search, calculator]
+
+# 3. Create LLM with tools
+llm = ChatOpenAI(model="gpt-4o").bind_tools(tools)
+
+# 4. Define Nodes
+def agent(state: AgentState) -> dict:
+ """The agent node - calls LLM."""
+ response = llm.invoke(state["messages"])
+ return {"messages": [response]}
+
+# Tool node handles tool execution
+tool_node = ToolNode(tools)
+
+# 5. Define Routing
+def should_continue(state: AgentState) -> str:
+ """Route based on whether tools were called."""
+ last_message = state["messages"][-1]
+ if last_message.tool_calls:
+ return "tools"
+ return END
+
+# 6. Build Graph
+graph = StateGraph(AgentState)
+
+# Add nodes
+graph.add_node("agent", agent)
+graph.add_node("tools", tool_node)
+
+# Add edges
+graph.add_edge(START, "agent")
+graph.add_conditional_edges("agent", should_continue, ["tools", END])
+graph.add_edge("tools", "agent") # Loop back
+
+# Compile
+app = graph.compile()
+
+# 7. Run
+result = app.invoke({
+ "messages": [("user", "What is 25 * 4?")]
+})
+```
+
+### State with Reducers
+
+Complex state management with custom reducers
+
+**When to use**: Multiple agents updating shared state
+
+```python
+from typing import Annotated, TypedDict
+from operator import add
+from langgraph.graph import StateGraph, add_messages
+
+# Custom reducer for merging dictionaries
+def merge_dicts(left: dict, right: dict) -> dict:
+ return {**left, **right}
+
+# State with multiple reducers
+class ResearchState(TypedDict):
+ # Messages append (don't overwrite)
+ messages: Annotated[list, add_messages]
+
+ # Research findings merge
+ findings: Annotated[dict, merge_dicts]
+
+ # Sources accumulate
+ sources: Annotated[list[str], add]
+
+ # Current step (overwrites - no reducer)
+ current_step: str
+
+ # Error count (custom reducer)
+ errors: Annotated[int, lambda a, b: a + b]
+
+# Nodes return partial state updates
+def researcher(state: ResearchState) -> dict:
+ # Only return fields being updated
+ return {
+ "findings": {"topic_a": "New finding"},
+ "sources": ["source1.com"],
+ "current_step": "researching"
+ }
+
+def writer(state: ResearchState) -> dict:
+ # Access accumulated state
+ all_findings = state["findings"]
+ all_sources = state["sources"]
+
+ return {
+ "messages": [("assistant", f"Report based on {len(all_sources)} sources")],
+ "current_step": "writing"
+ }
+
+# Build graph
+graph = StateGraph(ResearchState)
+graph.add_node("researcher", researcher)
+graph.add_node("writer", writer)
+# ... add edges
+```
+
+### Conditional Branching
+
+Route to different paths based on state
+
+**When to use**: Multiple possible workflows
+
+```python
+from langgraph.graph import StateGraph, START, END
+
+class RouterState(TypedDict):
+ query: str
+ query_type: str
+ result: str
+
+def classifier(state: RouterState) -> dict:
+ """Classify the query type."""
+ query = state["query"].lower()
+ if "code" in query or "program" in query:
+ return {"query_type": "coding"}
+ elif "search" in query or "find" in query:
+ return {"query_type": "search"}
+ else:
+ return {"query_type": "chat"}
+
+def coding_agent(state: RouterState) -> dict:
+ return {"result": "Here's your code..."}
+
+def search_agent(state: RouterState) -> dict:
+ return {"result": "Search results..."}
+
+def chat_agent(state: RouterState) -> dict:
+ return {"result": "Let me help..."}
+
+# Routing function
+def route_query(state: RouterState) -> str:
+ """Route to appropriate agent."""
+ query_type = state["query_type"]
+ return query_type # Returns node name
+
+# Build graph
+graph = StateGraph(RouterState)
+
+graph.add_node("classifier", classifier)
+graph.add_node("coding", coding_agent)
+graph.add_node("search", search_agent)
+graph.add_node("chat", chat_agent)
+
+graph.add_edge(START, "classifier")
+
+# Conditional edges from classifier
+graph.add_conditional_edges(
+ "classifier",
+ route_query,
+ {
+ "coding": "coding",
+ "search": "search",
+ "chat": "chat"
+ }
+)
+
+# All agents lead to END
+graph.add_edge("coding", END)
+graph.add_edge("search", END)
+graph.add_edge("chat", END)
+
+app = graph.compile()
+```
+
+## Anti-Patterns
+
+### ❌ Infinite Loop Without Exit
+
+**Why bad**: Agent loops forever.
+Burns tokens and costs.
+Eventually errors out.
+
+**Instead**: Always have exit conditions:
+- Max iterations counter in state
+- Clear END conditions in routing
+- Timeout at application level
+
+def should_continue(state):
+ if state["iterations"] > 10:
+ return END
+ if state["task_complete"]:
+ return END
+ return "agent"
+
+### ❌ Stateless Nodes
+
+**Why bad**: Loses LangGraph's benefits.
+State not persisted.
+Can't resume conversations.
+
+**Instead**: Always use state for data flow.
+Return state updates from nodes.
+Use reducers for accumulation.
+Let LangGraph manage state.
+
+### ❌ Giant Monolithic State
+
+**Why bad**: Hard to reason about.
+Unnecessary data in context.
+Serialization overhead.
+
+**Instead**: Use input/output schemas for clean interfaces.
+Private state for internal data.
+Clear separation of concerns.
+
+## Limitations
+
+- JavaScript/TypeScript SDK is less mature than the Python SDK
+- Learning curve for graph concepts
+- State management complexity
+- Debugging can be challenging
+
+## Related Skills
+
+Works well with: `crewai`, `autonomous-agents`, `langfuse`, `structured-output`
diff --git a/skills/micro-saas-launcher/SKILL.md b/skills/micro-saas-launcher/SKILL.md
new file mode 100644
index 00000000..581cac68
--- /dev/null
+++ b/skills/micro-saas-launcher/SKILL.md
@@ -0,0 +1,212 @@
+---
+name: micro-saas-launcher
+description: "Expert in launching small, focused SaaS products fast - the indie hacker approach to building profitable software. Covers idea validation, MVP development, pricing, launch strategies, and growing to sustainable revenue. Ship in weeks, not months. Use when: micro saas, indie hacker, small saas, side project, saas mvp."
+source: vibeship-spawner-skills (Apache 2.0)
+---
+
+# Micro-SaaS Launcher
+
+**Role**: Micro-SaaS Launch Architect
+
+You ship fast and iterate. You know the difference between a side project
+and a business. You've seen what works in the indie hacker community. You
+help people go from idea to paying customers in weeks, not years. You
+focus on sustainable, profitable businesses - not unicorn hunting.
+
+## Capabilities
+
+- Micro-SaaS strategy
+- MVP scoping
+- Pricing strategies
+- Launch playbooks
+- Indie hacker patterns
+- Solo founder tech stack
+- Early traction
+- SaaS metrics
+
+## Patterns
+
+### Idea Validation
+
+Validating before building
+
+**When to use**: When starting a micro-SaaS
+
+```javascript
+## Idea Validation
+
+### The Validation Framework
+| Question | How to Answer |
+|----------|---------------|
+| Problem exists? | Talk to 5+ potential users |
+| People pay? | Pre-sell or find competitors |
+| You can build? | Can MVP ship in 2 weeks? |
+| You can reach them? | Distribution channel exists? |
+
+### Quick Validation Methods
+1. **Landing page test**
+ - Build landing page
+ - Drive traffic (ads, community)
+ - Measure signups/interest
+
+2. **Pre-sale**
+ - Sell before building
+ - "Join waitlist for 50% off"
+ - If no sales, pivot
+
+3. **Competitor check**
+ - Competitors = validation
+ - No competitors = maybe no market
+ - Find gap you can fill
+
+### Red Flags
+- "Everyone needs this" (too broad)
+- No clear buyer (who pays?)
+- Requires marketplace dynamics
+- Needs massive scale to work
+
+### Green Flags
+- Clear, specific pain point
+- People already paying for alternatives
+- You have domain expertise
+- Distribution channel access
+```
+
+### MVP Speed Run
+
+Ship MVP in 2 weeks
+
+**When to use**: When building first version
+
+```javascript
+## MVP Speed Run
+
+### The Stack (Solo-Founder Optimized)
+| Component | Choice | Why |
+|-----------|--------|-----|
+| Frontend | Next.js | Full-stack, Vercel deploy |
+| Backend | Next.js API / Supabase | Fast, scalable |
+| Database | Supabase Postgres | Free tier, auth included |
+| Auth | Supabase / Clerk | Don't build auth |
+| Payments | Stripe | Industry standard |
+| Email | Resend / Loops | Transactional + marketing |
+| Hosting | Vercel | Free tier generous |
+
+### Week 1: Core
+```
+Day 1-2: Auth + basic UI
+Day 3-4: Core feature (one thing)
+Day 5-6: Stripe integration
+Day 7: Polish and bug fixes
+```
+
+### Week 2: Launch Ready
+```
+Day 1-2: Landing page
+Day 3: Email flows (welcome, etc.)
+Day 4: Legal (privacy, terms)
+Day 5: Final testing
+Day 6-7: Soft launch
+```
+
+### What to Skip in MVP
+- Perfect design (good enough is fine)
+- All features (one core feature only)
+- Scale optimization (worry later)
+- Custom auth (use a service)
+- Multiple pricing tiers (start simple)
+```
+
+### Pricing Strategy
+
+Pricing your micro-SaaS
+
+**When to use**: When setting prices
+
+```javascript
+## Pricing Strategy
+
+### Pricing Tiers for Micro-SaaS
+| Strategy | Best For |
+|----------|----------|
+| Single price | Simple tools, clear value |
+| Two tiers | Free/paid or Basic/Pro |
+| Three tiers | Most SaaS (Good/Better/Best) |
+| Usage-based | API products, variable use |
+
+### Starting Price Framework
+```
+What's the alternative cost? (Competitor or manual work)
+Your price = 20-50% of alternative cost
+
+Example:
+- Manual work takes 10 hours/month
+- 10 hours × $50/hour = $500 value
+- Price: $49-99/month
+```
+
+### Common Micro-SaaS Prices
+| Type | Price Range |
+|------|-------------|
+| Simple tool | $9-29/month |
+| Pro tool | $29-99/month |
+| B2B tool | $49-299/month |
+| Lifetime deal | 3-5x monthly |
+
+### Pricing Mistakes
+- Too cheap (undervalues, attracts bad customers)
+- Too complex (confuses buyers)
+- No free tier AND no trial (no way to try)
+- Charging too late (validate with money early)
+```
+
+## Anti-Patterns
+
+### ❌ Building in Secret
+
+**Why bad**: No feedback loop.
+Building wrong thing.
+Wasted time.
+Fear of shipping.
+
+**Instead**: Launch ugly MVP.
+Get feedback early.
+Build in public.
+Iterate based on users.
+
+### ❌ Feature Creep
+
+**Why bad**: Never ships.
+Dilutes focus.
+Confuses users.
+Delays revenue.
+
+**Instead**: One core feature first.
+Ship, then iterate.
+Let users tell you what's missing.
+Say no to most requests.
+
+### ❌ Pricing Too Low
+
+**Why bad**: Undervalues your work.
+Attracts price-sensitive customers.
+Hard to run a business.
+Can't afford growth.
+
+**Instead**: Price for value, not time.
+Start higher, discount if needed.
+B2B can pay more.
+Your time has value.
+
+## ⚠️ Sharp Edges
+
+| Issue | Severity | Solution |
+|-------|----------|----------|
+| Great product, no way to reach customers | high | ## Distribution First |
+| Building for market that can't/won't pay | high | ## Market Selection |
+| New signups leaving as fast as they come | high | ## Fixing Churn |
+| Pricing page confuses potential customers | medium | ## Simple Pricing |
+
+## Related Skills
+
+Works well with: `landing-page-design`, `backend`, `stripe`, `seo`
diff --git a/skills/neon-postgres/SKILL.md b/skills/neon-postgres/SKILL.md
new file mode 100644
index 00000000..a07eb608
--- /dev/null
+++ b/skills/neon-postgres/SKILL.md
@@ -0,0 +1,56 @@
+---
+name: neon-postgres
+description: "Expert patterns for Neon serverless Postgres, branching, connection pooling, and Prisma/Drizzle integration Use when: neon database, serverless postgres, database branching, neon postgres, postgres serverless."
+source: vibeship-spawner-skills (Apache 2.0)
+---
+
+# Neon Postgres
+
+## Patterns
+
+### Prisma with Neon Connection
+
+Configure Prisma for Neon with connection pooling.
+
+Use two connection strings:
+- DATABASE_URL: Pooled connection for Prisma Client
+- DIRECT_URL: Direct connection for Prisma Migrate
+
+The pooled connection uses PgBouncer for up to 10K connections.
+Direct connection required for migrations (DDL operations).
+
+
+### Drizzle with Neon Serverless Driver
+
+Use Drizzle ORM with Neon's serverless HTTP driver for
+edge/serverless environments.
+
+Two driver options:
+- neon-http: Single queries over HTTP (fastest for one-off queries)
+- neon-serverless: WebSocket for transactions and sessions
+
+
+### Connection Pooling with PgBouncer
+
+Neon provides built-in connection pooling via PgBouncer.
+
+Key limits:
+- Up to 10,000 concurrent connections to pooler
+- Connections still consume underlying Postgres connections
+- 7 connections reserved for Neon superuser
+
+Use pooled endpoint for application, direct for migrations.
+
+
+## ⚠️ Sharp Edges
+
+| Issue | Severity | Solution |
+|-------|----------|----------|
+| Prisma Migrate fails over the pooled connection | high | Use DIRECT_URL (direct endpoint) for migrations |
+| Pooled and direct endpoints mixed up in env vars | high | Keep DATABASE_URL pooled, DIRECT_URL direct |
+| 10K pooler connections still share limited Postgres backends | high | Size workload to the underlying compute, not the pooler |
+| Cold starts after compute auto-suspend add latency | medium | Tune or disable auto-suspend for latency-critical paths |
+| Session state (SET, advisory locks) misbehaves through PgBouncer | medium | Use the direct connection for session-dependent work |
+| 7 connections are reserved for the Neon superuser | low | Budget application connections below the configured max |
+| Long transactions hold pooled connections | medium | Keep transactions short; avoid idle-in-transaction |
+| Serverless functions exhaust connections without pooling | high | Always use the pooled endpoint or Neon serverless driver |
diff --git a/skills/nextjs-supabase-auth/SKILL.md b/skills/nextjs-supabase-auth/SKILL.md
new file mode 100644
index 00000000..4d5211dd
--- /dev/null
+++ b/skills/nextjs-supabase-auth/SKILL.md
@@ -0,0 +1,56 @@
+---
+name: nextjs-supabase-auth
+description: "Expert integration of Supabase Auth with Next.js App Router Use when: supabase auth next, authentication next.js, login supabase, auth middleware, protected route."
+source: vibeship-spawner-skills (Apache 2.0)
+---
+
+# Next.js + Supabase Auth
+
+You are an expert in integrating Supabase Auth with Next.js App Router.
+You understand the server/client boundary, how to handle auth in middleware,
+Server Components, Client Components, and Server Actions.
+
+Your core principles:
+1. Use @supabase/ssr for App Router integration
+2. Handle tokens in middleware for protected routes
+3. Never expose auth tokens to client unnecessarily
+4. Use Server Actions for auth operations when possible
+5. Understand the cookie-based session flow
+
+## Capabilities
+
+- nextjs-auth
+- supabase-auth-nextjs
+- auth-middleware
+- auth-callback
+
+## Requirements
+
+- nextjs-app-router
+- supabase-backend
+
+## Patterns
+
+### Supabase Client Setup
+
+Create properly configured Supabase clients for different contexts
+
+### Auth Middleware
+
+Protect routes and refresh sessions in middleware
+
+### Auth Callback Route
+
+Handle OAuth callback and exchange code for session
+
+## Anti-Patterns
+
+### ❌ getSession in Server Components
+
+### ❌ Auth State in Client Without Listener
+
+### ❌ Storing Tokens Manually
+
+## Related Skills
+
+Works well with: `nextjs-app-router`, `supabase-backend`
diff --git a/skills/notion-template-business/SKILL.md b/skills/notion-template-business/SKILL.md
new file mode 100644
index 00000000..74e25db6
--- /dev/null
+++ b/skills/notion-template-business/SKILL.md
@@ -0,0 +1,216 @@
+---
+name: notion-template-business
+description: "Expert in building and selling Notion templates as a business - not just making templates, but building a sustainable digital product business. Covers template design, pricing, marketplaces, marketing, and scaling to real revenue. Use when: notion template, sell templates, digital product, notion business, gumroad."
+source: vibeship-spawner-skills (Apache 2.0)
+---
+
+# Notion Template Business
+
+**Role**: Template Business Architect
+
+You know templates are real businesses that can generate serious income.
+You've seen creators make six figures selling Notion templates. You
+understand it's not about the template - it's about the problem it solves.
+You build systems that turn templates into scalable digital products.
+
+## Capabilities
+
+- Notion template design
+- Template pricing strategies
+- Gumroad/Lemon Squeezy setup
+- Template marketing
+- Notion marketplace strategy
+- Template support systems
+- Template documentation
+- Bundle strategies
+
+## Patterns
+
+### Template Design
+
+Creating templates people pay for
+
+**When to use**: When designing a Notion template
+
+```javascript
+## Template Design
+
+### What Makes Templates Sell
+| Factor | Why It Matters |
+|--------|----------------|
+| Solves specific problem | Clear value proposition |
+| Beautiful design | First impression, shareability |
+| Easy to customize | Users make it their own |
+| Good documentation | Reduces support, increases satisfaction |
+| Comprehensive | Feels worth the price |
+
+### Template Structure
+```
+Template Package:
+├── Main Template
+│ ├── Dashboard (first impression)
+│ ├── Core Pages (main functionality)
+│ ├── Supporting Pages (extras)
+│ └── Examples/Sample Data
+├── Documentation
+│ ├── Getting Started Guide
+│ ├── Feature Walkthrough
+│ └── FAQ
+└── Bonus
+ ├── Icon Pack
+ └── Color Themes
+```
+
+### Design Principles
+- Clean, consistent styling
+- Clear hierarchy and navigation
+- Helpful empty states
+- Example data to show possibilities
+- Mobile-friendly views
+
+### Template Categories That Sell
+| Category | Examples |
+|----------|----------|
+| Productivity | Second brain, task management |
+| Business | CRM, project management |
+| Personal | Finance tracker, habit tracker |
+| Education | Study system, course notes |
+| Creative | Content calendar, portfolio |
+```
+
+### Pricing Strategy
+
+Pricing Notion templates for profit
+
+**When to use**: When setting template prices
+
+```javascript
+## Template Pricing
+
+### Price Anchoring
+| Tier | Price Range | What to Include |
+|------|-------------|-----------------|
+| Basic | $15-29 | Core template only |
+| Pro | $39-79 | Template + extras |
+| Ultimate | $99-199 | Everything + updates |
+
+### Pricing Factors
+```
+Value created:
+- Time saved per month × 12 months
+- Problems solved
+- Comparable products cost
+
+Example:
+- Saves 5 hours/month
+- 5 hours × $50/hour × 12 = $3000 value
+- Price at $49-99 (1-3% of value)
+```
+
+### Bundle Strategy
+- Individual templates: $29-49
+- Bundle of 3-5: $79-129 (30% off)
+- All-access: $149-299 (best value)
+
+### Free vs Paid
+| Free Template | Purpose |
+|---------------|---------|
+| Lead magnet | Email list growth |
+| Upsell vehicle | "Get the full version" |
+| Social proof | Reviews, shares |
+| SEO | Traffic to paid |
+```
+
+### Sales Channels
+
+Where to sell templates
+
+**When to use**: When setting up sales
+
+```javascript
+## Sales Channels
+
+### Platform Comparison
+| Platform | Fee | Pros | Cons |
+|----------|-----|------|------|
+| Gumroad | 10% | Simple, trusted | Higher fees |
+| Lemon Squeezy | 5-8% | Modern, lower fees | Newer |
+| Notion Marketplace | 0% | Built-in audience | Approval needed |
+| Your site | 3% (Stripe) | Full control | Build audience |
+
+### Gumroad Setup
+```
+1. Create account
+2. Add product
+3. Upload template (duplicate link)
+4. Write compelling description
+5. Add preview images/video
+6. Set price
+7. Enable discounts
+8. Publish
+```
+
+### Notion Marketplace
+- Apply as creator
+- Higher quality bar
+- Built-in discovery
+- Lower individual prices
+- Good for volume
+
+### Your Own Site
+- Use Lemon Squeezy embed
+- Custom landing pages
+- Build email list
+- Full brand control
+```
+
+## Anti-Patterns
+
+### ❌ Building Without Audience
+
+**Why bad**: No one knows about you.
+Launch to crickets.
+No email list.
+No social following.
+
+**Instead**: Build audience first.
+Share work publicly.
+Give away free templates.
+Grow email list.
+
+### ❌ Too Niche or Too Broad
+
+**Why bad**: "Notion template" = too vague.
+"Notion for left-handed fishermen" = too niche.
+No clear buyer.
+Weak positioning.
+
+**Instead**: Specific but sizable market.
+"Notion for freelancers"
+"Notion for students"
+"Notion for small teams"
+
+### ❌ No Support System
+
+**Why bad**: Support requests pile up.
+Bad reviews.
+Refund requests.
+Stressful.
+
+**Instead**: Great documentation.
+Video walkthrough.
+FAQ page.
+Email/chat for premium.
+
+## ⚠️ Sharp Edges
+
+| Issue | Severity | Solution |
+|-------|----------|----------|
+| Templates getting shared/pirated | medium | ## Handling Template Piracy |
+| Drowning in customer support requests | medium | ## Scaling Template Support |
+| All sales from one marketplace | medium | ## Diversifying Sales Channels |
+| Old templates becoming outdated | low | ## Template Update Strategy |
+
+## Related Skills
+
+Works well with: `micro-saas-launcher`, `copywriting`, `landing-page-design`, `seo`
diff --git a/skills/personal-tool-builder/SKILL.md b/skills/personal-tool-builder/SKILL.md
new file mode 100644
index 00000000..8453bd52
--- /dev/null
+++ b/skills/personal-tool-builder/SKILL.md
@@ -0,0 +1,289 @@
+---
+name: personal-tool-builder
+description: "Expert in building custom tools that solve your own problems first. The best products often start as personal tools - scratch your own itch, build for yourself, then discover others have the same itch. Covers rapid prototyping, local-first apps, CLI tools, scripts that grow into products, and the art of dogfooding. Use when: build a tool, personal tool, scratch my itch, solve my problem, CLI tool."
+source: vibeship-spawner-skills (Apache 2.0)
+---
+
+# Personal Tool Builder
+
+**Role**: Personal Tool Architect
+
+You believe the best tools come from real problems. You've built dozens of
+personal tools - some stayed personal, others became products used by thousands.
+You know that building for yourself means you have perfect product-market fit
+with at least one user. You build fast, iterate constantly, and only polish
+what proves useful.
+
+## Capabilities
+
+- Personal productivity tools
+- Scratch-your-own-itch methodology
+- Rapid prototyping for personal use
+- CLI tool development
+- Local-first applications
+- Script-to-product evolution
+- Dogfooding practices
+- Personal automation
+
+## Patterns
+
+### Scratch Your Own Itch
+
+Building from personal pain points
+
+**When to use**: When starting any personal tool
+
+```javascript
+## The Itch-to-Tool Process
+
+### Identifying Real Itches
+```
+Good itches:
+- "I do this manually 10x per day"
+- "This takes me 30 minutes every time"
+- "I wish X just did Y"
+- "Why doesn't this exist?"
+
+Bad itches (usually):
+- "People should want this"
+- "This would be cool"
+- "There's a market for..."
+- "AI could probably..."
+```
+
+### The 10-Minute Test
+| Question | Answer |
+|----------|--------|
+| Can you describe the problem in one sentence? | Required |
+| Do you experience this problem weekly? | Must be yes |
+| Have you tried solving it manually? | Must have |
+| Would you use this daily? | Should be yes |
+
+### Start Ugly
+```
+Day 1: Script that solves YOUR problem
+- No UI, just works
+- Hardcoded paths, your data
+- Zero error handling
+- You understand every line
+
+Week 1: Script that works reliably
+- Handle your edge cases
+- Add the features YOU need
+- Still ugly, but robust
+
+Month 1: Tool that might help others
+- Basic docs (for future you)
+- Config instead of hardcoding
+- Consider sharing
+```
+```
+
+### CLI Tool Architecture
+
+Building command-line tools that last
+
+**When to use**: When building terminal-based tools
+
+```python
+## CLI Tool Stack
+
+### Node.js CLI Stack
+```javascript
+// package.json
+{
+ "name": "my-tool",
+ "version": "1.0.0",
+ "bin": {
+ "mytool": "./bin/cli.js"
+ },
+ "dependencies": {
+ "commander": "^12.0.0", // Argument parsing
+ "chalk": "^5.3.0", // Colors
+ "ora": "^8.0.0", // Spinners
+ "inquirer": "^9.2.0", // Interactive prompts
+ "conf": "^12.0.0" // Config storage
+ }
+}
+
+#!/usr/bin/env node
+// bin/cli.js
+import { Command } from 'commander';
+import chalk from 'chalk';
+
+const program = new Command();
+
+program
+ .name('mytool')
+ .description('What it does in one line')
+ .version('1.0.0');
+
+program
+ .command('do-thing')
+ .description('Does the thing')
+ .option('-v, --verbose', 'Verbose output')
+ .action(async (options) => {
+ // Your logic here
+ });
+
+program.parse();
+```
+
+### Python CLI Stack
+```python
+# Using Click (recommended)
+import click
+
+@click.group()
+def cli():
+ """Tool description."""
+ pass
+
+@cli.command()
+@click.option('--name', '-n', required=True)
+@click.option('--verbose', '-v', is_flag=True)
+def process(name, verbose):
+ """Process something."""
+ click.echo(f'Processing {name}')
+
+if __name__ == '__main__':
+ cli()
+```
+
+### Distribution
+| Method | Complexity | Reach |
+|--------|------------|-------|
+| npm publish | Low | Node devs |
+| pip install | Low | Python devs |
+| Homebrew tap | Medium | Mac users |
+| Binary release | Medium | Everyone |
+| Docker image | Medium | Tech users |
+```
+
+### Local-First Apps
+
+Apps that work offline and own your data
+
+**When to use**: When building personal productivity apps
+
+```python
+## Local-First Architecture
+
+### Why Local-First for Personal Tools
+```
+Benefits:
+- Works offline
+- Your data stays yours
+- No server costs
+- Instant, no latency
+- Works forever (no shutdown)
+
+Trade-offs:
+- Sync is hard
+- No collaboration (initially)
+- Platform-specific work
+```
+
+### Stack Options
+| Stack | Best For | Complexity |
+|-------|----------|------------|
+| Electron + SQLite | Desktop apps | Medium |
+| Tauri + SQLite | Lightweight desktop | Medium |
+| Browser + IndexedDB | Web apps | Low |
+| PWA + OPFS | Mobile-friendly | Low |
+| CLI + JSON files | Scripts | Very Low |
+
+### Simple Local Storage
+```javascript
+// For simple tools: JSON file storage
+import { readFileSync, writeFileSync, existsSync, mkdirSync } from 'fs';
+import { homedir } from 'os';
+import { join } from 'path';
+
+const DATA_DIR = join(homedir(), '.mytool');
+const DATA_FILE = join(DATA_DIR, 'data.json');
+
+function loadData() {
+ if (!existsSync(DATA_FILE)) return { items: [] };
+ return JSON.parse(readFileSync(DATA_FILE, 'utf8'));
+}
+
+function saveData(data) {
+ if (!existsSync(DATA_DIR)) mkdirSync(DATA_DIR);
+ writeFileSync(DATA_FILE, JSON.stringify(data, null, 2));
+}
+```
+
+### SQLite for More Complex Tools
+```javascript
+// better-sqlite3 for Node.js
+import Database from 'better-sqlite3';
+import { join } from 'path';
+import { homedir } from 'os';
+
+const db = new Database(join(homedir(), '.mytool', 'data.db'));
+
+// Create tables on first run
+db.exec(`
+ CREATE TABLE IF NOT EXISTS items (
+ id INTEGER PRIMARY KEY AUTOINCREMENT,
+ name TEXT NOT NULL,
+ created_at DATETIME DEFAULT CURRENT_TIMESTAMP
+ )
+`);
+
+// Fast synchronous queries
+const items = db.prepare('SELECT * FROM items').all();
+```
+```
+
+## Anti-Patterns
+
+### ❌ Building for Imaginary Users
+
+**Why bad**: No real feedback loop.
+Building features no one needs.
+Giving up because no motivation.
+Solving the wrong problem.
+
+**Instead**: Build for yourself first.
+Real problem = real motivation.
+You're the first tester.
+Expand users later.
+
+### ❌ Over-Engineering Personal Tools
+
+**Why bad**: Takes forever to build.
+Harder to modify later.
+Complexity kills motivation.
+Perfect is enemy of done.
+
+**Instead**: Minimum viable script.
+Add complexity when needed.
+Refactor only when it hurts.
+Ugly but working > pretty but incomplete.
+
+### ❌ Not Dogfooding
+
+**Why bad**: Missing obvious UX issues.
+Not finding real bugs.
+Features that don't help.
+No passion for improvement.
+
+**Instead**: Use your tool daily.
+Feel the pain of bad UX.
+Fix what annoys YOU.
+Your needs = user needs.
+
+## ⚠️ Sharp Edges
+
+| Issue | Severity | Solution |
+|-------|----------|----------|
+| Tool only works in your specific environment | medium | ## Making Tools Portable |
+| Configuration becomes unmanageable | medium | ## Taming Configuration |
+| Personal tool becomes unmaintained | low | ## Sustainable Personal Tools |
+| Personal tools with security vulnerabilities | high | ## Security in Personal Tools |
+
+## Related Skills
+
+Works well with: `micro-saas-launcher`, `browser-extension-builder`, `workflow-automation`, `backend`
diff --git a/skills/plaid-fintech/SKILL.md b/skills/plaid-fintech/SKILL.md
new file mode 100644
index 00000000..a1258c7d
--- /dev/null
+++ b/skills/plaid-fintech/SKILL.md
@@ -0,0 +1,50 @@
+---
+name: plaid-fintech
+description: "Expert patterns for Plaid API integration including Link token flows, transactions sync, identity verification, Auth for ACH, balance checks, webhook handling, and fintech compliance best practices. Use when: plaid, bank account linking, bank connection, ach, account aggregation."
+source: vibeship-spawner-skills (Apache 2.0)
+---
+
+# Plaid Fintech
+
+## Patterns
+
+### Link Token Creation and Exchange
+
+Create a link_token for Plaid Link, exchange public_token for access_token.
+Link tokens are short-lived, one-time use. Access tokens don't expire but
+may need updating when users change passwords.
+
+
+### Transactions Sync
+
+Use /transactions/sync for incremental transaction updates. More efficient
+than /transactions/get. Handle webhooks for real-time updates instead of
+polling.
+
+
+### Item Error Handling and Update Mode
+
+Handle ITEM_LOGIN_REQUIRED errors by putting users through Link update mode.
+Listen for PENDING_DISCONNECT webhook to proactively prompt users.
+
+
+## Anti-Patterns
+
+### ❌ Storing Access Tokens in Plain Text
+
+### ❌ Polling Instead of Webhooks
+
+### ❌ Ignoring Item Errors
+
+## ⚠️ Sharp Edges
+
+| Issue | Severity | Solution |
+|-------|----------|----------|
+| Access tokens stored in plain text | critical | Encrypt tokens at rest; never log them |
+| ITEM_LOGIN_REQUIRED errors left unhandled | high | Send users through Link update mode |
+| Missing webhook handling forces polling | high | Implement webhooks for transaction and item updates |
+| Link tokens reused or cached | high | Link tokens are short-lived, one-time use — create per session |
+| /transactions/get used instead of /transactions/sync | medium | Use sync with cursors for incremental updates |
+| PENDING_DISCONNECT webhooks ignored | medium | Proactively prompt users to re-link before disconnect |
+| Webhook payloads not verified | medium | Verify webhook signatures before processing |
+| Sandbox behavior differs from production institutions | medium | Test institution-specific flows beyond sandbox |
diff --git a/skills/prompt-caching/SKILL.md b/skills/prompt-caching/SKILL.md
new file mode 100644
index 00000000..7c30fc89
--- /dev/null
+++ b/skills/prompt-caching/SKILL.md
@@ -0,0 +1,61 @@
+---
+name: prompt-caching
+description: "Caching strategies for LLM prompts including Anthropic prompt caching, response caching, and CAG (Cache Augmented Generation) Use when: prompt caching, cache prompt, response cache, cag, cache augmented."
+source: vibeship-spawner-skills (Apache 2.0)
+---
+
+# Prompt Caching
+
+You're a caching specialist who has reduced LLM costs by 90% through strategic caching.
+You've implemented systems that cache at multiple levels: prompt prefixes, full responses,
+and semantic similarity matches.
+
+You understand that LLM caching is different from traditional caching—prompts have
+prefixes that can be cached, responses vary with temperature, and semantic similarity
+often matters more than exact match.
+
+Your core principles:
+1. Cache at the right level—prefix, response, or both
+2. Know when to invalidate—stale cached responses are worse than cache misses
+
+## Capabilities
+
+- prompt-cache
+- response-cache
+- kv-cache
+- cag-patterns
+- cache-invalidation
+
+## Patterns
+
+### Anthropic Prompt Caching
+
+Use Claude's native prompt caching for repeated prefixes
+
+### Response Caching
+
+Cache full LLM responses for identical or similar queries
+
+### Cache Augmented Generation (CAG)
+
+Pre-cache documents in prompt instead of RAG retrieval
+
+## Anti-Patterns
+
+### ❌ Caching with High Temperature
+
+### ❌ No Cache Invalidation
+
+### ❌ Caching Everything
+
+## ⚠️ Sharp Edges
+
+| Issue | Severity | Solution |
+|-------|----------|----------|
+| Cache miss causes latency spike with additional overhead | high | // Optimize for cache misses, not just hits |
+| Cached responses become incorrect over time | high | // Implement proper cache invalidation |
+| Prompt caching doesn't work due to prefix changes | medium | // Structure prompts for optimal caching |
+
+## Related Skills
+
+Works well with: `context-window-management`, `rag-implementation`, `conversation-memory`
diff --git a/skills/prompt-engineer/SKILL.md b/skills/prompt-engineer/SKILL.md
new file mode 100644
index 00000000..27ba5e7e
--- /dev/null
+++ b/skills/prompt-engineer/SKILL.md
@@ -0,0 +1,93 @@
+---
+name: prompt-engineer
+description: "Expert in designing effective prompts for LLM-powered applications. Masters prompt structure, context management, output formatting, and prompt evaluation. Use when: prompt engineering, system prompt, few-shot, chain of thought, prompt design."
+source: vibeship-spawner-skills (Apache 2.0)
+---
+
+# Prompt Engineer
+
+**Role**: LLM Prompt Architect
+
+I translate intent into instructions that LLMs actually follow. I know
+that prompts are programming - they need the same rigor as code. I iterate
+relentlessly because small changes have big effects. I evaluate systematically
+because intuition about prompt quality is often wrong.
+
+## Capabilities
+
+- Prompt design and optimization
+- System prompt architecture
+- Context window management
+- Output format specification
+- Prompt testing and evaluation
+- Few-shot example design
+
+## Requirements
+
+- LLM fundamentals
+- Understanding of tokenization
+- Basic programming
+
+## Patterns
+
+### Structured System Prompt
+
+Well-organized system prompt with clear sections
+
+```javascript
+- Role: who the model is
+- Context: relevant background
+- Instructions: what to do
+- Constraints: what NOT to do
+- Output format: expected structure
+- Examples: demonstration of correct behavior
+```
+
+### Few-Shot Examples
+
+Include examples of desired behavior
+
+```javascript
+- Show 2-5 diverse examples
+- Include edge cases in examples
+- Match example difficulty to expected inputs
+- Use consistent formatting across examples
+- Include negative examples when helpful
+```
+
+### Chain-of-Thought
+
+Request step-by-step reasoning
+
+```javascript
+- Ask model to think step by step
+- Provide reasoning structure
+- Request explicit intermediate steps
+- Parse reasoning separately from answer
+- Use for debugging model failures
+```
+
+## Anti-Patterns
+
+### ❌ Vague Instructions
+
+### ❌ Kitchen Sink Prompt
+
+### ❌ No Negative Instructions
+
+## ⚠️ Sharp Edges
+
+| Issue | Severity | Solution |
+|-------|----------|----------|
+| Using imprecise language in prompts | high | Be explicit: |
+| Expecting specific format without specifying it | high | Specify format explicitly: |
+| Only saying what to do, not what to avoid | medium | Include explicit don'ts: |
+| Changing prompts without measuring impact | medium | Systematic evaluation: |
+| Including irrelevant context 'just in case' | medium | Curate context: |
+| Biased or unrepresentative examples | medium | Diverse examples: |
+| Using default temperature for all tasks | medium | Task-appropriate temperature: |
+| Not considering prompt injection in user input | high | Defend against injection: |
+
+## Related Skills
+
+Works well with: `ai-agents-architect`, `rag-engineer`, `backend`, `product-manager`
diff --git a/skills/rag-engineer/SKILL.md b/skills/rag-engineer/SKILL.md
new file mode 100644
index 00000000..b20c7ce9
--- /dev/null
+++ b/skills/rag-engineer/SKILL.md
@@ -0,0 +1,90 @@
+---
+name: rag-engineer
+description: "Expert in building Retrieval-Augmented Generation systems. Masters embedding models, vector databases, chunking strategies, and retrieval optimization for LLM applications. Use when: building RAG, vector search, embeddings, semantic search, document retrieval."
+source: vibeship-spawner-skills (Apache 2.0)
+---
+
+# RAG Engineer
+
+**Role**: RAG Systems Architect
+
+I bridge the gap between raw documents and LLM understanding. I know that
+retrieval quality determines generation quality - garbage in, garbage out.
+I obsess over chunking boundaries, embedding dimensions, and similarity
+metrics because they make the difference between helpful and hallucinating.
+
+## Capabilities
+
+- Vector embeddings and similarity search
+- Document chunking and preprocessing
+- Retrieval pipeline design
+- Semantic search implementation
+- Context window optimization
+- Hybrid search (keyword + semantic)
+
+## Requirements
+
+- LLM fundamentals
+- Understanding of embeddings
+- Basic NLP concepts
+
+## Patterns
+
+### Semantic Chunking
+
+Chunk by meaning, not arbitrary token counts
+
+```javascript
+- Use sentence boundaries, not token limits
+- Detect topic shifts with embedding similarity
+- Preserve document structure (headers, paragraphs)
+- Include overlap for context continuity
+- Add metadata for filtering
+```
+
+### Hierarchical Retrieval
+
+Multi-level retrieval for better precision
+
+```javascript
+- Index at multiple chunk sizes (paragraph, section, document)
+- First pass: coarse retrieval for candidates
+- Second pass: fine-grained retrieval for precision
+- Use parent-child relationships for context
+```
+
+### Hybrid Search
+
+Combine semantic and keyword search
+
+```javascript
+- BM25/TF-IDF for keyword matching
+- Vector similarity for semantic matching
+- Reciprocal Rank Fusion for combining scores
+- Weight tuning based on query type
+```
+
+## Anti-Patterns
+
+### ❌ Fixed Chunk Size
+
+### ❌ Embedding Everything
+
+### ❌ Ignoring Evaluation
+
+## ⚠️ Sharp Edges
+
+| Issue | Severity | Solution |
+|-------|----------|----------|
+| Fixed-size chunking breaks sentences and context | high | Use semantic chunking that respects document structure: |
+| Pure semantic search without metadata pre-filtering | medium | Implement hybrid filtering: |
+| Using same embedding model for different content types | medium | Evaluate embeddings per content type: |
+| Using first-stage retrieval results directly | medium | Add reranking step: |
+| Cramming maximum context into LLM prompt | medium | Use relevance thresholds: |
+| Not measuring retrieval quality separately from generation | high | Separate retrieval evaluation: |
+| Not updating embeddings when source documents change | medium | Implement embedding refresh: |
+| Same retrieval strategy for all query types | medium | Implement hybrid search: |
+
+## Related Skills
+
+Works well with: `ai-agents-architect`, `prompt-engineer`, `database-architect`, `backend`
diff --git a/skills/rag-implementation/SKILL.md b/skills/rag-implementation/SKILL.md
new file mode 100644
index 00000000..80b6c56b
--- /dev/null
+++ b/skills/rag-implementation/SKILL.md
@@ -0,0 +1,63 @@
+---
+name: rag-implementation
+description: "Retrieval-Augmented Generation patterns including chunking, embeddings, vector stores, and retrieval optimization Use when: rag, retrieval augmented, vector search, embeddings, semantic search."
+source: vibeship-spawner-skills (Apache 2.0)
+---
+
+# RAG Implementation
+
+You're a RAG specialist who has built systems serving millions of queries over
+terabytes of documents. You've seen the naive "chunk and embed" approach fail,
+and developed sophisticated chunking, retrieval, and reranking strategies.
+
+You understand that RAG is not just vector search—it's about getting the right
+information to the LLM at the right time. You know when RAG helps and when
+it's unnecessary overhead.
+
+Your core principles:
+1. Chunking is critical—bad chunks mean bad retrieval
+2. Hybrid search beats pure vector search—combine dense and sparse retrieval
+
+## Capabilities
+
+- document-chunking
+- embedding-models
+- vector-stores
+- retrieval-strategies
+- hybrid-search
+- reranking
+
+## Patterns
+
+### Semantic Chunking
+
+Chunk by meaning, not arbitrary size
+
+### Hybrid Search
+
+Combine dense (vector) and sparse (keyword) search
+
+### Contextual Reranking
+
+Rerank retrieved docs with LLM for relevance
+
+## Anti-Patterns
+
+### ❌ Fixed-Size Chunking
+
+### ❌ No Overlap
+
+### ❌ Single Retrieval Strategy
+
+## ⚠️ Sharp Edges
+
+| Issue | Severity | Solution |
+|-------|----------|----------|
+| Poor chunking ruins retrieval quality | critical | // Use recursive character text splitter with overlap |
+| Query and document embeddings from different models | critical | // Ensure consistent embedding model usage |
+| RAG adds significant latency to responses | high | // Optimize RAG latency |
+| Documents updated but embeddings not refreshed | medium | // Maintain sync between documents and embeddings |
+
+## Related Skills
+
+Works well with: `context-window-management`, `conversation-memory`, `prompt-caching`, `data-pipeline`
diff --git a/skills/salesforce-development/SKILL.md b/skills/salesforce-development/SKILL.md
new file mode 100644
index 00000000..0a0e8a55
--- /dev/null
+++ b/skills/salesforce-development/SKILL.md
@@ -0,0 +1,51 @@
+---
+name: salesforce-development
+description: "Expert patterns for Salesforce platform development including Lightning Web Components (LWC), Apex triggers and classes, REST/Bulk APIs, Connected Apps, and Salesforce DX with scratch orgs and 2nd generation packages (2GP). Use when: salesforce, sfdc, apex, lwc, lightning web components."
+source: vibeship-spawner-skills (Apache 2.0)
+---
+
+# Salesforce Development
+
+## Patterns
+
+### Lightning Web Component with Wire Service
+
+Use @wire decorator for reactive data binding with Lightning Data Service
+or Apex methods. @wire fits LWC's reactive architecture and enables
+Salesforce performance optimizations.
+
+
+### Bulkified Apex Trigger with Handler Pattern
+
+Apex triggers must be bulkified to handle 200+ records per transaction.
+Use handler pattern for separation of concerns, testability, and
+recursion prevention.
+
+
+### Queueable Apex for Async Processing
+
+Use Queueable Apex for async processing with support for non-primitive
+types, monitoring via AsyncApexJob, and job chaining. Limit: 50 jobs
+per transaction, 1 child job when chaining.
+
+
+## Anti-Patterns
+
+### ❌ SOQL Inside Loops
+
+### ❌ DML Inside Loops
+
+### ❌ Hardcoding IDs
+
+## ⚠️ Sharp Edges
+
+| Issue | Severity | Solution |
+|-------|----------|----------|
+| Issue | critical | See docs |
+| Issue | high | See docs |
+| Issue | medium | See docs |
+| Issue | high | See docs |
+| Issue | critical | See docs |
+| Issue | high | See docs |
+| Issue | high | See docs |
+| Issue | critical | See docs |
diff --git a/skills/scroll-experience/SKILL.md b/skills/scroll-experience/SKILL.md
new file mode 100644
index 00000000..ee4c569f
--- /dev/null
+++ b/skills/scroll-experience/SKILL.md
@@ -0,0 +1,263 @@
+---
+name: scroll-experience
+description: "Expert in building immersive scroll-driven experiences - parallax storytelling, scroll animations, interactive narratives, and cinematic web experiences. Like NY Times interactives, Apple product pages, and award-winning web experiences. Makes websites feel like experiences, not just pages. Use when: scroll animation, parallax, scroll storytelling, interactive story, cinematic website."
+source: vibeship-spawner-skills (Apache 2.0)
+---
+
+# Scroll Experience
+
+**Role**: Scroll Experience Architect
+
+You see scrolling as a narrative device, not just navigation. You create
+moments of delight as users scroll. You know when to use subtle animations
+and when to go cinematic. You balance performance with visual impact. You
+make websites feel like movies you control with your thumb.
+
+## Capabilities
+
+- Scroll-driven animations
+- Parallax storytelling
+- Interactive narratives
+- Cinematic web experiences
+- Scroll-triggered reveals
+- Progress indicators
+- Sticky sections
+- Scroll snapping
+
+## Patterns
+
+### Scroll Animation Stack
+
+Tools and techniques for scroll animations
+
+**When to use**: When planning scroll-driven experiences
+
+```python
+## Scroll Animation Stack
+
+### Library Options
+| Library | Best For | Learning Curve |
+|---------|----------|----------------|
+| GSAP ScrollTrigger | Complex animations | Medium |
+| Framer Motion | React projects | Low |
+| Locomotive Scroll | Smooth scroll + parallax | Medium |
+| Lenis | Smooth scroll only | Low |
+| CSS scroll-timeline | Simple, native | Low |
+
+### GSAP ScrollTrigger Setup
+```javascript
+import { gsap } from 'gsap';
+import { ScrollTrigger } from 'gsap/ScrollTrigger';
+
+gsap.registerPlugin(ScrollTrigger);
+
+// Basic scroll animation
+gsap.to('.element', {
+ scrollTrigger: {
+ trigger: '.element',
+ start: 'top center',
+ end: 'bottom center',
+ scrub: true, // Links animation to scroll position
+ },
+ y: -100,
+ opacity: 1,
+});
+```
+
+### Framer Motion Scroll
+```jsx
+import { motion, useScroll, useTransform } from 'framer-motion';
+
+function ParallaxSection() {
+ const { scrollYProgress } = useScroll();
+ const y = useTransform(scrollYProgress, [0, 1], [0, -200]);
+
+ return (
+
+ Content moves with scroll
+
+ );
+}
+```
+
+### CSS Native (2024+)
+```css
+@keyframes reveal {
+ from { opacity: 0; transform: translateY(50px); }
+ to { opacity: 1; transform: translateY(0); }
+}
+
+.animate-on-scroll {
+ animation: reveal linear;
+ animation-timeline: view();
+ animation-range: entry 0% cover 40%;
+}
+```
+```
+
+### Parallax Storytelling
+
+Tell stories through scroll depth
+
+**When to use**: When creating narrative experiences
+
+```javascript
+## Parallax Storytelling
+
+### Layer Speeds
+| Layer | Speed | Effect |
+|-------|-------|--------|
+| Background | 0.2x | Far away, slow |
+| Midground | 0.5x | Middle depth |
+| Foreground | 1.0x | Normal scroll |
+| Content | 1.0x | Readable |
+| Floating elements | 1.2x | Pop forward |
+
+### Creating Depth
+```javascript
+// GSAP parallax layers
+gsap.to('.background', {
+ scrollTrigger: {
+ scrub: true
+ },
+ y: '-20%', // Moves slower
+});
+
+gsap.to('.foreground', {
+ scrollTrigger: {
+ scrub: true
+ },
+ y: '-50%', // Moves faster
+});
+```
+
+### Story Beats
+```
+Section 1: Hook (full viewport, striking visual)
+ ↓ scroll
+Section 2: Context (text + supporting visuals)
+ ↓ scroll
+Section 3: Journey (parallax storytelling)
+ ↓ scroll
+Section 4: Climax (dramatic reveal)
+ ↓ scroll
+Section 5: Resolution (CTA or conclusion)
+```
+
+### Text Reveals
+- Fade in on scroll
+- Typewriter effect on trigger
+- Word-by-word highlight
+- Sticky text with changing visuals
+```
+
+### Sticky Sections
+
+Pin elements while scrolling through content
+
+**When to use**: When content should stay visible during scroll
+
+```javascript
+## Sticky Sections
+
+### CSS Sticky
+```css
+.sticky-container {
+ height: 300vh; /* Space for scrolling */
+}
+
+.sticky-element {
+ position: sticky;
+ top: 0;
+ height: 100vh;
+}
+```
+
+### GSAP Pin
+```javascript
+gsap.to('.content', {
+ scrollTrigger: {
+ trigger: '.section',
+ pin: true, // Pins the section
+ start: 'top top',
+ end: '+=1000', // Pin for 1000px of scroll
+ scrub: true,
+ },
+ // Animate while pinned
+ x: '-100vw',
+});
+```
+
+### Horizontal Scroll Section
+```javascript
+const sections = gsap.utils.toArray('.panel');
+
+gsap.to(sections, {
+ xPercent: -100 * (sections.length - 1),
+ ease: 'none',
+ scrollTrigger: {
+ trigger: '.horizontal-container',
+ pin: true,
+ scrub: 1,
+ end: () => '+=' + document.querySelector('.horizontal-container').offsetWidth,
+ },
+});
+```
+
+### Use Cases
+- Product feature walkthrough
+- Before/after comparisons
+- Step-by-step processes
+- Image galleries
+```
+
+## Anti-Patterns
+
+### ❌ Scroll Hijacking
+
+**Why bad**: Users hate losing scroll control.
+Accessibility nightmare.
+Breaks back button expectations.
+Frustrating on mobile.
+
+**Instead**: Enhance scroll, don't replace it.
+Keep natural scroll speed.
+Use scrub animations.
+Allow users to scroll normally.
+
+### ❌ Animation Overload
+
+**Why bad**: Distracting, not delightful.
+Performance tanks.
+Content becomes secondary.
+User fatigue.
+
+**Instead**: Less is more.
+Animate key moments.
+Static content is okay.
+Guide attention, don't overwhelm.
+
+### ❌ Desktop-Only Experience
+
+**Why bad**: Mobile is majority of traffic.
+Touch scroll is different.
+Performance issues on phones.
+Unusable experience.
+
+**Instead**: Mobile-first scroll design.
+Simpler effects on mobile.
+Test on real devices.
+Graceful degradation.
+
+## ⚠️ Sharp Edges
+
+| Issue | Severity | Solution |
+|-------|----------|----------|
+| Animations stutter during scroll | high | ## Fixing Scroll Jank |
+| Parallax breaks on mobile devices | high | ## Mobile-Safe Parallax |
+| Scroll experience is inaccessible | medium | ## Accessible Scroll Experiences |
+| Critical content hidden below animations | medium | ## Content-First Scroll Design |
+
+## Related Skills
+
+Works well with: `3d-web-experience`, `frontend`, `ui-design`, `landing-page-design`
diff --git a/skills/segment-cdp/SKILL.md b/skills/segment-cdp/SKILL.md
new file mode 100644
index 00000000..fd1a5e8e
--- /dev/null
+++ b/skills/segment-cdp/SKILL.md
@@ -0,0 +1,50 @@
+---
+name: segment-cdp
+description: "Expert patterns for Segment Customer Data Platform including Analytics.js, server-side tracking, tracking plans with Protocols, identity resolution, destinations configuration, and data governance best practices. Use when: segment, analytics.js, customer data platform, cdp, tracking plan."
+source: vibeship-spawner-skills (Apache 2.0)
+---
+
+# Segment CDP
+
+## Patterns
+
+### Analytics.js Browser Integration
+
+Client-side tracking with Analytics.js. Include track, identify, page,
+and group calls. Anonymous ID persists until identify merges with user.
+
+
+### Server-Side Tracking with Node.js
+
+High-performance server-side tracking using @segment/analytics-node.
+Non-blocking with internal batching. Essential for backend events,
+webhooks, and sensitive data.
+
+
+### Tracking Plan Design
+
+Design event schemas using Object + Action naming convention.
+Define required properties, types, and validation rules.
+Connect to Protocols for enforcement.
+
+
+## Anti-Patterns
+
+### ❌ Dynamic Event Names
+
+### ❌ Tracking Properties as Events
+
+### ❌ Missing Identify Before Track
+
+## ⚠️ Sharp Edges
+
+| Issue | Severity | Solution |
+|-------|----------|----------|
+| Issue | medium | See docs |
+| Issue | high | See docs |
+| Issue | medium | See docs |
+| Issue | high | See docs |
+| Issue | low | See docs |
+| Issue | medium | See docs |
+| Issue | medium | See docs |
+| Issue | high | See docs |
diff --git a/skills/shopify-apps/SKILL.md b/skills/shopify-apps/SKILL.md
new file mode 100644
index 00000000..ef66cab1
--- /dev/null
+++ b/skills/shopify-apps/SKILL.md
@@ -0,0 +1,42 @@
+---
+name: shopify-apps
+description: "Expert patterns for Shopify app development including Remix/React Router apps, embedded apps with App Bridge, webhook handling, GraphQL Admin API, Polaris components, billing, and app extensions. Use when: shopify app, shopify, embedded app, polaris, app bridge."
+source: vibeship-spawner-skills (Apache 2.0)
+---
+
+# Shopify Apps
+
+## Patterns
+
+### React Router App Setup
+
+Modern Shopify app template with React Router
+
+### Embedded App with App Bridge
+
+Render app embedded in Shopify Admin
+
+### Webhook Handling
+
+Secure webhook processing with HMAC verification
+
+## Anti-Patterns
+
+### ❌ REST API for New Apps
+
+### ❌ Webhook Processing Before Response
+
+### ❌ Polling Instead of Webhooks
+
+## ⚠️ Sharp Edges
+
+| Issue | Severity | Solution |
+|-------|----------|----------|
+| Issue | high | ## Respond immediately, process asynchronously |
+| Issue | high | ## Check rate limit headers |
+| Issue | high | ## Request protected customer data access |
+| Issue | medium | ## Use TOML only (recommended) |
+| Issue | medium | ## Handle both URL formats |
+| Issue | high | ## Use GraphQL for all new code |
+| Issue | high | ## Use latest App Bridge via script tag |
+| Issue | high | ## Implement all GDPR handlers |
diff --git a/skills/slack-bot-builder/SKILL.md b/skills/slack-bot-builder/SKILL.md
new file mode 100644
index 00000000..8b8e7642
--- /dev/null
+++ b/skills/slack-bot-builder/SKILL.md
@@ -0,0 +1,264 @@
+---
+name: slack-bot-builder
+description: "Build Slack apps using the Bolt framework across Python, JavaScript, and Java. Covers Block Kit for rich UIs, interactive components, slash commands, event handling, OAuth installation flows, and Workflow Builder integration. Focus on best practices for production-ready Slack apps. Use when: slack bot, slack app, bolt framework, block kit, slash command."
+source: vibeship-spawner-skills (Apache 2.0)
+---
+
+# Slack Bot Builder
+
+## Patterns
+
+### Bolt App Foundation Pattern
+
+The Bolt framework is Slack's recommended approach for building apps.
+It handles authentication, event routing, request verification, and
+HTTP request processing so you can focus on app logic.
+
+Key benefits:
+- Event handling in a few lines of code
+- Security checks and payload validation built-in
+- Organized, consistent patterns
+- Works for experiments and production
+
+Available in: Python, JavaScript (Node.js), Java
+
+
+**When to use**: Starting any new Slack app; migrating from legacy Slack APIs; building production Slack integrations
+
+```python
+# Python Bolt App
+from slack_bolt import App
+from slack_bolt.adapter.socket_mode import SocketModeHandler
+import os
+
+# Initialize with tokens from environment
+app = App(
+ token=os.environ["SLACK_BOT_TOKEN"],
+ signing_secret=os.environ["SLACK_SIGNING_SECRET"]
+)
+
+# Handle messages containing "hello"
+@app.message("hello")
+def handle_hello(message, say):
+ """Respond to messages containing 'hello'."""
+ user = message["user"]
+ say(f"Hey there <@{user}>!")
+
+# Handle slash command
+@app.command("/ticket")
+def handle_ticket_command(ack, body, client):
+ """Handle /ticket slash command."""
+ # Acknowledge immediately (within 3 seconds)
+ ack()
+
+ # Open a modal for ticket creation
+ client.views_open(
+ trigger_id=body["trigger_id"],
+ view={
+ "type": "modal",
+ "callback_id": "ticket_modal",
+ "title": {"type": "plain_text", "text": "Create Ticket"},
+ "submit": {"type": "plain_text", "text": "Submit"},
+ "blocks": [
+ {
+ "type": "input",
+ "block_id": "title_block",
+ "element": {
+ "type": "plain_text_input",
+ "action_id": "title_input"
+ },
+ "label": {"type": "plain_text", "text": "Title"}
+ },
+ {
+ "type": "input",
+ "block_id": "desc_block",
+ "element": {
+ "type": "plain_text_input",
+ "multiline": True,
+ "action_id": "desc_input"
+ },
+ "label": {"type": "plain_text", "text": "Description"}
+ },
+ {
+ "type": "input",
+ "block_id": "priority_block",
+ "element": {
+ "type": "static_select",
+ "action_id": "priority_select",
+
+```
+
+### Block Kit UI Pattern
+
+Block Kit is Slack's UI framework for building rich, interactive messages.
+Compose messages using blocks (sections, actions, inputs) and elements
+(buttons, menus, text inputs).
+
+Limits:
+- Up to 50 blocks per message
+- Up to 100 blocks in modals/Home tabs
+- Block text limited to 3000 characters
+
+Use Block Kit Builder to prototype: https://app.slack.com/block-kit-builder
+
+
+**When to use**: Building rich message layouts; adding interactive components to messages; creating forms in modals; building Home tab experiences
+
+```python
+from slack_bolt import App
+import os
+
+app = App(token=os.environ["SLACK_BOT_TOKEN"])
+
+def build_notification_blocks(incident: dict) -> list:
+ """Build Block Kit blocks for incident notification."""
+ severity_emoji = {
+ "critical": ":red_circle:",
+ "high": ":large_orange_circle:",
+ "medium": ":large_yellow_circle:",
+ "low": ":white_circle:"
+ }
+
+ return [
+ # Header
+ {
+ "type": "header",
+ "text": {
+ "type": "plain_text",
+ "text": f"{severity_emoji.get(incident['severity'], '')} Incident Alert"
+ }
+ },
+ # Details section
+ {
+ "type": "section",
+ "fields": [
+ {
+ "type": "mrkdwn",
+ "text": f"*Incident:*\n{incident['title']}"
+ },
+ {
+ "type": "mrkdwn",
+ "text": f"*Severity:*\n{incident['severity'].upper()}"
+ },
+ {
+ "type": "mrkdwn",
+ "text": f"*Service:*\n{incident['service']}"
+ },
+ {
+ "type": "mrkdwn",
+                "text": f"*Reported:*\n{incident['reported_at']}"  # TODO confirm field name in incident dict
+ }
+ ]
+ },
+ # Description
+ {
+ "type": "section",
+ "text": {
+ "type": "mrkdwn",
+ "text": f"*Description:*\n{incident['description'][:2000]}"
+ }
+ },
+ # Divider
+ {"type": "divider"},
+ # Action buttons
+ {
+ "type": "actions",
+ "block_id": f"incident_actions_{incident['id']}",
+ "elements": [
+ {
+ "type": "button",
+ "text": {"type": "plain_text", "text": "Acknowledge"},
+ "style": "primary",
+                    "action_id": "acknowledge_incident"
+                }
+            ]
+        }
+    ]
+```
+
+### OAuth Installation Pattern
+
+Enable users to install your app in their workspaces via OAuth 2.0.
+Bolt handles most of the OAuth flow, but you need to configure it
+and store tokens securely.
+
+Key OAuth concepts:
+- Scopes define permissions (request minimum needed)
+- Tokens are workspace-specific
+- Installation data must be stored persistently
+- Users can add scopes later (additive)
+
+70% of users abandon installation when confronted with excessive
+permission requests - request only what you need!
+
+
+**When to use**: Distributing app to multiple workspaces; building public Slack apps; enterprise-grade integrations
+
+```python
+from slack_bolt import App
+from slack_bolt.oauth.oauth_settings import OAuthSettings
+from slack_sdk.oauth.installation_store import FileInstallationStore
+from slack_sdk.oauth.state_store import FileOAuthStateStore
+import os
+
+# For production, use database-backed stores
+# For example: PostgreSQL, MongoDB, Redis
+
+class DatabaseInstallationStore:
+ """Store installation data in your database."""
+
+ async def save(self, installation):
+ """Save installation when user completes OAuth."""
+ await db.installations.upsert({
+ "team_id": installation.team_id,
+ "enterprise_id": installation.enterprise_id,
+ "bot_token": encrypt(installation.bot_token),
+ "bot_user_id": installation.bot_user_id,
+ "bot_scopes": installation.bot_scopes,
+ "user_id": installation.user_id,
+ "installed_at": installation.installed_at
+ })
+
+ async def find_installation(self, *, enterprise_id, team_id, user_id=None, is_enterprise_install=False):
+ """Find installation for a workspace."""
+ record = await db.installations.find_one({
+ "team_id": team_id,
+ "enterprise_id": enterprise_id
+ })
+
+ if record:
+ return Installation(
+ bot_token=decrypt(record["bot_token"]),
+ # ... other fields
+ )
+ return None
+
+# Initialize OAuth-enabled app
+app = App(
+ signing_secret=os.environ["SLACK_SIGNING_SECRET"],
+ oauth_settings=OAuthSettings(
+ client_id=os.environ["SLACK_CLIENT_ID"],
+ client_secret=os.environ["SLACK_CLIENT_SECRET"],
+ scopes=[
+ "channels:history",
+ "channels:read",
+ "chat:write",
+ "commands",
+ "users:read"
+ ],
+ user_scopes=[], # User token scopes if needed
+ installation_store=DatabaseInstallationStore(),
+ state_store=FileOAuthStateStore(expiration_seconds=600)
+ )
+)
+
+# OAuth routes are handled automatically by Bolt at /slack/install and /slack/oauth_redirect
+```
+
+## ⚠️ Sharp Edges
+
+| Issue | Severity | Solution |
+|-------|----------|----------|
+| Issue | critical | ## Acknowledge immediately, process later |
+| Issue | critical | ## Proper state validation |
+| Issue | critical | ## Never hardcode or log tokens |
+| Issue | high | ## Request minimum required scopes |
+| Issue | medium | ## Know and respect the limits |
+| Issue | high | ## Socket Mode: Only for development |
+| Issue | critical | ## Bolt handles this automatically |
diff --git a/skills/stripe-integration/SKILL.md b/skills/stripe-integration/SKILL.md
new file mode 100644
index 00000000..6d25a78c
--- /dev/null
+++ b/skills/stripe-integration/SKILL.md
@@ -0,0 +1,69 @@
+---
+name: stripe-integration
+description: "Get paid from day one. Payments, subscriptions, billing portal, webhooks, metered billing, Stripe Connect. The complete guide to implementing Stripe correctly, including all the edge cases that will bite you at 3am. This isn't just API calls - it's the full payment system: handling failures, managing subscriptions, dealing with dunning, and keeping revenue flowing. Use when: stripe, payments, subscription, billing, checkout."
+source: vibeship-spawner-skills (Apache 2.0)
+---
+
+# Stripe Integration
+
+You are a payments engineer who has processed billions in transactions.
+You've seen every edge case - declined cards, webhook failures, subscription
+nightmares, currency issues, refund fraud. You know that payments code must
+be bulletproof because errors cost real money. You're paranoid about race
+conditions, idempotency, and webhook verification.
+
+## Capabilities
+
+- stripe-payments
+- subscription-management
+- billing-portal
+- stripe-webhooks
+- checkout-sessions
+- payment-intents
+- stripe-connect
+- metered-billing
+- dunning-management
+- payment-failure-handling
+
+## Requirements
+
+- supabase-backend
+
+## Patterns
+
+### Idempotency Key Everything
+
+Use idempotency keys on all payment operations to prevent duplicate charges
+
+### Webhook State Machine
+
+Handle webhooks as state transitions, not triggers
+
+### Test Mode Throughout Development
+
+Use Stripe test mode with real test cards for all development
+
+## Anti-Patterns
+
+### ❌ Trust the API Response
+
+### ❌ Webhook Without Signature Verification
+
+### ❌ Subscription Status Checks Without Refresh
+
+## ⚠️ Sharp Edges
+
+| Issue | Severity | Solution |
+|-------|----------|----------|
+| Not verifying webhook signatures | critical | # Always verify signatures: |
+| JSON middleware parsing body before webhook can verify | critical | # Next.js App Router: |
+| Not using idempotency keys for payment operations | high | # Always use idempotency keys: |
+| Trusting API responses instead of webhooks for payment status | critical | # Webhook-first architecture: |
+| Not passing metadata through checkout session | high | # Always include metadata: |
+| Local subscription state drifting from Stripe state | high | # Handle ALL subscription webhooks: |
+| Not handling failed payments and dunning | high | # Handle invoice.payment_failed: |
+| Different code paths or behavior between test and live mode | high | # Separate all keys: |
+
+## Related Skills
+
+Works well with: `nextjs-supabase-auth`, `supabase-backend`, `webhook-patterns`, `security`
diff --git a/skills/telegram-bot-builder/SKILL.md b/skills/telegram-bot-builder/SKILL.md
new file mode 100644
index 00000000..9f6adf1e
--- /dev/null
+++ b/skills/telegram-bot-builder/SKILL.md
@@ -0,0 +1,254 @@
+---
+name: telegram-bot-builder
+description: "Expert in building Telegram bots that solve real problems - from simple automation to complex AI-powered bots. Covers bot architecture, the Telegram Bot API, user experience, monetization strategies, and scaling bots to thousands of users. Use when: telegram bot, bot api, telegram automation, chat bot telegram, tg bot."
+source: vibeship-spawner-skills (Apache 2.0)
+---
+
+# Telegram Bot Builder
+
+**Role**: Telegram Bot Architect
+
+You build bots that people actually use daily. You understand that bots
+should feel like helpful assistants, not clunky interfaces. You know
+the Telegram ecosystem deeply - what's possible, what's popular, and
+what makes money. You design conversations that feel natural.
+
+## Capabilities
+
+- Telegram Bot API
+- Bot architecture
+- Command design
+- Inline keyboards
+- Bot monetization
+- User onboarding
+- Bot analytics
+- Webhook management
+
+## Patterns
+
+### Bot Architecture
+
+Structure for maintainable Telegram bots
+
+**When to use**: When starting a new bot project
+
+```python
+## Bot Architecture
+
+### Stack Options
+| Language | Library | Best For |
+|----------|---------|----------|
+| Node.js | telegraf | Most projects |
+| Node.js | grammY | TypeScript, modern |
+| Python | python-telegram-bot | Quick prototypes |
+| Python | aiogram | Async, scalable |
+
+### Basic Telegraf Setup
+```javascript
+import { Telegraf } from 'telegraf';
+
+const bot = new Telegraf(process.env.BOT_TOKEN);
+
+// Command handlers
+bot.start((ctx) => ctx.reply('Welcome!'));
+bot.help((ctx) => ctx.reply('How can I help?'));
+
+// Text handler
+bot.on('text', (ctx) => {
+ ctx.reply(`You said: ${ctx.message.text}`);
+});
+
+// Launch
+bot.launch();
+
+// Graceful shutdown
+process.once('SIGINT', () => bot.stop('SIGINT'));
+process.once('SIGTERM', () => bot.stop('SIGTERM'));
+```
+
+### Project Structure
+```
+telegram-bot/
+├── src/
+│ ├── bot.js # Bot initialization
+│ ├── commands/ # Command handlers
+│ │ ├── start.js
+│ │ ├── help.js
+│ │ └── settings.js
+│ ├── handlers/ # Message handlers
+│ ├── keyboards/ # Inline keyboards
+│ ├── middleware/ # Auth, logging
+│ └── services/ # Business logic
+├── .env
+└── package.json
+```
+```
+
+### Inline Keyboards
+
+Interactive button interfaces
+
+**When to use**: When building interactive bot flows
+
+```python
+## Inline Keyboards
+
+### Basic Keyboard
+```javascript
+import { Markup } from 'telegraf';
+
+bot.command('menu', (ctx) => {
+ ctx.reply('Choose an option:', Markup.inlineKeyboard([
+ [Markup.button.callback('Option 1', 'opt_1')],
+ [Markup.button.callback('Option 2', 'opt_2')],
+ [
+ Markup.button.callback('Yes', 'yes'),
+ Markup.button.callback('No', 'no'),
+ ],
+ ]));
+});
+
+// Handle button clicks
+bot.action('opt_1', (ctx) => {
+ ctx.answerCbQuery('You chose Option 1');
+ ctx.editMessageText('You selected Option 1');
+});
+```
+
+### Keyboard Patterns
+| Pattern | Use Case |
+|---------|----------|
+| Single column | Simple menus |
+| Multi column | Yes/No, pagination |
+| Grid | Category selection |
+| URL buttons | Links, payments |
+
+### Pagination
+```javascript
+function getPaginatedKeyboard(items, page, perPage = 5) {
+ const start = page * perPage;
+ const pageItems = items.slice(start, start + perPage);
+
+ const buttons = pageItems.map(item =>
+ [Markup.button.callback(item.name, `item_${item.id}`)]
+ );
+
+ const nav = [];
+ if (page > 0) nav.push(Markup.button.callback('◀️', `page_${page-1}`));
+ if (start + perPage < items.length) nav.push(Markup.button.callback('▶️', `page_${page+1}`));
+
+ return Markup.inlineKeyboard([...buttons, nav]);
+}
+```
+```
+
+### Bot Monetization
+
+Making money from Telegram bots
+
+**When to use**: When planning bot revenue
+
+```javascript
+## Bot Monetization
+
+### Revenue Models
+| Model | Example | Complexity |
+|-------|---------|------------|
+| Freemium | Free basic, paid premium | Medium |
+| Subscription | Monthly access | Medium |
+| Per-use | Pay per action | Low |
+| Ads | Sponsored messages | Low |
+| Affiliate | Product recommendations | Low |
+
+### Telegram Payments
+```javascript
+// Create invoice
+bot.command('buy', (ctx) => {
+ ctx.replyWithInvoice({
+ title: 'Premium Access',
+ description: 'Unlock all features',
+ payload: 'premium_monthly',
+ provider_token: process.env.PAYMENT_TOKEN,
+ currency: 'USD',
+ prices: [{ label: 'Premium', amount: 999 }], // $9.99
+ });
+});
+
+// Handle successful payment
+bot.on('successful_payment', async (ctx) => {
+ const payment = ctx.message.successful_payment;
+ // Activate premium for user
+ await activatePremium(ctx.from.id);
+ ctx.reply('🎉 Premium activated!');
+});
+```
+
+### Freemium Strategy
+```
+Free tier:
+- 10 uses per day
+- Basic features
+- Ads shown
+
+Premium ($5/month):
+- Unlimited uses
+- Advanced features
+- No ads
+- Priority support
+```
+
+### Usage Limits
+```javascript
+async function checkUsage(userId) {
+ const usage = await getUsage(userId);
+ const isPremium = await checkPremium(userId);
+
+ if (!isPremium && usage >= 10) {
+ return { allowed: false, message: 'Daily limit reached. Upgrade?' };
+ }
+ return { allowed: true };
+}
+```
+```
+
+## Anti-Patterns
+
+### ❌ Blocking Operations
+
+**Why bad**: Telegram has timeout limits.
+Users think bot is dead.
+Poor experience.
+Requests pile up.
+
+**Instead**: Acknowledge immediately.
+Process in background.
+Send update when done.
+Use typing indicator.
+
+### ❌ No Error Handling
+
+**Why bad**: Users get no response.
+Bot appears broken.
+Debugging nightmare.
+Lost trust.
+
+**Instead**: Global error handler.
+Graceful error messages.
+Log errors for debugging.
+Rate limiting.
+
+### ❌ Spammy Bot
+
+**Why bad**: Users block the bot.
+Telegram may ban.
+Annoying experience.
+Low retention.
+
+**Instead**: Respect user attention.
+Consolidate messages.
+Allow notification control.
+Quality over quantity.
+
+## Related Skills
+
+Works well with: `telegram-mini-app`, `backend`, `ai-wrapper-product`, `workflow-automation`
diff --git a/skills/telegram-mini-app/SKILL.md b/skills/telegram-mini-app/SKILL.md
new file mode 100644
index 00000000..53891869
--- /dev/null
+++ b/skills/telegram-mini-app/SKILL.md
@@ -0,0 +1,279 @@
+---
+name: telegram-mini-app
+description: "Expert in building Telegram Mini Apps (TWA) - web apps that run inside Telegram with native-like experience. Covers the TON ecosystem, Telegram Web App API, payments, user authentication, and building viral mini apps that monetize. Use when: telegram mini app, TWA, telegram web app, TON app, mini app."
+source: vibeship-spawner-skills (Apache 2.0)
+---
+
+# Telegram Mini App
+
+**Role**: Telegram Mini App Architect
+
+You build apps where 800M+ Telegram users already are. You understand
+the Mini App ecosystem is exploding - games, DeFi, utilities, social
+apps. You know TON blockchain and how to monetize with crypto. You
+design for the Telegram UX paradigm, not traditional web.
+
+## Capabilities
+
+- Telegram Web App API
+- Mini App architecture
+- TON Connect integration
+- In-app payments
+- User authentication via Telegram
+- Mini App UX patterns
+- Viral Mini App mechanics
+- TON blockchain integration
+
+## Patterns
+
+### Mini App Setup
+
+Getting started with Telegram Mini Apps
+
+**When to use**: When starting a new Mini App
+
+```javascript
+## Mini App Setup
+
+### Basic Structure
+```html
+<!DOCTYPE html>
+<html>
+<head>
+  <script src="https://telegram.org/js/telegram-web-app.js"></script>
+</head>
+<body>
+  <div id="app"></div>
+  <script>
+    const tg = window.Telegram.WebApp;
+    tg.ready();
+  </script>
+</body>
+</html>
+```
+
+### React Setup
+```jsx
+// hooks/useTelegram.js
+export function useTelegram() {
+ const tg = window.Telegram?.WebApp;
+
+ return {
+ tg,
+ user: tg?.initDataUnsafe?.user,
+ queryId: tg?.initDataUnsafe?.query_id,
+ expand: () => tg?.expand(),
+ close: () => tg?.close(),
+ ready: () => tg?.ready(),
+ };
+}
+
+// App.jsx
+function App() {
+ const { tg, user, expand, ready } = useTelegram();
+
+ useEffect(() => {
+ ready();
+ expand();
+ }, []);
+
+  return <div>Hello, {user?.first_name}</div>;
+}
+```
+
+### Bot Integration
+```javascript
+// Bot sends Mini App
+bot.command('app', (ctx) => {
+ ctx.reply('Open the app:', {
+ reply_markup: {
+ inline_keyboard: [[
+ { text: '🚀 Open App', web_app: { url: 'https://your-app.com' } }
+ ]]
+ }
+ });
+});
+```
+```
+
+### TON Connect Integration
+
+Wallet connection for TON blockchain
+
+**When to use**: When building Web3 Mini Apps
+
+```python
+## TON Connect Integration
+
+### Setup
+```bash
+npm install @tonconnect/ui-react
+```
+
+### React Integration
+```jsx
+import { TonConnectUIProvider, TonConnectButton } from '@tonconnect/ui-react';
+
+// Wrap app
+function App() {
+ return (
+    <TonConnectUIProvider manifestUrl="https://your-app.com/tonconnect-manifest.json">
+      <YourApp />
+    </TonConnectUIProvider>
+ );
+}
+
+// Use in components
+function WalletSection() {
+ return (
+    <TonConnectButton />
+ );
+}
+```
+
+### Manifest File
+```json
+{
+ "url": "https://your-app.com",
+ "name": "Your Mini App",
+ "iconUrl": "https://your-app.com/icon.png"
+}
+```
+
+### Send TON Transaction
+```jsx
+import { useTonConnectUI } from '@tonconnect/ui-react';
+
+function PaymentButton({ amount, to }) {
+ const [tonConnectUI] = useTonConnectUI();
+
+ const handlePay = async () => {
+ const transaction = {
+ validUntil: Math.floor(Date.now() / 1000) + 60,
+ messages: [{
+ address: to,
+ amount: (amount * 1e9).toString(), // TON to nanoton
+ }]
+ };
+
+ await tonConnectUI.sendTransaction(transaction);
+ };
+
+  return <button onClick={handlePay}>Pay {amount} TON</button>;
+}
+```
+```
+
+### Mini App Monetization
+
+Making money from Mini Apps
+
+**When to use**: When planning Mini App revenue
+
+```javascript
+## Mini App Monetization
+
+### Revenue Streams
+| Model | Example | Potential |
+|-------|---------|-----------|
+| TON payments | Premium features | High |
+| In-app purchases | Virtual goods | High |
+| Ads (Telegram Ads) | Display ads | Medium |
+| Referral | Share to earn | Medium |
+| NFT sales | Digital collectibles | High |
+
+### Telegram Stars (New!)
+```javascript
+// In your bot
+bot.command('premium', (ctx) => {
+ ctx.replyWithInvoice({
+ title: 'Premium Access',
+ description: 'Unlock all features',
+ payload: 'premium',
+ provider_token: '', // Empty for Stars
+ currency: 'XTR', // Telegram Stars
+ prices: [{ label: 'Premium', amount: 100 }], // 100 Stars
+ });
+});
+```
+
+### Viral Mechanics
+```jsx
+// Referral system
+function ReferralShare() {
+ const { tg, user } = useTelegram();
+ const referralLink = `https://t.me/your_bot?start=ref_${user.id}`;
+
+ const share = () => {
+ tg.openTelegramLink(
+ `https://t.me/share/url?url=${encodeURIComponent(referralLink)}&text=Check this out!`
+ );
+ };
+
+  return <button onClick={share}>Invite Friends</button>;
+}
+```
+
+### Gamification for Retention
+- Daily rewards
+- Streak bonuses
+- Leaderboards
+- Achievement badges
+- Referral bonuses
+```
+
+## Anti-Patterns
+
+### ❌ Ignoring Telegram Theme
+
+**Why bad**: Feels foreign in Telegram.
+Bad user experience.
+Jarring transitions.
+Users don't trust it.
+
+**Instead**: Use tg.themeParams.
+Match Telegram colors.
+Use native-feeling UI.
+Test in both light/dark.
+
+### ❌ Desktop-First Mini App
+
+**Why bad**: 95% of Telegram is mobile.
+Touch targets too small.
+Doesn't fit in Telegram UI.
+Scrolling issues.
+
+**Instead**: Mobile-first always.
+Test on real phones.
+Touch-friendly buttons.
+Fit within Telegram frame.
+
+### ❌ No Loading States
+
+**Why bad**: Users think it's broken.
+Poor perceived performance.
+High exit rate.
+Confusion.
+
+**Instead**: Show skeleton UI.
+Loading indicators.
+Progressive loading.
+Optimistic updates.
+
+## ⚠️ Sharp Edges
+
+| Issue | Severity | Solution |
+|-------|----------|----------|
+| Not validating initData from Telegram | high | ## Validating initData |
+| TON Connect not working on mobile | high | ## TON Connect Mobile Issues |
+| Mini App feels slow and janky | medium | ## Mini App Performance |
+| Custom buttons instead of MainButton | medium | ## Using MainButton Properly |
+
+## Related Skills
+
+Works well with: `telegram-bot-builder`, `frontend`, `blockchain-defi`, `viral-generator-builder`
diff --git a/skills/trigger-dev/SKILL.md b/skills/trigger-dev/SKILL.md
new file mode 100644
index 00000000..09ed74bb
--- /dev/null
+++ b/skills/trigger-dev/SKILL.md
@@ -0,0 +1,67 @@
+---
+name: trigger-dev
+description: "Trigger.dev expert for background jobs, AI workflows, and reliable async execution with excellent developer experience and TypeScript-first design. Use when: trigger.dev, trigger dev, background task, ai background job, long running task."
+source: vibeship-spawner-skills (Apache 2.0)
+---
+
+# Trigger.dev Integration
+
+You are a Trigger.dev expert who builds reliable background jobs with
+exceptional developer experience. You understand that Trigger.dev bridges
+the gap between simple queues and complex orchestration - it's "Temporal
+made easy" for TypeScript developers.
+
+You've built AI pipelines that process for minutes, integration workflows
+that sync across dozens of services, and batch jobs that handle millions
+of records. You know the power of built-in integrations and the importance
+of proper task design.
+
+## Capabilities
+
+- trigger-dev-tasks
+- ai-background-jobs
+- integration-tasks
+- scheduled-triggers
+- webhook-handlers
+- long-running-tasks
+- task-queues
+- batch-processing
+
+## Patterns
+
+### Basic Task Setup
+
+Setting up Trigger.dev in a Next.js project
+
+### AI Task with OpenAI Integration
+
+Using built-in OpenAI integration with automatic retries
+
+### Scheduled Task with Cron
+
+Tasks that run on a schedule
+
+## Anti-Patterns
+
+### ❌ Giant Monolithic Tasks
+
+### ❌ Ignoring Built-in Integrations
+
+### ❌ No Logging
+
+## ⚠️ Sharp Edges
+
+| Issue | Severity | Solution |
+|-------|----------|----------|
+| Task timeout kills execution without clear error | critical | # Configure explicit timeouts: |
+| Non-serializable payload causes silent task failure | critical | # Always use plain objects: |
+| Environment variables not synced to Trigger.dev cloud | critical | # Sync env vars to Trigger.dev: |
+| SDK version mismatch between CLI and package | high | # Always update together: |
+| Task retries cause duplicate side effects | high | # Use idempotency keys: |
+| High concurrency overwhelms downstream services | high | # Set queue concurrency limits: |
+| trigger.config.ts not at project root | high | # Config must be at package root: |
+| wait.for in loops causes memory issues | medium | # Batch instead of individual waits: |
+
+## Related Skills
+
+Works well with: `nextjs-app-router`, `vercel-deployment`, `ai-agents-architect`, `llm-architect`, `email-systems`, `stripe-integration`
diff --git a/skills/twilio-communications/SKILL.md b/skills/twilio-communications/SKILL.md
new file mode 100644
index 00000000..80f52940
--- /dev/null
+++ b/skills/twilio-communications/SKILL.md
@@ -0,0 +1,295 @@
+---
+name: twilio-communications
+description: "Build communication features with Twilio: SMS messaging, voice calls, WhatsApp Business API, and user verification (2FA). Covers the full spectrum from simple notifications to complex IVR systems and multi-channel authentication. Critical focus on compliance, rate limits, and error handling. Use when: twilio, send SMS, text message, voice call, phone verification."
+source: vibeship-spawner-skills (Apache 2.0)
+---
+
+# Twilio Communications
+
+## Patterns
+
+### SMS Sending Pattern
+
+Basic pattern for sending SMS messages with Twilio.
+Handles the fundamentals: phone number formatting, message delivery,
+and delivery status callbacks.
+
+Key considerations:
+- Phone numbers must be in E.164 format (+1234567890)
+- Default rate limit: 80 messages per second (MPS)
+- Messages over 160 characters are split (and cost more)
+- Carrier filtering can block messages (especially to US numbers)
+
+
+**When to use**: Sending notifications to users; transactional messages (order confirmations, shipping); alerts and reminders
+
+```python
+from twilio.rest import Client
+from twilio.base.exceptions import TwilioRestException
+import os
+import re
+
+class TwilioSMS:
+ """
+ SMS sending with proper error handling and validation.
+ """
+
+ def __init__(self):
+ self.client = Client(
+ os.environ["TWILIO_ACCOUNT_SID"],
+ os.environ["TWILIO_AUTH_TOKEN"]
+ )
+ self.from_number = os.environ["TWILIO_PHONE_NUMBER"]
+
+ def validate_e164(self, phone: str) -> bool:
+ """Validate phone number is in E.164 format."""
+ pattern = r'^\+[1-9]\d{1,14}$'
+ return bool(re.match(pattern, phone))
+
+ def send_sms(
+ self,
+ to: str,
+ body: str,
+ status_callback: str = None
+ ) -> dict:
+ """
+ Send an SMS message.
+
+ Args:
+ to: Recipient phone number in E.164 format
+ body: Message text (160 chars = 1 segment)
+ status_callback: URL for delivery status webhooks
+
+ Returns:
+ Message SID and status
+ """
+ # Validate phone number format
+ if not self.validate_e164(to):
+ return {
+ "success": False,
+ "error": "Phone number must be in E.164 format (+1234567890)"
+ }
+
+ # Check message length (warn about segmentation)
+ segment_count = (len(body) + 159) // 160
+ if segment_count > 1:
+ print(f"Warning: Message will be sent as {segment_count} segments")
+
+ try:
+ message = self.client.messages.create(
+ to=to,
+ from_=self.from_number,
+ body=body,
+ status_callback=status_callback
+ )
+
+ return {
+ "success": True,
+ "message_sid": message.sid,
+ "status": message.status,
+ "segments": segment_count
+ }
+
+ except TwilioRestException as e:
+ return self._handle_error(e)
+
+ def _handle_error(self, error: Twilio
+```
+
+### Twilio Verify Pattern (2FA/OTP)
+
+Use Twilio Verify for phone number verification and 2FA.
+Handles code generation, delivery, rate limiting, and fraud prevention.
+
+Key benefits over DIY OTP:
+- Twilio manages code generation and expiration
+- Built-in fraud prevention (saved customers $82M+ blocking 747M attempts)
+- Handles rate limiting automatically
+- Multi-channel: SMS, Voice, Email, Push, WhatsApp
+
+Google found SMS 2FA blocks "100% of automated bots, 96% of bulk
+phishing attacks, and 76% of targeted attacks."
+
+
+**When to use**: User phone number verification at signup; two-factor authentication (2FA); password reset verification; high-value transaction confirmation
+
+```python
+from twilio.rest import Client
+from twilio.base.exceptions import TwilioRestException
+import os
+from enum import Enum
+from typing import Optional
+
+class VerifyChannel(Enum):
+ SMS = "sms"
+ CALL = "call"
+ EMAIL = "email"
+ WHATSAPP = "whatsapp"
+
+class TwilioVerify:
+ """
+ Phone verification with Twilio Verify.
+ Never store OTP codes - Twilio handles it.
+ """
+
+ def __init__(self, verify_service_sid: str = None):
+ self.client = Client(
+ os.environ["TWILIO_ACCOUNT_SID"],
+ os.environ["TWILIO_AUTH_TOKEN"]
+ )
+ # Create a Verify Service in Twilio Console first
+ self.service_sid = verify_service_sid or os.environ["TWILIO_VERIFY_SID"]
+
+ def send_verification(
+ self,
+ to: str,
+ channel: VerifyChannel = VerifyChannel.SMS,
+ locale: str = "en"
+ ) -> dict:
+ """
+ Send verification code to phone/email.
+
+ Args:
+ to: Phone number (E.164) or email
+ channel: SMS, call, email, or whatsapp
+ locale: Language code for message
+
+ Returns:
+ Verification status
+ """
+ try:
+ verification = self.client.verify \
+ .v2 \
+ .services(self.service_sid) \
+ .verifications \
+ .create(
+ to=to,
+ channel=channel.value,
+ locale=locale
+ )
+
+ return {
+ "success": True,
+ "status": verification.status, # "pending"
+ "channel": channel.value,
+ "valid": verification.valid
+ }
+
+ except TwilioRestException as e:
+ return self._handle_verify_error(e)
+
+ def check_verification(self, to: str, code: str) -> dict:
+ """
+ Check if verification code is correct.
+
+ Args:
+ to: Phone number or email that received code
+ code: The code entered by user
+
+ R
+```
+
+### TwiML IVR Pattern
+
+Build Interactive Voice Response (IVR) systems using TwiML.
+TwiML (Twilio Markup Language) is XML that tells Twilio what to do
+when receiving calls.
+
+Core TwiML verbs:
+- `<Say>`: Text-to-speech
+- `<Play>`: Play audio file
+- `<Gather>`: Collect keypad/speech input
+- `<Dial>`: Connect to another number
+- `<Record>`: Record caller's voice
+- `<Redirect>`: Move to another TwiML endpoint
+
+Key insight: Twilio makes HTTP request to your webhook, you return
+TwiML, Twilio executes it. Stateless, so use URL params or sessions.
+
+
+**When to use**: Phone menu systems ("press 1 for sales..."); automated customer support; appointment reminders with confirmation; voicemail systems
+
+```python
+from flask import Flask, request, Response
+from twilio.twiml.voice_response import VoiceResponse, Gather
+from twilio.request_validator import RequestValidator
+import os
+
+app = Flask(__name__)
+
+def validate_twilio_request(f):
+ """Decorator to validate requests are from Twilio."""
+ def wrapper(*args, **kwargs):
+ validator = RequestValidator(os.environ["TWILIO_AUTH_TOKEN"])
+
+ # Get request details
+ url = request.url
+ params = request.form.to_dict()
+ signature = request.headers.get("X-Twilio-Signature", "")
+
+ if not validator.validate(url, params, signature):
+ return "Invalid request", 403
+
+ return f(*args, **kwargs)
+ wrapper.__name__ = f.__name__
+ return wrapper
+
+@app.route("/voice/incoming", methods=["POST"])
+@validate_twilio_request
+def incoming_call():
+ """Handle incoming call with IVR menu."""
+ response = VoiceResponse()
+
+ # Gather digits with timeout
+ gather = Gather(
+ num_digits=1,
+ action="/voice/menu-selection",
+ method="POST",
+ timeout=5
+ )
+ gather.say(
+ "Welcome to Acme Corp. "
+ "Press 1 for sales. "
+ "Press 2 for support. "
+ "Press 3 to leave a message."
+ )
+ response.append(gather)
+
+ # If no input, repeat
+ response.redirect("/voice/incoming")
+
+ return Response(str(response), mimetype="text/xml")
+
+@app.route("/voice/menu-selection", methods=["POST"])
+@validate_twilio_request
+def menu_selection():
+ """Route based on menu selection."""
+ response = VoiceResponse()
+ digit = request.form.get("Digits", "")
+
+ if digit == "1":
+ # Transfer to sales
+ response.say("Connecting you to sales.")
+ response.dial(os.environ["SALES_PHONE"])
+
+ elif digit == "2":
+ # Transfer to support
+ response.say("Connecting you to support.")
+ response.dial(os.environ["SUPPORT_PHONE"])
+
+ elif digit == "3":
+ # Voicemail
+ response.say("Please leave a message after
+```
+
+## ⚠️ Sharp Edges
+
+| Issue | Severity | Solution |
+|-------|----------|----------|
+| Issue | high | ## Track opt-out status in your database |
+| Issue | medium | ## Implement retry logic for transient failures |
+| Issue | high | ## Register for A2P 10DLC (US requirement) |
+| Issue | critical | ## ALWAYS validate the signature |
+| Issue | high | ## Track session windows per user |
+| Issue | critical | ## Never hardcode credentials |
+| Issue | medium | ## Implement application-level rate limiting too |
diff --git a/skills/upstash-qstash/SKILL.md b/skills/upstash-qstash/SKILL.md
new file mode 100644
index 00000000..87b45695
--- /dev/null
+++ b/skills/upstash-qstash/SKILL.md
@@ -0,0 +1,68 @@
+---
+name: upstash-qstash
+description: "Upstash QStash expert for serverless message queues, scheduled jobs, and reliable HTTP-based task delivery without managing infrastructure. Use when: qstash, upstash queue, serverless cron, scheduled http, message queue serverless."
+source: vibeship-spawner-skills (Apache 2.0)
+---
+
+# Upstash QStash
+
+You are an Upstash QStash expert who builds reliable serverless messaging
+without infrastructure management. You understand that QStash's simplicity
+is its power - HTTP in, HTTP out, with reliability in between.
+
+You've scheduled millions of messages, set up cron jobs that run for years,
+and built webhook delivery systems that never drop a message. You know that
+QStash shines when you need "just make this HTTP call later, reliably."
+
+Your core philosophy:
+1. HTTP is the universal language - no client libraries or brokers required
+
+## Capabilities
+
+- qstash-messaging
+- scheduled-http-calls
+- serverless-cron
+- webhook-delivery
+- message-deduplication
+- callback-handling
+- delay-scheduling
+- url-groups
+
+## Patterns
+
+### Basic Message Publishing
+
+Sending messages to be delivered to endpoints
+
+### Scheduled Cron Jobs
+
+Setting up recurring scheduled tasks
+
+### Signature Verification
+
+Verifying QStash message signatures in your endpoint
+
+## Anti-Patterns
+
+### ❌ Skipping Signature Verification
+
+### ❌ Using Private Endpoints
+
+### ❌ No Error Handling in Endpoints
+
+## ⚠️ Sharp Edges
+
+| Issue | Severity | Solution |
+|-------|----------|----------|
+| Not verifying QStash webhook signatures | critical | # Always verify signatures with both keys: |
+| Callback endpoint taking too long to respond | high | # Design for fast acknowledgment: |
+| Hitting QStash rate limits unexpectedly | high | # Check your plan limits: |
+| Not using deduplication for critical operations | high | # Use deduplication for critical messages: |
+| Expecting QStash to reach private/localhost endpoints | critical | # Production requirements: |
+| Using default retry behavior for all message types | medium | # Configure retries per message: |
+| Sending large payloads instead of references | medium | # Send references, not data: |
+| Not using callback/failureCallback for critical flows | medium | # Use callbacks for critical operations: |
+
+## Related Skills
+
+Works well with: `vercel-deployment`, `nextjs-app-router`, `redis-specialist`, `email-systems`, `supabase-backend`, `cloudflare-workers`
diff --git a/skills/vercel-deployment/SKILL.md b/skills/vercel-deployment/SKILL.md
new file mode 100644
index 00000000..864fad35
--- /dev/null
+++ b/skills/vercel-deployment/SKILL.md
@@ -0,0 +1,69 @@
+---
+name: vercel-deployment
+description: "Expert knowledge for deploying to Vercel with Next.js Use when: vercel, deploy, deployment, hosting, production."
+source: vibeship-spawner-skills (Apache 2.0)
+---
+
+# Vercel Deployment
+
+You are a Vercel deployment expert. You understand the platform's
+capabilities, limitations, and best practices for deploying Next.js
+applications at scale.
+
+Your core principles:
+1. Environment variables - different for dev/preview/production
+2. Edge vs Serverless - choose the right runtime
+3. Build optimization - minimize cold starts and bundle size
+4. Preview deployments - use for testing before production
+5. Monitoring - set up analytics and error tracking
+
+## Capabilities
+
+- vercel
+- deployment
+- edge-functions
+- serverless
+- environment-variables
+
+## Requirements
+
+- nextjs-app-router
+
+## Patterns
+
+### Environment Variables Setup
+
+Properly configure environment variables for all environments
+
+### Edge vs Serverless Functions
+
+Choose the right runtime for your API routes
+
+### Build Optimization
+
+Optimize build for faster deployments and smaller bundles
+
+## Anti-Patterns
+
+### ❌ Secrets in NEXT_PUBLIC_
+
+### ❌ Same Database for Preview
+
+### ❌ No Build Cache
+
+## ⚠️ Sharp Edges
+
+| Issue | Severity | Solution |
+|-------|----------|----------|
+| NEXT_PUBLIC_ exposes secrets to the browser | critical | Only use NEXT_PUBLIC_ for truly public values: |
+| Preview deployments using production database | high | Set up separate databases for each environment: |
+| Serverless function too large, slow cold starts | high | Reduce function size: |
+| Edge runtime missing Node.js APIs | high | Check API compatibility before using edge: |
+| Function timeout causes incomplete operations | medium | Handle long operations properly: |
+| Environment variable missing at runtime but present at build | medium | Understand when env vars are read: |
+| CORS errors calling API routes from different domain | medium | Add CORS headers to API routes: |
+| Page shows stale data after deployment | medium | Control caching behavior: |
+
+## Related Skills
+
+Works well with: `nextjs-app-router`, `supabase-backend`
diff --git a/skills/viral-generator-builder/SKILL.md b/skills/viral-generator-builder/SKILL.md
new file mode 100644
index 00000000..afc1aec2
--- /dev/null
+++ b/skills/viral-generator-builder/SKILL.md
@@ -0,0 +1,199 @@
+---
+name: viral-generator-builder
+description: "Expert in building shareable generator tools that go viral - name generators, quiz makers, avatar creators, personality tests, and calculator tools. Covers the psychology of sharing, viral mechanics, and building tools people can't resist sharing with friends. Use when: generator tool, quiz maker, name generator, avatar creator, viral tool."
+source: vibeship-spawner-skills (Apache 2.0)
+---
+
+# Viral Generator Builder
+
+**Role**: Viral Generator Architect
+
+You understand why people share things. You build tools that create
+"identity moments" - results people want to show off. You know the
+difference between a tool people use once and one that spreads like
+wildfire. You optimize for the screenshot, the share, the "OMG you
+have to try this" moment.
+
+## Capabilities
+
+- Generator tool architecture
+- Shareable result design
+- Viral mechanics
+- Quiz and personality test builders
+- Name and text generators
+- Avatar and image generators
+- Calculator tools that get shared
+- Social sharing optimization
+
+## Patterns
+
+### Generator Architecture
+
+Building generators that go viral
+
+**When to use**: When creating any shareable generator tool
+
+```javascript
+## Generator Architecture
+
+### The Viral Generator Formula
+```
+Input (minimal) → Magic (your algorithm) → Result (shareable)
+```
+
+### Input Design
+| Type | Example | Virality |
+|------|---------|----------|
+| Name only | "Enter your name" | High (low friction) |
+| Birthday | "Enter your birth date" | High (personal) |
+| Quiz answers | "Answer 5 questions" | Medium (more investment) |
+| Photo upload | "Upload a selfie" | High (personalized) |
+
+### Result Types That Get Shared
+1. **Identity results** - "You are a..."
+2. **Comparison results** - "You're 87% like..."
+3. **Prediction results** - "In 2025 you will..."
+4. **Score results** - "Your score: 847/1000"
+5. **Visual results** - Avatar, badge, certificate
+
+### The Screenshot Test
+- Result must look good as a screenshot
+- Include branding subtly
+- Make text readable on mobile
+- Add share buttons but design for screenshots
+```
+
+### Quiz Builder Pattern
+
+Building personality quizzes that spread
+
+**When to use**: When building quiz-style generators
+
+```javascript
+## Quiz Builder Pattern
+
+### Quiz Structure
+```
+5-10 questions → Weighted scoring → One of N results
+```
+
+### Question Design
+| Type | Engagement |
+|------|------------|
+| Image choice | Highest |
+| This or that | High |
+| Slider scale | Medium |
+| Multiple choice | Medium |
+| Text input | Low |
+
+### Result Categories
+- 4-8 possible results (sweet spot)
+- Each result should feel desirable
+- Results should feel distinct
+- Include "rare" results for sharing
+
+### Scoring Logic
+```javascript
+// Simple weighted scoring
+const scores = { typeA: 0, typeB: 0, typeC: 0, typeD: 0 };
+
+answers.forEach(answer => {
+ scores[answer.type] += answer.weight;
+});
+
+const result = Object.entries(scores)
+ .sort((a, b) => b[1] - a[1])[0][0];
+```
+
+### Result Page Elements
+- Big, bold result title
+- Flattering description
+- Shareable image/card
+- "Share your result" buttons
+- "See what friends got" CTA
+- Subtle retake option
+```
+
+### Name Generator Pattern
+
+Building name generators that people love
+
+**When to use**: When building any name/text generator
+
+```javascript
+## Name Generator Pattern
+
+### Generator Types
+| Type | Example | Algorithm |
+|------|---------|-----------|
+| Deterministic | "Your Star Wars name" | Hash of input |
+| Random + seed | "Your rapper name" | Seeded random |
+| AI-powered | "Your brand name" | LLM generation |
+| Combinatorial | "Your fantasy name" | Word parts |
+
+### The Deterministic Trick
+Same input = same output = shareable!
+```javascript
+function generateName(input) {
+ const hash = simpleHash(input.toLowerCase());
+ const firstNames = ["Shadow", "Storm", "Crystal"];
+ const lastNames = ["Walker", "Blade", "Heart"];
+
+ return `${firstNames[hash % firstNames.length]} ${lastNames[(hash >> 8) % lastNames.length]}`;
+}
+```
+
+### Making Results Feel Personal
+- Use their actual name in the result
+- Reference their input cleverly
+- Add a "meaning" or backstory
+- Include a visual representation
+
+### Shareability Boosters
+- "Your [X] name is:" format
+- Certificate/badge design
+- Compare with friends feature
+- Daily/weekly changing results
+```
+
+## Anti-Patterns
+
+### ❌ Forgettable Results
+
+**Why bad**: Generic results don't get shared.
+"You are creative" - so what?
+No identity moment.
+Nothing to screenshot.
+
+**Instead**: Make results specific and identity-forming.
+"You're a Midnight Architect" > "You're creative"
+Add visual flair.
+Make it screenshot-worthy.
+
+### ❌ Too Much Input
+
+**Why bad**: Every field is a dropout point.
+People want instant gratification.
+Long forms kill virality.
+Mobile users bounce.
+
+**Instead**: Minimum viable input.
+Start with just name or one question.
+Progressive disclosure if needed.
+Show progress if longer.
+
+### ❌ Boring Share Cards
+
+**Why bad**: Social feeds are competitive.
+Bland cards get scrolled past.
+No click = no viral loop.
+Wasted opportunity.
+
+**Instead**: Design for the feed.
+Bold colors, clear text.
+Result visible without clicking.
+Your branding subtle but present.
+
+## Related Skills
+
+Works well with: `viral-hooks`, `landing-page-design`, `seo`, `frontend`
diff --git a/skills/voice-agents/SKILL.md b/skills/voice-agents/SKILL.md
new file mode 100644
index 00000000..276c08b4
--- /dev/null
+++ b/skills/voice-agents/SKILL.md
@@ -0,0 +1,68 @@
+---
+name: voice-agents
+description: "Voice agents represent the frontier of AI interaction - humans speaking naturally with AI systems. The challenge isn't just speech recognition and synthesis, it's achieving natural conversation flow with sub-800ms latency while handling interruptions, background noise, and emotional nuance. This skill covers two architectures: speech-to-speech (OpenAI Realtime API, lowest latency, most natural) and pipeline (STT→LLM→TTS, more control, easier to debug). Key insight: latency is the constraint. Humans expect conversational responses within about 800ms."
+source: vibeship-spawner-skills (Apache 2.0)
+---
+
+# Voice Agents
+
+You are a voice AI architect who has shipped production voice agents handling
+millions of calls. You understand the physics of latency - every component
+adds milliseconds, and the sum determines whether conversations feel natural
+or awkward.
+
+Your core insight: Two architectures exist. Speech-to-speech (S2S) models like
+OpenAI Realtime API preserve emotion and achieve lowest latency but are less
+controllable. Pipeline architectures (STT→LLM→TTS) give you control at each
+step but add latency. Most production systems start with a pipeline for
+control, then optimize toward speech-to-speech where latency matters most.
+
+## Capabilities
+
+- voice-agents
+- speech-to-speech
+- speech-to-text
+- text-to-speech
+- conversational-ai
+- voice-activity-detection
+- turn-taking
+- barge-in-detection
+- voice-interfaces
+
+## Patterns
+
+### Speech-to-Speech Architecture
+
+Direct audio-to-audio processing for lowest latency
+
+### Pipeline Architecture
+
+Separate STT → LLM → TTS for maximum control
+
+### Voice Activity Detection Pattern
+
+Detect when user starts/stops speaking
+
+## Anti-Patterns
+
+### ❌ Ignoring Latency Budget
+
+### ❌ Silence-Only Turn Detection
+
+### ❌ Long Responses
+
+## ⚠️ Sharp Edges
+
+| Issue | Severity | Solution |
+|-------|----------|----------|
+| Issue | critical | # Measure and budget latency for each component: |
+| Issue | high | # Target jitter metrics: |
+| Issue | high | # Use semantic VAD: |
+| Issue | high | # Implement barge-in detection: |
+| Issue | medium | # Constrain response length in prompts: |
+| Issue | medium | # Prompt for spoken format: |
+| Issue | medium | # Implement noise handling: |
+| Issue | medium | # Mitigate STT errors: |
+
+## Related Skills
+
+Works well with: `agent-tool-builder`, `multi-agent-orchestration`, `llm-architect`, `backend`
diff --git a/skills/voice-ai-development/SKILL.md b/skills/voice-ai-development/SKILL.md
new file mode 100644
index 00000000..0526ce93
--- /dev/null
+++ b/skills/voice-ai-development/SKILL.md
@@ -0,0 +1,302 @@
+---
+name: voice-ai-development
+description: "Expert in building voice AI applications - from real-time voice agents to voice-enabled apps. Covers OpenAI Realtime API, Vapi for voice agents, Deepgram for transcription, ElevenLabs for synthesis, LiveKit for real-time infrastructure, and WebRTC fundamentals. Knows how to build low-latency, production-ready voice experiences. Use when: voice ai, voice agent, speech to text, text to speech, realtime voice."
+source: vibeship-spawner-skills (Apache 2.0)
+---
+
+# Voice AI Development
+
+**Role**: Voice AI Architect
+
+You are an expert in building real-time voice applications. You think in terms of
+latency budgets, audio quality, and user experience. You know that voice apps feel
+magical when fast and broken when slow. You choose the right combination of providers
+for each use case and optimize relentlessly for perceived responsiveness.
+
+## Capabilities
+
+- OpenAI Realtime API
+- Vapi voice agents
+- Deepgram STT/TTS
+- ElevenLabs voice synthesis
+- LiveKit real-time infrastructure
+- WebRTC audio handling
+- Voice agent design
+- Latency optimization
+
+## Requirements
+
+- Python or Node.js
+- API keys for providers
+- Audio handling knowledge
+
+## Patterns
+
+### OpenAI Realtime API
+
+Native voice-to-voice with GPT-4o
+
+**When to use**: When you want integrated voice AI without separate STT/TTS
+
+```python
+import asyncio
+import websockets
+import json
+import base64
+
+OPENAI_API_KEY = "sk-..."
+
+async def voice_session():
+ url = "wss://api.openai.com/v1/realtime?model=gpt-4o-realtime-preview"
+ headers = {
+ "Authorization": f"Bearer {OPENAI_API_KEY}",
+ "OpenAI-Beta": "realtime=v1"
+ }
+
+ async with websockets.connect(url, extra_headers=headers) as ws:
+ # Configure session
+ await ws.send(json.dumps({
+ "type": "session.update",
+ "session": {
+ "modalities": ["text", "audio"],
+ "voice": "alloy", # alloy, echo, fable, onyx, nova, shimmer
+ "input_audio_format": "pcm16",
+ "output_audio_format": "pcm16",
+ "input_audio_transcription": {
+ "model": "whisper-1"
+ },
+ "turn_detection": {
+ "type": "server_vad", # Voice activity detection
+ "threshold": 0.5,
+ "prefix_padding_ms": 300,
+ "silence_duration_ms": 500
+ },
+ "tools": [
+ {
+ "type": "function",
+ "name": "get_weather",
+ "description": "Get weather for a location",
+ "parameters": {
+ "type": "object",
+ "properties": {
+ "location": {"type": "string"}
+ }
+ }
+ }
+ ]
+ }
+ }))
+
+ # Send audio (PCM16, 24kHz, mono)
+ async def send_audio(audio_bytes):
+ await ws.send(json.dumps({
+ "type": "input_audio_buffer.append",
+ "audio": base64.b64encode(audio_bytes).decode()
+ }))
+
+ # Receive events
+ async for message in ws:
+ event = json.loads(message)
+
+            if event["type"] == "response.audio.delta":
+                play_audio(base64.b64decode(event["delta"]))
+```
+
+### Vapi Voice Agent
+
+Build voice agents with Vapi platform
+
+**When to use**: Phone-based agents, quick deployment
+
+```python
+# Vapi provides hosted voice agents with webhooks
+
+from flask import Flask, request, jsonify
+import vapi
+
+app = Flask(__name__)
+client = vapi.Vapi(api_key="...")
+
+# Create an assistant
+assistant = client.assistants.create(
+ name="Support Agent",
+ model={
+ "provider": "openai",
+ "model": "gpt-4o",
+ "messages": [
+ {
+ "role": "system",
+ "content": "You are a helpful support agent..."
+ }
+ ]
+ },
+ voice={
+ "provider": "11labs",
+ "voiceId": "21m00Tcm4TlvDq8ikWAM" # Rachel
+ },
+ firstMessage="Hi! How can I help you today?",
+ transcriber={
+ "provider": "deepgram",
+ "model": "nova-2"
+ }
+)
+
+# Webhook for conversation events
+@app.route("/vapi/webhook", methods=["POST"])
+def vapi_webhook():
+ event = request.json
+
+ if event["type"] == "function-call":
+ # Handle tool call
+ name = event["functionCall"]["name"]
+ args = event["functionCall"]["parameters"]
+
+ if name == "check_order":
+ result = check_order(args["order_id"])
+ return jsonify({"result": result})
+
+ elif event["type"] == "end-of-call-report":
+ # Call ended - save transcript
+ transcript = event["transcript"]
+ save_transcript(event["call"]["id"], transcript)
+
+ return jsonify({"ok": True})
+
+# Start outbound call
+call = client.calls.create(
+ assistant_id=assistant.id,
+ customer={
+ "number": "+1234567890"
+ },
+ phoneNumber={
+ "twilioPhoneNumber": "+0987654321"
+ }
+)
+
+# Or create web call
+web_call = client.calls.create(
+ assistant_id=assistant.id,
+ type="web"
+)
+# Returns URL for WebRTC connection
+```
+
+### Deepgram STT + ElevenLabs TTS
+
+Best-in-class transcription and synthesis
+
+**When to use**: High quality voice, custom pipeline
+
+```python
+import asyncio
+from deepgram import DeepgramClient, LiveTranscriptionEvents
+from elevenlabs import ElevenLabs
+
+# Deepgram real-time transcription
+deepgram = DeepgramClient(api_key="...")
+
+async def transcribe_stream(audio_stream):
+ connection = deepgram.listen.live.v("1")
+
+ async def on_transcript(result):
+ transcript = result.channel.alternatives[0].transcript
+ if transcript:
+ print(f"Heard: {transcript}")
+ if result.is_final:
+ # Process final transcript
+ await handle_user_input(transcript)
+
+ connection.on(LiveTranscriptionEvents.Transcript, on_transcript)
+
+ await connection.start({
+ "model": "nova-2", # Best quality
+ "language": "en",
+ "smart_format": True,
+ "interim_results": True, # Get partial results
+ "utterance_end_ms": 1000,
+ "vad_events": True, # Voice activity detection
+ "encoding": "linear16",
+ "sample_rate": 16000
+ })
+
+ # Stream audio
+ async for chunk in audio_stream:
+ await connection.send(chunk)
+
+ await connection.finish()
+
+# ElevenLabs streaming synthesis
+eleven = ElevenLabs(api_key="...")
+
+def text_to_speech_stream(text: str):
+ """Stream TTS audio chunks."""
+ audio_stream = eleven.text_to_speech.convert_as_stream(
+ voice_id="21m00Tcm4TlvDq8ikWAM", # Rachel
+ model_id="eleven_turbo_v2_5", # Fastest
+ text=text,
+ output_format="pcm_24000" # Raw PCM for low latency
+ )
+
+ for chunk in audio_stream:
+ yield chunk
+
+# Or with WebSocket for lowest latency
+async def tts_websocket(text_stream):
+ async with eleven.text_to_speech.stream_async(
+ voice_id="21m00Tcm4TlvDq8ikWAM",
+ model_id="eleven_turbo_v2_5"
+ ) as tts:
+ async for text_chunk in text_stream:
+ audio = await tts.send(text_chunk)
+ yield audio
+
+ # Flush remaining audio
+ final_audio = await tts.flush()
+ yield final_audio
+```
+
+## Anti-Patterns
+
+### ❌ Non-streaming Pipeline
+
+**Why bad**: Adds seconds of latency.
+User perceives as slow.
+Loses conversation flow.
+
+**Instead**: Stream everything:
+- STT: interim results
+- LLM: token streaming
+- TTS: chunk streaming
+Start TTS before LLM finishes.
+
+### ❌ Ignoring Interruptions
+
+**Why bad**: Frustrating user experience.
+Feels like talking to a machine.
+Wastes time.
+
+**Instead**: Implement barge-in detection.
+Use VAD to detect user speech.
+Stop TTS immediately.
+Clear audio queue.
+
+### ❌ Single Provider Lock-in
+
+**Why bad**: May not be best quality.
+Single point of failure.
+Harder to optimize.
+
+**Instead**: Mix best providers:
+- Deepgram for STT (speed + accuracy)
+- ElevenLabs for TTS (voice quality)
+- OpenAI/Anthropic for LLM
+
+## Limitations
+
+- Latency varies by provider
+- Cost per minute adds up
+- Quality depends on network
+- Complex debugging
+
+## Related Skills
+
+Works well with: `langgraph`, `structured-output`, `langfuse`
diff --git a/skills/workflow-automation/SKILL.md b/skills/workflow-automation/SKILL.md
index 545f7441..f0ebe2d8 100644
--- a/skills/workflow-automation/SKILL.md
+++ b/skills/workflow-automation/SKILL.md
@@ -1,705 +1,68 @@
---
name: workflow-automation
-description: "Design and implement automated workflows combining visual logic with custom code. Create multi-step automations, integrate APIs, and build AI-native pipelines. Use when designing automation flows, integrating APIs, building event-driven systems, or creating LangChain-style AI workflows."
+description: "Workflow automation is the infrastructure that makes AI agents reliable. Without durable execution, a network hiccup during a 10-step payment flow means lost money and angry customers. With it, workflows resume exactly where they left off. This skill covers the platforms (n8n, Temporal, Inngest) and patterns (sequential, parallel, orchestrator-worker) that turn brittle scripts into production-grade automation. Key insight: The platforms make different tradeoffs. n8n optimizes for accessibility, Temporal for correctness, Inngest for developer experience."
+source: vibeship-spawner-skills (Apache 2.0)
---
-# 🔄 Workflow Automation
+# Workflow Automation
-> Patterns for building robust automated workflows, inspired by [n8n](https://github.com/n8n-io/n8n) and modern automation platforms.
+You are a workflow automation architect who has seen both the promise and
+the pain of these platforms. You've migrated teams from brittle cron jobs
+to durable execution and watched their on-call burden drop by 80%.
-## When to Use This Skill
+Your core insight: Different platforms make different tradeoffs. n8n is
+accessible but sacrifices performance. Temporal is correct but complex.
+Inngest balances developer experience with reliability. There's no "best" -
+only "best for your situation."
-Use this skill when:
+You push for durable execution wherever a failure would lose money or
+corrupt state.
-- Designing multi-step automation workflows
-- Integrating multiple APIs and services
-- Building event-driven systems
-- Creating AI-augmented pipelines
-- Handling errors in complex flows
+## Capabilities
----
+- workflow-automation
+- workflow-orchestration
+- durable-execution
+- event-driven-workflows
+- step-functions
+- job-queues
+- background-jobs
+- scheduled-tasks
-## 1. Workflow Design Principles
+## Patterns
-### 1.1 Core Concepts
+### Sequential Workflow Pattern
-```
-┌─────────────────────────────────────────────────────────────┐
-│ WORKFLOW │
-│ ┌────────┐ ┌────────┐ ┌────────┐ ┌────────┐ │
-│ │Trigger │───▶│ Node │───▶│ Node │───▶│ Action │ │
-│ └────────┘ └────────┘ └────────┘ └────────┘ │
-│ │ │ │ │ │
-│ ▼ ▼ ▼ ▼ │
-│ [Webhook] [Transform] [Condition] [Send Email] │
-└─────────────────────────────────────────────────────────────┘
-```
+Steps execute in order, each output becomes next input
-**Key Components**:
+### Parallel Workflow Pattern
-- **Trigger**: What starts the workflow
-- **Node**: Individual processing step
-- **Edge**: Connection between nodes
-- **Action**: External effect (API call, email, etc.)
+Independent steps run simultaneously, aggregate results
-### 1.2 Trigger Types
+### Orchestrator-Worker Pattern
-```javascript
-const TRIGGER_TYPES = {
- // Event-based
- webhook: {
- description: "HTTP request triggers workflow",
- use_case: "External integrations, form submissions",
- example: "POST /webhook/order-created",
- },
+Central coordinator dispatches work to specialized workers
- // Time-based
- cron: {
- description: "Scheduled execution",
- use_case: "Reports, cleanup, sync jobs",
- example: "0 9 * * *", // Daily at 9 AM
- },
+## Anti-Patterns
- // Change-based
- polling: {
- description: "Check for changes periodically",
- use_case: "Monitor RSS, check file changes",
- example: "Every 5 minutes check for new items",
- },
+### ❌ No Durable Execution for Payments
- // Message-based
- queue: {
- description: "Process from message queue",
- use_case: "Async processing, decoupling",
- example: "SQS, RabbitMQ, Redis Streams",
- },
+### ❌ Monolithic Workflows
- // Manual
- manual: {
- description: "User-initiated execution",
- use_case: "Testing, on-demand tasks",
- example: "Run workflow button",
- },
-};
-```
+### ❌ No Observability
-### 1.3 Node Types
+## ⚠️ Sharp Edges
-```javascript
-const NODE_TYPES = {
- // Data transformation
- transform: {
- description: "Modify data shape or values",
- operations: ["map", "filter", "merge", "split"],
- },
+| Issue | Severity | Solution |
+|-------|----------|----------|
+| Issue | critical | # ALWAYS use idempotency keys for external calls: |
+| Issue | high | # Break long workflows into checkpointed steps: |
+| Issue | high | # ALWAYS set timeouts on activities: |
+| Issue | critical | # WRONG - side effects in workflow code: |
+| Issue | medium | # ALWAYS use exponential backoff: |
+| Issue | high | # WRONG - large data in workflow: |
+| Issue | high | # Inngest onFailure handler: |
+| Issue | medium | # Every production n8n workflow needs: |
- // Flow control
- condition: {
- description: "Branch based on logic",
- operations: ["if/else", "switch", "filter"],
- },
+## Related Skills
- // External actions
- action: {
- description: "Interact with external services",
- operations: ["HTTP request", "database", "email", "API"],
- },
-
- // Sub-workflows
- subworkflow: {
- description: "Call another workflow",
- operations: ["invoke", "wait", "parallel"],
- },
-
- // Error handling
- errorHandler: {
- description: "Handle failures gracefully",
- operations: ["retry", "fallback", "notify"],
- },
-};
-```
-
----
-
-## 2. Common Workflow Patterns
-
-### 2.1 Sequential Pipeline
-
-```javascript
-// Simple A → B → C flow
-const sequentialWorkflow = {
- trigger: { type: "webhook", path: "/process" },
- nodes: [
- {
- id: "fetch",
- type: "http",
- config: {
- method: "GET",
- url: "{{trigger.data.api_url}}",
- },
- },
- {
- id: "transform",
- type: "code",
- config: {
- code: `
- return items.map(item => ({
- id: item.id,
- name: item.name.toUpperCase(),
- processed: true
- }));
- `,
- },
- },
- {
- id: "save",
- type: "database",
- config: {
- operation: "insert",
- table: "processed_items",
- data: "{{transform.output}}",
- },
- },
- ],
-};
-```
-
-### 2.2 Parallel Execution
-
-```javascript
-// Fan-out: Execute multiple nodes in parallel
-const parallelWorkflow = {
- trigger: { type: "cron", schedule: "0 * * * *" },
- nodes: [
- {
- id: "parallel_group",
- type: "parallel",
- nodes: [
- {
- id: "fetch_users",
- type: "http",
- config: { url: "/api/users" },
- },
- {
- id: "fetch_orders",
- type: "http",
- config: { url: "/api/orders" },
- },
- {
- id: "fetch_products",
- type: "http",
- config: { url: "/api/products" },
- },
- ],
- },
- {
- id: "merge",
- type: "merge",
- config: {
- method: "append", // or "combine", "zip"
- inputs: ["fetch_users", "fetch_orders", "fetch_products"],
- },
- },
- ],
-};
-```
-
-### 2.3 Conditional Branching
-
-```javascript
-const conditionalWorkflow = {
- trigger: { type: "webhook", path: "/order" },
- nodes: [
- {
- id: "check_value",
- type: "switch",
- config: {
- property: "{{trigger.data.total}}",
- rules: [
- { operator: "gte", value: 1000, output: "high_value" },
- { operator: "gte", value: 100, output: "medium_value" },
- { operator: "lt", value: 100, output: "low_value" },
- ],
- },
- },
- {
- id: "high_value",
- type: "action",
- onlyIf: "{{check_value.output}} === 'high_value'",
- config: {
- action: "notify_sales_team",
- },
- },
- {
- id: "medium_value",
- type: "action",
- onlyIf: "{{check_value.output}} === 'medium_value'",
- config: {
- action: "send_thank_you_email",
- },
- },
- {
- id: "low_value",
- type: "action",
- onlyIf: "{{check_value.output}} === 'low_value'",
- config: {
- action: "add_to_newsletter",
- },
- },
- ],
-};
-```
-
-### 2.4 Loop/Iterator Pattern
-
-```javascript
-const loopWorkflow = {
- trigger: { type: "manual" },
- nodes: [
- {
- id: "fetch_items",
- type: "http",
- config: { url: "/api/items" },
- },
- {
- id: "process_each",
- type: "loop",
- config: {
- items: "{{fetch_items.data}}",
- batchSize: 10, // Process 10 at a time
- continueOnError: true,
- },
- nodes: [
- {
- id: "enrich",
- type: "http",
- config: {
- url: "/api/enrich/{{item.id}}",
- },
- },
- {
- id: "save",
- type: "database",
- config: {
- operation: "update",
- id: "{{item.id}}",
- data: "{{enrich.output}}",
- },
- },
- ],
- },
- ],
-};
-```
-
-### 2.5 Wait/Delay Pattern
-
-```javascript
-const waitWorkflow = {
- trigger: { type: "webhook", path: "/signup" },
- nodes: [
- {
- id: "send_welcome",
- type: "email",
- config: {
- to: "{{trigger.data.email}}",
- template: "welcome",
- },
- },
- {
- id: "wait_24h",
- type: "wait",
- config: {
- duration: "24h",
- // Or: resumeAt: "{{trigger.data.preferred_time}}"
- },
- },
- {
- id: "send_onboarding",
- type: "email",
- config: {
- to: "{{trigger.data.email}}",
- template: "onboarding_tips",
- },
- },
- ],
-};
-```
-
----
-
-## 3. Error Handling Patterns
-
-### 3.1 Retry with Backoff
-
-```javascript
-const retryConfig = {
- retries: 3,
- backoff: "exponential", // linear, exponential, fixed
- initialDelay: 1000, // ms
- maxDelay: 30000, // ms
- retryOn: ["ECONNRESET", "ETIMEDOUT", "HTTP_5XX"],
-};
-
-const nodeWithRetry = {
- id: "api_call",
- type: "http",
- config: { url: "/api/external" },
- errorHandling: {
- retry: retryConfig,
- onMaxRetries: {
- action: "continue", // or "fail", "branch"
- fallbackValue: { data: [] },
- },
- },
-};
-```
-
-### 3.2 Dead Letter Queue
-
-```javascript
-const workflowWithDLQ = {
- config: {
- onError: {
- action: "send_to_dlq",
- queue: "failed_workflows",
- includeContext: true, // Include full workflow state
- },
- },
- nodes: [
- /* ... */
- ],
-};
-
-// Separate workflow to process failed items
-const dlqProcessor = {
- trigger: {
- type: "queue",
- queue: "failed_workflows",
- },
- nodes: [
- {
- id: "analyze",
- type: "code",
- config: {
- code: `
- const error = $input.error;
- const context = $input.context;
-
- // Classify error
- if (error.type === 'VALIDATION') {
- return { action: 'discard', reason: 'Bad data' };
- }
- if (error.type === 'RATE_LIMIT') {
- return { action: 'retry', delay: '1h' };
- }
- return { action: 'manual_review' };
- `,
- },
- },
- ],
-};
-```
-
-### 3.3 Compensation/Rollback
-
-```javascript
-const sagaWorkflow = {
- name: "order_saga",
- nodes: [
- {
- id: "reserve_inventory",
- type: "api",
- compensate: {
- id: "release_inventory",
- type: "api",
- config: { method: "POST", url: "/inventory/release" },
- },
- },
- {
- id: "charge_payment",
- type: "api",
- compensate: {
- id: "refund_payment",
- type: "api",
- config: { method: "POST", url: "/payments/refund" },
- },
- },
- {
- id: "create_shipment",
- type: "api",
- compensate: {
- id: "cancel_shipment",
- type: "api",
- config: { method: "POST", url: "/shipments/cancel" },
- },
- },
- ],
- onError: {
- strategy: "compensate_all", // Run all compensations in reverse order
- },
-};
-```
-
----
-
-## 4. Integration Patterns
-
-### 4.1 API Integration Template
-
-```javascript
-const apiIntegration = {
- name: "github_integration",
- baseUrl: "https://api.github.com",
- auth: {
- type: "bearer",
- token: "{{secrets.GITHUB_TOKEN}}",
- },
- operations: {
- listRepos: {
- method: "GET",
- path: "/user/repos",
- params: {
- per_page: 100,
- sort: "updated",
- },
- },
- createIssue: {
- method: "POST",
- path: "/repos/{{owner}}/{{repo}}/issues",
- body: {
- title: "{{title}}",
- body: "{{body}}",
- labels: "{{labels}}",
- },
- },
- },
- rateLimiting: {
- requests: 5000,
- period: "1h",
- strategy: "queue", // queue, reject, throttle
- },
-};
-```
-
-### 4.2 Webhook Handler
-
-```javascript
-const webhookHandler = {
- trigger: {
- type: "webhook",
- path: "/webhooks/stripe",
- method: "POST",
- authentication: {
- type: "signature",
- header: "stripe-signature",
- secret: "{{secrets.STRIPE_WEBHOOK_SECRET}}",
- algorithm: "sha256",
- },
- },
- nodes: [
- {
- id: "validate",
- type: "code",
- config: {
- code: `
- const event = $input.body;
- if (!['checkout.session.completed',
- 'payment_intent.succeeded'].includes(event.type)) {
- return { skip: true };
- }
- return event;
- `,
- },
- },
- {
- id: "route",
- type: "switch",
- config: {
- property: "{{validate.type}}",
- routes: {
- "checkout.session.completed": "handle_checkout",
- "payment_intent.succeeded": "handle_payment",
- },
- },
- },
- ],
-};
-```
-
----
-
-## 5. AI-Native Workflows
-
-### 5.1 LLM in Pipeline
-
-```javascript
-const aiWorkflow = {
- trigger: { type: "webhook", path: "/analyze" },
- nodes: [
- {
- id: "extract_text",
- type: "code",
- config: {
- code: "return { text: $input.document.content }",
- },
- },
- {
- id: "analyze_sentiment",
- type: "llm",
- config: {
- model: "gpt-4",
- prompt: `
- Analyze the sentiment of the following text.
- Return JSON: {"sentiment": "positive|negative|neutral", "confidence": 0-1}
-
- Text: {{extract_text.text}}
- `,
- responseFormat: "json",
- },
- },
- {
- id: "route_by_sentiment",
- type: "switch",
- config: {
- property: "{{analyze_sentiment.sentiment}}",
- routes: {
- negative: "escalate_to_support",
- positive: "send_thank_you",
- neutral: "archive",
- },
- },
- },
- ],
-};
-```
-
-### 5.2 Agent Workflow
-
-```javascript
-const agentWorkflow = {
- trigger: { type: "webhook", path: "/research" },
- nodes: [
- {
- id: "research_agent",
- type: "agent",
- config: {
- model: "gpt-4",
- tools: ["web_search", "calculator", "code_interpreter"],
- maxIterations: 10,
- prompt: `
- Research the following topic and provide a comprehensive summary:
- {{trigger.topic}}
-
- Use the tools available to gather accurate, up-to-date information.
- `,
- },
- },
- {
- id: "format_report",
- type: "llm",
- config: {
- model: "gpt-4",
- prompt: `
- Format this research into a professional report with sections:
- - Executive Summary
- - Key Findings
- - Recommendations
-
- Research: {{research_agent.output}}
- `,
- },
- },
- {
- id: "send_report",
- type: "email",
- config: {
- to: "{{trigger.email}}",
- subject: "Research Report: {{trigger.topic}}",
- body: "{{format_report.output}}",
- },
- },
- ],
-};
-```
-
----
-
-## 6. Workflow Best Practices
-
-### 6.1 Design Checklist
-
-- [ ] **Idempotency**: Can workflow run multiple times safely?
-- [ ] **Error handling**: What happens when nodes fail?
-- [ ] **Timeouts**: Are there appropriate timeouts?
-- [ ] **Logging**: Is there enough observability?
-- [ ] **Rate limits**: Are external APIs rate-limited?
-- [ ] **Secrets**: Are credentials stored securely?
-- [ ] **Testing**: Can workflow be tested in isolation?
-
-### 6.2 Naming Conventions
-
-```javascript
-// Workflows: verb_noun or noun_verb
-"sync_customers";
-"process_orders";
-"daily_report_generator";
-
-// Nodes: action_target
-"fetch_user_data";
-"transform_to_csv";
-"send_notification_email";
-
-// Variables: lowercase_snake_case
-"order_total";
-"customer_email";
-"processing_date";
-```
-
-### 6.3 Testing Workflows
-
-```javascript
-const workflowTest = {
- name: "order_processing_test",
- workflow: "process_order",
- testCases: [
- {
- name: "valid_order",
- input: {
- order_id: "test-123",
- items: [{ sku: "A1", qty: 2 }],
- },
- expectedOutput: {
- status: "processed",
- },
- mocks: {
- inventory_check: { available: true },
- payment_process: { success: true },
- },
- },
- {
- name: "out_of_stock",
- input: {
- order_id: "test-456",
- items: [{ sku: "B2", qty: 100 }],
- },
- expectedOutput: {
- status: "failed",
- reason: "insufficient_inventory",
- },
- mocks: {
- inventory_check: { available: false },
- },
- },
- ],
-};
-```
-
----
-
-## Resource Links
-
-- [n8n Documentation](https://docs.n8n.io/)
-- [Temporal Workflows](https://temporal.io/)
-- [Apache Airflow](https://airflow.apache.org/)
-- [Zapier Automation Patterns](https://zapier.com/blog/automation-patterns/)
+Works well with: `multi-agent-orchestration`, `agent-tool-builder`, `backend`, `devops`
diff --git a/skills/zapier-make-patterns/SKILL.md b/skills/zapier-make-patterns/SKILL.md
new file mode 100644
index 00000000..e637974b
--- /dev/null
+++ b/skills/zapier-make-patterns/SKILL.md
@@ -0,0 +1,67 @@
+---
+name: zapier-make-patterns
+description: "No-code automation democratizes workflow building. Zapier and Make (formerly Integromat) let non-developers automate business processes without writing code. But no-code doesn't mean no-complexity - these platforms have their own patterns, pitfalls, and breaking points. This skill covers when to use which platform, how to build reliable automations, and when to graduate to code-based solutions. Key insight: Zapier optimizes for simplicity and integrations (7000+ apps), Make optimizes for power and flexibility."
+source: vibeship-spawner-skills (Apache 2.0)
+---
+
+# Zapier & Make Patterns
+
+You are a no-code automation architect who has built thousands of Zaps and
+Scenarios for businesses of all sizes. You've seen automations that save
+companies 40% of their time, and you've debugged disasters where bad data
+flowed through 12 connected apps.
+
+Your core insight: No-code is powerful but not unlimited. You know exactly
+when a workflow belongs in Zapier (simple, fast, maximum integrations),
+when it belongs in Make (complex branching, data transformation, budget),
+and when it needs to graduate to custom code.
+
+## Capabilities
+
+- zapier
+- make
+- integromat
+- no-code-automation
+- zaps
+- scenarios
+- workflow-builders
+- business-process-automation
+
+## Patterns
+
+### Basic Trigger-Action Pattern
+
+Single trigger leads to one or more actions
+
+### Multi-Step Sequential Pattern
+
+Chain of actions executed in order
+
+### Conditional Branching Pattern
+
+Different actions based on conditions
+
+## Anti-Patterns
+
+### ❌ Text in Dropdown Fields
+
+### ❌ No Error Handling
+
+### ❌ Hardcoded Values
+
+## ⚠️ Sharp Edges
+
+| Issue | Severity | Solution |
+|-------|----------|----------|
+| Issue | critical | # ALWAYS use dropdowns to select, don't type |
+| Issue | critical | # Prevention: |
+| Issue | high | # Understand the math: |
+| Issue | high | # When a Zap breaks after app update: |
+| Issue | high | # Immediate fix: |
+| Issue | medium | # Handle duplicates: |
+| Issue | medium | # Understand operation counting: |
+| Issue | medium | # Best practices: |
+
+## Related Skills
+
+Works well with: `workflow-automation`, `agent-tool-builder`, `backend`, `api-designer`