From 1966f6a8a218850b0b1f84c88481ad066be93c09 Mon Sep 17 00:00:00 2001
From: sickn33
Date: Tue, 7 Apr 2026 18:25:18 +0200
Subject: [PATCH] fix(skills): Restore vibeship imports
Rebuild the affected vibeship-derived skills from the pinned upstream
snapshot instead of leaving the truncated imported bodies on main.
Refresh the derived catalog and plugin mirrors so the canonical skills,
compatibility data, and generated artifacts stay in sync.
Refs #473
---
CATALOG.md | 120 +-
CHANGELOG.md | 40 +
data/bundles.json | 36 +-
data/catalog.json | 988 ++++-----
.../skills/3d-web-experience/SKILL.md | 189 +-
.../skills/agent-evaluation/SKILL.md | 1114 +++++++++-
.../skills/agent-memory-systems/SKILL.md | 1062 ++++++++-
.../skills/agent-tool-builder/SKILL.md | 684 +++++-
.../skills/ai-agents-architect/SKILL.md | 302 ++-
.../skills/ai-product/SKILL.md | 742 ++++++-
.../skills/ai-wrapper-product/SKILL.md | 494 ++++-
.../skills/algolia-search/SKILL.md | 882 +++++++-
.../skills/autonomous-agents/SKILL.md | 1059 ++++++++-
.../skills/aws-serverless/SKILL.md | 1081 +++++++++-
.../skills/azure-functions/SKILL.md | 1337 +++++++++++-
.../skills/browser-automation/SKILL.md | 1093 +++++++++-
.../skills/browser-extension-builder/SKILL.md | 191 +-
.../skills/bullmq-specialist/SKILL.md | 363 +++-
.../skills/clerk-auth/SKILL.md | 811 ++++++-
.../skills/computer-use-agents/SKILL.md | 1898 ++++++++++++++++-
.../skills/context-window-management/SKILL.md | 289 ++-
.../skills/conversation-memory/SKILL.md | 477 ++++-
.../skills/crewai/SKILL.md | 304 ++-
.../skills/discord-bot-architect/SKILL.md | 1241 ++++++++++-
.../skills/email-systems/SKILL.md | 672 +++++-
.../skills/file-uploads/SKILL.md | 223 +-
.../skills/firebase/SKILL.md | 662 +++++-
.../skills/gcp-cloud-run/SKILL.md | 1149 +++++++++-
.../skills/graphql/SKILL.md | 1053 ++++++++-
.../skills/hubspot-integration/SKILL.md | 823 ++++++-
.../skills/inngest/SKILL.md | 447 +++-
.../skills/interactive-portfolio/SKILL.md | 386 +++-
.../skills/langfuse/SKILL.md | 343 ++-
.../skills/langgraph/SKILL.md | 317 ++-
.../skills/micro-saas-launcher/SKILL.md | 387 +++-
.../skills/neon-postgres/SKILL.md | 587 ++++-
.../skills/nextjs-supabase-auth/SKILL.md | 290 ++-
.../skills/notion-template-business/SKILL.md | 415 +++-
.../skills/personal-tool-builder/SKILL.md | 581 ++++-
.../skills/plaid-fintech/SKILL.md | 844 +++++++-
.../skills/prompt-caching/SKILL.md | 463 +++-
.../skills/rag-engineer/SKILL.md | 300 ++-
.../skills/salesforce-development/SKILL.md | 931 +++++++-
.../skills/scroll-experience/SKILL.md | 425 +++-
.../skills/segment-cdp/SKILL.md | 836 +++++++-
.../skills/shopify-apps/SKILL.md | 1494 ++++++++++++-
.../skills/slack-bot-builder/SKILL.md | 1190 ++++++++++-
.../skills/telegram-bot-builder/SKILL.md | 189 +-
.../skills/telegram-mini-app/SKILL.md | 489 ++++-
.../skills/trigger-dev/SKILL.md | 943 +++++++-
.../skills/twilio-communications/SKILL.md | 1338 +++++++++++-
.../skills/upstash-qstash/SKILL.md | 925 +++++++-
.../skills/vercel-deployment/SKILL.md | 676 +++++-
.../skills/viral-generator-builder/SKILL.md | 169 +-
.../skills/voice-agents/SKILL.md | 964 ++++++++-
.../skills/voice-ai-development/SKILL.md | 427 +++-
.../skills/workflow-automation/SKILL.md | 1011 ++++++++-
.../skills/zapier-make-patterns/SKILL.md | 799 ++++++-
.../skills/3d-web-experience/SKILL.md | 189 +-
.../skills/agent-evaluation/SKILL.md | 1114 +++++++++-
.../skills/agent-memory-systems/SKILL.md | 1062 ++++++++-
.../skills/agent-tool-builder/SKILL.md | 684 +++++-
.../skills/ai-agents-architect/SKILL.md | 302 ++-
.../skills/ai-product/SKILL.md | 742 ++++++-
.../skills/ai-wrapper-product/SKILL.md | 494 ++++-
.../skills/algolia-search/SKILL.md | 882 +++++++-
.../skills/autonomous-agents/SKILL.md | 1059 ++++++++-
.../skills/aws-serverless/SKILL.md | 1081 +++++++++-
.../skills/azure-functions/SKILL.md | 1337 +++++++++++-
.../skills/browser-automation/SKILL.md | 1093 +++++++++-
.../skills/browser-extension-builder/SKILL.md | 191 +-
.../skills/bullmq-specialist/SKILL.md | 363 +++-
.../skills/clerk-auth/SKILL.md | 811 ++++++-
.../skills/computer-use-agents/SKILL.md | 1898 ++++++++++++++++-
.../skills/context-window-management/SKILL.md | 289 ++-
.../skills/conversation-memory/SKILL.md | 477 ++++-
.../skills/crewai/SKILL.md | 304 ++-
.../skills/discord-bot-architect/SKILL.md | 1241 ++++++++++-
.../skills/email-systems/SKILL.md | 672 +++++-
.../skills/file-uploads/SKILL.md | 223 +-
.../skills/firebase/SKILL.md | 662 +++++-
.../skills/gcp-cloud-run/SKILL.md | 1149 +++++++++-
.../skills/graphql/SKILL.md | 1053 ++++++++-
.../skills/hubspot-integration/SKILL.md | 823 ++++++-
.../skills/inngest/SKILL.md | 447 +++-
.../skills/interactive-portfolio/SKILL.md | 386 +++-
.../skills/langfuse/SKILL.md | 343 ++-
.../skills/langgraph/SKILL.md | 317 ++-
.../skills/micro-saas-launcher/SKILL.md | 387 +++-
.../skills/neon-postgres/SKILL.md | 587 ++++-
.../skills/nextjs-supabase-auth/SKILL.md | 290 ++-
.../skills/notion-template-business/SKILL.md | 415 +++-
.../skills/personal-tool-builder/SKILL.md | 581 ++++-
.../skills/plaid-fintech/SKILL.md | 844 +++++++-
.../skills/prompt-caching/SKILL.md | 463 +++-
.../skills/rag-engineer/SKILL.md | 300 ++-
.../skills/salesforce-development/SKILL.md | 931 +++++++-
.../skills/scroll-experience/SKILL.md | 425 +++-
.../skills/segment-cdp/SKILL.md | 836 +++++++-
.../skills/shopify-apps/SKILL.md | 1494 ++++++++++++-
.../skills/slack-bot-builder/SKILL.md | 1190 ++++++++++-
.../skills/telegram-bot-builder/SKILL.md | 189 +-
.../skills/telegram-mini-app/SKILL.md | 489 ++++-
.../skills/trigger-dev/SKILL.md | 943 +++++++-
.../skills/twilio-communications/SKILL.md | 1338 +++++++++++-
.../skills/upstash-qstash/SKILL.md | 925 +++++++-
.../skills/vercel-deployment/SKILL.md | 676 +++++-
.../skills/viral-generator-builder/SKILL.md | 169 +-
.../skills/voice-agents/SKILL.md | 964 ++++++++-
.../skills/voice-ai-development/SKILL.md | 427 +++-
.../skills/workflow-automation/SKILL.md | 1011 ++++++++-
.../skills/zapier-make-patterns/SKILL.md | 799 ++++++-
.../skills/agent-evaluation/SKILL.md | 1114 +++++++++-
.../skills/ai-agents-architect/SKILL.md | 302 ++-
.../skills/langgraph/SKILL.md | 317 ++-
.../skills/rag-engineer/SKILL.md | 300 ++-
.../skills/workflow-automation/SKILL.md | 1011 ++++++++-
.../skills/azure-functions/SKILL.md | 1337 +++++++++++-
.../skills/algolia-search/SKILL.md | 882 +++++++-
.../skills/hubspot-integration/SKILL.md | 823 ++++++-
.../skills/plaid-fintech/SKILL.md | 844 +++++++-
.../skills/interactive-portfolio/SKILL.md | 386 +++-
.../skills/aws-serverless/SKILL.md | 1081 +++++++++-
.../skills/algolia-search/SKILL.md | 882 +++++++-
.../skills/hubspot-integration/SKILL.md | 823 ++++++-
.../skills/plaid-fintech/SKILL.md | 844 +++++++-
.../skills/twilio-communications/SKILL.md | 1338 +++++++++++-
.../skills/context-window-management/SKILL.md | 289 ++-
.../skills/langfuse/SKILL.md | 343 ++-
.../skills/prompt-caching/SKILL.md | 463 +++-
.../skills/browser-automation/SKILL.md | 1093 +++++++++-
.../skills/3d-web-experience/SKILL.md | 189 +-
.../skills/scroll-experience/SKILL.md | 425 +++-
skills/3d-web-experience/SKILL.md | 189 +-
skills/agent-evaluation/SKILL.md | 1114 +++++++++-
skills/agent-memory-systems/SKILL.md | 1062 ++++++++-
skills/agent-tool-builder/SKILL.md | 684 +++++-
skills/ai-agents-architect/SKILL.md | 302 ++-
skills/ai-product/SKILL.md | 742 ++++++-
skills/ai-wrapper-product/SKILL.md | 494 ++++-
skills/algolia-search/SKILL.md | 882 +++++++-
skills/autonomous-agents/SKILL.md | 1059 ++++++++-
skills/aws-serverless/SKILL.md | 1081 +++++++++-
skills/azure-functions/SKILL.md | 1337 +++++++++++-
skills/browser-automation/SKILL.md | 1093 +++++++++-
skills/browser-extension-builder/SKILL.md | 191 +-
skills/bullmq-specialist/SKILL.md | 363 +++-
skills/clerk-auth/SKILL.md | 811 ++++++-
skills/computer-use-agents/SKILL.md | 1898 ++++++++++++++++-
skills/context-window-management/SKILL.md | 289 ++-
skills/conversation-memory/SKILL.md | 477 ++++-
skills/crewai/SKILL.md | 304 ++-
skills/discord-bot-architect/SKILL.md | 1241 ++++++++++-
skills/email-systems/SKILL.md | 672 +++++-
skills/file-uploads/SKILL.md | 223 +-
skills/firebase/SKILL.md | 662 +++++-
skills/gcp-cloud-run/SKILL.md | 1149 +++++++++-
skills/graphql/SKILL.md | 1053 ++++++++-
skills/hubspot-integration/SKILL.md | 823 ++++++-
skills/inngest/SKILL.md | 447 +++-
skills/interactive-portfolio/SKILL.md | 386 +++-
skills/langfuse/SKILL.md | 343 ++-
skills/langgraph/SKILL.md | 317 ++-
skills/micro-saas-launcher/SKILL.md | 387 +++-
skills/neon-postgres/SKILL.md | 587 ++++-
skills/nextjs-supabase-auth/SKILL.md | 290 ++-
skills/notion-template-business/SKILL.md | 415 +++-
skills/personal-tool-builder/SKILL.md | 581 ++++-
skills/plaid-fintech/SKILL.md | 844 +++++++-
skills/prompt-caching/SKILL.md | 463 +++-
skills/rag-engineer/SKILL.md | 300 ++-
skills/salesforce-development/SKILL.md | 931 +++++++-
skills/scroll-experience/SKILL.md | 425 +++-
skills/segment-cdp/SKILL.md | 836 +++++++-
skills/shopify-apps/SKILL.md | 1494 ++++++++++++-
skills/slack-bot-builder/SKILL.md | 1190 ++++++++++-
skills/telegram-bot-builder/SKILL.md | 189 +-
skills/telegram-mini-app/SKILL.md | 489 ++++-
skills/trigger-dev/SKILL.md | 943 +++++++-
skills/twilio-communications/SKILL.md | 1338 +++++++++++-
skills/upstash-qstash/SKILL.md | 925 +++++++-
skills/vercel-deployment/SKILL.md | 676 +++++-
skills/viral-generator-builder/SKILL.md | 169 +-
skills/voice-agents/SKILL.md | 964 ++++++++-
skills/voice-ai-development/SKILL.md | 427 +++-
skills/workflow-automation/SKILL.md | 1011 ++++++++-
skills/zapier-make-patterns/SKILL.md | 799 ++++++-
skills_index.json | 120 +-
tools/scripts/restore_vibeship_skills.js | 539 +++++
189 files changed, 126068 insertions(+), 5944 deletions(-)
create mode 100644 tools/scripts/restore_vibeship_skills.js
diff --git a/CATALOG.md b/CATALOG.md
index 88c4f586..6a61bb89 100644
--- a/CATALOG.md
+++ b/CATALOG.md
@@ -4,7 +4,7 @@ Generated at: 2026-02-08T00:00:00.000Z
Total skills: 1377
-## architecture (88)
+## architecture (91)
| Skill | Description | Tags | Triggers |
| --- | --- | --- | --- |
@@ -23,6 +23,7 @@ Total skills: 1377
| `bash-scripting` | Bash scripting workflow for creating production-ready shell scripts with defensive patterns, error handling, and testing. | bash, scripting | bash, scripting, creating, shell, scripts, defensive, error, handling, testing |
| `binary-analysis-patterns` | Comprehensive patterns and techniques for analyzing compiled binaries, understanding assembly code, and reconstructing program logic. | binary | binary, analysis, techniques, analyzing, compiled, binaries, understanding, assembly, code, reconstructing, program, logic |
| `brainstorming` | Use before creative or constructive work (features, architecture, behavior). Transforms vague ideas into validated designs through disciplined reasoning and ... | brainstorming | brainstorming, before, creative, constructive, work, features, architecture, behavior, transforms, vague, ideas, validated |
+| `browser-extension-builder` | Expert in building browser extensions that solve real problems - Chrome, Firefox, and cross-browser extensions. Covers extension architecture, manifest v3, c... | browser, extension, builder | browser, extension, builder, building, extensions, solve, real, problems, chrome, firefox, cross, covers |
| `building-native-ui` | Complete guide for building beautiful apps with Expo Router. Covers fundamentals, styling, components, navigation, animations, patterns, and native tabs. | building, native, ui | building, native, ui, complete, beautiful, apps, expo, router, covers, fundamentals, styling, components |
| `c4-architecture-c4-architecture` | Generate comprehensive C4 architecture documentation for an existing repository/codebase using a bottom-up analysis approach. | c4, architecture | c4, architecture, generate, documentation, existing, repository, codebase, bottom, up, analysis, approach |
| `c4-code` | Expert C4 Code-level documentation specialist. Analyzes code directories to create comprehensive C4 code-level documentation including function signatures, a... | c4, code | c4, code, level, documentation, analyzes, directories, including, function, signatures, arguments, dependencies, structure |
@@ -55,6 +56,7 @@ Total skills: 1377
| `godot-gdscript-patterns` | Master Godot 4 GDScript patterns including signals, scenes, state machines, and optimization. Use when building Godot games, implementing game systems, or le... | godot, gdscript | godot, gdscript, including, signals, scenes, state, machines, optimization, building, games, implementing, game |
| `hig-patterns` | Apple Human Interface Guidelines interaction and UX patterns. | hig | hig, apple, human, interface, guidelines, interaction, ux |
| `i18n-localization` | Internationalization and localization patterns. Detecting hardcoded strings, managing translations, locale files, RTL support. | i18n, localization | i18n, localization, internationalization, detecting, hardcoded, strings, managing, translations, locale, files, rtl |
+| `inngest` | Inngest expert for serverless-first background jobs, event-driven workflows, and durable execution without managing queues or workers. | inngest | inngest, serverless, first, background, jobs, event, driven, durable, execution, without, managing, queues |
| `kotlin-coroutines-expert` | Expert patterns for Kotlin Coroutines and Flow, covering structured concurrency, error handling, and testing. | kotlin, coroutines | kotlin, coroutines, flow, covering, structured, concurrency, error, handling, testing |
| `kpi-dashboard-design` | Comprehensive patterns for designing effective Key Performance Indicator (KPI) dashboards that drive business decisions. | kpi, dashboard | kpi, dashboard, designing, effective, key, performance, indicator, dashboards, drive, business, decisions |
| `makepad-event-action` | CRITICAL: Use for Makepad event and action handling. Triggers on: makepad event, makepad action, Event enum, ActionTrait, handle_event, MouseDown, KeyDown, T... | makepad, event, action | makepad, event, action, critical, handling, triggers, enum, actiontrait, handle, mousedown, keydown, touchupdate |
@@ -78,10 +80,10 @@ Total skills: 1377
| `robius-event-action` | CRITICAL: Use for Robius event and action patterns. Triggers on: custom action, MatchEvent, post_action, cx.widget_action, handle_actions, DefaultNone, widge... | robius, event, action | robius, event, action, critical, triggers, custom, matchevent, post, cx, widget, handle, actions |
| `robius-widget-patterns` | CRITICAL: Use for Robius widget patterns. Triggers on: apply_over, TextOrImage, modal, 可复用, 模态, collapsible, drag drop, reusable widget, widget design, pagef... | robius, widget | robius, widget, critical, triggers, apply, textorimage, modal, collapsible, drag, drop, reusable, pageflip |
| `saga-orchestration` | Patterns for managing distributed transactions and long-running business processes. | saga | saga, orchestration, managing, distributed, transactions, long, running, business, processes |
+| `salesforce-development` | Expert patterns for Salesforce platform development including Lightning Web Components (LWC), Apex triggers and classes, REST/Bulk APIs, Connected Apps, and ... | salesforce | salesforce, development, platform, including, lightning, web, components, lwc, apex, triggers, classes, rest |
| `seo-plan` | Strategic SEO planning for new or existing websites. Industry-specific templates, competitive analysis, content strategy, and implementation roadmap. Use whe... | seo, plan | seo, plan, strategic, planning, new, existing, websites, industry, specific, competitive, analysis, content |
| `shadcn` | Manages shadcn/ui components and projects, providing context, documentation, and usage patterns for building modern design systems. | shadcn | shadcn, manages, ui, components, providing, context, documentation, usage, building |
| `site-architecture` | Plan or restructure website hierarchy, navigation, URL patterns, breadcrumbs, and internal linking. Use when mapping pages, sections, and site structure, but... | site, architecture | site, architecture, plan, restructure, website, hierarchy, navigation, url, breadcrumbs, internal, linking, mapping |
-| `slack-bot-builder` | The Bolt framework is Slack's recommended approach for building apps. It handles authentication, event routing, request verification, and HTTP request proces... | slack, bot, builder | slack, bot, builder, bolt, framework, recommended, approach, building, apps, authentication, event, routing |
| `software-architecture` | Guide for quality focused software architecture. This skill should be used when users want to write code, design architecture, analyze code, in any case that... | software, architecture | software, architecture, quality, skill, should, used, users, want, write, code, analyze, any |
| `swiftui-ui-patterns` | Apply proven SwiftUI UI patterns for navigation, sheets, async state, and reusable screens. | swiftui, ui | swiftui, ui, apply, proven, navigation, sheets, async, state, reusable, screens |
| `tailwind-design-system` | Build production-ready design systems with Tailwind CSS, including design tokens, component variants, responsive patterns, and accessibility. | tailwind | tailwind, css, including, tokens, component, variants, responsive, accessibility |
@@ -96,8 +98,9 @@ Total skills: 1377
| `wordpress-theme-development` | WordPress theme development workflow covering theme architecture, template hierarchy, custom post types, block editor support, responsive design, and WordPre... | wordpress, theme | wordpress, theme, development, covering, architecture, hierarchy, custom, post, types, block, editor, responsive |
| `workflow-orchestration-patterns` | Master workflow orchestration architecture with Temporal, covering fundamental design decisions, resilience patterns, and best practices for building reliabl... | | orchestration, architecture, temporal, covering, fundamental, decisions, resilience, building, reliable, distributed |
| `workflow-patterns` | Use this skill when implementing tasks according to Conductor's TDD workflow, handling phase checkpoints, managing git commits for tasks, or understanding th... | | skill, implementing, tasks, according, conductor, tdd, handling, phase, checkpoints, managing, git, commits |
+| `zapier-make-patterns` | No-code automation democratizes workflow building. Zapier and Make (formerly Integromat) let non-developers automate business processes without writing code.... | zapier, make | zapier, make, no, code, automation, democratizes, building, formerly, integromat, let, non, developers |
-## business (75)
+## business (76)
| Skill | Description | Tags | Triggers |
| --- | --- | --- | --- |
@@ -118,6 +121,7 @@ Total skills: 1377
| `customer-psychographic-profiler` | One sentence - what this skill does and when to invoke it | customer, psychographic, profiler | customer, psychographic, profiler, one, sentence, what, skill, does, invoke |
| `defi-protocol-templates` | Implement DeFi protocols with production-ready templates for staking, AMMs, governance, and lending systems. Use when building decentralized finance applicat... | defi, protocol | defi, protocol, protocols, staking, amms, governance, lending, building, decentralized, finance, applications, smart |
| `email-sequence` | You are an expert in email marketing and automation. Your goal is to create email sequences that nurture relationships, drive action, and move people toward ... | email, sequence | email, sequence, marketing, automation, goal, sequences, nurture, relationships, drive, action, move, people |
+| `email-systems` | Email has the highest ROI of any marketing channel. $36 for every $1 spent. Yet most startups treat it as an afterthought - bulk blasts, no personalization, ... | email | email, highest, roi, any, marketing, channel, 36, every, spent, yet, most, startups |
| `framework-migration-legacy-modernize` | Orchestrate a comprehensive legacy system modernization using the strangler fig pattern, enabling gradual replacement of outdated components while maintainin... | framework, migration, legacy, modernize | framework, migration, legacy, modernize, orchestrate, modernization, strangler, fig, enabling, gradual, replacement, outdated |
| `free-tool-strategy` | You are an expert in engineering-as-marketing strategy. Your goal is to help plan and evaluate free tools that generate leads, attract organic traffic, and b... | free | free, engineering, marketing, goal, plan, evaluate, generate, leads, attract, organic, traffic, brand |
| `growth-engine` | Motor de crescimento para produtos digitais -- growth hacking, SEO, ASO, viral loops, email marketing, CRM, referral programs e aquisicao organica. | growth, seo, marketing, viral, acquisition | growth, seo, marketing, viral, acquisition, engine, motor, de, crescimento, para, produtos, digitais |
@@ -130,11 +134,10 @@ Total skills: 1377
| `market-sizing-analysis` | Comprehensive market sizing methodologies for calculating Total Addressable Market (TAM), Serviceable Available Market (SAM), and Serviceable Obtainable Mark... | market, sizing | market, sizing, analysis, methodologies, calculating, total, addressable, tam, serviceable, available, sam, obtainable |
| `marketing-ideas` | Provide proven marketing strategies and growth ideas for SaaS and software products, prioritized using a marketing feasibility scoring system. | marketing, ideas | marketing, ideas, provide, proven, growth, saas, software, products, prioritized, feasibility, scoring |
| `marketing-psychology` | Apply behavioral science and mental models to marketing decisions, prioritized using a psychological leverage and feasibility scoring system. | marketing, psychology | marketing, psychology, apply, behavioral, science, mental, models, decisions, prioritized, psychological, leverage, feasibility |
-| `notion-template-business` | You know templates are real businesses that can generate serious income. You've seen creators make six figures selling Notion templates. You understand it's ... | notion, business | notion, business, know, real, businesses, generate, serious, income, ve, seen, creators, six |
+| `notion-template-business` | Expert in building and selling Notion templates as a business - not just making templates, but building a sustainable digital product business. Covers templa... | notion, business | notion, business, building, selling, just, making, sustainable, digital, product, covers, pricing, marketplaces |
| `odoo-ecommerce-configurator` | Expert guide for Odoo eCommerce and Website: product catalog, payment providers, shipping methods, SEO, and order-to-fulfillment workflow. | odoo, ecommerce, configurator | odoo, ecommerce, configurator, website, product, catalog, payment, providers, shipping, methods, seo, order |
| `odoo-hr-payroll-setup` | Expert guide for Odoo HR and Payroll: salary structures, payslip rules, leave policies, employee contracts, and payroll journal entries. | odoo, hr, payroll, setup | odoo, hr, payroll, setup, salary, structures, payslip, rules, leave, policies, employee, contracts |
| `paid-ads` | You are an expert performance marketer with direct access to ad platform accounts. Your goal is to help create, optimize, and scale paid advertising campaign... | paid, ads | paid, ads, performance, marketer, direct, access, ad, platform, accounts, goal, optimize, scale |
-| `personal-tool-builder` | You believe the best tools come from real problems. You've built dozens of personal tools - some stayed personal, others became products used by thousands. Y... | personal, builder | personal, builder, believe, come, real, problems, ve, built, dozens, some, stayed, others |
| `pricing-strategy` | Design pricing, packaging, and monetization strategies based on value, customer willingness to pay, and growth objectives. | pricing | pricing, packaging, monetization, value, customer, willingness, pay, growth, objectives |
| `product-design` | Design de produto nivel Apple — sistemas visuais, UX flows, acessibilidade, linguagem visual proprietaria, design tokens, prototipagem e handoff. Cobre Figma... | design, ux, design-systems, accessibility, figma | design, ux, design-systems, accessibility, figma, product, de, produto, nivel, apple, sistemas, visuais |
| `product-inventor` | Product Inventor e Design Alchemist de nivel maximo — combina Product Thinking, Design Systems, UI Engineering, Psicologia Cognitiva, Storytelling e execucao... | product-thinking, innovation, ux-design, storytelling | product-thinking, innovation, ux-design, storytelling, product, inventor, alchemist, de, nivel, maximo, combina, thinking |
@@ -144,6 +147,7 @@ Total skills: 1377
| `sales-automator` | Draft cold emails, follow-ups, and proposal templates. Creates pricing pages, case studies, and sales scripts. Use PROACTIVELY for sales outreach or lead nur... | sales, automator | sales, automator, draft, cold, emails, follow, ups, proposal, creates, pricing, pages, case |
| `sales-enablement` | Create sales collateral such as decks, one-pagers, objection docs, demo scripts, playbooks, and proposal templates. Use when a sales team needs assets that h... | sales, enablement | sales, enablement, collateral, such, decks, one, pagers, objection, docs, demo, scripts, playbooks |
| `screenshots` | Generate marketing screenshots of your app using Playwright. Use when the user wants to create screenshots for Product Hunt, social media, landing pages, or ... | screenshots | screenshots, generate, marketing, app, playwright, user, wants, product, hunt, social, media, landing |
+| `scroll-experience` | Expert in building immersive scroll-driven experiences - parallax storytelling, scroll animations, interactive narratives, and cinematic web experiences. Lik... | scroll, experience | scroll, experience, building, immersive, driven, experiences, parallax, storytelling, animations, interactive, narratives, cinematic |
| `seo-aeo-blog-writer` | Writes long-form blog posts with TL;DR block, definition sentence, comparison table, and 5-question FAQ for SEO ranking and AEO citation. Activate when the u... | seo, aeo, blog, writer | seo, aeo, blog, writer, writes, long, form, posts, tl, dr, block, definition |
| `seo-aeo-content-cluster` | Builds a topical authority map with a pillar page, prioritised cluster articles, content types, internal link map, and content gap analysis. Activate when th... | seo, aeo, content, cluster | seo, aeo, content, cluster, topical, authority, map, pillar, page, prioritised, articles, types |
| `seo-aeo-internal-linking` | Maps internal link opportunities between pages with anchor text, placement instructions, orphan page detection, and cannibalization checks. Activate when the... | seo, aeo, internal, linking | seo, aeo, internal, linking, maps, link, opportunities, between, pages, anchor, text, placement |
@@ -177,29 +181,29 @@ Total skills: 1377
| `warren-buffett` | Agente que simula Warren Buffett — o maior investidor do seculo XX e XXI, CEO da Berkshire Hathaway, discipulo de Benjamin Graham e socio intelectual de Char... | persona, investing, value-investing, business | persona, investing, value-investing, business, warren, buffett, agente, que, simula, maior, investidor, do |
| `whatsapp-automation` | Automate WhatsApp Business tasks via Rube MCP (Composio): send messages, manage templates, upload media, and handle contacts. Always search tools first for c... | whatsapp | whatsapp, automation, automate, business, tasks, via, rube, mcp, composio, send, messages, upload |
-## data-ai (257)
+## data-ai (260)
| Skill | Description | Tags | Triggers |
| --- | --- | --- | --- |
| `adhx` | Fetch any X/Twitter post as clean LLM-friendly JSON. Converts x.com, twitter.com, or adhx.com links into structured data with full article content, author in... | adhx | adhx, fetch, any, twitter, post, clean, llm, friendly, json, converts, com, links |
| `advanced-evaluation` | This skill should be used when the user asks to "implement LLM-as-judge", "compare model outputs", "create evaluation rubrics", "mitigate evaluation bias", o... | advanced, evaluation | advanced, evaluation, skill, should, used, user, asks, llm, judge, compare, model, outputs |
-| `agent-evaluation` | You're a quality engineer who has seen agents that aced benchmarks fail spectacularly in production. You've learned that evaluating LLM agents is fundamental... | agent, evaluation | agent, evaluation, re, quality, engineer, who, seen, agents, aced, benchmarks, fail, spectacularly |
| `agent-framework-azure-ai-py` | Build persistent agents on Azure AI Foundry using the Microsoft Agent Framework Python SDK. | agent, framework, azure, ai, py | agent, framework, azure, ai, py, persistent, agents, foundry, microsoft, python, sdk |
| `agent-memory-mcp` | A hybrid memory system that provides persistent, searchable knowledge management for AI agents (Architecture, Patterns, Decisions). | agent, memory, mcp | agent, memory, mcp, hybrid, provides, persistent, searchable, knowledge, ai, agents, architecture, decisions |
+| `agent-tool-builder` | Tools are how AI agents interact with the world. A well-designed tool is the difference between an agent that works and one that hallucinates, fails silently... | agent, builder | agent, builder, how, ai, agents, interact, world, well, designed, difference, between, works |
| `agentfolio` | Skill for discovering and researching autonomous AI agents, tools, and ecosystems using the AgentFolio directory. | agentfolio | agentfolio, skill, discovering, researching, autonomous, ai, agents, ecosystems, directory |
| `agentmail` | Email infrastructure for AI agents. Create accounts, send/receive emails, manage webhooks, and check karma balance via the AgentMail API. | agentmail | agentmail, email, infrastructure, ai, agents, accounts, send, receive, emails, webhooks, check, karma |
| `agentphone` | Build AI phone agents with AgentPhone API. Use when the user wants to make phone calls, send/receive SMS, manage phone numbers, create voice agents, set up w... | agentphone | agentphone, ai, phone, agents, api, user, wants, calls, send, receive, sms, numbers |
| `agents-v2-py` | Build container-based Foundry Agents with Azure AI Projects SDK (ImageBasedHostedAgentDefinition). Use when creating hosted agents with custom container imag... | agents, v2, py | agents, v2, py, container, foundry, azure, ai, sdk, imagebasedhostedagentdefinition, creating, hosted, custom |
| `ai-agent-development` | AI agent development workflow for building autonomous agents, multi-agent systems, and agent orchestration with CrewAI, LangGraph, and custom agents. | ai, agent | ai, agent, development, building, autonomous, agents, multi, orchestration, crewai, langgraph, custom |
-| `ai-agents-architect` | I build AI systems that can act autonomously while remaining controllable. I understand that agents fail in unexpected ways - I design for graceful degradati... | ai, agents | ai, agents, architect, act, autonomously, while, remaining, controllable, understand, fail, unexpected, ways |
+| `ai-agents-architect` | Expert in designing and building autonomous AI agents. Masters tool use, memory systems, planning strategies, and multi-agent orchestration. | ai, agents | ai, agents, architect, designing, building, autonomous, masters, memory, planning, multi, agent, orchestration |
| `ai-analyzer` | AI驱动的综合健康分析系统,整合多维度健康数据、识别异常模式、预测健康风险、提供个性化建议。支持智能问答和AI健康报告生成。 | ai, analyzer | ai, analyzer |
| `ai-engineer` | Build production-ready LLM applications, advanced RAG systems, and intelligent agents. Implements vector search, multimodal AI, agent orchestration, and ente... | ai | ai, engineer, llm, applications, rag, intelligent, agents, implements, vector, search, multimodal, agent |
| `ai-ml` | AI and machine learning workflow covering LLM application development, RAG implementation, agent architecture, ML pipelines, and AI-powered features. | ai, ml | ai, ml, machine, learning, covering, llm, application, development, rag, agent, architecture, pipelines |
| `ai-native-cli` | Design spec with 98 rules for building CLI tools that AI agents can safely use. Covers structured JSON output, error handling, input contracts, safety guardr... | ai, native, cli | ai, native, cli, spec, 98, rules, building, agents, safely, covers, structured, json |
-| `ai-product` | You are an AI product engineer who has shipped LLM features to millions of users. You've debugged hallucinations at 3am, optimized prompts to reduce costs by... | ai, product | ai, product, engineer, who, shipped, llm, features, millions, users, ve, debugged, hallucinations |
+| `ai-product` | Every product will be AI-powered. The question is whether you'll build it right or ship a demo that falls apart in production. | ai, product | ai, product, every, powered, question, whether, ll, right, ship, demo, falls, apart |
| `ai-seo` | Optimize content for AI search and LLM citations across AI Overviews, ChatGPT, Perplexity, Claude, Gemini, and similar systems. Use when improving AI visibil... | ai, seo | ai, seo, optimize, content, search, llm, citations, overviews, chatgpt, perplexity, claude, gemini |
| `ai-studio-image` | Geracao de imagens humanizadas via Google AI Studio (Gemini). Fotos realistas estilo influencer ou educacional com iluminacao natural e imperfeicoes sutis. | image-generation, ai-studio, google, photography | image-generation, ai-studio, google, photography, ai, studio, image, geracao, de, imagens, humanizadas, via |
-| `ai-wrapper-product` | You know AI wrappers get a bad rap, but the good ones solve real problems. You build products where AI is the engine, not the gimmick. You understand prompt ... | ai, wrapper, product | ai, wrapper, product, know, wrappers, get, bad, rap, good, ones, solve, real |
+| `ai-wrapper-product` | Expert in building products that wrap AI APIs (OpenAI, Anthropic, etc. ) into focused tools people will pay for. Not just "ChatGPT but different" - products ... | ai, wrapper, product | ai, wrapper, product, building, products, wrap, apis, openai, anthropic, etc, people, pay |
| `alpha-vantage` | Access 20+ years of global financial data: equities, options, forex, crypto, commodities, economic indicators, and 50+ technical indicators. | alpha, vantage | alpha, vantage, access, 20, years, global, financial, data, equities, options, forex, crypto |
| `analytics-product` | Analytics de produto — PostHog, Mixpanel, eventos, funnels, cohorts, retencao, north star metric, OKRs e dashboards de produto. | analytics, product, metrics, posthog, mixpanel | analytics, product, metrics, posthog, mixpanel, de, produto, eventos, funnels, cohorts, retencao, north |
| `analytics-tracking` | Design, audit, and improve analytics tracking systems that produce reliable, decision-ready data. | analytics, tracking | analytics, tracking, audit, improve, produce, reliable, decision, data |
@@ -213,7 +217,7 @@ Total skills: 1377
| `appdeploy` | Deploy web apps with backend APIs, database, and file storage. Use when the user asks to deploy or publish a website or web app and wants a public URL. Uses ... | appdeploy | appdeploy, deploy, web, apps, backend, apis, database, file, storage, user, asks, publish |
| `astropy` | Astropy is the core Python package for astronomy, providing essential functionality for astronomical research and data analysis. | astropy | astropy, core, python, package, astronomy, providing, essential, functionality, astronomical, research, data, analysis |
| `audio-transcriber` | Transform audio recordings into professional Markdown documentation with intelligent summaries using LLM integration | audio, transcription, whisper, meeting-minutes, speech-to-text | audio, transcription, whisper, meeting-minutes, speech-to-text, transcriber, transform, recordings, professional, markdown, documentation, intelligent |
-| `autonomous-agents` | You are an agent architect who has learned the hard lessons of autonomous AI. You've seen the gap between impressive demos and production disasters. You know... | autonomous, agents | autonomous, agents, agent, architect, who, learned, hard, lessons, ai, ve, seen, gap |
+| `autonomous-agents` | Autonomous agents are AI systems that can independently decompose goals, plan actions, execute tools, and self-correct without constant human guidance. The c... | autonomous, agents | autonomous, agents, ai, independently, decompose, goals, plan, actions, execute, self, correct, without |
| `avoid-ai-writing` | Audit and rewrite content to remove 21 categories of AI writing patterns with a 43-entry replacement table | avoid, ai, writing | avoid, ai, writing, audit, rewrite, content, remove, 21, categories, 43, entry, replacement |
| `awt-e2e-testing` | AI-powered E2E web testing — eyes and hands for AI coding tools. Declarative YAML scenarios, Playwright execution, visual matching (OpenCV + OCR), platform a... | awt, e2e | awt, e2e, testing, ai, powered, web, eyes, hands, coding, declarative, yaml, scenarios |
| `azure-ai-agents-persistent-dotnet` | Azure AI Agents Persistent SDK for .NET. Low-level SDK for creating and managing AI agents with threads, messages, runs, and tools. | azure, ai, agents, persistent, dotnet | azure, ai, agents, persistent, dotnet, sdk, net, low, level, creating, managing, threads |
@@ -272,6 +276,7 @@ Total skills: 1377
| `beautiful-prose` | A hard-edged writing style contract for timeless, forceful English prose without modern AI tics. Use when users ask for prose or rewrites that must be clean,... | beautiful, prose | beautiful, prose, hard, edged, writing, style, contract, timeless, forceful, english, without, ai |
| `behavioral-modes` | AI operational modes (brainstorm, implement, debug, review, teach, ship, orchestrate). Use to adapt behavior based on task type. | behavioral, modes | behavioral, modes, ai, operational, brainstorm, debug, review, teach, ship, orchestrate, adapt, behavior |
| `biopython` | Biopython is a comprehensive set of freely available Python tools for biological computation. It provides functionality for sequence manipulation, file I/O, ... | biopython | biopython, set, freely, available, python, biological, computation, provides, functionality, sequence, manipulation, file |
+| `browser-automation` | Browser automation powers web testing, scraping, and AI agent interactions. The difference between a flaky script and a reliable system comes down to underst... | browser | browser, automation, powers, web, testing, scraping, ai, agent, interactions, difference, between, flaky |
| `business-analyst` | Master modern business analysis with AI-powered analytics, real-time dashboards, and data-driven insights. Build comprehensive KPI frameworks, predictive mod... | business, analyst | business, analyst, analysis, ai, powered, analytics, real, time, dashboards, data, driven, insights |
| `cc-skill-backend-patterns` | Backend architecture patterns, API design, database optimization, and server-side best practices for Node.js, Express, and Next.js API routes. | cc, skill, backend | cc, skill, backend, architecture, api, database, optimization, server, side, node, js, express |
| `cc-skill-clickhouse-io` | ClickHouse database patterns, query optimization, analytics, and data engineering best practices for high-performance analytical workloads. | cc, skill, clickhouse, io | cc, skill, clickhouse, io, database, query, optimization, analytics, data, engineering, high, performance |
@@ -283,13 +288,13 @@ Total skills: 1377
| `code-documentation-doc-generate` | You are a documentation expert specializing in creating comprehensive, maintainable documentation from code. Generate API docs, architecture diagrams, user g... | code, documentation, doc, generate | code, documentation, doc, generate, specializing, creating, maintainable, api, docs, architecture, diagrams, user |
| `code-reviewer` | Elite code review expert specializing in modern AI-powered code | code | code, reviewer, elite, review, specializing, ai, powered |
| `codex-review` | Professional code review with auto CHANGELOG generation, integrated with Codex AI. Use when you want professional code review before commits, you need automa... | codex | codex, review, professional, code, auto, changelog, generation, integrated, ai, want, before, commits |
+| `computer-use-agents` | Build AI agents that interact with computers like humans do - viewing screens, moving cursors, clicking buttons, and typing text. Covers Anthropic's Computer... | computer, use, agents | computer, use, agents, ai, interact, computers, like, humans, do, viewing, screens, moving |
| `constant-time-analysis` | Analyze cryptographic code to detect operations that leak secret data through execution timing variations. | constant, time | constant, time, analysis, analyze, cryptographic, code, detect, operations, leak, secret, data, through |
| `content-marketer` | Elite content marketing strategist specializing in AI-powered content creation, omnichannel distribution, SEO optimization, and data-driven performance marke... | content, marketer | content, marketer, elite, marketing, strategist, specializing, ai, powered, creation, omnichannel, distribution, seo |
| `context-driven-development` | Guide for implementing and maintaining context as a managed artifact alongside code, enabling consistent AI interactions and team alignment through structure... | driven | driven, context, development, implementing, maintaining, managed, artifact, alongside, code, enabling, consistent, ai |
| `context-manager` | Elite AI context engineering specialist mastering dynamic context management, vector databases, knowledge graphs, and intelligent memory systems. | manager | manager, context, elite, ai, engineering, mastering, dynamic, vector, databases, knowledge, graphs, intelligent |
-| `context-window-management` | You're a context engineering specialist who has optimized LLM applications handling millions of conversations. You've seen systems hit token limits, suffer c... | window | window, context, re, engineering, who, optimized, llm, applications, handling, millions, conversations, ve |
-| `conversation-memory` | Persistent memory systems for LLM conversations including short-term, long-term, and entity-based memory Use when: conversation memory, remember, memory pers... | conversation, memory | conversation, memory, persistent, llm, conversations, including, short, term, long, entity, remember, persistence |
-| `crewai` | You are an expert in designing collaborative AI agent teams with CrewAI. You think in terms of roles, responsibilities, and delegation. You design clear agen... | crewai | crewai, designing, collaborative, ai, agent, teams, think, terms, roles, responsibilities, delegation, clear |
+| `context-window-management` | Strategies for managing LLM context windows including summarization, trimming, routing, and avoiding context rot | window | window, context, managing, llm, windows, including, summarization, trimming, routing, avoiding, rot |
+| `conversation-memory` | Persistent memory systems for LLM conversations including short-term, long-term, and entity-based memory | conversation, memory | conversation, memory, persistent, llm, conversations, including, short, term, long, entity |
| `crypto-bd-agent` | Production-tested patterns for building AI agents that autonomously discover, evaluate, and acquire token listings for cryptocurrency exchanges. | crypto, bd, agent | crypto, bd, agent, tested, building, ai, agents, autonomously, discover, evaluate, acquire, token |
| `customer-support` | Elite AI-powered customer support specialist mastering conversational AI, automated ticketing, sentiment analysis, and omnichannel support experiences. | customer, support | customer, support, elite, ai, powered, mastering, conversational, automated, ticketing, sentiment, analysis, omnichannel |
| `data-engineering-data-driven-feature` | Build features guided by data insights, A/B testing, and continuous measurement using specialized agents for analysis, implementation, and experimentation. | data, engineering, driven | data, engineering, driven, feature, features, guided, insights, testing, continuous, measurement, specialized, agents |
@@ -328,6 +333,7 @@ Total skills: 1377
| `global-chat-agent-discovery` | Discover and search 18K+ MCP servers and AI agents across 6+ registries using Global Chat's cross-protocol directory and MCP server. | mcp, ai-agents, agent-discovery, agents-txt, a2a, developer-tools | mcp, ai-agents, agent-discovery, agents-txt, a2a, developer-tools, global, chat, agent, discovery, discover, search |
| `google-analytics-automation` | Automate Google Analytics tasks via Rube MCP (Composio): run reports, list accounts/properties, funnels, pivots, key events. Always search tools first for cu... | google, analytics | google, analytics, automation, automate, tasks, via, rube, mcp, composio, run, reports, list |
| `googlesheets-automation` | Automate Google Sheets operations (read, write, format, filter, manage spreadsheets) via Rube MCP (Composio). Read/write data, manage tabs, apply formatting,... | googlesheets | googlesheets, automation, automate, google, sheets, operations, read, write, format, filter, spreadsheets, via |
+| `graphql` | GraphQL gives clients exactly the data they need - no more, no less. One endpoint, typed schema, introspection. But the flexibility that makes it powerful al... | graphql | graphql, gives, clients, exactly, data, no, less, one, endpoint, typed, schema, introspection |
| `hosted-agents-v2-py` | Build hosted agents using Azure AI Projects SDK with ImageBasedHostedAgentDefinition. Use when creating container-based agents in Azure AI Foundry. | hosted, agents, v2, py | hosted, agents, v2, py, azure, ai, sdk, imagebasedhostedagentdefinition, creating, container, foundry |
| `hugging-face-community-evals` | Run local evaluations for Hugging Face Hub models with inspect-ai or lighteval. | hugging, face, community, evals | hugging, face, community, evals, run, local, evaluations, hub, models, inspect, ai, lighteval |
| `hugging-face-datasets` | Create and manage datasets on Hugging Face Hub. Supports initializing repos, defining configs/system prompts, streaming row updates, and SQL-based dataset qu... | hugging, face, datasets | hugging, face, datasets, hub, supports, initializing, repos, defining, configs, prompts, streaming, row |
@@ -339,7 +345,7 @@ Total skills: 1377
| `instagram` | Integracao completa com Instagram via Graph API. Publicacao, analytics, comentarios, DMs, hashtags, agendamento, templates e gestao de contas Business/Creator. | social-media, instagram, graph-api, content | social-media, instagram, graph-api, content, integracao, completa, com, via, graph, api, publicacao, analytics |
| `ios-developer` | Develop native iOS applications with Swift/SwiftUI. Masters iOS 18, SwiftUI, UIKit integration, Core Data, networking, and App Store optimization. | ios | ios, developer, develop, native, applications, swift, swiftui, masters, 18, uikit, integration, core |
| `langchain-architecture` | Master the LangChain framework for building sophisticated LLM applications with agents, chains, memory, and tool integration. | langchain, architecture | langchain, architecture, framework, building, sophisticated, llm, applications, agents, chains, memory, integration |
-| `langgraph` | You are an expert in building production-grade AI agents with LangGraph. You understand that agents need explicit structure - graphs make the flow visible an... | langgraph | langgraph, building, grade, ai, agents, understand, explicit, structure, graphs, flow, visible, debuggable |
+| `langgraph` | Expert in LangGraph - the production-grade framework for building stateful, multi-actor AI applications. Covers graph construction, state management, cycles ... | langgraph | langgraph, grade, framework, building, stateful, multi, actor, ai, applications, covers, graph, construction |
| `libreoffice/base` | Database management, forms, reports, and data operations with LibreOffice Base. | libreoffice/base | libreoffice/base, base, database, forms, reports, data, operations, libreoffice |
| `libreoffice/calc` | Spreadsheet creation, format conversion (ODS/XLSX/CSV), formulas, data automation with LibreOffice Calc. | libreoffice/calc | libreoffice/calc, calc, spreadsheet, creation, format, conversion, ods, xlsx, csv, formulas, data, automation |
| `libreoffice/draw` | Vector graphics and diagram creation, format conversion (ODG/SVG/PDF) with LibreOffice Draw. | libreoffice/draw | libreoffice/draw, draw, vector, graphics, diagram, creation, format, conversion, odg, svg, pdf, libreoffice |
@@ -360,7 +366,7 @@ Total skills: 1377
| `moyu` | Anti-over-engineering guardrail that activates when an AI coding agent expands scope, adds abstractions, or changes files the user did not request. | moyu | moyu, anti, engineering, guardrail, activates, ai, coding, agent, expands, scope, adds, abstractions |
| `n8n-expression-syntax` | Validate n8n expression syntax and fix common errors. Use when writing n8n expressions, using {{}} syntax, accessing $json/$node variables, troubleshooting e... | n8n, expression, syntax | n8n, expression, syntax, validate, fix, common, errors, writing, expressions, accessing, json, node |
| `nanobanana-ppt-skills` | AI-powered PPT generation with document analysis and styled images | nanobanana, ppt, skills | nanobanana, ppt, skills, ai, powered, generation, document, analysis, styled, images |
-| `neon-postgres` | Configure Prisma for Neon with connection pooling. | neon, postgres | neon, postgres, configure, prisma, connection, pooling |
+| `neon-postgres` | Expert patterns for Neon serverless Postgres, branching, connection pooling, and Prisma/Drizzle integration | neon, postgres | neon, postgres, serverless, branching, connection, pooling, prisma, drizzle, integration |
| `nestjs-expert` | You are an expert in Nest.js with deep knowledge of enterprise-grade Node.js application architecture, dependency injection patterns, decorators, middleware,... | nestjs | nestjs, nest, js, deep, knowledge, enterprise, grade, node, application, architecture, dependency, injection |
| `nextjs-best-practices` | Next.js App Router principles. Server Components, data fetching, routing patterns. | nextjs, best, practices | nextjs, best, practices, next, js, app, router, principles, server, components, data, fetching |
| `obsidian-bases` | Create and edit Obsidian Bases (.base files) with views, filters, formulas, and summaries. Use when working with .base files, creating database-like views of... | obsidian, bases | obsidian, bases, edit, base, files, views, filters, formulas, summaries, working, creating, database |
@@ -375,10 +381,10 @@ Total skills: 1377
| `programmatic-seo` | Design and evaluate programmatic SEO strategies for creating SEO-driven pages at scale using templates and structured data. | programmatic, seo | programmatic, seo, evaluate, creating, driven, pages, scale, structured, data |
| `progressive-estimation` | Estimate AI-assisted and hybrid human+agent development work with research-backed PERT statistics and calibration feedback loops | estimation, project-management, pert, sprint-planning, ai-agents | estimation, project-management, pert, sprint-planning, ai-agents, progressive, estimate, ai, assisted, hybrid, human, agent |
| `project-development` | This skill covers the principles for identifying tasks suited to LLM processing, designing effective project architectures, and iterating rapidly using agent... | | development, skill, covers, principles, identifying, tasks, suited, llm, processing, designing, effective, architectures |
-| `prompt-caching` | You're a caching specialist who has reduced LLM costs by 90% through strategic caching. You've implemented systems that cache at multiple levels: prompt pref... | prompt, caching | prompt, caching, re, who, reduced, llm, costs, 90, through, strategic, ve, implemented |
+| `prompt-caching` | Caching strategies for LLM prompts including Anthropic prompt caching, response caching, and CAG (Cache Augmented Generation) | prompt, caching | prompt, caching, llm, prompts, including, anthropic, response, cag, cache, augmented, generation |
| `prompt-engineering-patterns` | Master advanced prompt engineering techniques to maximize LLM performance, reliability, and controllability. | prompt, engineering | prompt, engineering, techniques, maximize, llm, performance, reliability, controllability |
| `pydantic-ai` | Build production-ready AI agents with PydanticAI — type-safe tool use, structured outputs, dependency injection, and multi-model support. | pydantic-ai, ai-agents, llm, openai, anthropic, gemini, tool-use, structured-output, python | pydantic-ai, ai-agents, llm, openai, anthropic, gemini, tool-use, structured-output, python, pydantic, ai, agents |
-| `rag-engineer` | I bridge the gap between raw documents and LLM understanding. I know that retrieval quality determines generation quality - garbage in, garbage out. I obsess... | rag | rag, engineer, bridge, gap, between, raw, documents, llm, understanding, know, retrieval, quality |
+| `rag-engineer` | Expert in building Retrieval-Augmented Generation systems. Masters embedding models, vector databases, chunking strategies, and retrieval optimization for LL... | rag | rag, engineer, building, retrieval, augmented, generation, masters, embedding, models, vector, databases, chunking |
| `rag-implementation` | RAG (Retrieval-Augmented Generation) implementation workflow covering embedding selection, vector database setup, chunking strategies, and retrieval optimiza... | rag | rag, retrieval, augmented, generation, covering, embedding, selection, vector, database, setup, chunking, optimization |
| `react-best-practices` | Comprehensive performance optimization guide for React and Next.js applications, maintained by Vercel. Use when writing new React components or Next.js pages... | react, best, practices | react, best, practices, performance, optimization, next, js, applications, maintained, vercel, writing, new |
| `react-ui-patterns` | Modern React UI patterns for loading states, error handling, and data fetching. Use when building UI components, handling async data, or managing UI states. | react, ui | react, ui, loading, states, error, handling, data, fetching, building, components, async, managing |
@@ -392,7 +398,7 @@ Total skills: 1377
| `scientific-writing` | This is the core skill for the deep research and writing tool—combining AI-driven deep research with well-formatted written outputs. Every document produced ... | scientific, writing | scientific, writing, core, skill, deep, research, combining, ai, driven, well, formatted, written |
| `scikit-learn` | Machine learning in Python with scikit-learn. Use for classification, regression, clustering, model evaluation, and ML pipelines. | scikit, learn | scikit, learn, machine, learning, python, classification, regression, clustering, model, evaluation, ml, pipelines |
| `seek-and-analyze-video` | Seek and analyze video content using Memories.ai Large Visual Memory Model for persistent video intelligence | video, ai, memories, social-media, youtube, tiktok, analysis | video, ai, memories, social-media, youtube, tiktok, analysis, seek, analyze, content, large, visual |
-| `segment-cdp` | Client-side tracking with Analytics.js. Include track, identify, page, and group calls. Anonymous ID persists until identify merges with user. | segment, cdp | segment, cdp, client, side, tracking, analytics, js, include, track, identify, page, group |
+| `segment-cdp` | Expert patterns for Segment Customer Data Platform including Analytics.js, server-side tracking, tracking plans with Protocols, identity resolution, destinat... | segment, cdp | segment, cdp, customer, data, platform, including, analytics, js, server, side, tracking, plans |
| `sendgrid-automation` | Automate SendGrid email delivery workflows including marketing campaigns (Single Sends), contact and list management, sender identity setup, and email analyt... | sendgrid | sendgrid, automation, automate, email, delivery, including, marketing, campaigns, single, sends, contact, list |
| `seo` | Run a broad SEO audit across technical SEO, on-page SEO, schema, sitemaps, content quality, AI search readiness, and GEO. Use as the umbrella skill when the ... | seo | seo, run, broad, audit, technical, page, schema, sitemaps, content, quality, ai, search |
| `seo-aeo-schema-generator` | Generates valid JSON-LD structured data for 10 schema types with rich result eligibility validation and implementation-ready script blocks. Activate when the... | seo, aeo, schema, generator | seo, aeo, schema, generator, generates, valid, json, ld, structured, data, 10, types |
@@ -416,7 +422,9 @@ Total skills: 1377
| `tanstack-query-expert` | Expert in TanStack Query (React Query) — asynchronous state management. Covers data fetching, stale time configuration, mutations, optimistic updates, and Ne... | tanstack, query | tanstack, query, react, asynchronous, state, covers, data, fetching, stale, time, configuration, mutations |
| `team-collaboration-standup-notes` | You are an expert team communication specialist focused on async-first standup practices, AI-assisted note generation from commit history, and effective remo... | team, collaboration, standup, notes | team, collaboration, standup, notes, communication, async, first, ai, assisted, note, generation, commit |
| `technical-change-tracker` | Track code changes with structured JSON records, state machine enforcement, and AI session handoff for bot continuity | change-tracking, session-handoff, documentation, accessibility, state-machine | change-tracking, session-handoff, documentation, accessibility, state-machine, technical, change, tracker, track, code, changes, structured |
+| `telegram-bot-builder` | Expert in building Telegram bots that solve real problems - from simple automation to complex AI-powered bots. Covers bot architecture, the Telegram Bot API,... | telegram, bot, builder | telegram, bot, builder, building, bots, solve, real, problems, simple, automation, complex, ai |
| `travel-health-analyzer` | 分析旅行健康数据、评估目的地健康风险、提供疫苗接种建议、生成多语言紧急医疗信息卡片。支持WHO/CDC数据集成的专业级旅行健康风险评估。 | travel, health, analyzer | travel, health, analyzer, who, cdc |
+| `trigger-dev` | Trigger.dev expert for background jobs, AI workflows, and reliable async execution with excellent developer experience and TypeScript-first design. | trigger, dev | trigger, dev, background, jobs, ai, reliable, async, execution, excellent, developer, experience, typescript |
| `uniprot-database` | Direct REST API access to UniProt. Protein searches, FASTA retrieval, ID mapping, Swiss-Prot/TrEMBL. For Python workflows with multiple databases, prefer bio... | uniprot, database | uniprot, database, direct, rest, api, access, protein, searches, fasta, retrieval, id, mapping |
| `unity-ecs-patterns` | Production patterns for Unity's Data-Oriented Technology Stack (DOTS) including Entity Component System, Job System, and Burst Compiler. | unity, ecs | unity, ecs, data, oriented, technology, stack, dots, including, entity, component, job, burst |
| `uxui-principles` | Evaluate interfaces against 168 research-backed UX/UI principles, detect antipatterns, and inject UX context into AI coding sessions. | ux, ui, design, evaluation, principles, antipatterns, accessibility | ux, ui, design, evaluation, principles, antipatterns, accessibility, uxui, evaluate, interfaces, against, 168 |
@@ -427,8 +435,8 @@ Total skills: 1377
| `vibe-code-auditor` | Audit rapidly generated or AI-produced code for structural flaws, fragility, and production risks. | vibe, code, auditor | vibe, code, auditor, audit, rapidly, generated, ai, produced, structural, flaws, fragility, risks |
| `videodb-skills` | Upload, stream, search, edit, transcribe, and generate AI video and audio using the VideoDB SDK. | video, editing, transcription, subtitles, search, streaming, ai-generation, media | video, editing, transcription, subtitles, search, streaming, ai-generation, media, videodb, skills, upload, stream |
| `vizcom` | AI-powered product design tool for transforming sketches into full-fidelity 3D renders. | vizcom | vizcom, ai, powered, product, transforming, sketches, full, fidelity, 3d, renders |
-| `voice-agents` | You are a voice AI architect who has shipped production voice agents handling millions of calls. You understand the physics of latency - every component adds... | voice, agents | voice, agents, ai, architect, who, shipped, handling, millions, calls, understand, physics, latency |
-| `voice-ai-development` | You are an expert in building real-time voice applications. You think in terms of latency budgets, audio quality, and user experience. You know that voice ap... | voice, ai | voice, ai, development, building, real, time, applications, think, terms, latency, budgets, audio |
+| `voice-agents` | Voice agents represent the frontier of AI interaction - humans speaking naturally with AI systems. | voice, agents | voice, agents, represent, frontier, ai, interaction, humans, speaking, naturally |
+| `voice-ai-development` | Expert in building voice AI applications - from real-time voice agents to voice-enabled apps. Covers OpenAI Realtime API, Vapi for voice agents, Deepgram for... | voice, ai | voice, ai, development, building, applications, real, time, agents, enabled, apps, covers, openai |
| `voice-ai-engine-development` | Build real-time conversational AI voice engines using async worker pipelines, streaming transcription, LLM agents, and TTS synthesis with interrupt handling ... | voice, ai, engine | voice, ai, engine, development, real, time, conversational, engines, async, worker, pipelines, streaming |
| `web-artifacts-builder` | To build powerful frontend claude.ai artifacts, follow these steps: | web, artifacts, builder | web, artifacts, builder, powerful, frontend, claude, ai, follow, these, steps |
| `wellally-tech` | Integrate multiple digital health data sources, connect to [WellAlly.tech](https://www.wellally.tech/) knowledge base, providing data import and knowledge re... | wellally, tech | wellally, tech, integrate, multiple, digital, health, data, sources, connect, https, www, knowledge |
@@ -437,13 +445,13 @@ Total skills: 1377
| `yann-lecun` | Agente que simula Yann LeCun — inventor das Convolutional Neural Networks, Chief AI Scientist da Meta, Prêmio Turing 2018. | persona, cnn, meta, ai-safety-critic, open-source | persona, cnn, meta, ai-safety-critic, open-source, yann, lecun, agente, que, simula, inventor, das |
| `yes-md` | 6-layer AI governance: safety gates, evidence-based debugging, anti-slack detection, and machine-enforced hooks. Makes AI safe, thorough, and honest. | yes, md | yes, md, layer, ai, governance, safety, gates, evidence, debugging, anti, slack, detection |
| `youtube-automation` | Automate YouTube tasks via Rube MCP (Composio): upload videos, manage playlists, search content, get analytics, and handle comments. Always search tools firs... | youtube | youtube, automation, automate, tasks, via, rube, mcp, composio, upload, videos, playlists, search |
-| `zapier-make-patterns` | You are a no-code automation architect who has built thousands of Zaps and Scenarios for businesses of all sizes. You've seen automations that save companies... | zapier, make | zapier, make, no, code, automation, architect, who, built, thousands, zaps, scenarios, businesses |
-## development (186)
+## development (190)
| Skill | Description | Tags | Triggers |
| --- | --- | --- | --- |
-| `algolia-search` | Expert patterns for Algolia search implementation, indexing strategies, React InstantSearch, and relevance tuning Use when: adding search to, algolia, instan... | algolia, search | algolia, search, indexing, react, instantsearch, relevance, tuning, adding, api, functionality |
+| `3d-web-experience` | Expert in building 3D experiences for the web - Three.js, React Three Fiber, Spline, WebGL, and interactive 3D scenes. Covers product configurators, 3D portf... | 3d, web, experience | 3d, web, experience, building, experiences, three, js, react, fiber, spline, webgl, interactive |
+| `algolia-search` | Expert patterns for Algolia search implementation, indexing strategies, React InstantSearch, and relevance tuning | algolia, search | algolia, search, indexing, react, instantsearch, relevance, tuning |
| `android-jetpack-compose-expert` | Expert guidance for building modern Android UIs with Jetpack Compose, covering state management, navigation, performance, and Material Design 3. | android, jetpack, compose | android, jetpack, compose, guidance, building, uis, covering, state, navigation, performance, material |
| `android_ui_verification` | Automated end-to-end UI testing and verification on an Android Emulator using ADB. | android_ui_verification | android_ui_verification, android, ui, verification, automated, testing, emulator, adb |
| `animejs-animation` | Advanced JavaScript animation library skill for creating complex, high-performance web animations. | animejs, animation | animejs, animation, javascript, library, skill, creating, complex, high, performance, web, animations |
@@ -467,6 +475,7 @@ Total skills: 1377
| `azure-eventgrid-py` | Azure Event Grid SDK for Python. Use for publishing events, handling CloudEvents, and event-driven architectures. | azure, eventgrid, py | azure, eventgrid, py, event, grid, sdk, python, publishing, events, handling, cloudevents, driven |
| `azure-eventhub-dotnet` | Azure Event Hubs SDK for .NET. | azure, eventhub, dotnet | azure, eventhub, dotnet, event, hubs, sdk, net |
| `azure-eventhub-py` | Azure Event Hubs SDK for Python streaming. Use for high-throughput event ingestion, producers, consumers, and checkpointing. | azure, eventhub, py | azure, eventhub, py, event, hubs, sdk, python, streaming, high, throughput, ingestion, producers |
+| `azure-functions` | Expert patterns for Azure Functions development including isolated worker model, Durable Functions orchestration, cold start optimization, and production pat... | azure, functions | azure, functions, development, including, isolated, worker, model, durable, orchestration, cold, start, optimization |
| `azure-identity-java` | Authenticate Java applications with Azure services using Microsoft Entra ID (Azure AD). | azure, identity, java | azure, identity, java, authenticate, applications, microsoft, entra, id, ad |
| `azure-identity-rust` | Azure Identity SDK for Rust authentication. Use for DeveloperToolsCredential, ManagedIdentityCredential, ClientSecretCredential, and token-based authentication. | azure, identity, rust | azure, identity, rust, sdk, authentication, developertoolscredential, managedidentitycredential, clientsecretcredential, token |
| `azure-keyvault-certificates-rust` | Azure Key Vault Certificates SDK for Rust. Use for creating, importing, and managing certificates. | azure, keyvault, certificates, rust | azure, keyvault, certificates, rust, key, vault, sdk, creating, importing, managing |
@@ -497,7 +506,7 @@ Total skills: 1377
| `backend-architect` | Expert backend architect specializing in scalable API design, microservices architecture, and distributed systems. | backend | backend, architect, specializing, scalable, api, microservices, architecture, distributed |
| `baseline-ui` | Validates animation durations, enforces typography scale, checks component accessibility, and prevents layout anti-patterns in Tailwind CSS projects. Use whe... | baseline, ui | baseline, ui, validates, animation, durations, enforces, typography, scale, checks, component, accessibility, prevents |
| `bevy-ecs-expert` | Master Bevy's Entity Component System (ECS) in Rust, covering Systems, Queries, Resources, and parallel scheduling. | bevy, ecs | bevy, ecs, entity, component, rust, covering, queries, resources, parallel, scheduling |
-| `bullmq-specialist` | BullMQ expert for Redis-backed job queues, background processing, and reliable async execution in Node.js/TypeScript applications. Use when: bullmq, bull que... | bullmq | bullmq, redis, backed, job, queues, background, processing, reliable, async, execution, node, js |
+| `bullmq-specialist` | BullMQ expert for Redis-backed job queues, background processing, and reliable async execution in Node.js/TypeScript applications. | bullmq | bullmq, redis, backed, job, queues, background, processing, reliable, async, execution, node, js |
| `bun-development` | Fast, modern JavaScript/TypeScript development with the Bun runtime, inspired by [oven-sh/bun](https://github.com/oven-sh/bun). | bun | bun, development, fast, javascript, typescript, runtime, inspired, oven, sh, https, github, com |
| `cc-skill-coding-standards` | Universal coding standards, best practices, and patterns for TypeScript, JavaScript, React, and Node.js development. | cc, skill, coding, standards | cc, skill, coding, standards, universal, typescript, javascript, react, node, js, development |
| `cc-skill-frontend-patterns` | Frontend development patterns for React, Next.js, state management, performance optimization, and UI best practices. | cc, skill, frontend | cc, skill, frontend, development, react, next, js, state, performance, optimization, ui |
@@ -544,6 +553,7 @@ Total skills: 1377
| `go-rod-master` | Comprehensive guide for browser automation and web scraping with go-rod (Chrome DevTools Protocol) including stealth anti-bot-detection patterns. | go, rod, master | go, rod, master, browser, automation, web, scraping, chrome, devtools, protocol, including, stealth |
| `golang-pro` | Master Go 1.21+ with modern patterns, advanced concurrency, performance optimization, and production-ready microservices. | golang | golang, pro, go, 21, concurrency, performance, optimization, microservices |
| `hono` | Build ultra-fast web APIs and full-stack apps with Hono — runs on Cloudflare Workers, Deno, Bun, Node.js, and any WinterCG-compatible runtime. | hono, edge, cloudflare-workers, bun, deno, api, typescript, web-standards | hono, edge, cloudflare-workers, bun, deno, api, typescript, web-standards, ultra, fast, web, apis |
+| `hubspot-integration` | Expert patterns for HubSpot CRM integration including OAuth authentication, CRM objects, associations, batch operations, webhooks, and custom objects. Covers... | hubspot, integration | hubspot, integration, crm, including, oauth, authentication, objects, associations, batch, operations, webhooks, custom |
| `hugging-face-dataset-viewer` | Query Hugging Face datasets through the Dataset Viewer API for splits, rows, search, filters, and parquet links. | hugging, face, dataset, viewer | hugging, face, dataset, viewer, query, datasets, through, api, splits, rows, search, filters |
| `hugging-face-evaluation` | Add and manage evaluation results in Hugging Face model cards. Supports extracting eval tables from README content, importing scores from Artificial Analysis... | hugging, face, evaluation | hugging, face, evaluation, add, results, model, cards, supports, extracting, eval, tables, readme |
| `hugging-face-gradio` | Build or edit Gradio apps, layouts, components, and chat interfaces in Python. | hugging, face, gradio | hugging, face, gradio, edit, apps, layouts, components, chat, interfaces, python |
@@ -561,7 +571,6 @@ Total skills: 1377
| `makepad-skills` | Makepad UI development skills for Rust apps: setup, patterns, shaders, packaging, and troubleshooting. | makepad, skills | makepad, skills, ui, development, rust, apps, setup, shaders, packaging, troubleshooting |
| `matplotlib` | Matplotlib is Python's foundational visualization library for creating static, animated, and interactive plots. | matplotlib | matplotlib, python, foundational, visualization, library, creating, static, animated, interactive, plots |
| `mcp-builder-ms` | Use this skill when building MCP servers to integrate external APIs or services, whether in Python (FastMCP) or Node/TypeScript (MCP SDK). | mcp, builder, ms | mcp, builder, ms, skill, building, servers, integrate, external, apis, whether, python, fastmcp |
-| `micro-saas-launcher` | You ship fast and iterate. You know the difference between a side project and a business. You've seen what works in the indie hacker community. You help peop... | micro, saas, launcher | micro, saas, launcher, ship, fast, iterate, know, difference, between, side, business, ve |
| `microsoft-azure-webjobs-extensions-authentication-events-dotnet` | Microsoft Entra Authentication Events SDK for .NET. Azure Functions triggers for custom authentication extensions. | microsoft, azure, webjobs, extensions, authentication, events, dotnet | microsoft, azure, webjobs, extensions, authentication, events, dotnet, entra, sdk, net, functions, triggers |
| `mobile-design` | (Mobile-First · Touch-First · Platform-Respectful) | mobile | mobile, first, touch, platform, respectful |
| `mobile-developer` | Develop React Native, Flutter, or native mobile apps with modern architecture patterns. Masters cross-platform development, native integrations, offline sync... | mobile | mobile, developer, develop, react, native, flutter, apps, architecture, masters, cross, platform, development |
@@ -604,11 +613,11 @@ Total skills: 1377
| `ruby-pro` | Write idiomatic Ruby code with metaprogramming, Rails patterns, and performance optimization. Specializes in Ruby on Rails, gem development, and testing fram... | ruby | ruby, pro, write, idiomatic, code, metaprogramming, rails, performance, optimization, specializes, gem, development |
| `rust-async-patterns` | Master Rust async programming with Tokio, async traits, error handling, and concurrent patterns. Use when building async Rust applications, implementing conc... | rust, async | rust, async, programming, tokio, traits, error, handling, concurrent, building, applications, implementing, debugging |
| `rust-pro` | Master Rust 1.75+ with modern async patterns, advanced type system features, and production-ready systems programming. | rust | rust, pro, 75, async, type, features, programming |
-| `scroll-experience` | You see scrolling as a narrative device, not just navigation. You create moments of delight as users scroll. You know when to use subtle animations and when ... | scroll, experience | scroll, experience, see, scrolling, narrative, device, just, navigation, moments, delight, users, know |
| `seaborn` | Seaborn is a Python visualization library for creating publication-quality statistical graphics. Use this skill for dataset-oriented plotting, multivariate a... | seaborn | seaborn, python, visualization, library, creating, publication, quality, statistical, graphics, skill, dataset, oriented |
| `senior-frontend` | Frontend development skill for React, Next.js, TypeScript, and Tailwind CSS applications. Use when building React components, optimizing Next.js performance,... | senior, frontend | senior, frontend, development, skill, react, next, js, typescript, tailwind, css, applications, building |
-| `shopify-apps` | Modern Shopify app template with React Router | shopify, apps | shopify, apps, app, react, router |
+| `shopify-apps` | Expert patterns for Shopify app development including Remix/React Router apps, embedded apps with App Bridge, webhook handling, GraphQL Admin API, Polaris co... | shopify, apps | shopify, apps, app, development, including, remix, react, router, embedded, bridge, webhook, handling |
| `shopify-development` | Build Shopify apps, extensions, themes using GraphQL Admin API, Shopify CLI, Polaris UI, and Liquid. | shopify | shopify, development, apps, extensions, themes, graphql, admin, api, cli, polaris, ui, liquid |
+| `slack-bot-builder` | Build Slack apps using the Bolt framework across Python, JavaScript, and Java. Covers Block Kit for rich UIs, interactive components, slash commands, event h... | slack, bot, builder | slack, bot, builder, apps, bolt, framework, python, javascript, java, covers, block, kit |
| `sred-work-summary` | Go back through the previous year of work and create a Notion doc that groups relevant links into projects that can then be documented as SRED projects. | sred, work, summary | sred, work, summary, go, back, through, previous, year, notion, doc, groups, relevant |
| `statsmodels` | Statsmodels is Python's premier library for statistical modeling, providing tools for estimation, inference, and diagnostics across a wide range of statistic... | statsmodels | statsmodels, python, premier, library, statistical, modeling, providing, estimation, inference, diagnostics, wide, range |
| `sveltekit` | Build full-stack web applications with SvelteKit — file-based routing, SSR, SSG, API routes, and form actions in one framework. | svelte, sveltekit, fullstack, ssr, ssg, typescript | svelte, sveltekit, fullstack, ssr, ssg, typescript, full, stack, web, applications, file, routing |
@@ -617,31 +626,30 @@ Total skills: 1377
| `systems-programming-rust-project` | You are a Rust project architecture expert specializing in scaffolding production-ready Rust applications. Generate complete project structures with cargo to... | programming, rust | programming, rust, architecture, specializing, scaffolding, applications, generate, complete, structures, cargo, tooling, proper |
| `tavily-web` | Web search, content extraction, crawling, and research capabilities using Tavily API. Use when you need to search the web for current information, extracting... | tavily, web | tavily, web, search, content, extraction, crawling, research, capabilities, api, current, information, extracting |
| `telegram` | Integracao completa com Telegram Bot API. Setup com BotFather, mensagens, webhooks, inline keyboards, grupos, canais. Boilerplates Node.js e Python. | messaging, telegram, bots, webhooks | messaging, telegram, bots, webhooks, integracao, completa, com, bot, api, setup, botfather, mensagens |
+| `telegram-mini-app` | Expert in building Telegram Mini Apps (TWA) - web apps that run inside Telegram with native-like experience. Covers the TON ecosystem, Telegram Web App API, ... | telegram, mini, app | telegram, mini, app, building, apps, twa, web, run, inside, native, like, experience |
| `temporal-python-testing` | Comprehensive testing approaches for Temporal workflows using pytest, progressive disclosure resources for specific testing scenarios. | temporal, python | temporal, python, testing, approaches, pytest, progressive, disclosure, resources, specific, scenarios |
| `transformers-js` | Run Hugging Face models in JavaScript or TypeScript with Transformers.js in Node.js or the browser. | transformers, js | transformers, js, run, hugging, face, models, javascript, typescript, node, browser |
-| `trigger-dev` | You are a Trigger.dev expert who builds reliable background jobs with exceptional developer experience. You understand that Trigger.dev bridges the gap betwe... | trigger, dev | trigger, dev, who, reliable, background, jobs, exceptional, developer, experience, understand, bridges, gap |
| `trpc-fullstack` | Build end-to-end type-safe APIs with tRPC — routers, procedures, middleware, subscriptions, and Next.js/React integration patterns. | typescript, trpc, api, fullstack, nextjs, react, type-safety | typescript, trpc, api, fullstack, nextjs, react, type-safety, type, safe, apis, routers, procedures |
+| `twilio-communications` | Build communication features with Twilio: SMS messaging, voice calls, WhatsApp Business API, and user verification (2FA). Covers the full spectrum from simpl... | twilio, communications | twilio, communications, communication, features, sms, messaging, voice, calls, whatsapp, business, api, user |
| `typescript-advanced-types` | Comprehensive guidance for mastering TypeScript's advanced type system including generics, conditional types, mapped types, template literal types, and utili... | typescript, advanced, types | typescript, advanced, types, guidance, mastering, type, including, generics, conditional, mapped, literal, utility |
| `typescript-expert` | TypeScript and JavaScript expert with deep knowledge of type-level programming, performance optimization, monorepo management, migration strategies, and mode... | typescript | typescript, javascript, deep, knowledge, type, level, programming, performance, optimization, monorepo, migration, tooling |
| `typescript-pro` | Master TypeScript with advanced types, generics, and strict type safety. Handles complex type systems, decorators, and enterprise-grade patterns. | typescript | typescript, pro, types, generics, strict, type, safety, complex, decorators, enterprise, grade |
| `ui-ux-pro-max` | Comprehensive design guide for web and mobile applications. Use when designing new UI components or pages, choosing color palettes and typography, or reviewi... | ui, ux, max | ui, ux, max, pro, web, mobile, applications, designing, new, components, pages, choosing |
| `uv-package-manager` | Comprehensive guide to using uv, an extremely fast Python package installer and resolver written in Rust, for modern Python project management and dependency... | uv, package, manager | uv, package, manager, extremely, fast, python, installer, resolver, written, rust, dependency |
+| `viral-generator-builder` | Expert in building shareable generator tools that go viral - name generators, quiz makers, avatar creators, personality tests, and calculator tools. Covers t... | viral, generator, builder | viral, generator, builder, building, shareable, go, name, generators, quiz, makers, avatar, creators |
| `webapp-testing` | To test local web applications, write native Python Playwright scripts. | webapp | webapp, testing, test, local, web, applications, write, native, python, playwright, scripts |
| `zod-validation-expert` | Expert in Zod — TypeScript-first schema validation. Covers parsing, custom errors, refinements, type inference, and integration with React Hook Form, Next.js... | zod, validation | zod, validation, typescript, first, schema, covers, parsing, custom, errors, refinements, type, inference |
| `zustand-store-ts` | Create Zustand stores following established patterns with proper TypeScript types and middleware. | zustand, store, ts | zustand, store, ts, stores, following, established, proper, typescript, types, middleware |
-## general (346)
+## general (336)
| Skill | Description | Tags | Triggers |
| --- | --- | --- | --- |
| `00-andruia-consultant` | Arquitecto de Soluciones Principal y Consultor Tecnológico de Andru.ia. Diagnostica y traza la hoja de ruta óptima para proyectos de IA en español. | 00, andruia, consultant | 00, andruia, consultant, arquitecto, de, soluciones, principal, consultor, tecnol, gico, andru, ia |
| `10-andruia-skill-smith` | Ingeniero de Sistemas de Andru.ia. Diseña, redacta y despliega nuevas habilidades (skills) dentro del repositorio siguiendo el Estándar de Diamante. | 10, andruia, skill, smith | 10, andruia, skill, smith, ingeniero, de, sistemas, andru, ia, dise, redacta, despliega |
| `20-andruia-niche-intelligence` | Estratega de Inteligencia de Dominio de Andru.ia. Analiza el nicho específico de un proyecto para inyectar conocimientos, regulaciones y estándares únicos de... | 20, andruia, niche, intelligence | 20, andruia, niche, intelligence, estratega, de, inteligencia, dominio, andru, ia, analiza, el |
-| `3d-web-experience` | You bring the third dimension to the web. You know when 3D enhances and when it's just showing off. You balance visual impact with performance. You make 3D a... | 3d, web, experience | 3d, web, experience, bring, third, dimension, know, enhances, just, showing, off, balance |
| `address-github-comments` | Use when you need to address review or issue comments on an open GitHub Pull Request using the gh CLI. | address, github, comments | address, github, comments, review, issue, open, pull, request, gh, cli |
| `agent-manager-skill` | Manage multiple local CLI agents via tmux sessions (start/stop/monitor/assign) with cron-friendly scheduling. | agent, manager, skill | agent, manager, skill, multiple, local, cli, agents, via, tmux, sessions, start, stop |
-| `agent-memory-systems` | You are a cognitive architect who understands that memory makes agents intelligent. You've built memory systems for agents handling millions of interactions.... | agent, memory | agent, memory, cognitive, architect, who, understands, makes, agents, intelligent, ve, built, handling |
-| `agent-tool-builder` | You are an expert in the interface between LLMs and the outside world. You've seen tools that work beautifully and tools that cause agents to hallucinate, lo... | agent, builder | agent, builder, interface, between, llms, outside, world, ve, seen, work, beautifully, cause |
| `agents-md` | This skill should be used when the user asks to "create AGENTS.md", "update AGENTS.md", "maintain agent docs", "set up CLAUDE.md", or needs to keep agent ins... | agents, md | agents, md, skill, should, used, user, asks, update, maintain, agent, docs, set |
| `algorithmic-art` | Algorithmic philosophies are computational aesthetic movements that are then expressed through code. Output .md files (philosophy), .html files (interactive ... | algorithmic, art | algorithmic, art, philosophies, computational, aesthetic, movements, then, expressed, through, code, output, md |
| `amazon-alexa` | Integracao completa com Amazon Alexa para criar skills de voz inteligentes, transformar Alexa em assistente com Claude como cerebro (projeto Auri) e integrar... | voice, alexa, aws, smart-home, iot | voice, alexa, aws, smart-home, iot, amazon, integracao, completa, com, para, criar, skills |
@@ -661,7 +669,6 @@ Total skills: 1377
| `awareness-stage-mapper` | One sentence - what this skill does and when to invoke it | awareness, stage, mapper | awareness, stage, mapper, one, sentence, what, skill, does, invoke |
| `aws-cost-cleanup` | Automated cleanup of unused AWS resources to reduce costs | aws, cost, cleanup | aws, cost, cleanup, automated, unused, resources, reduce, costs |
| `aws-cost-optimizer` | Comprehensive AWS cost analysis and optimization recommendations using AWS CLI and Cost Explorer | aws, cost, optimizer | aws, cost, optimizer, analysis, optimization, recommendations, cli, explorer |
-| `aws-serverless` | Proper Lambda function structure with error handling | aws, serverless | aws, serverless, proper, lambda, function, structure, error, handling |
| `azure-appconfiguration-ts` | Centralized configuration management with feature flags and dynamic refresh. | azure, appconfiguration, ts | azure, appconfiguration, ts, centralized, configuration, feature, flags, dynamic, refresh |
| `azure-identity-ts` | Authenticate to Azure services with various credential types. | azure, identity, ts | azure, identity, ts, authenticate, various, credential, types |
| `azure-servicebus-ts` | Enterprise messaging with queues, topics, and subscriptions. | azure, servicebus, ts | azure, servicebus, ts, enterprise, messaging, queues, topics, subscriptions |
@@ -715,6 +722,7 @@ Total skills: 1377
| `cpp-pro` | Write idiomatic C++ code with modern features, RAII, smart pointers, and STL algorithms. Handles templates, move semantics, and performance optimization. | cpp | cpp, pro, write, idiomatic, code, features, raii, smart, pointers, stl, algorithms, move |
| `create-branch` | Create a git branch following Sentry naming conventions. Use when asked to "create a branch", "new branch", "start a branch", "make a branch", "switch to a n... | create, branch | create, branch, git, following, sentry, naming, conventions, asked, new, start, switch, starting |
| `create-issue-gate` | Use when starting a new implementation task and an issue must be created with strict acceptance criteria gating before execution. | create, issue, gate | create, issue, gate, starting, new, task, must, created, strict, acceptance, criteria, gating |
+| `crewai` | Expert in CrewAI - the leading role-based multi-agent framework used by 60% of Fortune 500 companies. | crewai | crewai, leading, role, multi, agent, framework, used, 60, fortune, 500, companies |
| `daily` | Documentation and capabilities reference for Daily | daily | daily, documentation, capabilities, reference |
| `daily-news-report` | Scrapes content based on a preset URL list, filters high-quality technical information, and generates daily Markdown reports. | daily, news, report | daily, news, report, scrapes, content, preset, url, list, filters, high, quality, technical |
| `debug-buttercup` | All pods run in namespace crs. Use when pods in the crs namespace are in CrashLoopBackOff, OOMKilled, or restarting, multiple services restart simultaneously... | debug, buttercup | debug, buttercup, all, pods, run, namespace, crs, crashloopbackoff, oomkilled, restarting, multiple, restart |
@@ -729,7 +737,6 @@ Total skills: 1377
| `docx-official` | A user may ask you to create, edit, or analyze the contents of a .docx file. A .docx file is essentially a ZIP archive containing XML files and other resourc... | docx, official | docx, official, user, may, ask, edit, analyze, contents, file, essentially, zip, archive |
| `dx-optimizer` | Developer Experience specialist. Improves tooling, setup, and workflows. Use PROACTIVELY when setting up new projects, after team feedback, or when developme... | dx, optimizer | dx, optimizer, developer, experience, improves, tooling, setup, proactively, setting, up, new, after |
| `elon-musk` | Agente que simula Elon Musk com profundidade psicologica e comunicacional de alta fidelidade. Ativado para: "fale como Elon", "simule Elon Musk", "o que Elon... | persona, first-principles, innovation, strategy | persona, first-principles, innovation, strategy, elon, musk, agente, que, simula, com, profundidade, psicologica |
-| `email-systems` | You are an email systems engineer who has maintained 99.9% deliverability across millions of emails. You've debugged SPF/DKIM/DMARC, dealt with blacklists, a... | email | email, engineer, who, maintained, 99, deliverability, millions, emails, ve, debugged, spf, dkim |
| `emergency-card` | 生成紧急情况下快速访问的医疗信息摘要卡片。当用户需要旅行、就诊准备、紧急情况或询问"紧急信息"、"医疗卡片"、"急救信息"时使用此技能。提取关键信息(过敏、用药、急症、植入物),支持多格式输出(JSON、文本、二维码),用于急救或快速就医。 | emergency, card | emergency, card, json |
| `emotional-arc-designer` | One sentence - what this skill does and when to invoke it | emotional, arc, designer | emotional, arc, designer, one, sentence, what, skill, does, invoke |
| `energy-procurement` | Codified expertise for electricity and gas procurement, tariff optimisation, demand charge management, renewable PPA evaluation, and multi-facility energy co... | energy, procurement | energy, procurement, codified, expertise, electricity, gas, tariff, optimisation, demand, charge, renewable, ppa |
@@ -774,7 +781,6 @@ Total skills: 1377
| `github-issue-creator` | Turn error logs, screenshots, voice notes, and rough bug reports into crisp, developer-ready GitHub issues with repro steps, impact, and evidence. | github, issue, creator | github, issue, creator, turn, error, logs, screenshots, voice, notes, rough, bug, reports |
| `goal-analyzer` | 分析健康目标数据、识别目标模式、评估目标进度,并提供个性化目标管理建议。支持与营养、运动、睡眠等健康数据的关联分析。 | goal, analyzer | goal, analyzer |
| `godot-4-migration` | Specialized guide for migrating Godot 3.x projects to Godot 4 (GDScript 2.0), covering syntax changes, Tweens, and exports. | godot, 4, migration | godot, 4, migration, specialized, migrating, gdscript, covering, syntax, changes, tweens, exports |
-| `graphql` | You're a developer who has built GraphQL APIs at scale. You've seen the N+1 query problem bring down production servers. You've watched clients craft deeply ... | graphql | graphql, re, developer, who, built, apis, scale, ve, seen, query, problem, bring |
| `haskell-pro` | Expert Haskell engineer specializing in advanced type systems, pure | haskell | haskell, pro, engineer, specializing, type, pure |
| `headline-psychologist` | One sentence - what this skill does and when to invoke it | headline, psychologist | headline, psychologist, one, sentence, what, skill, does, invoke |
| `health-trend-analyzer` | 分析一段时间内健康数据的趋势和模式。关联药物、症状、生命体征、化验结果和其他健康指标的变化。识别令人担忧的趋势、改善情况,并提供数据驱动的洞察。当用户询问健康趋势、模式、随时间的变化或"我的健康状况有什么变化?"时使用。支持多维度分析(体重/BMI、症状、药物依从性、化验结果、情绪睡眠),相关性分析,变化检测,以... | health, trend, analyzer | health, trend, analyzer, bmi, html, echarts |
@@ -793,7 +799,6 @@ Total skills: 1377
| `hig-project-context` | Create or update a shared Apple design context document that other HIG skills use to tailor guidance. | hig | hig, context, update, shared, apple, document, other, skills, tailor, guidance |
| `hig-technologies` | Check for .claude/apple-design-context.md before asking questions. Use existing context and only ask for information not already covered. | hig, technologies | hig, technologies, check, claude, apple, context, md, before, asking, questions, existing, ask |
| `hosted-agents` | Build background agents in sandboxed environments. Use for hosted coding agents, sandboxed VMs, Modal sandboxes, and remote coding environments. | hosted, agents | hosted, agents, background, sandboxed, environments, coding, vms, modal, sandboxes, remote |
-| `hubspot-integration` | Authentication for single-account integrations | hubspot, integration | hubspot, integration, authentication, single, account, integrations |
| `hugging-face-cli` | Use the Hugging Face Hub CLI (`hf`) to download, upload, and manage models, datasets, and Spaces. | hugging, face, cli | hugging, face, cli, hub, hf, download, upload, models, datasets, spaces |
| `hugging-face-model-trainer` | Train or fine-tune TRL language models on Hugging Face Jobs, including SFT, DPO, GRPO, and GGUF export. | hugging, face, model, trainer | hugging, face, model, trainer, train, fine, tune, trl, language, models, jobs, including |
| `hugging-face-paper-publisher` | Publish and manage research papers on Hugging Face Hub. Supports creating paper pages, linking papers to models/datasets, claiming authorship, and generating... | hugging, face, paper, publisher | hugging, face, paper, publisher, publish, research, papers, hub, supports, creating, pages, linking |
@@ -801,8 +806,7 @@ Total skills: 1377
| `identity-mirror` | One sentence - what this skill does and when to invoke it | identity, mirror | identity, mirror, one, sentence, what, skill, does, invoke |
| `ilya-sutskever` | Agente que simula Ilya Sutskever — co-fundador da OpenAI, ex-Chief Scientist, fundador da SSI. Use quando quiser perspectivas sobre: AGI safety-first, consci... | persona, agi, safety, scaling-laws, openai | persona, agi, safety, scaling-laws, openai, ilya, sutskever, agente, que, simula, co, fundador |
| `infinite-gratitude` | Multi-agent research skill for parallel research execution (10 agents, battle-tested with real case studies). | infinite, gratitude | infinite, gratitude, multi, agent, research, skill, parallel, execution, 10, agents, battle, tested |
-| `inngest` | You are an Inngest expert who builds reliable background processing without managing infrastructure. You understand that serverless doesn't mean you can't ha... | inngest | inngest, who, reliable, background, processing, without, managing, infrastructure, understand, serverless, doesn, mean |
-| `interactive-portfolio` | You know a portfolio isn't a resume - it's a first impression that needs to convert. You balance creativity with usability. You understand that hiring manage... | interactive, portfolio | interactive, portfolio, know, isn, resume, first, impression, convert, balance, creativity, usability, understand |
+| `interactive-portfolio` | Expert in building portfolios that actually land jobs and clients - not just showing work, but creating memorable experiences. Covers developer portfolios, d... | interactive, portfolio | interactive, portfolio, building, portfolios, actually, land, jobs, clients, just, showing, work, creating |
| `internal-comms-anthropic` | To write internal communications, use this skill for: | internal, comms, anthropic | internal, comms, anthropic, write, communications, skill |
| `internal-comms-community` | To write internal communications, use this skill for: | internal, comms, community | internal, comms, community, write, communications, skill |
| `interview-coach` | Full job search coaching system — JD decoding, resume, storybank, mock interviews, transcript analysis, comp negotiation. 23 commands, persistent state. | interview, job-search, coaching, career, storybank, negotiation | interview, job-search, coaching, career, storybank, negotiation, coach, full, job, search, jd, decoding |
@@ -838,6 +842,7 @@ Total skills: 1377
| `memory-systems` | Design short-term, long-term, and graph-based memory architectures. Use when building agents that must persist across sessions, needing to maintain entity co... | memory | memory, short, term, long, graph, architectures, building, agents, must, persist, sessions, needing |
| `mental-health-analyzer` | 分析心理健康数据、识别心理模式、评估心理健康状况、提供个性化心理健康建议。支持与睡眠、运动、营养等其他健康数据的关联分析。 | mental, health, analyzer | mental, health, analyzer |
| `mermaid-expert` | Create Mermaid diagrams for flowcharts, sequences, ERDs, and architectures. Masters syntax for all diagram types and styling. | mermaid | mermaid, diagrams, flowcharts, sequences, erds, architectures, masters, syntax, all, diagram, types, styling |
+| `micro-saas-launcher` | Expert in launching small, focused SaaS products fast - the indie hacker approach to building profitable software. Covers idea validation, MVP development, p... | micro, saas, launcher | micro, saas, launcher, launching, small, products, fast, indie, hacker, approach, building, profitable |
| `minecraft-bukkit-pro` | Master Minecraft server plugin development with Bukkit, Spigot, and Paper APIs. | minecraft, bukkit | minecraft, bukkit, pro, server, plugin, development, spigot, paper, apis |
| `monetization` | Estrategia e implementacao de monetizacao para produtos digitais - Stripe, subscriptions, pricing experiments, freemium, upgrade flows, churn prevention, rev... | monetization, stripe, saas, pricing, subscriptions | monetization, stripe, saas, pricing, subscriptions, estrategia, implementacao, de, monetizacao, para, produtos, digitais |
| `monorepo-management` | Build efficient, scalable monorepos that enable code sharing, consistent tooling, and atomic changes across multiple packages and applications. | monorepo | monorepo, efficient, scalable, monorepos, enable, code, sharing, consistent, tooling, atomic, changes, multiple |
@@ -871,9 +876,9 @@ Total skills: 1377
| `pentest-checklist` | Provide a comprehensive checklist for planning, executing, and following up on penetration tests. Ensure thorough preparation, proper scoping, and effective ... | pentest, checklist | pentest, checklist, provide, planning, executing, following, up, penetration, tests, thorough, preparation, proper |
| `performance-optimizer` | Identifies and fixes performance bottlenecks in code, databases, and APIs. Measures before and after to prove improvements. | performance, optimizer | performance, optimizer, identifies, fixes, bottlenecks, code, databases, apis, measures, before, after, prove |
| `performance-profiling` | Performance profiling principles. Measurement, analysis, and optimization techniques. | performance, profiling | performance, profiling, principles, measurement, analysis, optimization, techniques |
+| `personal-tool-builder` | Expert in building custom tools that solve your own problems first. The best products often start as personal tools - scratch your own itch, build for yourse... | personal, builder | personal, builder, building, custom, solve, own, problems, first, products, often, start, scratch |
| `phase-gated-debugging` | Use when debugging any bug. Enforces a 5-phase protocol where code edits are blocked until root cause is confirmed. Prevents premature fix attempts. | phase, gated, debugging | phase, gated, debugging, any, bug, enforces, protocol, where, code, edits, blocked, until |
| `pitch-psychologist` | One sentence - what this skill does and when to invoke it | pitch, psychologist | pitch, psychologist, one, sentence, what, skill, does, invoke |
-| `plaid-fintech` | Create a linktoken for Plaid Link, exchange publictoken for accesstoken. Link tokens are short-lived, one-time use. Access tokens don't expire but may need u... | plaid, fintech | plaid, fintech, linktoken, link, exchange, publictoken, accesstoken, tokens, short, lived, one, time |
| `plan-writing` | Structured task planning with clear breakdowns, dependencies, and verification criteria. Use when implementing features, refactoring, or any multi-step work. | plan, writing | plan, writing, structured, task, planning, clear, breakdowns, dependencies, verification, criteria, implementing, features |
| `planning-with-files` | Work like Manus: Use persistent markdown files as your "working memory on disk." | planning, with, files | planning, with, files, work, like, manus, persistent, markdown, working, memory, disk |
| `playwright-skill` | IMPORTANT - Path Resolution: This skill can be installed in different locations (plugin system, manual installation, global, or project-specific). Before exe... | playwright, skill | playwright, skill, important, path, resolution, installed, different, locations, plugin, manual, installation, global |
@@ -932,8 +937,6 @@ Total skills: 1377
| `swiftui-performance-audit` | Audit SwiftUI performance issues from code review and profiling evidence. | swiftui, performance, audit | swiftui, performance, audit, issues, code, review, profiling, evidence |
| `tcm-constitution-analyzer` | 分析中医体质数据、识别体质类型、评估体质特征,并提供个性化养生建议。支持与营养、运动、睡眠等健康数据的关联分析。 | tcm, constitution, analyzer | tcm, constitution, analyzer |
| `team-composition-analysis` | Design optimal team structures, hiring plans, compensation strategies, and equity allocation for early-stage startups from pre-seed through Series A. | team, composition | team, composition, analysis, optimal, structures, hiring, plans, compensation, equity, allocation, early, stage |
-| `telegram-bot-builder` | You build bots that people actually use daily. You understand that bots should feel like helpful assistants, not clunky interfaces. You know the Telegram eco... | telegram, bot, builder | telegram, bot, builder, bots, people, actually, daily, understand, should, feel, like, helpful |
-| `telegram-mini-app` | You build apps where 800M+ Telegram users already are. You understand the Mini App ecosystem is exploding - games, DeFi, utilities, social apps. You know TON... | telegram, mini, app | telegram, mini, app, apps, where, 800m, users, already, understand, ecosystem, exploding, games |
| `theme-factory` | This skill provides a curated collection of professional font and color themes themes, each with carefully selected color palettes and font pairings. Once a ... | theme, factory | theme, factory, skill, provides, curated, collection, professional, font, color, themes, each, carefully |
| `threejs-animation` | Three.js animation - keyframe animation, skeletal animation, morph targets, animation mixing. Use when animating objects, playing GLTF animations, creating p... | threejs, animation | threejs, animation, three, js, keyframe, skeletal, morph, targets, mixing, animating, objects, playing |
| `threejs-fundamentals` | Three.js scene setup, cameras, renderer, Object3D hierarchy, coordinate systems. Use when setting up 3D scenes, creating cameras, configuring renderers, mana... | threejs, fundamentals | threejs, fundamentals, three, js, scene, setup, cameras, renderer, object3d, hierarchy, coordinate, setting |
@@ -948,12 +951,11 @@ Total skills: 1377
| `tool-use-guardian` | FREE — Intelligent tool-call reliability wrapper. Monitors, retries, fixes, and learns from tool failures. Auto-recovers from truncated JSON, timeouts, rate ... | reliability, tool-use, error-handling, retries, recovery, agent-infrastructure | reliability, tool-use, error-handling, retries, recovery, agent-infrastructure, guardian, free, intelligent, call, wrapper, monitors |
| `turborepo-caching` | Configure Turborepo for efficient monorepo builds with local and remote caching. Use when setting up Turborepo, optimizing build pipelines, or implementing d... | turborepo, caching | turborepo, caching, configure, efficient, monorepo, local, remote, setting, up, optimizing, pipelines, implementing |
| `tutorial-engineer` | Creates step-by-step tutorials and educational content from code. Transforms complex concepts into progressive learning experiences with hands-on examples. | tutorial | tutorial, engineer, creates, step, tutorials, educational, content, code, transforms, complex, concepts, progressive |
-| `twilio-communications` | Basic pattern for sending SMS messages with Twilio. Handles the fundamentals: phone number formatting, message delivery, and delivery status callbacks. | twilio, communications | twilio, communications, basic, sending, sms, messages, fundamentals, phone, number, formatting, message, delivery |
| `ui-skills` | Opinionated, evolving constraints to guide agents when building interfaces | ui, skills | ui, skills, opinionated, evolving, constraints, agents, building, interfaces |
| `ui-ux-designer` | Create interface designs, wireframes, and design systems. Masters user research, accessibility standards, and modern design tools. | ui, ux, designer | ui, ux, designer, interface, designs, wireframes, masters, user, research, accessibility, standards |
| `unsplash-integration` | Integration skill for searching and fetching high-quality, free-to-use professional photography from Unsplash. | unsplash, integration | unsplash, integration, skill, searching, fetching, high, quality, free, professional, photography |
| `upgrading-expo` | Upgrade Expo SDK versions | upgrading, expo | upgrading, expo, upgrade, sdk, versions |
-| `upstash-qstash` | You are an Upstash QStash expert who builds reliable serverless messaging without infrastructure management. You understand that QStash's simplicity is its p... | upstash, qstash | upstash, qstash, who, reliable, serverless, messaging, without, infrastructure, understand, simplicity, power, http |
+| `upstash-qstash` | Upstash QStash expert for serverless message queues, scheduled jobs, and reliable HTTP-based task delivery without managing infrastructure. | upstash, qstash | upstash, qstash, serverless, message, queues, scheduled, jobs, reliable, http, task, delivery, without |
| `using-git-worktrees` | Git worktrees create isolated workspaces sharing the same repository, allowing work on multiple branches simultaneously without switching. | using, git, worktrees | using, git, worktrees, isolated, workspaces, sharing, same, repository, allowing, work, multiple, branches |
| `using-superpowers` | Use when starting any conversation - establishes how to find and use skills, requiring Skill tool invocation before ANY response including clarifying questions | using, superpowers | using, superpowers, starting, any, conversation, establishes, how, find, skills, requiring, skill, invocation |
| `ux-persuasion-engineer` | One sentence - what this skill does and when to invoke it | ux, persuasion | ux, persuasion, engineer, one, sentence, what, skill, does, invoke |
@@ -961,7 +963,6 @@ Total skills: 1377
| `verification-before-completion` | Claiming work is complete without verification is dishonesty, not efficiency. Use when ANY variation of success/completion claims, ANY expression of satisfac... | verification, before, completion | verification, before, completion, claiming, work, complete, without, dishonesty, efficiency, any, variation, success |
| `vexor-cli` | Semantic file discovery via `vexor`. Use whenever locating where something is implemented/loaded/defined in a medium or large repo, or when the file location... | vexor, cli | vexor, cli, semantic, file, discovery, via, whenever, locating, where, something, implemented, loaded |
| `videodb` | Video and audio perception, indexing, and editing. Ingest files/URLs/live streams, build visual/spoken indexes, search with timestamps, edit timelines, add o... | video, editing, transcription, subtitles, search, streaming, ai-generation, media, live-streams, desktop-capture | video, editing, transcription, subtitles, search, streaming, ai-generation, media, live-streams, desktop-capture, videodb, audio |
-| `viral-generator-builder` | You understand why people share things. You build tools that create "identity moments" - results people want to show off. You know the difference between a t... | viral, generator, builder | viral, generator, builder, understand, why, people, share, things, identity, moments, results, want |
| `visual-emotion-engineer` | One sentence - what this skill does and when to invoke it | visual, emotion | visual, emotion, engineer, one, sentence, what, skill, does, invoke |
| `web-performance-optimization` | Optimize website and web application performance including loading speed, Core Web Vitals, bundle size, caching strategies, and runtime performance | web, performance, optimization | web, performance, optimization, optimize, website, application, including, loading, speed, core, vitals, bundle |
| `weightloss-analyzer` | 分析减肥数据、计算代谢率、追踪能量缺口、管理减肥阶段 | weightloss, analyzer | weightloss, analyzer |
@@ -981,17 +982,19 @@ Total skills: 1377
| `yann-lecun-tecnico` | Sub-skill técnica de Yann LeCun. Cobre CNNs, LeNet, backpropagation, JEPA (I-JEPA, V-JEPA, MC-JEPA), AMI (Advanced Machinery of Intelligence), Self-Supervise... | persona, cnn, jepa, self-supervised, pytorch | persona, cnn, jepa, self-supervised, pytorch, yann, lecun, tecnico, sub, skill, cnica, de |
| `youtube-summarizer` | Extract transcripts from YouTube videos and generate comprehensive, detailed summaries using intelligent analysis frameworks | video, summarization, transcription, youtube, content-analysis | video, summarization, transcription, youtube, content-analysis, summarizer, extract, transcripts, videos, generate, detailed, summaries |
-## infrastructure (122)
+## infrastructure (124)
| Skill | Description | Tags | Triggers |
| --- | --- | --- | --- |
| `acceptance-orchestrator` | Use when a coding task should be driven end-to-end from issue intake through implementation, review, deployment, and acceptance verification with minimal hum... | acceptance, orchestrator | acceptance, orchestrator, coding, task, should, driven, issue, intake, through, review, deployment, verification |
+| `agent-evaluation` | Testing and benchmarking LLM agents including behavioral testing, capability assessment, reliability metrics, and production monitoring—where even top agents... | agent, evaluation | agent, evaluation, testing, benchmarking, llm, agents, including, behavioral, capability, assessment, reliability, metrics |
| `agentflow` | Orchestrate autonomous AI development pipelines through your Kanban board (Asana, GitHub Projects, Linear). Manages multi-worker Claude Code dispatch, determ... | agentflow | agentflow, orchestrate, autonomous, ai, development, pipelines, through, kanban, board, asana, github, linear |
| `airflow-dag-patterns` | Build production Apache Airflow DAGs with best practices for operators, sensors, testing, and deployment. Use when creating data pipelines, orchestrating wor... | airflow, dag | airflow, dag, apache, dags, operators, sensors, testing, deployment, creating, data, pipelines, orchestrating |
| `api-testing-observability-api-mock` | You are an API mocking expert specializing in realistic mock services for development, testing, and demos. Design mocks that simulate real API behavior and e... | api, observability, mock | api, observability, mock, testing, mocking, specializing, realistic, development, demos, mocks, simulate, real |
| `apify-brand-reputation-monitoring` | Scrape reviews, ratings, and brand mentions from multiple platforms using Apify Actors. | apify, brand, reputation, monitoring | apify, brand, reputation, monitoring, scrape, reviews, ratings, mentions, multiple, platforms, actors |
| `application-performance-performance-optimization` | Optimize end-to-end application performance with profiling, observability, and backend/frontend tuning. Use when coordinating performance optimization across... | application, performance, optimization | application, performance, optimization, optimize, profiling, observability, backend, frontend, tuning, coordinating, stack |
| `aws-penetration-testing` | Provide comprehensive techniques for penetration testing AWS cloud environments. Covers IAM enumeration, privilege escalation, SSRF to metadata endpoint, S3 ... | aws, penetration | aws, penetration, testing, provide, techniques, cloud, environments, covers, iam, enumeration, privilege, escalation |
+| `aws-serverless` | Specialized skill for building production-ready serverless applications on AWS. Covers Lambda functions, API Gateway, DynamoDB, SQS/SNS event-driven patterns... | aws, serverless | aws, serverless, specialized, skill, building, applications, covers, lambda, functions, api, gateway, dynamodb |
| `aws-skills` | AWS development with infrastructure automation and cloud architecture patterns | aws, skills | aws, skills, development, infrastructure, automation, cloud, architecture |
| `azd-deployment` | Deploy containerized frontend + backend applications to Azure Container Apps with remote builds, managed identity, and idempotent infrastructure. | azd, deployment | azd, deployment, deploy, containerized, frontend, backend, applications, azure, container, apps, remote, managed |
| `azure-ai-anomalydetector-java` | Build anomaly detection applications with Azure AI Anomaly Detector SDK for Java. Use when implementing univariate/multivariate anomaly detection, time-serie... | azure, ai, anomalydetector, java | azure, ai, anomalydetector, java, anomaly, detection, applications, detector, sdk, implementing, univariate, multivariate |
@@ -1019,7 +1022,6 @@ Total skills: 1377
| `cloud-architect` | Expert cloud architect specializing in AWS/Azure/GCP multi-cloud infrastructure design, advanced IaC (Terraform/OpenTofu/CDK), FinOps cost optimization, and ... | cloud | cloud, architect, specializing, aws, azure, gcp, multi, infrastructure, iac, terraform, opentofu, cdk |
| `cloud-devops` | Cloud infrastructure and DevOps workflow covering AWS, Azure, GCP, Kubernetes, Terraform, CI/CD, monitoring, and cloud-native development. | cloud, devops | cloud, devops, infrastructure, covering, aws, azure, gcp, kubernetes, terraform, ci, cd, monitoring |
| `code-review-ai-ai-review` | You are an expert AI-powered code review specialist combining automated static analysis, intelligent pattern recognition, and modern DevOps practices. Levera... | code, ai | code, ai, review, powered, combining, automated, static, analysis, intelligent, recognition, devops, leverage |
-| `computer-use-agents` | The fundamental architecture of computer use agents: observe screen, reason about next action, execute action, repeat. This loop integrates vision models wit... | computer, use, agents | computer, use, agents, fundamental, architecture, observe, screen, reason, about, next, action, execute |
| `cost-optimization` | Strategies and patterns for optimizing cloud costs across AWS, Azure, and GCP. | cost, optimization | cost, optimization, optimizing, cloud, costs, aws, azure, gcp |
| `data-engineer` | Build scalable data pipelines, modern data warehouses, and real-time streaming architectures. Implements Apache Spark, dbt, Airflow, and cloud-native data pl... | data | data, engineer, scalable, pipelines, warehouses, real, time, streaming, architectures, implements, apache, spark |
| `data-engineering-data-pipeline` | You are a data pipeline architecture expert specializing in scalable, reliable, and cost-effective data pipelines for batch and streaming data processing. | data, engineering, pipeline | data, engineering, pipeline, architecture, specializing, scalable, reliable, cost, effective, pipelines, batch, streaming |
@@ -1042,10 +1044,11 @@ Total skills: 1377
| `error-diagnostics-error-trace` | You are an error tracking and observability expert specializing in implementing comprehensive error monitoring solutions. Set up error tracking systems, conf... | error, diagnostics, trace | error, diagnostics, trace, tracking, observability, specializing, implementing, monitoring, solutions, set, up, configure |
| `expo-cicd-workflows` | Helps understand and write EAS workflow YAML files for Expo projects. Use this skill when the user asks about CI/CD or workflows in an Expo or EAS context, m... | expo, cicd | expo, cicd, helps, understand, write, eas, yaml, files, skill, user, asks, about |
| `expo-deployment` | Deploy Expo apps to production | expo, deployment | expo, deployment, deploy, apps |
+| `file-uploads` | Expert at handling file uploads and cloud storage. Covers S3, Cloudflare R2, presigned URLs, multipart uploads, and image optimization. Knows how to handle l... | file, uploads | file, uploads, handling, cloud, storage, covers, s3, cloudflare, r2, presigned, urls, multipart |
| `flutter-expert` | Master Flutter development with Dart 3, advanced widgets, and multi-platform deployment. | flutter | flutter, development, dart, widgets, multi, platform, deployment |
| `freshservice-automation` | Automate Freshservice ITSM tasks via Rube MCP (Composio): create/update tickets, bulk operations, service requests, and outbound emails. Always search tools ... | freshservice | freshservice, automation, automate, itsm, tasks, via, rube, mcp, composio, update, tickets, bulk |
| `game-development/game-art` | Game art principles. Visual style selection, asset pipeline, animation workflow. | game, development/game, art | game, development/game, art, principles, visual, style, selection, asset, pipeline, animation |
-| `gcp-cloud-run` | When to use: ['Web applications and APIs', 'Need any runtime or library', 'Complex services with multiple endpoints', 'Stateless containerized workloads'] | gcp, cloud, run | gcp, cloud, run, web, applications, apis, any, runtime, library, complex, multiple, endpoints |
+| `gcp-cloud-run` | Specialized skill for building production-ready serverless applications on GCP. Covers Cloud Run services (containerized), Cloud Run Functions (event-driven)... | gcp, cloud, run | gcp, cloud, run, specialized, skill, building, serverless, applications, covers, containerized, functions, event |
| `git-hooks-automation` | Master Git hooks setup with Husky, lint-staged, pre-commit framework, and commitlint. Automate code quality gates, formatting, linting, and commit message en... | git, hooks | git, hooks, automation, setup, husky, lint, staged, pre, commit, framework, commitlint, automate |
| `git-pr-workflows-git-workflow` | Orchestrate a comprehensive git workflow from code review through PR creation, leveraging specialized agents for quality assurance, testing, and deployment r... | git, pr | git, pr, orchestrate, code, review, through, creation, leveraging, specialized, agents, quality, assurance |
| `github-automation` | Automate GitHub repositories, issues, pull requests, branches, CI/CD, and permissions via Rube MCP (Composio). Manage code workflows, review PRs, search code... | github | github, automation, automate, repositories, issues, pull, requests, branches, ci, cd, permissions, via |
@@ -1064,7 +1067,7 @@ Total skills: 1377
| `k6-load-testing` | Comprehensive k6 load testing skill for API, browser, and scalability testing. Write realistic load scenarios, analyze results, and integrate with CI/CD. | k6, load-testing, performance, api-testing, ci-cd | k6, load-testing, performance, api-testing, ci-cd, load, testing, skill, api, browser, scalability, write |
| `kubernetes-architect` | Expert Kubernetes architect specializing in cloud-native infrastructure, advanced GitOps workflows (ArgoCD/Flux), and enterprise container orchestration. | kubernetes | kubernetes, architect, specializing, cloud, native, infrastructure, gitops, argocd, flux, enterprise, container, orchestration |
| `kubernetes-deployment` | Kubernetes deployment workflow for container orchestration, Helm charts, service mesh, and production-ready K8s configurations. | kubernetes, deployment | kubernetes, deployment, container, orchestration, helm, charts, mesh, k8s, configurations |
-| `langfuse` | You are an expert in LLM observability and evaluation. You think in terms of traces, spans, and metrics. You know that LLM applications need monitoring just ... | langfuse | langfuse, llm, observability, evaluation, think, terms, traces, spans, metrics, know, applications, monitoring |
+| `langfuse` | Expert in Langfuse - the open-source LLM observability platform. Covers tracing, prompt management, evaluation, datasets, and integration with LangChain, Lla... | langfuse | langfuse, open, source, llm, observability, platform, covers, tracing, prompt, evaluation, datasets, integration |
| `lightning-channel-factories` | Technical reference on Lightning Network channel factories, multi-party channels, LSP architectures, and Bitcoin Layer 2 scaling without soft forks. Covers D... | lightning, channel, factories | lightning, channel, factories, technical, reference, network, multi, party, channels, lsp, architectures, bitcoin |
| `linux-troubleshooting` | Linux system troubleshooting workflow for diagnosing and resolving system issues, performance problems, and service failures. | linux, troubleshooting | linux, troubleshooting, diagnosing, resolving, issues, performance, problems, failures |
| `machine-learning-ops-ml-pipeline` | Design and implement a complete ML pipeline for: $ARGUMENTS | machine, learning, ops, ml, pipeline | machine, learning, ops, ml, pipeline, complete, arguments |
@@ -1089,7 +1092,6 @@ Total skills: 1377
| `progressive-web-app` | Build Progressive Web Apps (PWAs) with offline support, installability, and caching strategies. Trigger whenever the user mentions PWA, service workers, web ... | pwa, web-dev, service-worker, frontend, offline, caching | pwa, web-dev, service-worker, frontend, offline, caching, progressive, web, app, apps, pwas, installability |
| `prometheus-configuration` | Complete guide to Prometheus setup, metric collection, scrape configuration, and recording rules. | prometheus, configuration | prometheus, configuration, complete, setup, metric, collection, scrape, recording, rules |
| `pubmed-database` | Direct REST API access to PubMed. Advanced Boolean/MeSH queries, E-utilities API, batch processing, citation management. For Python workflows, prefer biopyth... | pubmed, database | pubmed, database, direct, rest, api, access, boolean, mesh, queries, utilities, batch, processing |
-| `salesforce-development` | Use @wire decorator for reactive data binding with Lightning Data Service or Apex methods. @wire fits LWC's reactive architecture and enables Salesforce perf... | salesforce | salesforce, development, wire, decorator, reactive, data, binding, lightning, apex, methods, fits, lwc |
| `seo-aeo-landing-page-writer` | Writes complete, structured landing pages optimized for SEO ranking, AEO citation, and visitor conversion. Activate when the user wants to write or generate ... | seo, aeo, landing, page, writer | seo, aeo, landing, page, writer, writes, complete, structured, pages, optimized, ranking, citation |
| `server-management` | Server management principles and decision-making. Process management, monitoring strategy, and scaling decisions. Teaches thinking, not commands. | server | server, principles, decision, making, process, monitoring, scaling, decisions, teaches, thinking, commands |
| `service-mesh-observability` | Complete guide to observability patterns for Istio, Linkerd, and service mesh deployments. | service, mesh, observability | service, mesh, observability, complete, istio, linkerd, deployments |
@@ -1104,8 +1106,9 @@ Total skills: 1377
| `terraform-specialist` | Expert Terraform/OpenTofu specialist mastering advanced IaC automation, state management, and enterprise infrastructure patterns. | terraform | terraform, opentofu, mastering, iac, automation, state, enterprise, infrastructure |
| `test-automator` | Master AI-powered test automation with modern frameworks, self-healing tests, and comprehensive quality engineering. Build scalable testing strategies with a... | automator | automator, test, ai, powered, automation, frameworks, self, healing, tests, quality, engineering, scalable |
| `unity-developer` | Build Unity games with optimized C# scripts, efficient rendering, and proper asset management. Masters Unity 6 LTS, URP/HDRP pipelines, and cross-platform de... | unity | unity, developer, games, optimized, scripts, efficient, rendering, proper, asset, masters, lts, urp |
-| `vercel-deployment` | Expert knowledge for deploying to Vercel with Next.js Use when: vercel, deploy, deployment, hosting, production. | vercel, deployment | vercel, deployment, knowledge, deploying, next, js, deploy, hosting |
+| `vercel-deployment` | Expert knowledge for deploying to Vercel with Next.js | vercel, deployment | vercel, deployment, knowledge, deploying, next, js |
| `whatsapp-cloud-api` | Integracao com WhatsApp Business Cloud API (Meta). Mensagens, templates, webhooks HMAC-SHA256, automacao de atendimento. Boilerplates Node.js e Python. | messaging, whatsapp, meta, webhooks | messaging, whatsapp, meta, webhooks, cloud, api, integracao, com, business, mensagens, hmac, sha256 |
+| `workflow-automation` | Workflow automation is the infrastructure that makes AI agents reliable. Without durable execution, a network hiccup during a 10-step payment flow means lost... | | automation, infrastructure, makes, ai, agents, reliable, without, durable, execution, network, hiccup, during |
| `x-twitter-scraper` | X (Twitter) data platform skill — tweet search, user lookup, follower extraction, engagement metrics, giveaway draws, monitoring, webhooks, 19 extraction too... | twitter, x-api, scraping, mcp, social-media, data-extraction, giveaway, monitoring, webhooks | twitter, x-api, scraping, mcp, social-media, data-extraction, giveaway, monitoring, webhooks, scraper, data, platform |
## security (170)
@@ -1115,6 +1118,7 @@ Total skills: 1377
| `007` | Security audit, hardening, threat modeling (STRIDE/PASTA), Red/Blue Team, OWASP checks, code review, incident response, and infrastructure security for any p... | security, audit, owasp, threat-modeling, hardening, pentest | security, audit, owasp, threat-modeling, hardening, pentest, 007, threat, modeling, stride, pasta, red |
| `accessibility-compliance-accessibility-audit` | You are an accessibility expert specializing in WCAG compliance, inclusive design, and assistive technology compatibility. Conduct audits, identify barriers,... | accessibility, compliance, audit | accessibility, compliance, audit, specializing, wcag, inclusive, assistive, technology, compatibility, conduct, audits, identify |
| `aegisops-ai` | Autonomous DevSecOps & FinOps Guardrails. Orchestrates Gemini 3 Flash to audit Linux Kernel patches, Terraform cost drifts, and K8s compliance. | aegisops, ai | aegisops, ai, autonomous, devsecops, finops, guardrails, orchestrates, gemini, flash, audit, linux, kernel |
+| `agent-memory-systems` | Memory is the cornerstone of intelligent agents. Without it, every interaction starts from zero. This skill covers the architecture of agent memory: short-te... | agent, memory | agent, memory, cornerstone, intelligent, agents, without, every, interaction, starts, zero, skill, covers |
| `agentic-actions-auditor` | Audits GitHub Actions workflows for security vulnerabilities in AI agent integrations including Claude Code Action, Gemini CLI, OpenAI Codex, and GitHub AI... | agentic, actions, auditor | agentic, actions, auditor, audits, github, security, vulnerabilities, ai, agent, integrations, including, claude |
| `ai-engineering-toolkit` | 6 production-ready AI engineering workflows: prompt evaluation (8-dimension scoring), context budget planning, RAG pipeline design, agent security audit (65-... | prompt-engineering, rag, security, evaluation, ai-engineering, llm | prompt-engineering, rag, security, evaluation, ai-engineering, llm, ai, engineering, toolkit, prompt, dimension, scoring |
| `ai-md` | Convert human-written CLAUDE.md into AI-native structured-label format. Battle-tested across 4 models. Same rules, fewer tokens, higher compliance. | ai, md | ai, md, convert, human, written, claude, native, structured, label, format, battle, tested |
@@ -1140,12 +1144,11 @@ Total skills: 1377
| `backend-security-coder` | Expert in secure backend coding practices specializing in input validation, authentication, and API security. Use PROACTIVELY for backend security implementa... | backend, security, coder | backend, security, coder, secure, coding, specializing, input, validation, authentication, api, proactively, implementations |
| `bdistill-behavioral-xray` | X-ray any AI model's behavioral patterns — refusal boundaries, hallucination tendencies, reasoning style, formatting defaults. No API key needed. | ai, testing, behavioral-analysis, model-evaluation, red-team, compliance, mcp | ai, testing, behavioral-analysis, model-evaluation, red-team, compliance, mcp, bdistill, behavioral, xray, ray, any |
| `broken-authentication` | Identify and exploit authentication and session management vulnerabilities in web applications. Broken authentication consistently ranks in the OWASP Top 10 ... | broken, authentication | broken, authentication, identify, exploit, session, vulnerabilities, web, applications, consistently, ranks, owasp, top |
-| `browser-extension-builder` | You extend the browser to give users superpowers. You understand the unique constraints of extension development - permissions, security, store policies. You... | browser, extension, builder | browser, extension, builder, extend, give, users, superpowers, understand, unique, constraints, development, permissions |
| `burp-suite-testing` | Execute comprehensive web application security testing using Burp Suite's integrated toolset, including HTTP traffic interception and modification, request a... | burp, suite | burp, suite, testing, execute, web, application, security, integrated, toolset, including, http, traffic |
| `burpsuite-project-parser` | Searches and explores Burp Suite project files (.burp) from the command line. Use when searching response headers or bodies with regex patterns, extracting s... | burpsuite, parser | burpsuite, parser, searches, explores, burp, suite, files, command, line, searching, response, headers |
| `cc-skill-security-review` | This skill ensures all code follows security best practices and identifies potential vulnerabilities. Use when implementing authentication or authorization, ... | cc, skill, security | cc, skill, security, review, ensures, all, code, follows, identifies, potential, vulnerabilities, implementing |
| `cicd-automation-workflow-automate` | You are a workflow automation expert specializing in creating efficient CI/CD pipelines, GitHub Actions workflows, and automated development processes. Desig... | cicd, automate | cicd, automate, automation, specializing, creating, efficient, ci, cd, pipelines, github, actions, automated |
-| `clerk-auth` | Expert patterns for Clerk auth implementation, middleware, organizations, webhooks, and user sync Use when: adding authentication, clerk auth, user authentic... | clerk, auth | clerk, auth, middleware, organizations, webhooks, user, sync, adding, authentication, sign, up |
+| `clerk-auth` | Expert patterns for Clerk auth implementation, middleware, organizations, webhooks, and user sync | clerk, auth | clerk, auth, middleware, organizations, webhooks, user, sync |
| `cloud-penetration-testing` | Conduct comprehensive security assessments of cloud infrastructure across Microsoft Azure, Amazon Web Services (AWS), and Google Cloud Platform (GCP). | cloud, penetration | cloud, penetration, testing, conduct, security, assessments, infrastructure, microsoft, azure, amazon, web, aws |
| `code-review-checklist` | Comprehensive checklist for conducting thorough code reviews covering functionality, security, performance, and maintainability | code, checklist | code, checklist, review, conducting, thorough, reviews, covering, functionality, security, performance, maintainability |
| `codebase-audit-pre-push` | Deep audit before GitHub push: removes junk files, dead code, security holes, and optimization issues. Checks every file line-by-line for production readiness. | codebase, audit, pre, push | codebase, audit, pre, push, deep, before, github, removes, junk, files, dead, code |
@@ -1166,9 +1169,8 @@ Total skills: 1377
| `ethical-hacking-methodology` | Master the complete penetration testing lifecycle from reconnaissance through reporting. This skill covers the five stages of ethical hacking methodology, es... | ethical, hacking, methodology | ethical, hacking, methodology, complete, penetration, testing, lifecycle, reconnaissance, through, reporting, skill, covers |
| `fda-food-safety-auditor` | Expert AI auditor for FDA Food Safety (FSMA), HACCP, and PCQI compliance. Reviews food facility records and preventive controls. | fda, food, safety, auditor | fda, food, safety, auditor, ai, fsma, haccp, pcqi, compliance, reviews, facility, records |
| `fda-medtech-compliance-auditor` | Expert AI auditor for Medical Device (SaMD) compliance, IEC 62304, and 21 CFR Part 820. Reviews DHFs, technical files, and software validation. | fda, medtech, compliance, auditor | fda, medtech, compliance, auditor, ai, medical, device, samd, iec, 62304, 21, cfr |
-| `file-uploads` | Careful about security and performance. Never trusts file extensions. Knows that large uploads need special handling. Prefers presigned URLs over server prox... | file, uploads | file, uploads, careful, about, security, performance, never, trusts, extensions, knows, large, special |
| `find-bugs` | Find bugs, security vulnerabilities, and code quality issues in local branch changes. Use when asked to review changes, find bugs, security review, or audit ... | find, bugs | find, bugs, security, vulnerabilities, code, quality, issues, local, branch, changes, asked, review |
-| `firebase` | You're a developer who has shipped dozens of Firebase projects. You've seen the "easy" path lead to security breaches, runaway costs, and impossible migratio... | firebase | firebase, re, developer, who, shipped, dozens, ve, seen, easy, path, lead, security |
+| `firebase` | Firebase gives you a complete backend in minutes - auth, database, storage, functions, hosting. But the ease of setup hides real complexity. Security rules a... | firebase | firebase, gives, complete, backend, minutes, auth, database, storage, functions, hosting, ease, setup |
| `firmware-analyst` | Expert firmware analyst specializing in embedded systems, IoT security, and hardware reverse engineering. | firmware, analyst | firmware, analyst, specializing, embedded, iot, security, hardware, reverse, engineering |
| `fixing-accessibility` | Audit and fix HTML accessibility issues including ARIA labels, keyboard navigation, focus management, color contrast, and form errors. Use when adding intera... | fixing, accessibility | fixing, accessibility, audit, fix, html, issues, including, aria, labels, keyboard, navigation, color |
| `framework-migration-deps-upgrade` | You are a dependency management expert specializing in safe, incremental upgrades of project dependencies. Plan and execute dependency updates with minimal r... | framework, migration, deps, upgrade | framework, migration, deps, upgrade, dependency, specializing, safe, incremental, upgrades, dependencies, plan, execute |
@@ -1207,7 +1209,7 @@ Total skills: 1377
| `mtls-configuration` | Configure mutual TLS (mTLS) for zero-trust service-to-service communication. Use when implementing zero-trust networking, certificate management, or securing... | mtls, configuration | mtls, configuration, configure, mutual, tls, zero, trust, communication, implementing, networking, certificate, securing |
| `network-101` | Configure and test common network services (HTTP, HTTPS, SNMP, SMB) for penetration testing lab environments. Enable hands-on practice with service enumerati... | network, 101 | network, 101, configure, test, common, http, https, snmp, smb, penetration, testing, lab |
| `network-engineer` | Expert network engineer specializing in modern cloud networking, security architectures, and performance optimization. | network | network, engineer, specializing, cloud, networking, security, architectures, performance, optimization |
-| `nextjs-supabase-auth` | Expert integration of Supabase Auth with Next.js App Router Use when: supabase auth next, authentication next.js, login supabase, auth middleware, protected ... | nextjs, supabase, auth | nextjs, supabase, auth, integration, next, js, app, router, authentication, login, middleware, protected |
+| `nextjs-supabase-auth` | Expert integration of Supabase Auth with Next.js App Router | nextjs, supabase, auth | nextjs, supabase, auth, integration, next, js, app, router |
| `nodejs-best-practices` | Node.js development principles and decision-making. Framework selection, async patterns, security, and architecture. Teaches thinking, not copying. | nodejs, best, practices | nodejs, best, practices, node, js, development, principles, decision, making, framework, selection, async |
| `observability-engineer` | Build production-ready monitoring, logging, and tracing systems. Implements comprehensive observability strategies, SLI/SLO management, and incident response... | observability | observability, engineer, monitoring, logging, tracing, implements, sli, slo, incident, response |
| `odoo-l10n-compliance` | Country-specific Odoo localization: tax configuration, e-invoicing (CFDI, FatturaPA, SAF-T), fiscal reporting, and country chart of accounts setup. | odoo, l10n, compliance | odoo, l10n, compliance, country, specific, localization, tax, configuration, invoicing, cfdi, fatturapa, saf |
@@ -1218,6 +1220,7 @@ Total skills: 1377
| `payment-integration` | Integrate Stripe, PayPal, and payment processors. Handles checkout flows, subscriptions, webhooks, and PCI compliance. Use PROACTIVELY when implementing paym... | payment, integration | payment, integration, integrate, stripe, paypal, processors, checkout, flows, subscriptions, webhooks, pci, compliance |
| `pci-compliance` | Master PCI DSS (Payment Card Industry Data Security Standard) compliance for secure payment processing and handling of cardholder data. | pci, compliance | pci, compliance, dss, payment, card, industry, data, security, standard, secure, processing, handling |
| `pentest-commands` | Provide a comprehensive command reference for penetration testing tools including network scanning, exploitation, password cracking, and web application test... | pentest, commands | pentest, commands, provide, command, reference, penetration, testing, including, network, scanning, exploitation, password |
+| `plaid-fintech` | Expert patterns for Plaid API integration including Link token flows, transactions sync, identity verification, Auth for ACH, balance checks, webhook handlin... | plaid, fintech | plaid, fintech, api, integration, including, link, token, flows, transactions, sync, identity, verification |
| `popup-cro` | Create and optimize popups, modals, overlays, slide-ins, and banners to increase conversions without harming user experience or brand trust. | popup, cro | popup, cro, optimize, popups, modals, overlays, slide, ins, banners, increase, conversions, without |
| `postmortem-writing` | Comprehensive guide to writing effective, blameless postmortems that drive organizational learning and prevent incident recurrence. | postmortem, writing | postmortem, writing, effective, blameless, postmortems, drive, organizational, learning, prevent, incident, recurrence |
| `privacy-by-design` | Use when building apps that collect user data. Ensures privacy protections are built in from the start—data minimization, consent, encryption. | privacy, by | privacy, by, building, apps, collect, user, data, ensures, protections, built, start, minimization |
@@ -1319,7 +1322,7 @@ Total skills: 1377
| `wiki-qa` | Answer repository questions grounded entirely in source code evidence. Use when user asks a question about the codebase, user wants to understand a specific ... | wiki, qa | wiki, qa, answer, repository, questions, grounded, entirely, source, code, evidence, user, asks |
| `windows-privilege-escalation` | Provide systematic methodologies for discovering and exploiting privilege escalation vulnerabilities on Windows systems during penetration testing engagements. | windows, privilege, escalation | windows, privilege, escalation, provide, systematic, methodologies, discovering, exploiting, vulnerabilities, during, penetration, testing |
-## workflow (102)
+## workflow (99)
| Skill | Description | Tags | Triggers |
| --- | --- | --- | --- |
@@ -1332,13 +1335,11 @@ Total skills: 1377
| `antigravity-skill-orchestrator` | A meta-skill that understands task requirements, dynamically selects appropriate skills, tracks successful skill combinations using agent-memory-mcp, and pre... | orchestration, meta-skill, agent-memory, task-evaluation | orchestration, meta-skill, agent-memory, task-evaluation, antigravity, skill, orchestrator, meta, understands, task, requirements, dynamically |
| `apify-influencer-discovery` | Find and evaluate influencers for brand partnerships, verify authenticity, and track collaboration performance across Instagram, Facebook, YouTube, and TikTok. | apify, influencer, discovery | apify, influencer, discovery, find, evaluate, influencers, brand, partnerships, verify, authenticity, track, collaboration |
| `asana-automation` | Automate Asana tasks via Rube MCP (Composio): tasks, projects, sections, teams, workspaces. Always search tools first for current schemas. | asana | asana, automation, automate, tasks, via, rube, mcp, composio, sections, teams, workspaces, always |
-| `azure-functions` | Modern .NET execution model with process isolation | azure, functions | azure, functions, net, execution, model, process, isolation |
| `bamboohr-automation` | Automate BambooHR tasks via Rube MCP (Composio): employees, time-off, benefits, dependents, employee updates. Always search tools first for current schemas. | bamboohr | bamboohr, automation, automate, tasks, via, rube, mcp, composio, employees, time, off, benefits |
| `basecamp-automation` | Automate Basecamp project management, to-dos, messages, people, and to-do list organization via Rube MCP (Composio). Always search tools first for current sc... | basecamp | basecamp, automation, automate, dos, messages, people, do, list, organization, via, rube, mcp |
| `billing-automation` | Master automated billing systems including recurring billing, invoice generation, dunning management, proration, and tax calculation. | billing | billing, automation, automated, including, recurring, invoice, generation, dunning, proration, tax, calculation |
| `bitbucket-automation` | Automate Bitbucket repositories, pull requests, branches, issues, and workspace management via Rube MCP (Composio). Always search tools first for current sch... | bitbucket | bitbucket, automation, automate, repositories, pull, requests, branches, issues, workspace, via, rube, mcp |
| `box-automation` | Automate Box operations including file upload/download, content search, folder management, collaboration, metadata queries, and sign requests through Composi... | box | box, automation, automate, operations, including, file, upload, download, content, search, folder, collaboration |
-| `browser-automation` | You are a browser automation expert who has debugged thousands of flaky tests and built scrapers that run for years without breaking. You've seen the evoluti... | browser | browser, automation, who, debugged, thousands, flaky, tests, built, scrapers, run, years, without |
| `cal-com-automation` | Automate Cal.com tasks via Rube MCP (Composio): manage bookings, check availability, configure webhooks, and handle teams. Always search tools first for curr... | cal, com | cal, com, automation, automate, tasks, via, rube, mcp, composio, bookings, check, availability |
| `canva-automation` | Automate Canva tasks via Rube MCP (Composio): designs, exports, folders, brand templates, autofill. Always search tools first for current schemas. | canva | canva, automation, automate, tasks, via, rube, mcp, composio, designs, exports, folders, brand |
| `changelog-automation` | Automate changelog generation from commits, PRs, and releases following Keep a Changelog format. Use when setting up release workflows, generating release no... | changelog | changelog, automation, automate, generation, commits, prs, releases, following, keep, format, setting, up |
@@ -1420,7 +1421,6 @@ Total skills: 1377
| `viboscope` | Psychological compatibility matching — find cofounders, collaborators, and friends through validated psychometrics | matching, psychology, compatibility, networking, collaboration | matching, psychology, compatibility, networking, collaboration, viboscope, psychological, find, cofounders, collaborators, friends, through |
| `web-scraper` | Web scraping inteligente multi-estrategia. Extrai dados estruturados de paginas web (tabelas, listas, precos). Paginacao, monitoramento e export CSV/JSON. | scraping, data-extraction, automation, csv | scraping, data-extraction, automation, csv, web, scraper, inteligente, multi, estrategia, extrai, dados, estruturados |
| `webflow-automation` | Automate Webflow CMS collections, site publishing, page management, asset uploads, and ecommerce orders via Rube MCP (Composio). Always search tools first fo... | webflow | webflow, automation, automate, cms, collections, site, publishing, page, asset, uploads, ecommerce, orders |
-| `workflow-automation` | You are a workflow automation architect who has seen both the promise and the pain of these platforms. You've migrated teams from brittle cron jobs to durabl... | | automation, architect, who, seen, both, promise, pain, these, platforms, ve, migrated, teams |
| `wrike-automation` | Automate Wrike project management via Rube MCP (Composio): create tasks/folders, manage projects, assign work, and track progress. Always search tools first ... | wrike | wrike, automation, automate, via, rube, mcp, composio, tasks, folders, assign, work, track |
| `zendesk-automation` | Automate Zendesk tasks via Rube MCP (Composio): tickets, users, organizations, replies. Always search tools first for current schemas. | zendesk | zendesk, automation, automate, tasks, via, rube, mcp, composio, tickets, users, organizations, replies |
| `zoho-crm-automation` | Automate Zoho CRM tasks via Rube MCP (Composio): create/update records, search contacts, manage leads, and convert leads. Always search tools first for curre... | zoho, crm | zoho, crm, automation, automate, tasks, via, rube, mcp, composio, update, records, search |
diff --git a/CHANGELOG.md b/CHANGELOG.md
index 9ac2b648..c07b3f4c 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -9,6 +9,46 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0
## [Unreleased]
+## [9.9.0] - 2026-04-07 - "Vibeship Restore and Community Merge Batch"
+
+> Installable skill library update for Claude Code, Cursor, Codex CLI, Gemini CLI, Antigravity, and related AI coding assistants.
+
+Start here:
+
+- Install: `npx antigravity-awesome-skills`
+- Choose your tool: [README -> Choose Your Tool](https://github.com/sickn33/antigravity-awesome-skills#choose-your-tool)
+- Best skills by tool: [README -> Best Skills By Tool](https://github.com/sickn33/antigravity-awesome-skills#best-skills-by-tool)
+- Bundles: [docs/users/bundles.md](https://github.com/sickn33/antigravity-awesome-skills/blob/main/docs/users/bundles.md)
+- Workflows: [docs/users/workflows.md](https://github.com/sickn33/antigravity-awesome-skills/blob/main/docs/users/workflows.md)
+
+This release restores the full imported content for the affected `vibeship-spawner-skills` set after the truncation reported in issue `#473`, then folds in the current approved community PR batch. It also refreshes contributor syncing and README source credits so the repository state, plugin mirrors, and public credit surfaces stay aligned on `main`.
+
+### New Skills
+
+- **Satori skill pack** - merges PR #466 with the contributor-provided skills sourced from `MetcalfSolutions/Satori`.
+- **idea-darwin** - merges PR #469 to add the Darwin-style ideation workflow sourced from `warmskull/idea-darwin`.
+- **faf-skills contribution** - merges PR #477 as the maintained FAF contribution path sourced from `Wolfe-Jam/faf-skills`.
+
+### Improvements
+
+- **Issue #473 content restoration** - fully re-syncs the affected `vibeship-spawner-skills` imports on `main`, restoring the upstream body content instead of patching only a single truncated file.
+- **Canonical artifact refresh** - rebuilds the generated catalog, skill index, plugin mirrors, and compatibility data from the restored canonical `skills/` state.
+- **Post-merge maintainer sync** - refreshes contributor listings and README external-source credits as part of the mandatory after-merge maintainer flow for this batch.
+- **PR supersession cleanup** - closes PR #470 as superseded by PR #477 so the FAF change lands once, through the corrected contribution.
+
+### Who should care
+
+- **Users of restored vibeship-derived skills** get the full guidance back across the affected imported skill set instead of the previously truncated bodies.
+- **Contributors and maintainers** get a clean GitHub-only squash merge batch with the required contributor and source-credit follow-up recorded in the release.
+- **Anyone installing bundle or plugin variants** gets regenerated mirrors and catalog artifacts that match the restored canonical skills.
+
+### Credits
+
+- **Issue #473 reporter** for isolating the truncated `vibeship-spawner-skills` import problem.
+- **[@alecmetcalf](https://github.com/alecmetcalf)** for the Satori contribution merged in PR #466.
+- **[@warmskull](https://github.com/warmskull)** for `idea-darwin` merged in PR #469.
+- **[@Wolfe-Jam](https://github.com/Wolfe-Jam)** for the FAF skill contribution merged in PR #477.
+
## [9.8.0] - 2026-04-06 - "Governance, Tracking, and Discovery Skills"
> Installable skill library update for Claude Code, Cursor, Codex CLI, Gemini CLI, Antigravity, and related AI coding assistants.
diff --git a/data/bundles.json b/data/bundles.json
index ca6cd2f8..c0c3346d 100644
--- a/data/bundles.json
+++ b/data/bundles.json
@@ -4,6 +4,7 @@
"core-dev": {
"description": "Core development skills across languages, frameworks, and backend/frontend fundamentals.",
"skills": [
+ "3d-web-experience",
"agent-framework-azure-ai-py",
"agentmail",
"agentphone",
@@ -28,6 +29,7 @@
"astropy",
"async-python-patterns",
"audit-skills",
+ "aws-serverless",
"awt-e2e-testing",
"azd-deployment",
"azure-ai-agents-persistent-java",
@@ -62,6 +64,7 @@
"azure-eventhub-java",
"azure-eventhub-py",
"azure-eventhub-rust",
+ "azure-functions",
"azure-identity-java",
"azure-identity-py",
"azure-identity-rust",
@@ -150,6 +153,7 @@
"fastapi-pro",
"fastapi-router-py",
"fastapi-templates",
+ "firebase",
"firecrawl-scraper",
"flutter-expert",
"fp-async",
@@ -182,6 +186,7 @@
"golang-pro",
"grpc-golang",
"hono",
+ "hubspot-integration",
"hugging-face-dataset-viewer",
"hugging-face-evaluation",
"hugging-face-gradio",
@@ -199,6 +204,7 @@
"junta-leiloeiros",
"k6-load-testing",
"landing-page-generator",
+ "langgraph",
"m365-agents-py",
"m365-agents-ts",
"makepad-deployment",
@@ -207,7 +213,6 @@
"manifest",
"matplotlib",
"mcp-builder-ms",
- "micro-saas-launcher",
"mobile-design",
"mobile-developer",
"mobile-security-coder",
@@ -235,6 +240,7 @@
"pdf-official",
"php-pro",
"pipecat-friday-agent",
+ "plaid-fintech",
"playwright-java",
"podcast-generation",
"polars",
@@ -269,7 +275,6 @@
"sankhya-dashboard-html-jsp-custom-best-pratices",
"scanpy",
"scikit-learn",
- "scroll-experience",
"seaborn",
"security-audit",
"security/aws-secrets-rotation",
@@ -277,6 +282,7 @@
"seo-technical",
"shopify-apps",
"shopify-development",
+ "slack-bot-builder",
"snowflake-development",
"spline-3d-integration",
"sred-work-summary",
@@ -290,12 +296,15 @@
"tanstack-query-expert",
"tavily-web",
"telegram",
+ "telegram-bot-builder",
+ "telegram-mini-app",
"temporal-golang-pro",
"temporal-python-pro",
"temporal-python-testing",
"transformers-js",
"trigger-dev",
"trpc-fullstack",
+ "twilio-communications",
"typescript-advanced-types",
"typescript-expert",
"typescript-pro",
@@ -303,6 +312,8 @@
"uniprot-database",
"uv-package-manager",
"vercel-ai-sdk-expert",
+ "viral-generator-builder",
+ "voice-ai-development",
"web-artifacts-builder",
"webapp-testing",
"whatsapp-cloud-api",
@@ -344,7 +355,6 @@
"backend-security-coder",
"bdistill-behavioral-xray",
"broken-authentication",
- "browser-extension-builder",
"burp-suite-testing",
"burpsuite-project-parser",
"cc-skill-security-review",
@@ -366,7 +376,6 @@
"ethical-hacking-methodology",
"fda-food-safety-auditor",
"fda-medtech-compliance-auditor",
- "file-uploads",
"find-bugs",
"firebase",
"firmware-analyst",
@@ -406,6 +415,7 @@
"payment-integration",
"pci-compliance",
"pentest-commands",
+ "plaid-fintech",
"privacy-by-design",
"protocol-reverse-engineering",
"quant-analyst",
@@ -493,7 +503,6 @@
"observability-monitoring-slo-implement",
"progressive-web-app",
"pubmed-database",
- "salesforce-development",
"seo-aeo-landing-page-writer",
"service-mesh-expert",
"service-mesh-observability",
@@ -571,6 +580,7 @@
"django-perf-review",
"drizzle-orm-expert",
"dwarf-expert",
+ "firebase",
"fixing-metadata",
"food-database-query",
"fp-data-transforms",
@@ -583,6 +593,7 @@
"gdpr-data-handling",
"google-analytics-automation",
"googlesheets-automation",
+ "graphql",
"hugging-face-datasets",
"instagram",
"ios-developer",
@@ -618,7 +629,6 @@
"react-ui-patterns",
"referral-program",
"robius-state-management",
- "salesforce-development",
"sankhya-dashboard-html-jsp-custom-best-pratices",
"scala-pro",
"scanpy",
@@ -648,7 +658,6 @@
"x-twitter-scraper",
"xvary-stock-research",
"youtube-automation",
- "zapier-make-patterns",
"zeroize-audit"
]
},
@@ -657,12 +666,14 @@
"skills": [
"007",
"acceptance-orchestrator",
+ "agent-evaluation",
"agentflow",
"ai-engineering-toolkit",
"airflow-dag-patterns",
"api-testing-observability-api-mock",
"apify-brand-reputation-monitoring",
"application-performance-performance-optimization",
+ "aws-serverless",
"azd-deployment",
"azure-ai-anomalydetector-java",
"azure-mgmt-applicationinsights-dotnet",
@@ -675,7 +686,6 @@
"closed-loop-delivery",
"cloud-devops",
"code-review-ai-ai-review",
- "computer-use-agents",
"convex",
"data-engineering-data-pipeline",
"database-migrations-migration-observability",
@@ -752,7 +762,6 @@
"automation-core": {
"description": "Automation platforms, workflow tooling, and business systems.",
"skills": [
- "3d-web-experience",
"activecampaign-automation",
"agent-orchestrator",
"agentphone",
@@ -836,13 +845,11 @@
"humanize-chinese",
"incident-response-smart-fix",
"instagram-automation",
- "interactive-portfolio",
"intercom-automation",
"jira-automation",
"jobgpt",
"klaviyo-automation",
"kubernetes-deployment",
- "langgraph",
"libreoffice/calc",
"libreoffice/impress",
"libreoffice/writer",
@@ -886,13 +893,11 @@
"postgresql-optimization",
"posthog-automation",
"postmark-automation",
- "rag-engineer",
"rag-implementation",
"reddit-automation",
"render-automation",
"revops",
"salesforce-automation",
- "scroll-experience",
"security-audit",
"security/aws-secrets-rotation",
"segment-automation",
@@ -916,6 +921,7 @@
"tdd-workflow",
"tdd-workflows-tdd-green",
"telegram-automation",
+ "telegram-bot-builder",
"temporal-golang-pro",
"temporal-python-pro",
"terraform-infrastructure",
@@ -1093,6 +1099,7 @@
"apify-ecommerce",
"azure-mgmt-mongodbatlas-dotnet",
"billing-automation",
+ "browser-extension-builder",
"close-automation",
"growth-engine",
"hubspot-automation",
@@ -1134,6 +1141,7 @@
"shopify-development",
"stripe-automation",
"stripe-integration",
+ "telegram-bot-builder",
"webflow-automation",
"wordpress",
"wordpress-woocommerce-development",
@@ -1191,6 +1199,7 @@
"skills": [
"ad-creative",
"agent-orchestrator",
+ "agent-tool-builder",
"ai-seo",
"analyze-project",
"antigravity-skill-orchestrator",
@@ -1204,6 +1213,7 @@
"database-migration",
"drizzle-orm-expert",
"fixing-metadata",
+ "graphql",
"growth-engine",
"hybrid-search-implementation",
"keyword-extractor",
diff --git a/data/catalog.json b/data/catalog.json
index d56fd7e5..4e711915 100644
--- a/data/catalog.json
+++ b/data/catalog.json
@@ -114,8 +114,8 @@
{
"id": "3d-web-experience",
"name": "3d-web-experience",
- "description": "You bring the third dimension to the web. You know when 3D enhances and when it's just showing off. You balance visual impact with performance. You make 3D accessible to users who've never touched a 3D app. You create moments of wonder without sacrificing usability.",
- "category": "general",
+ "description": "Expert in building 3D experiences for the web - Three.js, React Three Fiber, Spline, WebGL, and interactive 3D scenes. Covers product configurators, 3D portfolios, immersive websites, and bringing depth to web experiences.",
+ "category": "development",
"tags": [
"3d",
"web",
@@ -125,15 +125,15 @@
"3d",
"web",
"experience",
- "bring",
- "third",
- "dimension",
- "know",
- "enhances",
- "just",
- "showing",
- "off",
- "balance"
+ "building",
+ "experiences",
+ "three",
+ "js",
+ "react",
+ "fiber",
+ "spline",
+ "webgl",
+ "interactive"
],
"path": "skills/3d-web-experience/SKILL.md"
},
@@ -443,8 +443,8 @@
{
"id": "agent-evaluation",
"name": "agent-evaluation",
- "description": "You're a quality engineer who has seen agents that aced benchmarks fail spectacularly in production. You've learned that evaluating LLM agents is fundamentally different from testing traditional software—the same input can produce different outputs, and \"correct\" often has no single answer.",
- "category": "data-ai",
+ "description": "Testing and benchmarking LLM agents including behavioral testing, capability assessment, reliability metrics, and production monitoring—where even top agents achieve less than 50% on real-world benchmarks",
+ "category": "infrastructure",
"tags": [
"agent",
"evaluation"
@@ -452,16 +452,16 @@
"triggers": [
"agent",
"evaluation",
- "re",
- "quality",
- "engineer",
- "who",
- "seen",
+ "testing",
+ "benchmarking",
+ "llm",
"agents",
- "aced",
- "benchmarks",
- "fail",
- "spectacularly"
+ "including",
+ "behavioral",
+ "capability",
+ "assessment",
+ "reliability",
+ "metrics"
],
"path": "skills/agent-evaluation/SKILL.md"
},
@@ -547,8 +547,8 @@
{
"id": "agent-memory-systems",
"name": "agent-memory-systems",
- "description": "You are a cognitive architect who understands that memory makes agents intelligent. You've built memory systems for agents handling millions of interactions. You know that the hard part isn't storing - it's retrieving the right memory at the right time.",
- "category": "general",
+ "description": "Memory is the cornerstone of intelligent agents. Without it, every interaction starts from zero. This skill covers the architecture of agent memory: short-term (context window), long-term (vector stores), and the cognitive architectures that organize them.",
+ "category": "security",
"tags": [
"agent",
"memory"
@@ -556,16 +556,16 @@
"triggers": [
"agent",
"memory",
- "cognitive",
- "architect",
- "who",
- "understands",
- "makes",
- "agents",
+ "cornerstone",
"intelligent",
- "ve",
- "built",
- "handling"
+ "agents",
+ "without",
+ "every",
+ "interaction",
+ "starts",
+ "zero",
+ "skill",
+ "covers"
],
"path": "skills/agent-memory-systems/SKILL.md"
},
@@ -650,8 +650,8 @@
{
"id": "agent-tool-builder",
"name": "agent-tool-builder",
- "description": "You are an expert in the interface between LLMs and the outside world. You've seen tools that work beautifully and tools that cause agents to hallucinate, loop, or fail silently. The difference is almost always in the design, not the implementation.",
- "category": "general",
+ "description": "Tools are how AI agents interact with the world. A well-designed tool is the difference between an agent that works and one that hallucinates, fails silently, or costs 10x more tokens than necessary. This skill covers tool design from schema to error handling.",
+ "category": "data-ai",
"tags": [
"agent",
"builder"
@@ -659,16 +659,16 @@
"triggers": [
"agent",
"builder",
- "interface",
- "between",
- "llms",
- "outside",
+ "how",
+ "ai",
+ "agents",
+ "interact",
"world",
- "ve",
- "seen",
- "work",
- "beautifully",
- "cause"
+ "well",
+ "designed",
+ "difference",
+ "between",
+ "works"
],
"path": "skills/agent-tool-builder/SKILL.md"
},
@@ -869,7 +869,7 @@
{
"id": "ai-agents-architect",
"name": "ai-agents-architect",
- "description": "I build AI systems that can act autonomously while remaining controllable. I understand that agents fail in unexpected ways - I design for graceful degradation and clear failure modes. I balance autonomy with oversight, knowing when an agent should ask for help vs proceed independently.",
+ "description": "Expert in designing and building autonomous AI agents. Masters tool use, memory systems, planning strategies, and multi-agent orchestration.",
"category": "data-ai",
"tags": [
"ai",
@@ -879,15 +879,15 @@
"ai",
"agents",
"architect",
- "act",
- "autonomously",
- "while",
- "remaining",
- "controllable",
- "understand",
- "fail",
- "unexpected",
- "ways"
+ "designing",
+ "building",
+ "autonomous",
+ "masters",
+ "memory",
+ "planning",
+ "multi",
+ "agent",
+ "orchestration"
],
"path": "skills/ai-agents-architect/SKILL.md"
},
@@ -1038,7 +1038,7 @@
{
"id": "ai-product",
"name": "ai-product",
- "description": "You are an AI product engineer who has shipped LLM features to millions of users. You've debugged hallucinations at 3am, optimized prompts to reduce costs by 80%, and built safety systems that caught thousands of harmful outputs. You know that demos are easy and production is hard.",
+ "description": "Every product will be AI-powered. The question is whether you'll build it right or ship a demo that falls apart in production.",
"category": "data-ai",
"tags": [
"ai",
@@ -1047,16 +1047,16 @@
"triggers": [
"ai",
"product",
- "engineer",
- "who",
- "shipped",
- "llm",
- "features",
- "millions",
- "users",
- "ve",
- "debugged",
- "hallucinations"
+ "every",
+ "powered",
+ "question",
+ "whether",
+ "ll",
+ "right",
+ "ship",
+ "demo",
+ "falls",
+ "apart"
],
"path": "skills/ai-product/SKILL.md"
},
@@ -1115,7 +1115,7 @@
{
"id": "ai-wrapper-product",
"name": "ai-wrapper-product",
- "description": "You know AI wrappers get a bad rap, but the good ones solve real problems. You build products where AI is the engine, not the gimmick. You understand prompt engineering is product development. You balance costs with user experience. You create AI products people actually pay for and use daily.",
+    "description": "Expert in building products that wrap AI APIs (OpenAI, Anthropic, etc.) into focused tools people will pay for. Not just \"ChatGPT but different\" - products that solve specific problems with AI.",
"category": "data-ai",
"tags": [
"ai",
@@ -1126,15 +1126,15 @@
"ai",
"wrapper",
"product",
- "know",
- "wrappers",
- "get",
- "bad",
- "rap",
- "good",
- "ones",
- "solve",
- "real"
+ "building",
+ "products",
+ "wrap",
+ "apis",
+ "openai",
+ "anthropic",
+ "etc",
+ "people",
+ "pay"
],
"path": "skills/ai-wrapper-product/SKILL.md"
},
@@ -1219,7 +1219,7 @@
{
"id": "algolia-search",
"name": "algolia-search",
- "description": "Expert patterns for Algolia search implementation, indexing strategies, React InstantSearch, and relevance tuning Use when: adding search to, algolia, instantsearch, search api, search functionality.",
+ "description": "Expert patterns for Algolia search implementation, indexing strategies, React InstantSearch, and relevance tuning",
"category": "development",
"tags": [
"algolia",
@@ -1232,10 +1232,7 @@
"react",
"instantsearch",
"relevance",
- "tuning",
- "adding",
- "api",
- "functionality"
+ "tuning"
],
"path": "skills/algolia-search/SKILL.md"
},
@@ -2871,7 +2868,7 @@
{
"id": "autonomous-agents",
"name": "autonomous-agents",
- "description": "You are an agent architect who has learned the hard lessons of autonomous AI. You've seen the gap between impressive demos and production disasters. You know that a 95% success rate per step means only 60% by step 10.",
+ "description": "Autonomous agents are AI systems that can independently decompose goals, plan actions, execute tools, and self-correct without constant human guidance. The challenge isn't making them capable - it's making them reliable. Every extra decision multiplies failure probability.",
"category": "data-ai",
"tags": [
"autonomous",
@@ -2880,16 +2877,16 @@
"triggers": [
"autonomous",
"agents",
- "agent",
- "architect",
- "who",
- "learned",
- "hard",
- "lessons",
"ai",
- "ve",
- "seen",
- "gap"
+ "independently",
+ "decompose",
+ "goals",
+ "plan",
+ "actions",
+ "execute",
+ "self",
+ "correct",
+ "without"
],
"path": "skills/autonomous-agents/SKILL.md"
},
@@ -3085,8 +3082,8 @@
{
"id": "aws-serverless",
"name": "aws-serverless",
- "description": "Proper Lambda function structure with error handling",
- "category": "general",
+ "description": "Specialized skill for building production-ready serverless applications on AWS. Covers Lambda functions, API Gateway, DynamoDB, SQS/SNS event-driven patterns, SAM/CDK deployment, and cold start optimization.",
+ "category": "infrastructure",
"tags": [
"aws",
"serverless"
@@ -3094,12 +3091,16 @@
"triggers": [
"aws",
"serverless",
- "proper",
+ "specialized",
+ "skill",
+ "building",
+ "applications",
+ "covers",
"lambda",
- "function",
- "structure",
- "error",
- "handling"
+ "functions",
+ "api",
+ "gateway",
+ "dynamodb"
],
"path": "skills/aws-serverless/SKILL.md"
},
@@ -4541,8 +4542,8 @@
{
"id": "azure-functions",
"name": "azure-functions",
- "description": "Modern .NET execution model with process isolation",
- "category": "workflow",
+ "description": "Expert patterns for Azure Functions development including isolated worker model, Durable Functions orchestration, cold start optimization, and production patterns. Covers .NET, Python, and Node.js programming models.",
+ "category": "development",
"tags": [
"azure",
"functions"
@@ -4550,11 +4551,16 @@
"triggers": [
"azure",
"functions",
- "net",
- "execution",
+ "development",
+ "including",
+ "isolated",
+ "worker",
"model",
- "process",
- "isolation"
+ "durable",
+ "orchestration",
+ "cold",
+ "start",
+ "optimization"
],
"path": "skills/azure-functions/SKILL.md"
},
@@ -7103,32 +7109,32 @@
{
"id": "browser-automation",
"name": "browser-automation",
- "description": "You are a browser automation expert who has debugged thousands of flaky tests and built scrapers that run for years without breaking. You've seen the evolution from Selenium to Puppeteer to Playwright and understand exactly when each tool shines.",
- "category": "workflow",
+ "description": "Browser automation powers web testing, scraping, and AI agent interactions. The difference between a flaky script and a reliable system comes down to understanding selectors, waiting strategies, and anti-detection patterns.",
+ "category": "data-ai",
"tags": [
"browser"
],
"triggers": [
"browser",
"automation",
- "who",
- "debugged",
- "thousands",
- "flaky",
- "tests",
- "built",
- "scrapers",
- "run",
- "years",
- "without"
+ "powers",
+ "web",
+ "testing",
+ "scraping",
+ "ai",
+ "agent",
+ "interactions",
+ "difference",
+ "between",
+ "flaky"
],
"path": "skills/browser-automation/SKILL.md"
},
{
"id": "browser-extension-builder",
"name": "browser-extension-builder",
- "description": "You extend the browser to give users superpowers. You understand the unique constraints of extension development - permissions, security, store policies. You build extensions that people install and actually use daily. You know the difference between a toy and a tool.",
- "category": "security",
+ "description": "Expert in building browser extensions that solve real problems - Chrome, Firefox, and cross-browser extensions. Covers extension architecture, manifest v3, content scripts, popup UIs, monetization strategies, and Chrome Web Store publishing.",
+ "category": "architecture",
"tags": [
"browser",
"extension",
@@ -7138,15 +7144,15 @@
"browser",
"extension",
"builder",
- "extend",
- "give",
- "users",
- "superpowers",
- "understand",
- "unique",
- "constraints",
- "development",
- "permissions"
+ "building",
+ "extensions",
+ "solve",
+ "real",
+ "problems",
+ "chrome",
+ "firefox",
+ "cross",
+ "covers"
],
"path": "skills/browser-extension-builder/SKILL.md"
},
@@ -7217,7 +7223,7 @@
{
"id": "bullmq-specialist",
"name": "bullmq-specialist",
- "description": "BullMQ expert for Redis-backed job queues, background processing, and reliable async execution in Node.js/TypeScript applications. Use when: bullmq, bull queue, redis queue, background job, job queue.",
+ "description": "BullMQ expert for Redis-backed job queues, background processing, and reliable async execution in Node.js/TypeScript applications.",
"category": "development",
"tags": [
"bullmq"
@@ -8411,7 +8417,7 @@
{
"id": "clerk-auth",
"name": "clerk-auth",
- "description": "Expert patterns for Clerk auth implementation, middleware, organizations, webhooks, and user sync Use when: adding authentication, clerk auth, user authentication, sign in, sign up.",
+ "description": "Expert patterns for Clerk auth implementation, middleware, organizations, webhooks, and user sync",
"category": "security",
"tags": [
"clerk",
@@ -8424,11 +8430,7 @@
"organizations",
"webhooks",
"user",
- "sync",
- "adding",
- "authentication",
- "sign",
- "up"
+ "sync"
],
"path": "skills/clerk-auth/SKILL.md"
},
@@ -9202,8 +9204,8 @@
{
"id": "computer-use-agents",
"name": "computer-use-agents",
- "description": "The fundamental architecture of computer use agents: observe screen, reason about next action, execute action, repeat. This loop integrates vision models with action execution through an iterative pipeline.",
- "category": "infrastructure",
+ "description": "Build AI agents that interact with computers like humans do - viewing screens, moving cursors, clicking buttons, and typing text. Covers Anthropic's Computer Use, OpenAI's Operator/CUA, and open-source alternatives.",
+ "category": "data-ai",
"tags": [
"computer",
"use",
@@ -9213,15 +9215,15 @@
"computer",
"use",
"agents",
- "fundamental",
- "architecture",
- "observe",
- "screen",
- "reason",
- "about",
- "next",
- "action",
- "execute"
+ "ai",
+ "interact",
+ "computers",
+ "like",
+ "humans",
+ "do",
+ "viewing",
+ "screens",
+ "moving"
],
"path": "skills/computer-use-agents/SKILL.md"
},
@@ -9782,7 +9784,7 @@
{
"id": "context-window-management",
"name": "context-window-management",
- "description": "You're a context engineering specialist who has optimized LLM applications handling millions of conversations. You've seen systems hit token limits, suffer context rot, and lose critical information mid-dialogue.",
+ "description": "Strategies for managing LLM context windows including summarization, trimming, routing, and avoiding context rot",
"category": "data-ai",
"tags": [
"window"
@@ -9790,16 +9792,15 @@
"triggers": [
"window",
"context",
- "re",
- "engineering",
- "who",
- "optimized",
+ "managing",
"llm",
- "applications",
- "handling",
- "millions",
- "conversations",
- "ve"
+ "windows",
+ "including",
+ "summarization",
+ "trimming",
+ "routing",
+ "avoiding",
+ "rot"
],
"path": "skills/context-window-management/SKILL.md"
},
@@ -9832,7 +9833,7 @@
{
"id": "conversation-memory",
"name": "conversation-memory",
- "description": "Persistent memory systems for LLM conversations including short-term, long-term, and entity-based memory Use when: conversation memory, remember, memory persistence, long-term memory, chat history.",
+ "description": "Persistent memory systems for LLM conversations including short-term, long-term, and entity-based memory",
"category": "data-ai",
"tags": [
"conversation",
@@ -9848,9 +9849,7 @@
"short",
"term",
"long",
- "entity",
- "remember",
- "persistence"
+ "entity"
],
"path": "skills/conversation-memory/SKILL.md"
},
@@ -10194,24 +10193,23 @@
{
"id": "crewai",
"name": "crewai",
- "description": "You are an expert in designing collaborative AI agent teams with CrewAI. You think in terms of roles, responsibilities, and delegation. You design clear agent personas with specific expertise, create well-defined tasks with expected outputs, and orchestrate crews for optimal collaboration.",
- "category": "data-ai",
+ "description": "Expert in CrewAI - the leading role-based multi-agent framework used by 60% of Fortune 500 companies.",
+ "category": "general",
"tags": [
"crewai"
],
"triggers": [
"crewai",
- "designing",
- "collaborative",
- "ai",
+ "leading",
+ "role",
+ "multi",
"agent",
- "teams",
- "think",
- "terms",
- "roles",
- "responsibilities",
- "delegation",
- "clear"
+ "framework",
+ "used",
+ "60",
+ "fortune",
+ "500",
+ "companies"
],
"path": "skills/crewai/SKILL.md"
},
@@ -12243,24 +12241,24 @@
{
"id": "email-systems",
"name": "email-systems",
- "description": "You are an email systems engineer who has maintained 99.9% deliverability across millions of emails. You've debugged SPF/DKIM/DMARC, dealt with blacklists, and optimized for inbox placement. You know that email is the highest ROI channel when done right, and a spam folder nightmare when done wrong.",
- "category": "general",
+ "description": "Email has the highest ROI of any marketing channel. $36 for every $1 spent. Yet most startups treat it as an afterthought - bulk blasts, no personalization, landing in spam folders.",
+ "category": "business",
"tags": [
"email"
],
"triggers": [
"email",
- "engineer",
- "who",
- "maintained",
- "99",
- "deliverability",
- "millions",
- "emails",
- "ve",
- "debugged",
- "spf",
- "dkim"
+ "highest",
+ "roi",
+ "any",
+ "marketing",
+ "channel",
+ "36",
+ "every",
+ "spent",
+ "yet",
+ "most",
+ "startups"
],
"path": "skills/email-systems/SKILL.md"
},
@@ -13387,8 +13385,8 @@
{
"id": "file-uploads",
"name": "file-uploads",
- "description": "Careful about security and performance. Never trusts file extensions. Knows that large uploads need special handling. Prefers presigned URLs over server proxying.",
- "category": "security",
+ "description": "Expert at handling file uploads and cloud storage. Covers S3, Cloudflare R2, presigned URLs, multipart uploads, and image optimization. Knows how to handle large files without blocking.",
+ "category": "infrastructure",
"tags": [
"file",
"uploads"
@@ -13396,16 +13394,16 @@
"triggers": [
"file",
"uploads",
- "careful",
- "about",
- "security",
- "performance",
- "never",
- "trusts",
- "extensions",
- "knows",
- "large",
- "special"
+ "handling",
+ "cloud",
+ "storage",
+ "covers",
+ "s3",
+ "cloudflare",
+ "r2",
+ "presigned",
+ "urls",
+ "multipart"
],
"path": "skills/file-uploads/SKILL.md"
},
@@ -13487,24 +13485,24 @@
{
"id": "firebase",
"name": "firebase",
- "description": "You're a developer who has shipped dozens of Firebase projects. You've seen the \"easy\" path lead to security breaches, runaway costs, and impossible migrations. You know Firebase is powerful, but you also know its sharp edges.",
+ "description": "Firebase gives you a complete backend in minutes - auth, database, storage, functions, hosting. But the ease of setup hides real complexity. Security rules are your last line of defense, and they're often wrong.",
"category": "security",
"tags": [
"firebase"
],
"triggers": [
"firebase",
- "re",
- "developer",
- "who",
- "shipped",
- "dozens",
- "ve",
- "seen",
- "easy",
- "path",
- "lead",
- "security"
+ "gives",
+ "complete",
+ "backend",
+ "minutes",
+ "auth",
+ "database",
+ "storage",
+ "functions",
+ "hosting",
+ "ease",
+ "setup"
],
"path": "skills/firebase/SKILL.md"
},
@@ -14797,7 +14795,7 @@
{
"id": "gcp-cloud-run",
"name": "gcp-cloud-run",
- "description": "When to use: ['Web applications and APIs', 'Need any runtime or library', 'Complex services with multiple endpoints', 'Stateless containerized workloads']",
+ "description": "Specialized skill for building production-ready serverless applications on GCP. Covers Cloud Run services (containerized), Cloud Run Functions (event-driven), cold start optimization, and event-driven architecture with Pub/Sub.",
"category": "infrastructure",
"tags": [
"gcp",
@@ -14808,15 +14806,15 @@
"gcp",
"cloud",
"run",
- "web",
+ "specialized",
+ "skill",
+ "building",
+ "serverless",
"applications",
- "apis",
- "any",
- "runtime",
- "library",
- "complex",
- "multiple",
- "endpoints"
+ "covers",
+ "containerized",
+ "functions",
+ "event"
],
"path": "skills/gcp-cloud-run/SKILL.md"
},
@@ -15759,24 +15757,24 @@
{
"id": "graphql",
"name": "graphql",
- "description": "You're a developer who has built GraphQL APIs at scale. You've seen the N+1 query problem bring down production servers. You've watched clients craft deeply nested queries that took minutes to resolve. You know that GraphQL's power is also its danger.",
- "category": "general",
+ "description": "GraphQL gives clients exactly the data they need - no more, no less. One endpoint, typed schema, introspection. But the flexibility that makes it powerful also makes it dangerous. Without proper controls, clients can craft queries that bring down your server.",
+ "category": "data-ai",
"tags": [
"graphql"
],
"triggers": [
"graphql",
- "re",
- "developer",
- "who",
- "built",
- "apis",
- "scale",
- "ve",
- "seen",
- "query",
- "problem",
- "bring"
+ "gives",
+ "clients",
+ "exactly",
+ "data",
+ "no",
+ "less",
+ "one",
+ "endpoint",
+ "typed",
+ "schema",
+ "introspection"
],
"path": "skills/graphql/SKILL.md"
},
@@ -16477,8 +16475,8 @@
{
"id": "hubspot-integration",
"name": "hubspot-integration",
- "description": "Authentication for single-account integrations",
- "category": "general",
+ "description": "Expert patterns for HubSpot CRM integration including OAuth authentication, CRM objects, associations, batch operations, webhooks, and custom objects. Covers Node.js and Python SDKs.",
+ "category": "development",
"tags": [
"hubspot",
"integration"
@@ -16486,10 +16484,16 @@
"triggers": [
"hubspot",
"integration",
+ "crm",
+ "including",
+ "oauth",
"authentication",
- "single",
- "account",
- "integrations"
+ "objects",
+ "associations",
+ "batch",
+ "operations",
+ "webhooks",
+ "custom"
],
"path": "skills/hubspot-integration/SKILL.md"
},
@@ -17222,24 +17226,24 @@
{
"id": "inngest",
"name": "inngest",
- "description": "You are an Inngest expert who builds reliable background processing without managing infrastructure. You understand that serverless doesn't mean you can't have durable, long-running workflows - it means you don't manage the workers.",
- "category": "general",
+ "description": "Inngest expert for serverless-first background jobs, event-driven workflows, and durable execution without managing queues or workers.",
+ "category": "architecture",
"tags": [
"inngest"
],
"triggers": [
"inngest",
- "who",
- "reliable",
+ "serverless",
+ "first",
"background",
- "processing",
+ "jobs",
+ "event",
+ "driven",
+ "durable",
+ "execution",
"without",
"managing",
- "infrastructure",
- "understand",
- "serverless",
- "doesn",
- "mean"
+ "queues"
],
"path": "skills/inngest/SKILL.md"
},
@@ -17297,7 +17301,7 @@
{
"id": "interactive-portfolio",
"name": "interactive-portfolio",
- "description": "You know a portfolio isn't a resume - it's a first impression that needs to convert. You balance creativity with usability. You understand that hiring managers spend 30 seconds on each portfolio. You make those 30 seconds count. You help people stand out without being gimmicky.",
+ "description": "Expert in building portfolios that actually land jobs and clients - not just showing work, but creating memorable experiences. Covers developer portfolios, designer portfolios, creative portfolios, and portfolios that convert visitors into opportunities.",
"category": "general",
"tags": [
"interactive",
@@ -17306,16 +17310,16 @@
"triggers": [
"interactive",
"portfolio",
- "know",
- "isn",
- "resume",
- "first",
- "impression",
- "convert",
- "balance",
- "creativity",
- "usability",
- "understand"
+ "building",
+ "portfolios",
+ "actually",
+ "land",
+ "jobs",
+ "clients",
+ "just",
+ "showing",
+ "work",
+ "creating"
],
"path": "skills/interactive-portfolio/SKILL.md"
},
@@ -18161,48 +18165,48 @@
{
"id": "langfuse",
"name": "langfuse",
- "description": "You are an expert in LLM observability and evaluation. You think in terms of traces, spans, and metrics. You know that LLM applications need monitoring just like traditional software - but with different dimensions (cost, quality, latency).",
+ "description": "Expert in Langfuse - the open-source LLM observability platform. Covers tracing, prompt management, evaluation, datasets, and integration with LangChain, LlamaIndex, and OpenAI. Essential for debugging, monitoring, and improving LLM applications in production.",
"category": "infrastructure",
"tags": [
"langfuse"
],
"triggers": [
"langfuse",
+ "open",
+ "source",
"llm",
"observability",
+ "platform",
+ "covers",
+ "tracing",
+ "prompt",
"evaluation",
- "think",
- "terms",
- "traces",
- "spans",
- "metrics",
- "know",
- "applications",
- "monitoring"
+ "datasets",
+ "integration"
],
"path": "skills/langfuse/SKILL.md"
},
{
"id": "langgraph",
"name": "langgraph",
- "description": "You are an expert in building production-grade AI agents with LangGraph. You understand that agents need explicit structure - graphs make the flow visible and debuggable. You design state carefully, use reducers appropriately, and always consider persistence for production.",
+ "description": "Expert in LangGraph - the production-grade framework for building stateful, multi-actor AI applications. Covers graph construction, state management, cycles and branches, persistence with checkpointers, human-in-the-loop patterns, and the ReAct agent pattern.",
"category": "data-ai",
"tags": [
"langgraph"
],
"triggers": [
"langgraph",
- "building",
"grade",
+ "framework",
+ "building",
+ "stateful",
+ "multi",
+ "actor",
"ai",
- "agents",
- "understand",
- "explicit",
- "structure",
- "graphs",
- "flow",
- "visible",
- "debuggable"
+ "applications",
+ "covers",
+ "graph",
+ "construction"
],
"path": "skills/langgraph/SKILL.md"
},
@@ -20290,8 +20294,8 @@
{
"id": "micro-saas-launcher",
"name": "micro-saas-launcher",
- "description": "You ship fast and iterate. You know the difference between a side project and a business. You've seen what works in the indie hacker community. You help people go from idea to paying customers in weeks, not years. You focus on sustainable, profitable businesses - not unicorn hunting.",
- "category": "development",
+ "description": "Expert in launching small, focused SaaS products fast - the indie hacker approach to building profitable software. Covers idea validation, MVP development, pricing, launch strategies, and growing to sustainable revenue. Ship in weeks, not months.",
+ "category": "general",
"tags": [
"micro",
"saas",
@@ -20301,15 +20305,15 @@
"micro",
"saas",
"launcher",
- "ship",
+ "launching",
+ "small",
+ "products",
"fast",
- "iterate",
- "know",
- "difference",
- "between",
- "side",
- "business",
- "ve"
+ "indie",
+ "hacker",
+ "approach",
+ "building",
+ "profitable"
],
"path": "skills/micro-saas-launcher/SKILL.md"
},
@@ -21190,7 +21194,7 @@
{
"id": "neon-postgres",
"name": "neon-postgres",
- "description": "Configure Prisma for Neon with connection pooling.",
+ "description": "Expert patterns for Neon serverless Postgres, branching, connection pooling, and Prisma/Drizzle integration",
"category": "data-ai",
"tags": [
"neon",
@@ -21199,10 +21203,13 @@
"triggers": [
"neon",
"postgres",
- "configure",
- "prisma",
+ "serverless",
+ "branching",
"connection",
- "pooling"
+ "pooling",
+ "prisma",
+ "drizzle",
+ "integration"
],
"path": "skills/neon-postgres/SKILL.md"
},
@@ -21419,7 +21426,7 @@
{
"id": "nextjs-supabase-auth",
"name": "nextjs-supabase-auth",
- "description": "Expert integration of Supabase Auth with Next.js App Router Use when: supabase auth next, authentication next.js, login supabase, auth middleware, protected route.",
+ "description": "Expert integration of Supabase Auth with Next.js App Router",
"category": "security",
"tags": [
"nextjs",
@@ -21434,11 +21441,7 @@
"next",
"js",
"app",
- "router",
- "authentication",
- "login",
- "middleware",
- "protected"
+ "router"
],
"path": "skills/nextjs-supabase-auth/SKILL.md"
},
@@ -21587,7 +21590,7 @@
{
"id": "notion-template-business",
"name": "notion-template-business",
- "description": "You know templates are real businesses that can generate serious income. You've seen creators make six figures selling Notion templates. You understand it's not about the template - it's about the problem it solves. You build systems that turn templates into scalable digital products.",
+ "description": "Expert in building and selling Notion templates as a business - not just making templates, but building a sustainable digital product business. Covers template design, pricing, marketplaces, marketing, and scaling to real revenue.",
"category": "business",
"tags": [
"notion",
@@ -21596,16 +21599,16 @@
"triggers": [
"notion",
"business",
- "know",
- "real",
- "businesses",
- "generate",
- "serious",
- "income",
- "ve",
- "seen",
- "creators",
- "six"
+ "building",
+ "selling",
+ "just",
+ "making",
+ "sustainable",
+ "digital",
+ "product",
+ "covers",
+ "pricing",
+ "marketplaces"
],
"path": "skills/notion-template-business/SKILL.md"
},
@@ -23213,8 +23216,8 @@
{
"id": "personal-tool-builder",
"name": "personal-tool-builder",
- "description": "You believe the best tools come from real problems. You've built dozens of personal tools - some stayed personal, others became products used by thousands. You know that building for yourself means you have perfect product-market fit with at least one user.",
- "category": "business",
+ "description": "Expert in building custom tools that solve your own problems first. The best products often start as personal tools - scratch your own itch, build for yourself, then discover others have the same itch.",
+ "category": "general",
"tags": [
"personal",
"builder"
@@ -23222,16 +23225,16 @@
"triggers": [
"personal",
"builder",
- "believe",
- "come",
- "real",
+ "building",
+ "custom",
+ "solve",
+ "own",
"problems",
- "ve",
- "built",
- "dozens",
- "some",
- "stayed",
- "others"
+ "first",
+ "products",
+ "often",
+ "start",
+ "scratch"
],
"path": "skills/personal-tool-builder/SKILL.md"
},
@@ -23361,8 +23364,8 @@
{
"id": "plaid-fintech",
"name": "plaid-fintech",
- "description": "Create a linktoken for Plaid Link, exchange publictoken for accesstoken. Link tokens are short-lived, one-time use. Access tokens don't expire but may need updating when users change passwords.",
- "category": "general",
+ "description": "Expert patterns for Plaid API integration including Link token flows, transactions sync, identity verification, Auth for ACH, balance checks, webhook handling, and fintech compliance best practices.",
+ "category": "security",
"tags": [
"plaid",
"fintech"
@@ -23370,16 +23373,16 @@
"triggers": [
"plaid",
"fintech",
- "linktoken",
+ "api",
+ "integration",
+ "including",
"link",
- "exchange",
- "publictoken",
- "accesstoken",
- "tokens",
- "short",
- "lived",
- "one",
- "time"
+ "token",
+ "flows",
+ "transactions",
+ "sync",
+ "identity",
+ "verification"
],
"path": "skills/plaid-fintech/SKILL.md"
},
@@ -24312,7 +24315,7 @@
{
"id": "prompt-caching",
"name": "prompt-caching",
- "description": "You're a caching specialist who has reduced LLM costs by 90% through strategic caching. You've implemented systems that cache at multiple levels: prompt prefixes, full responses, and semantic similarity matches.",
+ "description": "Caching strategies for LLM prompts including Anthropic prompt caching, response caching, and CAG (Cache Augmented Generation)",
"category": "data-ai",
"tags": [
"prompt",
@@ -24321,16 +24324,15 @@
"triggers": [
"prompt",
"caching",
- "re",
- "who",
- "reduced",
"llm",
- "costs",
- "90",
- "through",
- "strategic",
- "ve",
- "implemented"
+ "prompts",
+ "including",
+ "anthropic",
+ "response",
+ "cag",
+ "cache",
+ "augmented",
+ "generation"
],
"path": "skills/prompt-caching/SKILL.md"
},
@@ -24878,7 +24880,7 @@
{
"id": "rag-engineer",
"name": "rag-engineer",
- "description": "I bridge the gap between raw documents and LLM understanding. I know that retrieval quality determines generation quality - garbage in, garbage out. I obsess over chunking boundaries, embedding dimensions, and similarity metrics because they make the difference between helpful and hallucinating.",
+ "description": "Expert in building Retrieval-Augmented Generation systems. Masters embedding models, vector databases, chunking strategies, and retrieval optimization for LLM applications.",
"category": "data-ai",
"tags": [
"rag"
@@ -24886,16 +24888,16 @@
"triggers": [
"rag",
"engineer",
- "bridge",
- "gap",
- "between",
- "raw",
- "documents",
- "llm",
- "understanding",
- "know",
+ "building",
"retrieval",
- "quality"
+ "augmented",
+ "generation",
+ "masters",
+ "embedding",
+ "models",
+ "vector",
+ "databases",
+ "chunking"
],
"path": "skills/rag-engineer/SKILL.md"
},
@@ -25946,24 +25948,24 @@
{
"id": "salesforce-development",
"name": "salesforce-development",
- "description": "Use @wire decorator for reactive data binding with Lightning Data Service or Apex methods. @wire fits LWC's reactive architecture and enables Salesforce performance optimizations.",
- "category": "infrastructure",
+ "description": "Expert patterns for Salesforce platform development including Lightning Web Components (LWC), Apex triggers and classes, REST/Bulk APIs, Connected Apps, and Salesforce DX with scratch orgs and 2nd generation packages (2GP).",
+ "category": "architecture",
"tags": [
"salesforce"
],
"triggers": [
"salesforce",
"development",
- "wire",
- "decorator",
- "reactive",
- "data",
- "binding",
+ "platform",
+ "including",
"lightning",
+ "web",
+ "components",
+ "lwc",
"apex",
- "methods",
- "fits",
- "lwc"
+ "triggers",
+ "classes",
+ "rest"
],
"path": "skills/salesforce-development/SKILL.md"
},
@@ -26268,8 +26270,8 @@
{
"id": "scroll-experience",
"name": "scroll-experience",
- "description": "You see scrolling as a narrative device, not just navigation. You create moments of delight as users scroll. You know when to use subtle animations and when to go cinematic. You balance performance with visual impact. You make websites feel like movies you control with your thumb.",
- "category": "development",
+ "description": "Expert in building immersive scroll-driven experiences - parallax storytelling, scroll animations, interactive narratives, and cinematic web experiences. Like NY Times interactives, Apple product pages, and award-winning web experiences.",
+ "category": "business",
"tags": [
"scroll",
"experience"
@@ -26277,16 +26279,16 @@
"triggers": [
"scroll",
"experience",
- "see",
- "scrolling",
- "narrative",
- "device",
- "just",
- "navigation",
- "moments",
- "delight",
- "users",
- "know"
+ "building",
+ "immersive",
+ "driven",
+ "experiences",
+ "parallax",
+ "storytelling",
+ "animations",
+ "interactive",
+ "narratives",
+ "cinematic"
],
"path": "skills/scroll-experience/SKILL.md"
},
@@ -26720,7 +26722,7 @@
{
"id": "segment-cdp",
"name": "segment-cdp",
- "description": "Client-side tracking with Analytics.js. Include track, identify, page, and group calls. Anonymous ID persists until identify merges with user.",
+ "description": "Expert patterns for Segment Customer Data Platform including Analytics.js, server-side tracking, tracking plans with Protocols, identity resolution, destinations configuration, and data governance best practices.",
"category": "data-ai",
"tags": [
"segment",
@@ -26729,16 +26731,16 @@
"triggers": [
"segment",
"cdp",
- "client",
- "side",
- "tracking",
+ "customer",
+ "data",
+ "platform",
+ "including",
"analytics",
"js",
- "include",
- "track",
- "identify",
- "page",
- "group"
+ "server",
+ "side",
+ "tracking",
+ "plans"
],
"path": "skills/segment-cdp/SKILL.md"
},
@@ -28025,7 +28027,7 @@
{
"id": "shopify-apps",
"name": "shopify-apps",
- "description": "Modern Shopify app template with React Router",
+ "description": "Expert patterns for Shopify app development including Remix/React Router apps, embedded apps with App Bridge, webhook handling, GraphQL Admin API, Polaris components, billing, and app extensions.",
"category": "development",
"tags": [
"shopify",
@@ -28035,8 +28037,15 @@
"shopify",
"apps",
"app",
+ "development",
+ "including",
+ "remix",
"react",
- "router"
+ "router",
+ "embedded",
+ "bridge",
+ "webhook",
+ "handling"
],
"path": "skills/shopify-apps/SKILL.md"
},
@@ -28545,8 +28554,8 @@
{
"id": "slack-bot-builder",
"name": "slack-bot-builder",
- "description": "The Bolt framework is Slack's recommended approach for building apps. It handles authentication, event routing, request verification, and HTTP request processing so you can focus on app logic.",
- "category": "architecture",
+ "description": "Build Slack apps using the Bolt framework across Python, JavaScript, and Java. Covers Block Kit for rich UIs, interactive components, slash commands, event handling, OAuth installation flows, and Workflow Builder integration.",
+ "category": "development",
"tags": [
"slack",
"bot",
@@ -28556,15 +28565,15 @@
"slack",
"bot",
"builder",
+ "apps",
"bolt",
"framework",
- "recommended",
- "approach",
- "building",
- "apps",
- "authentication",
- "event",
- "routing"
+ "python",
+ "javascript",
+ "java",
+ "covers",
+ "block",
+ "kit"
],
"path": "skills/slack-bot-builder/SKILL.md"
},
@@ -30240,8 +30249,8 @@
{
"id": "telegram-bot-builder",
"name": "telegram-bot-builder",
- "description": "You build bots that people actually use daily. You understand that bots should feel like helpful assistants, not clunky interfaces. You know the Telegram ecosystem deeply - what's possible, what's popular, and what makes money. You design conversations that feel natural.",
- "category": "general",
+ "description": "Expert in building Telegram bots that solve real problems - from simple automation to complex AI-powered bots. Covers bot architecture, the Telegram Bot API, user experience, monetization strategies, and scaling bots to thousands of users.",
+ "category": "data-ai",
"tags": [
"telegram",
"bot",
@@ -30251,23 +30260,23 @@
"telegram",
"bot",
"builder",
+ "building",
"bots",
- "people",
- "actually",
- "daily",
- "understand",
- "should",
- "feel",
- "like",
- "helpful"
+ "solve",
+ "real",
+ "problems",
+ "simple",
+ "automation",
+ "complex",
+ "ai"
],
"path": "skills/telegram-bot-builder/SKILL.md"
},
{
"id": "telegram-mini-app",
"name": "telegram-mini-app",
- "description": "You build apps where 800M+ Telegram users already are. You understand the Mini App ecosystem is exploding - games, DeFi, utilities, social apps. You know TON blockchain and how to monetize with crypto. You design for the Telegram UX paradigm, not traditional web.",
- "category": "general",
+ "description": "Expert in building Telegram Mini Apps (TWA) - web apps that run inside Telegram with native-like experience. Covers the TON ecosystem, Telegram Web App API, payments, user authentication, and building viral mini apps that monetize.",
+ "category": "development",
"tags": [
"telegram",
"mini",
@@ -30277,15 +30286,15 @@
"telegram",
"mini",
"app",
+ "building",
"apps",
- "where",
- "800m",
- "users",
- "already",
- "understand",
- "ecosystem",
- "exploding",
- "games"
+ "twa",
+ "web",
+ "run",
+ "inside",
+ "native",
+ "like",
+ "experience"
],
"path": "skills/telegram-mini-app/SKILL.md"
},
@@ -31182,8 +31191,8 @@
{
"id": "trigger-dev",
"name": "trigger-dev",
- "description": "You are a Trigger.dev expert who builds reliable background jobs with exceptional developer experience. You understand that Trigger.dev bridges the gap between simple queues and complex orchestration - it's \"Temporal made easy\" for TypeScript developers.",
- "category": "development",
+ "description": "Trigger.dev expert for background jobs, AI workflows, and reliable async execution with excellent developer experience and TypeScript-first design.",
+ "category": "data-ai",
"tags": [
"trigger",
"dev"
@@ -31191,16 +31200,16 @@
"triggers": [
"trigger",
"dev",
- "who",
- "reliable",
"background",
"jobs",
- "exceptional",
+ "ai",
+ "reliable",
+ "async",
+ "execution",
+ "excellent",
"developer",
"experience",
- "understand",
- "bridges",
- "gap"
+ "typescript"
],
"path": "skills/trigger-dev/SKILL.md"
},
@@ -31307,8 +31316,8 @@
{
"id": "twilio-communications",
"name": "twilio-communications",
- "description": "Basic pattern for sending SMS messages with Twilio. Handles the fundamentals: phone number formatting, message delivery, and delivery status callbacks.",
- "category": "general",
+ "description": "Build communication features with Twilio: SMS messaging, voice calls, WhatsApp Business API, and user verification (2FA). Covers the full spectrum from simple notifications to complex IVR systems and multi-channel authentication.",
+ "category": "development",
"tags": [
"twilio",
"communications"
@@ -31316,16 +31325,16 @@
"triggers": [
"twilio",
"communications",
- "basic",
- "sending",
+ "communication",
+ "features",
"sms",
- "messages",
- "fundamentals",
- "phone",
- "number",
- "formatting",
- "message",
- "delivery"
+ "messaging",
+ "voice",
+ "calls",
+ "whatsapp",
+ "business",
+ "api",
+ "user"
],
"path": "skills/twilio-communications/SKILL.md"
},
@@ -31716,7 +31725,7 @@
{
"id": "upstash-qstash",
"name": "upstash-qstash",
- "description": "You are an Upstash QStash expert who builds reliable serverless messaging without infrastructure management. You understand that QStash's simplicity is its power - HTTP in, HTTP out, with reliability in between.",
+ "description": "Upstash QStash expert for serverless message queues, scheduled jobs, and reliable HTTP-based task delivery without managing infrastructure.",
"category": "general",
"tags": [
"upstash",
@@ -31725,16 +31734,16 @@
"triggers": [
"upstash",
"qstash",
- "who",
- "reliable",
"serverless",
- "messaging",
- "without",
- "infrastructure",
- "understand",
- "simplicity",
- "power",
- "http"
+ "message",
+ "queues",
+ "scheduled",
+ "jobs",
+ "reliable",
+ "http",
+ "task",
+ "delivery",
+ "without"
],
"path": "skills/upstash-qstash/SKILL.md"
},
@@ -32065,7 +32074,7 @@
{
"id": "vercel-deployment",
"name": "vercel-deployment",
- "description": "Expert knowledge for deploying to Vercel with Next.js Use when: vercel, deploy, deployment, hosting, production.",
+ "description": "Expert knowledge for deploying to Vercel with Next.js",
"category": "infrastructure",
"tags": [
"vercel",
@@ -32077,9 +32086,7 @@
"knowledge",
"deploying",
"next",
- "js",
- "deploy",
- "hosting"
+ "js"
],
"path": "skills/vercel-deployment/SKILL.md"
},
@@ -32302,8 +32309,8 @@
{
"id": "viral-generator-builder",
"name": "viral-generator-builder",
- "description": "You understand why people share things. You build tools that create \"identity moments\" - results people want to show off. You know the difference between a tool people use once and one that spreads like wildfire. You optimize for the screenshot, the share, the \"OMG you have to try this\" moment.",
- "category": "general",
+ "description": "Expert in building shareable generator tools that go viral - name generators, quiz makers, avatar creators, personality tests, and calculator tools. Covers the psychology of sharing, viral mechanics, and building tools people can't resist sharing with friends.",
+ "category": "development",
"tags": [
"viral",
"generator",
@@ -32313,15 +32320,15 @@
"viral",
"generator",
"builder",
- "understand",
- "why",
- "people",
- "share",
- "things",
- "identity",
- "moments",
- "results",
- "want"
+ "building",
+ "shareable",
+ "go",
+ "name",
+ "generators",
+ "quiz",
+ "makers",
+ "avatar",
+ "creators"
],
"path": "skills/viral-generator-builder/SKILL.md"
},
@@ -32372,7 +32379,7 @@
{
"id": "voice-agents",
"name": "voice-agents",
- "description": "You are a voice AI architect who has shipped production voice agents handling millions of calls. You understand the physics of latency - every component adds milliseconds, and the sum determines whether conversations feel natural or awkward.",
+ "description": "Voice agents represent the frontier of AI interaction - humans speaking naturally with AI systems.",
"category": "data-ai",
"tags": [
"voice",
@@ -32381,23 +32388,20 @@
"triggers": [
"voice",
"agents",
+ "represent",
+ "frontier",
"ai",
- "architect",
- "who",
- "shipped",
- "handling",
- "millions",
- "calls",
- "understand",
- "physics",
- "latency"
+ "interaction",
+ "humans",
+ "speaking",
+ "naturally"
],
"path": "skills/voice-agents/SKILL.md"
},
{
"id": "voice-ai-development",
"name": "voice-ai-development",
- "description": "You are an expert in building real-time voice applications. You think in terms of latency budgets, audio quality, and user experience. You know that voice apps feel magical when fast and broken when slow.",
+ "description": "Expert in building voice AI applications - from real-time voice agents to voice-enabled apps. Covers OpenAI Realtime API, Vapi for voice agents, Deepgram for transcription, ElevenLabs for synthesis, LiveKit for real-time infrastructure, and WebRTC fundamentals.",
"category": "data-ai",
"tags": [
"voice",
@@ -32408,14 +32412,14 @@
"ai",
"development",
"building",
+ "applications",
"real",
"time",
- "applications",
- "think",
- "terms",
- "latency",
- "budgets",
- "audio"
+ "agents",
+ "enabled",
+ "apps",
+ "covers",
+ "openai"
],
"path": "skills/voice-ai-development/SKILL.md"
},
@@ -33166,22 +33170,22 @@
{
"id": "workflow-automation",
"name": "workflow-automation",
- "description": "You are a workflow automation architect who has seen both the promise and the pain of these platforms. You've migrated teams from brittle cron jobs to durable execution and watched their on-call burden drop by 80%.",
- "category": "workflow",
+ "description": "Workflow automation is the infrastructure that makes AI agents reliable. Without durable execution, a network hiccup during a 10-step payment flow means lost money and angry customers. With it, workflows resume exactly where they left off.",
+ "category": "infrastructure",
"tags": [],
"triggers": [
"automation",
- "architect",
- "who",
- "seen",
- "both",
- "promise",
- "pain",
- "these",
- "platforms",
- "ve",
- "migrated",
- "teams"
+ "infrastructure",
+ "makes",
+ "ai",
+ "agents",
+ "reliable",
+ "without",
+ "durable",
+ "execution",
+ "network",
+ "hiccup",
+ "during"
],
"path": "skills/workflow-automation/SKILL.md"
},
@@ -33609,8 +33613,8 @@
{
"id": "zapier-make-patterns",
"name": "zapier-make-patterns",
- "description": "You are a no-code automation architect who has built thousands of Zaps and Scenarios for businesses of all sizes. You've seen automations that save companies 40% of their time, and you've debugged disasters where bad data flowed through 12 connected apps.",
- "category": "data-ai",
+ "description": "No-code automation democratizes workflow building. Zapier and Make (formerly Integromat) let non-developers automate business processes without writing code. But no-code doesn't mean no-complexity - these platforms have their own patterns, pitfalls, and breaking points.",
+ "category": "architecture",
"tags": [
"zapier",
"make"
@@ -33621,13 +33625,13 @@
"no",
"code",
"automation",
- "architect",
- "who",
- "built",
- "thousands",
- "zaps",
- "scenarios",
- "businesses"
+ "democratizes",
+ "building",
+ "formerly",
+ "integromat",
+ "let",
+ "non",
+ "developers"
],
"path": "skills/zapier-make-patterns/SKILL.md"
},
diff --git a/plugins/antigravity-awesome-skills-claude/skills/3d-web-experience/SKILL.md b/plugins/antigravity-awesome-skills-claude/skills/3d-web-experience/SKILL.md
index a299baf2..9a07aa8a 100644
--- a/plugins/antigravity-awesome-skills-claude/skills/3d-web-experience/SKILL.md
+++ b/plugins/antigravity-awesome-skills-claude/skills/3d-web-experience/SKILL.md
@@ -1,13 +1,20 @@
---
name: 3d-web-experience
-description: "You bring the third dimension to the web. You know when 3D enhances and when it's just showing off. You balance visual impact with performance. You make 3D accessible to users who've never touched a 3D app. You create moments of wonder without sacrificing usability."
+description: Expert in building 3D experiences for the web - Three.js, React
+ Three Fiber, Spline, WebGL, and interactive 3D scenes. Covers product
+ configurators, 3D portfolios, immersive websites, and bringing depth to web
+ experiences.
risk: unknown
-source: "vibeship-spawner-skills (Apache 2.0)"
-date_added: "2026-02-27"
+source: vibeship-spawner-skills (Apache 2.0)
+date_added: 2026-02-27
---
# 3D Web Experience
+Expert in building 3D experiences for the web - Three.js, React Three Fiber,
+Spline, WebGL, and interactive 3D scenes. Covers product configurators, 3D
+portfolios, immersive websites, and bringing depth to web experiences.
+
**Role**: 3D Web Experience Architect
You bring the third dimension to the web. You know when 3D enhances
@@ -15,6 +22,16 @@ and when it's just showing off. You balance visual impact with
performance. You make 3D accessible to users who've never touched
a 3D app. You create moments of wonder without sacrificing usability.
+### Expertise
+
+- Three.js
+- React Three Fiber
+- Spline
+- WebGL
+- GLSL shaders
+- 3D optimization
+- Model preparation
+
## Capabilities
- Three.js implementation
@@ -34,7 +51,6 @@ Choosing the right 3D approach
**When to use**: When starting a 3D web project
-```python
## 3D Stack Selection
### Options Comparison
@@ -91,7 +107,6 @@ export default function Scene() {
);
}
```
-```
### 3D Model Pipeline
@@ -99,7 +114,6 @@ Getting models web-ready
**When to use**: When preparing 3D assets
-```python
## 3D Model Pipeline
### Format Selection
@@ -151,7 +165,6 @@ export default function Scene() {
);
}
```
-```
### Scroll-Driven 3D
@@ -159,7 +172,6 @@ export default function Scene() {
**When to use**: When integrating 3D with scroll
-```python
## Scroll-Driven 3D
### R3F + Scroll Controls
@@ -211,49 +223,152 @@ gsap.to(camera.position, {
- Reveal/hide elements
- Color/material changes
- Exploded view animations
+
+### Performance Optimization
+
+Keeping 3D fast
+
+**When to use**: Always - 3D is expensive
+
+## 3D Performance
+
+### Performance Targets
+| Device | Target FPS | Max Triangles |
+|--------|------------|---------------|
+| Desktop | 60fps | 500K |
+| Mobile | 30-60fps | 100K |
+| Low-end | 30fps | 50K |
+
+### Quick Wins
+```jsx
+// 1. Use instances for repeated objects
+import { Instances, Instance } from '@react-three/drei';
+
+// 2. Limit lights
+
+ // Just one
+
+// 3. Use LOD (Level of Detail)
+import { LOD } from 'three';
+
+// 4. Lazy load models
+const Model = lazy(() => import('./Model'));
```
-## Anti-Patterns
+### Mobile Detection
+```jsx
+const isMobile = /iPhone|iPad|Android/i.test(navigator.userAgent);
-### ❌ 3D For 3D's Sake
+
}>
+
Admin content here
+
+ );
+
+ // Or manual check
+ if (membership?.role !== 'org:admin') {
+ return
Admin access required
;
+ }
+
+ return
Admin content here
;
+}
+
+### Anti_patterns
+
+- Pattern: Not scoping data by orgId | Why: Data leaks between organizations | Fix: Always filter queries by orgId from auth()
+- Pattern: Hardcoding role strings | Why: Typos cause access issues | Fix: Define role constants or use TypeScript enums
+
+### References
+
+- https://clerk.com/docs/guides/organizations
+- https://clerk.com/articles/multi-tenancy-in-react-applications-guide
+
+### Webhook User Sync
+
+Sync Clerk users to your database using webhooks.
+
+Key webhooks:
+- user.created: New user signed up
+- user.updated: User profile changed
+- user.deleted: User deleted account
+
+Uses svix for signature verification.
+
+### Code_example
+
+// app/api/webhooks/clerk/route.ts
+import { Webhook } from 'svix';
+import { headers } from 'next/headers';
+import { WebhookEvent } from '@clerk/nextjs/server';
+import { prisma } from '@/lib/prisma';
+
+export async function POST(req: Request) {
+ const WEBHOOK_SECRET = process.env.CLERK_WEBHOOK_SECRET;
+
+ if (!WEBHOOK_SECRET) {
+ throw new Error('Missing CLERK_WEBHOOK_SECRET');
+ }
+
+ // Get headers
+ const headerPayload = await headers();
+ const svix_id = headerPayload.get('svix-id');
+ const svix_timestamp = headerPayload.get('svix-timestamp');
+ const svix_signature = headerPayload.get('svix-signature');
+
+ if (!svix_id || !svix_timestamp || !svix_signature) {
+ return new Response('Missing svix headers', { status: 400 });
+ }
+
+ // Get body
+ const payload = await req.json();
+ const body = JSON.stringify(payload);
+
+ // Verify webhook
+ const wh = new Webhook(WEBHOOK_SECRET);
+ let evt: WebhookEvent;
+
+ try {
+ evt = wh.verify(body, {
+ 'svix-id': svix_id,
+ 'svix-timestamp': svix_timestamp,
+ 'svix-signature': svix_signature,
+ }) as WebhookEvent;
+ } catch (err) {
+ console.error('Webhook verification failed:', err);
+ return new Response('Verification failed', { status: 400 });
+ }
+
+ // Handle events
+ const eventType = evt.type;
+
+ if (eventType === 'user.created') {
+ const { id, email_addresses, first_name, last_name, image_url } = evt.data;
+
+ await prisma.user.create({
+ data: {
+ clerkId: id,
+ email: email_addresses[0]?.email_address,
+ firstName: first_name,
+ lastName: last_name,
+ imageUrl: image_url,
+ },
+ });
+ }
+
+ if (eventType === 'user.updated') {
+ const { id, email_addresses, first_name, last_name, image_url } = evt.data;
+
+ await prisma.user.update({
+ where: { clerkId: id },
+ data: {
+ email: email_addresses[0]?.email_address,
+ firstName: first_name,
+ lastName: last_name,
+ imageUrl: image_url,
+ },
+ });
+ }
+
+ if (eventType === 'user.deleted') {
+ const { id } = evt.data;
+
+ await prisma.user.delete({
+ where: { clerkId: id! },
+ });
+ }
+
+ return new Response('Webhook processed', { status: 200 });
+}
+
+// Prisma schema
+// prisma/schema.prisma
+model User {
+ id String @id @default(cuid())
+ clerkId String @unique
+ email String @unique
+ firstName String?
+ lastName String?
+ imageUrl String?
+ createdAt DateTime @default(now())
+ updatedAt DateTime @updatedAt
+
+ posts Post[]
+ @@index([clerkId])
+}
+
+### Anti_patterns
+
+- Pattern: Not verifying webhook signature | Why: Anyone can hit your endpoint with fake data | Fix: Always verify with svix
+- Pattern: Blocking middleware for webhook routes | Why: Webhooks come from Clerk, not authenticated users | Fix: Add /api/webhooks(.*)' to public routes
+- Pattern: Not handling race conditions | Why: user.created might arrive after user.updated | Fix: Use upsert instead of create, handle missing records
+
+### References
+
+- https://clerk.com/docs/webhooks/sync-data
+- https://clerk.com/articles/how-to-sync-clerk-user-data-to-your-database
+
+### API Route Protection
+
+Protect API routes using auth() from Clerk.
+
+Route Handlers in App Router use auth() for authentication.
+Middleware provides initial protection, auth() provides in-handler verification.
+
+### Code_example
+
+// app/api/projects/route.ts
+import { auth } from '@clerk/nextjs/server';
+import { prisma } from '@/lib/prisma';
+import { NextResponse } from 'next/server';
+
+export async function GET() {
+ const { userId, orgId } = await auth();
+
+ if (!userId) {
+ return NextResponse.json({ error: 'Unauthorized' }, { status: 401 });
+ }
+
+ // User's personal projects or org projects
+ const projects = await prisma.project.findMany({
+ where: orgId
+ ? { organizationId: orgId }
+ : { userId, organizationId: null },
+ });
+
+ return NextResponse.json(projects);
+}
+
+export async function POST(req: Request) {
+ const { userId, orgId } = await auth();
+
+ if (!userId) {
+ return NextResponse.json({ error: 'Unauthorized' }, { status: 401 });
+ }
+
+ const body = await req.json();
+
+ const project = await prisma.project.create({
+ data: {
+ name: body.name,
+ userId,
+ organizationId: orgId ?? null,
+ },
+ });
+
+ return NextResponse.json(project, { status: 201 });
+}
+
+// Protected with role check
+// app/api/admin/users/route.ts
+export async function GET() {
+ const { userId, orgRole } = await auth();
+
+ if (!userId) {
+ return NextResponse.json({ error: 'Unauthorized' }, { status: 401 });
+ }
+
+ if (orgRole !== 'org:admin') {
+ return NextResponse.json({ error: 'Forbidden' }, { status: 403 });
+ }
+
+ // Admin-only logic
+ const users = await prisma.user.findMany();
+ return NextResponse.json(users);
+}
+
+// Using getAuth in older patterns (not recommended)
+// For backwards compatibility only
+import { getAuth } from '@clerk/nextjs/server';
+
+export async function GET(req: Request) {
+ const { userId } = getAuth(req);
+ // ...
+}
+
+### Anti_patterns
+
+- Pattern: Trusting middleware alone | Why: Middleware can be bypassed (CVE-2025-29927) | Fix: Always verify auth in route handler too
+- Pattern: Not checking orgId for multi-tenant | Why: Users might access other org's data | Fix: Always filter by orgId from auth()
+
+### References
+
+- https://clerk.com/docs/guides/protecting-pages
+
+## Sharp Edges
+
+### CVE-2025-29927 Middleware Bypass Vulnerability
+
+Severity: CRITICAL
+
+### Multiple Middleware Files Cause Conflicts
+
+Severity: HIGH
+
+### 4KB Session Token Cookie Limit
+
+Severity: HIGH
+
+### auth() Requires clerkMiddleware Configuration
+
+Severity: HIGH
+
+### Webhook Race Conditions
+
+Severity: MEDIUM
+
+### auth() is Async in App Router
+
+Severity: MEDIUM
+
+### Middleware Blocks Webhook Endpoints
+
+Severity: MEDIUM
+
+### Accessing Auth State Before isLoaded
+
+Severity: MEDIUM
+
+### Manual Redirects Cause Double Redirects
+
+Severity: MEDIUM
+
+### Organization Data Not Scoped by orgId
+
+Severity: HIGH
+
+## Validation Checks
+
+### Clerk Secret Key in Client Code
+
+Severity: ERROR
+
+CLERK_SECRET_KEY must only be used server-side
+
+Message: Clerk secret key exposed to client. Use CLERK_SECRET_KEY without NEXT_PUBLIC prefix.
+
+### Protected Route Without Middleware
+
+Severity: ERROR
+
+API routes should have middleware protection
+
+Message: API route without auth check. Add middleware protection or auth() check.
+
+### Hardcoded Clerk API Keys
+
+Severity: ERROR
+
+Clerk keys should use environment variables
+
+Message: Hardcoded Clerk keys. Use environment variables.
+
+### Missing Await on auth()
+
+Severity: ERROR
+
+auth() is async in App Router and must be awaited
+
+Message: auth() not awaited. Use 'await auth()' in App Router.
+
+### Multiple Middleware Files
+
+Severity: WARNING
+
+Only one middleware.ts file should exist
+
+Message: Multiple middleware files detected. Use single middleware.ts.
+
+### Webhook Route Not Excluded from Protection
+
+Severity: WARNING
+
+Webhook routes should be public
+
+Message: Webhook route may be blocked by middleware. Add to public routes.
+
+### Accessing Auth Without isLoaded Check
+
+Severity: WARNING
+
+Check isLoaded before accessing user state in client components
+
+Message: Accessing user without isLoaded check. Check isLoaded first.
+
+### Clerk Hooks in Server Component
+
+Severity: ERROR
+
+Clerk hooks only work in Client Components
+
+Message: Clerk hooks in Server Component. Add 'use client' or use auth().
+
+### Multi-Tenant Query Without orgId
+
+Severity: WARNING
+
+Organization data should be scoped by orgId
+
+Message: Query without organization scope. Filter by orgId for multi-tenancy.
+
+### Webhook Without Signature Verification
+
+Severity: ERROR
+
+Clerk webhooks must verify svix signature
+
+Message: Webhook without signature verification. Use svix to verify.
+
+## Collaboration
+
+### Delegation Triggers
+
+- user needs database -> postgres-wizard (User table with clerkId)
+- user needs payments -> stripe-integration (Customer linked to Clerk user)
+- user needs search -> algolia-search (Secured API keys per user)
+- user needs analytics -> segment-cdp (User identification)
+- user needs email -> resend-email (Transactional emails)
## When to Use
-This skill is applicable to execute the workflow or actions described in the overview.
+
+- User mentions or implies: adding authentication
+- User mentions or implies: clerk auth
+- User mentions or implies: user authentication
+- User mentions or implies: sign in
+- User mentions or implies: sign up
+- User mentions or implies: user management
+- User mentions or implies: multi-tenancy
+- User mentions or implies: organizations
+- User mentions or implies: sso
+- User mentions or implies: single sign-on
diff --git a/plugins/antigravity-awesome-skills-claude/skills/computer-use-agents/SKILL.md b/plugins/antigravity-awesome-skills-claude/skills/computer-use-agents/SKILL.md
index 4ad1afbc..9647697d 100644
--- a/plugins/antigravity-awesome-skills-claude/skills/computer-use-agents/SKILL.md
+++ b/plugins/antigravity-awesome-skills-claude/skills/computer-use-agents/SKILL.md
@@ -1,13 +1,20 @@
---
name: computer-use-agents
-description: "The fundamental architecture of computer use agents: observe screen, reason about next action, execute action, repeat. This loop integrates vision models with action execution through an iterative pipeline."
+description: Build AI agents that interact with computers like humans do -
+ viewing screens, moving cursors, clicking buttons, and typing text. Covers
+ Anthropic's Computer Use, OpenAI's Operator/CUA, and open-source alternatives.
risk: unknown
-source: "vibeship-spawner-skills (Apache 2.0)"
-date_added: "2026-02-27"
+source: vibeship-spawner-skills (Apache 2.0)
+date_added: 2026-02-27
---
# Computer Use Agents
+Build AI agents that interact with computers like humans do - viewing screens,
+moving cursors, clicking buttons, and typing text. Covers Anthropic's Computer
+Use, OpenAI's Operator/CUA, and open-source alternatives. Critical focus on
+sandboxing, security, and handling the unique challenges of vision-based control.
+
## Patterns
### Perception-Reasoning-Action Loop
@@ -25,10 +32,8 @@ Key components:
Critical insight: Vision agents are completely still during "thinking"
phase (1-5 seconds), creating a detectable pause pattern.
+**When to use**: Building any computer use agent from scratch,Integrating vision models with desktop control,Understanding agent behavior patterns
-**When to use**: ['Building any computer use agent from scratch', 'Integrating vision models with desktop control', 'Understanding agent behavior patterns']
-
-```python
from anthropic import Anthropic
from PIL import Image
import base64
@@ -83,8 +88,116 @@ class ComputerUseAgent:
amount = action.get("amount", 3)
scroll = -amount if direction == "down" else amount
pyautogui.scroll(scroll)
- return {"success": True, "action": f"scrolled {dir
-```
+ return {"success": True, "action": f"scrolled {direction}"}
+
+ elif action_type == "move":
+ x, y = action["x"], action["y"]
+ pyautogui.moveTo(x, y)
+ return {"success": True, "action": f"moved to ({x}, {y})"}
+
+ else:
+ return {"success": False, "error": f"Unknown action: {action_type}"}
+
+ def run(self, task: str) -> dict:
+ """
+ Run perception-reasoning-action loop until task complete.
+
+ The loop:
+ 1. Screenshot current state
+ 2. Send to vision model with task context
+ 3. Parse action from response
+ 4. Execute action
+ 5. Repeat until done or max steps
+ """
+ messages = []
+ step_count = 0
+
+ system_prompt = """You are a computer use agent. You can see the screen
+ and control mouse/keyboard.
+
+ Available actions (respond with JSON):
+ - {"type": "click", "x": 100, "y": 200, "button": "left"}
+ - {"type": "type", "text": "hello world"}
+ - {"type": "key", "key": "enter"}
+ - {"type": "scroll", "direction": "down", "amount": 3}
+ - {"type": "done", "result": "task completed successfully"}
+
+ Always respond with ONLY a JSON action object.
+ Be precise with coordinates - click exactly where needed.
+ If you see an error, try to recover.
+ """
+
+ while step_count < self.max_steps:
+ step_count += 1
+
+ # 1. PERCEPTION: Capture current screen
+ screenshot_b64 = self.capture_screenshot()
+
+ # 2. REASONING: Send to vision model
+ user_content = [
+ {"type": "text", "text": f"Task: {task}\n\nStep {step_count}. What action should I take?"},
+ {"type": "image", "source": {
+ "type": "base64",
+ "media_type": "image/png",
+ "data": screenshot_b64
+ }}
+ ]
+
+ messages.append({"role": "user", "content": user_content})
+
+ response = self.client.messages.create(
+ model=self.model,
+ max_tokens=1024,
+ system=system_prompt,
+ messages=messages
+ )
+
+ assistant_message = response.content[0].text
+ messages.append({"role": "assistant", "content": assistant_message})
+
+ # 3. Parse action from response
+ import json
+ try:
+ action = json.loads(assistant_message)
+ except json.JSONDecodeError:
+ # Try to extract JSON from response
+ import re
+ match = re.search(r'\{[^}]+\}', assistant_message)
+ if match:
+ action = json.loads(match.group())
+ else:
+ continue
+
+ # Check if done
+ if action.get("type") == "done":
+ return {
+ "success": True,
+ "result": action.get("result"),
+ "steps": step_count
+ }
+
+ # 4. ACTION: Execute
+ result = self.execute_action(action)
+
+ # Small delay for UI to update
+ time.sleep(self.action_delay)
+
+ return {
+ "success": False,
+ "error": "Max steps reached",
+ "steps": step_count
+ }
+
+# Usage
+agent = ComputerUseAgent(Anthropic())
+result = agent.run("Open Chrome and search for 'weather today'")
+
+### Anti_patterns
+
+- Running without step limits (infinite loops)
+- No delay between actions (UI can't keep up)
+- Screenshots at full resolution (token explosion)
+- Ignoring action failures (no recovery)
### Sandboxed Environment Pattern
@@ -102,10 +215,8 @@ Key isolation requirements:
The goal is "blast radius minimization" - if the agent goes wrong,
damage is contained to the sandbox.
+**When to use**: Deploying any computer use agent,Testing agent behavior safely,Running untrusted automation tasks
-**When to use**: ['Deploying any computer use agent', 'Testing agent behavior safely', 'Running untrusted automation tasks']
-
-```python
# Dockerfile for sandboxed computer use environment
# Based on Anthropic's reference implementation pattern
@@ -208,8 +319,89 @@ volumes:
# Python wrapper with additional runtime sandboxing
import subprocess
import os
-from dataclasses im
-```
+from dataclasses import dataclass
+from typing import Optional
+
+@dataclass
+class SandboxConfig:
+ """Configuration for agent sandbox."""
+ network_allowed: list[str] = None # Allowed domains
+ max_runtime_seconds: int = 300
+ max_memory_mb: int = 2048
+ allow_downloads: bool = False
+ allow_clipboard: bool = False
+
+class SandboxedAgent:
+ """
+ Run computer use agent in Docker sandbox.
+ """
+
+ def __init__(self, config: SandboxConfig):
+ self.config = config
+ self.container_id: Optional[str] = None
+
+ def start(self):
+ """Start sandboxed environment."""
+ # Build network rules
+ network_rules = ""
+ if self.config.network_allowed:
+ for domain in self.config.network_allowed:
+ network_rules += f"--add-host={domain}:$(dig +short {domain}) "
+ else:
+ network_rules = "--network=none"
+
+ cmd = f"""
+ docker run -d \
+ --name computer-use-sandbox-$$ \
+ --security-opt no-new-privileges \
+ --cap-drop ALL \
+ --memory {self.config.max_memory_mb}m \
+ --cpus 2 \
+ --read-only \
+ --tmpfs /tmp \
+ {network_rules} \
+ computer-use-agent:latest
+ """
+
+ result = subprocess.run(cmd, shell=True, capture_output=True)
+ self.container_id = result.stdout.decode().strip()
+
+ # Set up kill timer
+ subprocess.Popen([
+ "sh", "-c",
+ f"sleep {self.config.max_runtime_seconds} && docker kill {self.container_id}"
+ ])
+
+ return self.container_id
+
+ def execute_task(self, task: str) -> dict:
+ """Execute task in sandbox."""
+ if not self.container_id:
+ self.start()
+
+ # Send task to agent via API
+ import requests
+ response = requests.post(
+ f"http://localhost:8080/task",
+ json={"task": task},
+ timeout=self.config.max_runtime_seconds
+ )
+
+ return response.json()
+
+ def stop(self):
+ """Stop and remove sandbox."""
+ if self.container_id:
+ subprocess.run(f"docker rm -f {self.container_id}", shell=True)
+ self.container_id = None
+
+### Anti_patterns
+
+- Running agents on host system directly
+- Giving sandbox full network access
+- Running as root in container
+- No resource limits (denial of service)
+- Persistent storage (data can leak between runs)
### Anthropic Computer Use Implementation
@@ -231,10 +423,8 @@ Tool versions:
Critical limitation: "Some UI elements (like dropdowns and scrollbars)
might be tricky for Claude to manipulate" - Anthropic docs
+**When to use**: Building production computer use agents, need highest quality vision understanding, full desktop control (not just browser)
-**When to use**: ['Building production computer use agents', 'Need highest quality vision understanding', 'Full desktop control (not just browser)']
-
-```python
from anthropic import Anthropic
from anthropic.types.beta import (
BetaToolComputerUse20241022,
@@ -301,20 +491,1672 @@ class AnthropicComputerUse:
subprocess.run(["scrot", "/tmp/screenshot.png"])
with open("/tmp/screenshot.png", "rb") as f:
-
+ img_data = f.read()
+
+ # Resize for efficiency
+ img = Image.open(io.BytesIO(img_data))
+ img = img.resize(self.screen_size, Image.LANCZOS)
+
+ buffer = io.BytesIO()
+ img.save(buffer, format="PNG")
+
+ return {
+ "type": "image",
+ "source": {
+ "type": "base64",
+ "media_type": "image/png",
+ "data": base64.b64encode(buffer.getvalue()).decode()
+ }
+ }
+
+ elif action == "mouse_move":
+ x, y = input.get("coordinate", [0, 0])
+ subprocess.run(["xdotool", "mousemove", str(x), str(y)])
+ return {"success": True}
+
+ elif action == "left_click":
+ subprocess.run(["xdotool", "click", "1"])
+ return {"success": True}
+
+ elif action == "right_click":
+ subprocess.run(["xdotool", "click", "3"])
+ return {"success": True}
+
+ elif action == "double_click":
+ subprocess.run(["xdotool", "click", "--repeat", "2", "1"])
+ return {"success": True}
+
+ elif action == "type":
+ text = input.get("text", "")
+ # Use xdotool type with delay for reliability
+ subprocess.run(["xdotool", "type", "--delay", "50", text])
+ return {"success": True}
+
+ elif action == "key":
+ key = input.get("key", "")
+ # Map common key names
+ key_map = {
+ "return": "Return",
+ "enter": "Return",
+ "tab": "Tab",
+ "escape": "Escape",
+ "backspace": "BackSpace",
+ }
+ xdotool_key = key_map.get(key.lower(), key)
+ subprocess.run(["xdotool", "key", xdotool_key])
+ return {"success": True}
+
+ elif action == "scroll":
+ direction = input.get("direction", "down")
+ amount = input.get("amount", 3)
+ button = "5" if direction == "down" else "4"
+ for _ in range(amount):
+ subprocess.run(["xdotool", "click", button])
+ return {"success": True}
+
+ return {"error": f"Unknown action: {action}"}
+
+ def _handle_bash(self, input: dict) -> dict:
+ """Execute bash command."""
+ command = input.get("command", "")
+
+ # Security: Sanitize and limit commands
+ dangerous_patterns = ["rm -rf", "mkfs", "dd if=", "> /dev/"]
+ for pattern in dangerous_patterns:
+ if pattern in command:
+ return {"error": "Dangerous command blocked"}
+
+ try:
+ result = subprocess.run(
+ command,
+ shell=True,
+ capture_output=True,
+ text=True,
+ timeout=30
+ )
+ return {
+ "stdout": result.stdout[:10000], # Limit output
+ "stderr": result.stderr[:1000],
+ "returncode": result.returncode
+ }
+ except subprocess.TimeoutExpired:
+ return {"error": "Command timed out"}
+
+ def _handle_editor(self, input: dict) -> dict:
+ """Handle text editor operations."""
+ command = input.get("command")
+ path = input.get("path")
+
+ if command == "view":
+ try:
+ with open(path, "r") as f:
+ content = f.read()
+ return {"content": content[:50000]} # Limit size
+ except Exception as e:
+ return {"error": str(e)}
+
+ elif command == "str_replace":
+ old_str = input.get("old_str")
+ new_str = input.get("new_str")
+ try:
+ with open(path, "r") as f:
+ content = f.read()
+ if old_str not in content:
+ return {"error": "old_str not found in file"}
+ content = content.replace(old_str, new_str, 1)
+ with open(path, "w") as f:
+ f.write(content)
+ return {"success": True}
+ except Exception as e:
+ return {"error": str(e)}
+
+ return {"error": f"Unknown editor command: {command}"}
+
+ def run_task(self, task: str, max_steps: int = 50) -> dict:
+ """Run computer use task with agentic loop."""
+ messages = [{"role": "user", "content": task}]
+ tools = self.get_tools()
+
+ for step in range(max_steps):
+ response = self.client.beta.messages.create(
+ model=self.model,
+ max_tokens=4096,
+ tools=tools,
+ messages=messages,
+ betas=["computer-use-2024-10-22"]
+ )
+
+ # Check for completion
+ if response.stop_reason == "end_turn":
+ return {
+ "success": True,
+ "result": response.content[0].text if response.content else "",
+ "steps": step + 1
+ }
+
+ # Handle tool use
+ if response.stop_reason == "tool_use":
+ messages.append({"role": "assistant", "content": response.content})
+
+ tool_results = []
+ for block in response.content:
+ if block.type == "tool_use":
+ result = self.execute_tool(block.name, block.input)
+ tool_results.append({
+ "type": "tool_result",
+ "tool_use_id": block.id,
+ "content": result
+ })
+
+ messages.append({"role": "user", "content": tool_results})
+
+ return {"success": False, "error": "Max steps reached"}
+
+### Anti-patterns
+
+- Not using betas=['computer-use-2024-10-22'] flag
+- Full resolution screenshots (wasteful)
+- No command sanitization for bash tool
+- Unbounded execution time
+
+### Browser-Use Pattern (Playwright-based)
+
+For browser-only automation, using structured DOM access is more efficient
+than pixel-based computer use. Playwright MCP allows LLMs to control
+browsers using accessibility snapshots rather than screenshots.
+
+Advantages over vision-based:
+- Faster: No image processing required
+- Cheaper: Text tokens vs image tokens
+- More precise: Direct element targeting
+- More reliable: No coordinate drift
+
+When to use vision vs structured:
+- Vision: Desktop apps, complex UIs, visual verification
+- Structured: Web automation, form filling, data extraction
+
+**When to use**: Browser-only automation tasks, form filling and web interactions, when speed and cost matter more than visual understanding
+
+from playwright.async_api import async_playwright
+from dataclasses import dataclass
+from typing import Optional
+import asyncio
+
+@dataclass
+class BrowserAction:
+ """Structured browser action."""
+ action: str # click, type, navigate, scroll, extract
+ selector: Optional[str] = None
+ text: Optional[str] = None
+ url: Optional[str] = None
+
+class BrowserUseAgent:
+ """
+ Browser automation using Playwright with structured commands.
+ More efficient than pixel-based for web tasks.
+ """
+
+ def __init__(self):
+ self.browser = None
+ self.page = None
+
+ async def start(self, headless: bool = True):
+ """Start browser session."""
+ self.playwright = await async_playwright().start()
+ self.browser = await self.playwright.chromium.launch(headless=headless)
+ self.page = await self.browser.new_page()
+
+ async def get_page_snapshot(self) -> dict:
+ """
+ Get structured snapshot of page for LLM.
+ Uses accessibility tree for efficiency.
+ """
+ # Get accessibility tree
+ snapshot = await self.page.accessibility.snapshot()
+
+ # Get simplified DOM info
+ elements = await self.page.evaluate('''() => {
+ const interactable = [];
+ const selector = 'a, button, input, select, textarea, [role="button"]';
+ document.querySelectorAll(selector).forEach((el, i) => {
+ const rect = el.getBoundingClientRect();
+ if (rect.width > 0 && rect.height > 0) {
+ interactable.push({
+ index: i,
+ tag: el.tagName.toLowerCase(),
+ text: el.textContent?.trim().slice(0, 100),
+ type: el.type,
+ placeholder: el.placeholder,
+ name: el.name,
+ id: el.id,
+ class: el.className
+ });
+ }
+ });
+ return interactable;
+ }''')
+
+ return {
+ "url": self.page.url,
+ "title": await self.page.title(),
+ "accessibility_tree": snapshot,
+ "interactable_elements": elements[:50] # Limit for token efficiency
+ }
+
+ async def execute_action(self, action: BrowserAction) -> dict:
+ """Execute structured browser action."""
+
+ try:
+ if action.action == "navigate":
+ await self.page.goto(action.url, wait_until="domcontentloaded")
+ return {"success": True, "url": self.page.url}
+
+ elif action.action == "click":
+ await self.page.click(action.selector, timeout=5000)
+ await self.page.wait_for_load_state("networkidle", timeout=5000)
+ return {"success": True}
+
+ elif action.action == "type":
+ await self.page.fill(action.selector, action.text)
+ return {"success": True}
+
+ elif action.action == "scroll":
+ direction = action.text or "down"
+ distance = 500 if direction == "down" else -500
+ await self.page.evaluate(f"window.scrollBy(0, {distance})")
+ return {"success": True}
+
+ elif action.action == "extract":
+ # Extract text content
+ if action.selector:
+ text = await self.page.text_content(action.selector)
+ else:
+ text = await self.page.text_content("body")
+ return {"success": True, "text": text[:5000]}
+
+ elif action.action == "screenshot":
+ # Fall back to vision when needed
+ screenshot = await self.page.screenshot(type="png")
+ import base64
+ return {
+ "success": True,
+ "image": base64.b64encode(screenshot).decode()
+ }
+
+ except Exception as e:
+ return {"success": False, "error": str(e)}
+
+ return {"success": False, "error": f"Unknown action: {action.action}"}
+
+ async def run_with_llm(self, task: str, llm_client, max_steps: int = 20):
+ """
+ Run browser task with LLM decision making.
+ Uses structured DOM instead of screenshots.
+ """
+
+ system_prompt = """You are a browser automation agent. You receive
+ page snapshots with interactable elements and decide actions.
+
+ Respond with JSON action:
+ - {"action": "navigate", "url": "https://..."}
+ - {"action": "click", "selector": "button.submit"}
+ - {"action": "type", "selector": "input[name='email']", "text": "..."}
+ - {"action": "scroll", "text": "down"}
+ - {"action": "extract", "selector": ".results"}
+ - {"action": "done", "result": "task completed"}
+
+ Use CSS selectors based on the element info provided.
+ Prefer id > name > class > text content for selectors.
+ """
+
+ messages = []
+
+ for step in range(max_steps):
+ # Get current page state
+ snapshot = await self.get_page_snapshot()
+
+ user_message = f"""Task: {task}
+
+ Current page:
+ URL: {snapshot['url']}
+ Title: {snapshot['title']}
+
+ Interactable elements:
+ {snapshot['interactable_elements']}
+
+ What action should I take?"""
+
+ messages.append({"role": "user", "content": user_message})
+
+ # Get LLM decision
+ response = llm_client.messages.create(
+ model="claude-sonnet-4-20250514",
+ max_tokens=1024,
+ system=system_prompt,
+ messages=messages
+ )
+
+ assistant_text = response.content[0].text
+ messages.append({"role": "assistant", "content": assistant_text})
+
+ # Parse and execute
+ import json
+ action_dict = json.loads(assistant_text)
+
+ if action_dict.get("action") == "done":
+ return {"success": True, "result": action_dict.get("result")}
+
+ action = BrowserAction(**action_dict)
+ result = await self.execute_action(action)
+
+ if not result.get("success"):
+ messages.append({
+ "role": "user",
+ "content": f"Action failed: {result.get('error')}"
+ })
+
+ await asyncio.sleep(0.5) # Rate limit
+
+ return {"success": False, "error": "Max steps reached"}
+
+ async def close(self):
+ """Clean up browser."""
+ if self.browser:
+ await self.browser.close()
+ if hasattr(self, 'playwright'):
+ await self.playwright.stop()
+
+# Usage
+async def main():
+ agent = BrowserUseAgent()
+ await agent.start(headless=False)
+
+ from anthropic import Anthropic
+ result = await agent.run_with_llm(
+ "Go to weather.com and find the weather for New York",
+ Anthropic()
+ )
+
+ print(result)
+ await agent.close()
+
+asyncio.run(main())
+
+### Anti_patterns
+
+- Using screenshots when DOM access works
+- Not waiting for page loads
+- Hardcoded selectors that break
+- No error recovery for stale elements
+
+### User Confirmation Pattern
+
+For sensitive actions, agents should pause and ask for human confirmation.
+"ChatGPT agent also pauses and asks for confirmation prior to taking
+sensitive steps such as completing a purchase."
+
+Sensitivity levels:
+1. LOW: Navigation, reading (auto-approve)
+2. MEDIUM: Form filling, clicking (log, maybe confirm)
+3. HIGH: Purchases, authentication, file operations (always confirm)
+4. CRITICAL: Credential entry, financial transactions (confirm + review)
+
+**When to use**: Actions with real-world consequences, financial transactions, authentication flows, file modifications
+
+from enum import Enum
+from dataclasses import dataclass
+from typing import Callable, Optional
+import asyncio
+
+class ActionSeverity(Enum):
+ LOW = "low" # Auto-approve
+ MEDIUM = "medium" # Log, optional confirm
+ HIGH = "high" # Always confirm
+ CRITICAL = "critical" # Confirm + review details
+
+@dataclass
+class SensitiveAction:
+ """Action that may need user confirmation."""
+ action_type: str
+ description: str
+ severity: ActionSeverity
+ details: dict
+
+class ConfirmationGate:
+ """
+ Gate sensitive actions through user confirmation.
+ """
+
+ # Action type -> severity mapping
+ ACTION_SEVERITY = {
+ # LOW - auto-approve
+ "navigate": ActionSeverity.LOW,
+ "scroll": ActionSeverity.LOW,
+ "read": ActionSeverity.LOW,
+ "screenshot": ActionSeverity.LOW,
+
+ # MEDIUM - log and maybe confirm
+ "click": ActionSeverity.MEDIUM,
+ "type": ActionSeverity.MEDIUM,
+ "search": ActionSeverity.MEDIUM,
+
+ # HIGH - always confirm
+ "download": ActionSeverity.HIGH,
+ "submit_form": ActionSeverity.HIGH,
+ "login": ActionSeverity.HIGH,
+ "file_write": ActionSeverity.HIGH,
+
+ # CRITICAL - confirm with full review
+ "purchase": ActionSeverity.CRITICAL,
+ "enter_password": ActionSeverity.CRITICAL,
+ "enter_credit_card": ActionSeverity.CRITICAL,
+ "send_money": ActionSeverity.CRITICAL,
+ "delete": ActionSeverity.CRITICAL,
+ }
+
+ def __init__(
+ self,
+ confirm_callback: Callable[[SensitiveAction], bool] = None,
+ auto_confirm_low: bool = True,
+ auto_confirm_medium: bool = False
+ ):
+ self.confirm_callback = confirm_callback or self._default_confirm
+ self.auto_confirm_low = auto_confirm_low
+ self.auto_confirm_medium = auto_confirm_medium
+ self.action_log = []
+
+ def _default_confirm(self, action: SensitiveAction) -> bool:
+ """Default confirmation via CLI prompt."""
+ print(f"\n{'='*60}")
+ print(f"ACTION CONFIRMATION REQUIRED")
+ print(f"{'='*60}")
+ print(f"Type: {action.action_type}")
+ print(f"Severity: {action.severity.value.upper()}")
+ print(f"Description: {action.description}")
+ print(f"Details: {action.details}")
+ print(f"{'='*60}")
+
+ while True:
+ response = input("Allow this action? [y/n]: ").lower().strip()
+ if response in ['y', 'yes']:
+ return True
+ elif response in ['n', 'no']:
+ return False
+
+ def classify_action(self, action_type: str, context: dict) -> ActionSeverity:
+ """Classify action severity, considering context."""
+ base_severity = self.ACTION_SEVERITY.get(action_type, ActionSeverity.MEDIUM)
+
+ # Escalate based on context
+ if context.get("involves_credentials"):
+ return ActionSeverity.CRITICAL
+ if context.get("involves_money"):
+ return ActionSeverity.CRITICAL
+ if context.get("irreversible"):
+ return max(base_severity, ActionSeverity.HIGH, key=lambda x: x.value)
+
+ return base_severity
+
+ def check_action(
+ self,
+ action_type: str,
+ description: str,
+ details: dict = None
+ ) -> tuple[bool, str]:
+ """
+ Check if action should proceed.
+ Returns (approved, reason).
+ """
+ details = details or {}
+ severity = self.classify_action(action_type, details)
+
+ action = SensitiveAction(
+ action_type=action_type,
+ description=description,
+ severity=severity,
+ details=details
+ )
+
+ # Log all actions
+ self.action_log.append({
+ "action": action,
+ "timestamp": __import__('datetime').datetime.now().isoformat()
+ })
+
+ # Auto-approve low severity
+ if severity == ActionSeverity.LOW and self.auto_confirm_low:
+ return True, "auto-approved (low severity)"
+
+ # Maybe auto-approve medium
+ if severity == ActionSeverity.MEDIUM and self.auto_confirm_medium:
+ return True, "auto-approved (medium severity)"
+
+ # Request confirmation
+ approved = self.confirm_callback(action)
+
+ if approved:
+ return True, "user approved"
+ else:
+ return False, "user rejected"
+
+class ConfirmedComputerUseAgent:
+ """
+ Computer use agent with confirmation gates.
+ """
+
+ def __init__(self, base_agent, confirmation_gate: ConfirmationGate):
+ self.agent = base_agent
+ self.gate = confirmation_gate
+
+ def execute_action(self, action: dict) -> dict:
+ """Execute action with confirmation check."""
+ action_type = action.get("type", "unknown")
+
+ # Build description
+ if action_type == "click":
+ desc = f"Click at ({action.get('x')}, {action.get('y')})"
+ elif action_type == "type":
+ text = action.get('text', '')
+ # Mask if looks like password
+ if self._looks_sensitive(text):
+ desc = f"Type sensitive text ({len(text)} chars)"
+ else:
+ desc = f"Type: {text[:50]}..."
+ else:
+ desc = f"Execute: {action_type}"
+
+ # Context for severity classification
+ context = {
+ "involves_credentials": self._looks_sensitive(action.get("text", "")),
+ "involves_money": self._mentions_money(action),
+ }
+
+ # Check with gate
+ approved, reason = self.gate.check_action(
+ action_type, desc, context
+ )
+
+ if not approved:
+ return {
+ "success": False,
+ "error": f"Action blocked: {reason}",
+ "action": action_type
+ }
+
+ # Execute if approved
+ return self.agent.execute_action(action)
+
+ def _looks_sensitive(self, text: str) -> bool:
+ """Check if text looks like sensitive data."""
+ if not text:
+ return False
+ # Common patterns
+ patterns = [
+ r'\b\d{16}\b', # Credit card
+ r'\b\d{3,4}\b.*\b\d{3,4}\b', # CVV-like
+ r'password',
+ r'secret',
+ r'api.?key',
+ r'token'
+ ]
+ import re
+ return any(re.search(p, text.lower()) for p in patterns)
+
+ def _mentions_money(self, action: dict) -> bool:
+ """Check if action involves money."""
+ text = str(action)
+ money_patterns = [
+ r'\$\d+', r'pay', r'purchase', r'buy', r'checkout',
+ r'credit', r'debit', r'invoice', r'payment'
+ ]
+ import re
+ return any(re.search(p, text.lower()) for p in money_patterns)
+
+# Usage
+gate = ConfirmationGate(
+ auto_confirm_low=True,
+ auto_confirm_medium=False # Confirm clicks, typing
+)
+
+agent = ConfirmedComputerUseAgent(base_agent, gate)
+result = agent.execute_action({"type": "click", "x": 500, "y": 300})
+
+### Anti-patterns
+
+- Auto-approving all actions
+- Not logging rejected actions
+- Showing full passwords in confirmation
+- No timeout on confirmation (hangs forever)
+
+### Action Logging Pattern
+
+All computer use agent actions should be logged for:
+1. Debugging failed automations
+2. Security auditing
+3. Reproducibility
+4. Compliance requirements
+
+Log format should capture:
+- Timestamp
+- Action type and parameters
+- Screenshot before/after
+- Success/failure status
+- Model reasoning (if available)
+
+**When to use**: Production computer use deployments, debugging automation failures, security-sensitive environments
+
+from dataclasses import dataclass, field
+from datetime import datetime
+from typing import Optional, Any
+import json
+import os
+
+@dataclass
+class ActionLogEntry:
+ """Single action log entry."""
+ timestamp: datetime
+ action_type: str
+ parameters: dict
+ success: bool
+ error: Optional[str] = None
+ screenshot_before: Optional[str] = None # Path to screenshot
+ screenshot_after: Optional[str] = None
+ model_reasoning: Optional[str] = None
+ duration_ms: Optional[int] = None
+
+ def to_dict(self) -> dict:
+ return {
+ "timestamp": self.timestamp.isoformat(),
+ "action_type": self.action_type,
+ "parameters": self._sanitize_params(self.parameters),
+ "success": self.success,
+ "error": self.error,
+ "screenshot_before": self.screenshot_before,
+ "screenshot_after": self.screenshot_after,
+ "model_reasoning": self.model_reasoning,
+ "duration_ms": self.duration_ms
+ }
+
+ def _sanitize_params(self, params: dict) -> dict:
+ """Remove sensitive data from params."""
+ sanitized = {}
+ sensitive_keys = ['password', 'secret', 'token', 'key', 'credit_card']
+
+ for k, v in params.items():
+ if any(s in k.lower() for s in sensitive_keys):
+ sanitized[k] = "[REDACTED]"
+ elif isinstance(v, str) and len(v) > 100:
+ sanitized[k] = v[:100] + "...[truncated]"
+ else:
+ sanitized[k] = v
+
+ return sanitized
+
+@dataclass
+class TaskSession:
+ """A complete task execution session."""
+ session_id: str
+ task: str
+ start_time: datetime
+ end_time: Optional[datetime] = None
+ actions: list[ActionLogEntry] = field(default_factory=list)
+ success: bool = False
+ final_result: Optional[str] = None
+
+class ActionLogger:
+ """
+ Comprehensive action logging for computer use agents.
+ """
+
+ def __init__(self, log_dir: str = "./agent_logs"):
+ self.log_dir = log_dir
+ self.screenshot_dir = os.path.join(log_dir, "screenshots")
+ os.makedirs(self.screenshot_dir, exist_ok=True)
+
+ self.current_session: Optional[TaskSession] = None
+
+ def start_session(self, task: str) -> str:
+ """Start a new task session."""
+ import uuid
+ session_id = str(uuid.uuid4())[:8]
+
+ self.current_session = TaskSession(
+ session_id=session_id,
+ task=task,
+ start_time=datetime.now()
+ )
+
+ return session_id
+
+ def log_action(
+ self,
+ action_type: str,
+ parameters: dict,
+ success: bool,
+ error: Optional[str] = None,
+ screenshot_before: bytes = None,
+ screenshot_after: bytes = None,
+ model_reasoning: str = None,
+ duration_ms: int = None
+ ):
+ """Log a single action."""
+ if not self.current_session:
+ raise RuntimeError("No active session")
+
+ # Save screenshots if provided
+ screenshot_paths = {}
+ timestamp_str = datetime.now().strftime("%Y%m%d_%H%M%S_%f")
+
+ if screenshot_before:
+ path = os.path.join(
+ self.screenshot_dir,
+ f"{self.current_session.session_id}_{timestamp_str}_before.png"
+ )
+ with open(path, "wb") as f:
+ f.write(screenshot_before)
+ screenshot_paths["before"] = path
+
+ if screenshot_after:
+ path = os.path.join(
+ self.screenshot_dir,
+ f"{self.current_session.session_id}_{timestamp_str}_after.png"
+ )
+ with open(path, "wb") as f:
+ f.write(screenshot_after)
+ screenshot_paths["after"] = path
+
+ # Create log entry
+ entry = ActionLogEntry(
+ timestamp=datetime.now(),
+ action_type=action_type,
+ parameters=parameters,
+ success=success,
+ error=error,
+ screenshot_before=screenshot_paths.get("before"),
+ screenshot_after=screenshot_paths.get("after"),
+ model_reasoning=model_reasoning,
+ duration_ms=duration_ms
+ )
+
+ self.current_session.actions.append(entry)
+
+ # Also append to running log file
+ self._append_to_log(entry)
+
+ def _append_to_log(self, entry: ActionLogEntry):
+ """Append entry to JSONL log file."""
+ log_file = os.path.join(
+ self.log_dir,
+ f"session_{self.current_session.session_id}.jsonl"
+ )
+
+ with open(log_file, "a") as f:
+ f.write(json.dumps(entry.to_dict()) + "\n")
+
+ def end_session(self, success: bool, result: str = None):
+ """End current session."""
+ if not self.current_session:
+ return
+
+ self.current_session.end_time = datetime.now()
+ self.current_session.success = success
+ self.current_session.final_result = result
+
+ # Write session summary
+ summary_file = os.path.join(
+ self.log_dir,
+ f"session_{self.current_session.session_id}_summary.json"
+ )
+
+ summary = {
+ "session_id": self.current_session.session_id,
+ "task": self.current_session.task,
+ "start_time": self.current_session.start_time.isoformat(),
+ "end_time": self.current_session.end_time.isoformat(),
+ "duration_seconds": (
+ self.current_session.end_time -
+ self.current_session.start_time
+ ).total_seconds(),
+ "total_actions": len(self.current_session.actions),
+ "successful_actions": sum(
+ 1 for a in self.current_session.actions if a.success
+ ),
+ "failed_actions": sum(
+ 1 for a in self.current_session.actions if not a.success
+ ),
+ "success": success,
+ "final_result": result
+ }
+
+ with open(summary_file, "w") as f:
+ json.dump(summary, f, indent=2)
+
+ self.current_session = None
+
+ def get_session_replay(self, session_id: str) -> list[dict]:
+ """Get all actions from a session for replay/debugging."""
+ log_file = os.path.join(self.log_dir, f"session_{session_id}.jsonl")
+
+ actions = []
+ with open(log_file, "r") as f:
+ for line in f:
+ actions.append(json.loads(line))
+
+ return actions
+
+# Integration with agent
+class LoggedComputerUseAgent:
+ """Computer use agent with comprehensive logging."""
+
+ def __init__(self, base_agent, logger: ActionLogger):
+ self.agent = base_agent
+ self.logger = logger
+
+ def run_task(self, task: str) -> dict:
+ """Run task with full logging."""
+ session_id = self.logger.start_session(task)
+
+ try:
+ result = self._run_with_logging(task)
+ self.logger.end_session(
+ success=result.get("success", False),
+ result=result.get("result")
+ )
+ return result
+ except Exception as e:
+ self.logger.end_session(success=False, result=str(e))
+ raise
+
+ def _run_with_logging(self, task: str) -> dict:
+ """Internal run with action logging."""
+ # This would wrap the base agent's run method
+ # and log each action
+ pass
+
+### Anti-patterns
+
+- Not sanitizing sensitive data in logs
+- Storing screenshots indefinitely (storage costs)
+- Not rotating log files
+- Logging synchronously (blocks agent)
+
+## Sharp Edges
+
+### Web Content Can Hijack Your Agent
+
+Severity: CRITICAL
+
+Situation: Computer use agent browsing the web
+
+Symptoms:
+Agent suddenly performs unexpected actions. Clicks malicious links.
+Enters credentials on phishing sites. Downloads files it shouldn't.
+Ignores your instructions and follows embedded commands instead.
+
+Why this breaks:
+"While all agents that process untrusted content are subject to prompt
+injection risks, browser use amplifies this risk in two ways. First,
+the attack surface is vast: every webpage, embedded document, advertisement,
+and dynamically loaded script represents a potential vector for malicious
+instructions. Second, browser agents can take many different actions—
+navigating to URLs, filling forms, clicking buttons, downloading files—
+that attackers can exploit."
+
+Real attacks have already happened:
+- "Microsoft Copilot agents were hijacked with emails containing malicious
+ instructions, which allowed attackers to extract entire CRM databases."
+- "Google's Workspace services were manipulated—hidden prompts inside
+ calendar invites and emails tricked Gemini agents into deleting events
+ and exposing sensitive messages."
+
+Even a 1% attack success rate represents meaningful risk at scale.
+
+Recommended fix:
+
+## Defense in depth - no single solution works
+
+1. Sandboxing (most effective):
+ ```python
+ # Docker with strict isolation
+ docker run \
+ --security-opt no-new-privileges \
+ --cap-drop ALL \
+ --network none \ # No internet!
+ --read-only \
+ computer-use-agent
+ ```
+
+2. Classifier-based detection:
+ ```python
+ def scan_for_injection(content: str) -> bool:
+ """Detect prompt injection attempts."""
+ patterns = [
+ r"ignore.*instructions",
+ r"disregard.*previous",
+ r"new.*instructions",
+ r"you are now",
+ r"act as if",
+ r"pretend to be",
+ ]
+ return any(re.search(p, content.lower()) for p in patterns)
+
+ # Check page content before processing
+ page_text = await page.text_content("body")
+ if scan_for_injection(page_text):
+ return {"error": "Potential injection detected"}
+ ```
+
+3. User confirmation for sensitive actions:
+ ```python
+ SENSITIVE_ACTIONS = {"download", "submit", "login", "purchase"}
+
+ if action_type in SENSITIVE_ACTIONS:
+ if not await get_user_confirmation(action):
+ return {"error": "User rejected action"}
+ ```
+
+4. Scoped credentials:
+ - Never give agent access to all credentials
+ - Use temporary, limited tokens
+ - Revoke after task completion
+
+### Vision Agents Click Exact Centers
+
+Severity: MEDIUM
+
+Situation: Agent clicking on UI elements
+
+Symptoms:
+Agent's clicks are detectable as non-human. Websites may block or
+CAPTCHA the agent. Anti-bot systems flag the interaction.
+
+Why this breaks:
+"When a vision model identifies a button, it calculates the center.
+Click coordinates land at mathematically precise positions—often exact
+element centers or grid-aligned pixel values. Humans don't click centers;
+their click distributions follow a Gaussian pattern around targets."
+
+The screenshot loop also creates detectable patterns:
+"Predictable pauses. Vision agents are completely still during their
+'thinking' phase. The pattern looks like: Action → Complete stillness
+(1-5 seconds) → Action → Complete stillness → Action."
+
+Sophisticated anti-bot systems detect:
+- Perfect center clicks
+- No mouse movement during "thinking"
+- Consistent timing between actions
+- Lack of micro-movements and hesitation
+
+Recommended fix:
+
+## Add human-like variance to actions
+
+```python
+import random
+import time
+
+def humanized_click(x: int, y: int) -> tuple[int, int]:
+ """Add human-like variance to click coordinates."""
+ # Gaussian distribution around target
+ # Humans typically land within ~10px of target
+ x_offset = int(random.gauss(0, 5))
+ y_offset = int(random.gauss(0, 5))
+
+ return (x + x_offset, y + y_offset)
+
+def humanized_delay():
+ """Add human-like delay between actions."""
+ # Humans have variable reaction times
+ base_delay = random.uniform(0.3, 0.8)
+ # Occasionally longer pauses (reading, thinking)
+ if random.random() < 0.2:
+ base_delay += random.uniform(0.5, 2.0)
+ time.sleep(base_delay)
+
+def humanized_movement(from_pos: tuple, to_pos: tuple):
+ """Move mouse in curved path like human."""
+ # Bezier curve or similar
+ # Humans don't move in straight lines
+ steps = random.randint(10, 20)
+ for i in range(steps):
+ t = i / steps
+ # Simple curve approximation
+ x = from_pos[0] + (to_pos[0] - from_pos[0]) * t
+ y = from_pos[1] + (to_pos[1] - from_pos[1]) * t
+ # Add wobble
+ x += random.gauss(0, 2)
+ y += random.gauss(0, 2)
+ pyautogui.moveTo(int(x), int(y))
+ time.sleep(0.01)
```
-## ⚠️ Sharp Edges
+## Rotate user agents and fingerprints
-| Issue | Severity | Solution |
-|-------|----------|----------|
-| Issue | critical | ## Defense in depth - no single solution works |
-| Issue | medium | ## Add human-like variance to actions |
-| Issue | high | ## Use keyboard alternatives when possible |
-| Issue | medium | ## Accept the tradeoff |
-| Issue | high | ## Implement context management |
-| Issue | high | ## Monitor and limit costs |
-| Issue | critical | ## ALWAYS use sandboxing |
+```python
+USER_AGENTS = [
+ "Mozilla/5.0 (Windows NT 10.0; Win64; x64) Chrome/120...",
+ "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) Safari/...",
+ # ... more realistic agents
+]
+
+await page.set_extra_http_headers({
+ "User-Agent": random.choice(USER_AGENTS)
+})
+```
+
+### Dropdowns, Scrollbars, and Drags Are Unreliable
+
+Severity: HIGH
+
+Situation: Agent interacting with complex UI elements
+
+Symptoms:
+Agent fails to select dropdown options. Scroll doesn't work as expected.
+Drag and drop completely fails. Hover menus disappear before clicking.
+
+Why this breaks:
+"Computer Use currently struggles with certain interface interactions,
+particularly scrolling, dragging, and zooming operations. Some UI elements
+(like dropdowns and scrollbars) might be tricky for Claude to manipulate."
+- Anthropic documentation
+
+Why these are hard:
+1. Dropdowns: Options appear after click, need second click to select
+2. Scrollbars: Small targets, need precise positioning
+3. Drag: Requires coordinated mouse down, move, mouse up
+4. Hover menus: Disappear when mouse moves away
+5. Canvas elements: No semantic information visible
+
+Vision models see pixels, not DOM structure. They don't "know" that
+a dropdown is a dropdown - they have to infer from visual cues.
+
+Recommended fix:
+
+## Use keyboard alternatives when possible
+
+```python
+# Instead of clicking dropdown, use keyboard
+async def select_dropdown_option(page, dropdown_selector, option_text):
+ # Focus the dropdown
+ await page.click(dropdown_selector)
+ await asyncio.sleep(0.3)
+
+ # Use keyboard to find option
+ await page.keyboard.type(option_text[:3]) # Type first letters
+ await asyncio.sleep(0.2)
+ await page.keyboard.press("Enter")
+```
+
+## Break complex actions into steps
+
+```python
+# Instead of drag-and-drop
+async def reliable_drag(page, source, target):
+ # Step 1: Click and hold
+ await page.mouse.move(source["x"], source["y"])
+ await page.mouse.down()
+ await asyncio.sleep(0.2)
+
+ # Step 2: Move in steps
+ steps = 10
+ for i in range(steps):
+ x = source["x"] + (target["x"] - source["x"]) * i / steps
+ y = source["y"] + (target["y"] - source["y"]) * i / steps
+ await page.mouse.move(x, y)
+ await asyncio.sleep(0.05)
+
+ # Step 3: Release
+ await page.mouse.move(target["x"], target["y"])
+ await asyncio.sleep(0.1)
+ await page.mouse.up()
+```
+
+## Fall back to DOM access for web
+
+```python
+# If vision fails, try direct DOM manipulation
+async def robust_select(page, select_selector, value):
+ try:
+ # Try vision approach first
+ await vision_agent.select(select_selector, value)
+ except Exception:
+ # Fall back to direct DOM
+ await page.select_option(select_selector, value=value)
+```
+
+## Add verification after action
+
+```python
+async def verified_scroll(page, direction):
+ # Get current scroll position
+ before = await page.evaluate("window.scrollY")
+
+ # Attempt scroll
+ await page.mouse.wheel(0, 500 if direction == "down" else -500)
+ await asyncio.sleep(0.3)
+
+ # Verify it worked
+ after = await page.evaluate("window.scrollY")
+ if before == after:
+ # Try alternative method
+ await page.keyboard.press("PageDown" if direction == "down" else "PageUp")
+```
+
+### Agents Are 2-5x Slower Than Humans
+
+Severity: MEDIUM
+
+Situation: Automating any computer task
+
+Symptoms:
+Task that takes human 1 minute takes agent 3-5 minutes.
+Users complain about speed. Timeouts occur.
+
+Why this breaks:
+"The technology can be slow compared to human operators, often requiring
+multiple screenshots and analysis cycles."
+
+Why so slow:
+1. Screenshot capture: 100-500ms
+2. Vision model inference: 1-5 seconds per screenshot
+3. Action execution: 200-500ms
+4. Wait for UI update: 500-1000ms
+5. Total per action: 2-7 seconds
+
+A task requiring 20 actions takes 40-140 seconds minimum.
+Humans do the same actions in 20-30 seconds.
+
+Recommended fix:
+
+## Accept the tradeoff
+
+Computer use is for:
+- Tasks humans don't want to do (repetitive)
+- Tasks that can run in background
+- Tasks where accuracy > speed
+
+## Optimize where possible
+
+```python
+# 1. Reduce screenshot resolution
+SCREEN_SIZE = (1280, 800) # Not 4K
+
+# 2. Batch similar actions
+# Instead of: type "hello", wait, type " world"
+await page.type("hello world")
+
+# 3. Parallelize independent tasks
+# Run multiple sandboxed agents concurrently
+
+# 4. Cache repeated computations
+# If same screenshot, reuse analysis
+
+# 5. Use smaller models for simple decisions
+simple_model = "claude-haiku-..." # For "is task done?"
+complex_model = "claude-sonnet-..." # For complex reasoning
+```
+
+## Set realistic expectations
+
+```python
+# Estimate task duration
+def estimate_duration(task_complexity: str) -> int:
+ """Estimate task duration in seconds."""
+ estimates = {
+ "simple": 30, # Single page, few actions
+ "medium": 120, # Multi-page, moderate actions
+ "complex": 300, # Many pages, complex interactions
+ }
+ return estimates.get(task_complexity, 120)
+
+# Inform users
+estimated = estimate_duration("medium")
+print(f"Estimated completion: {estimated // 60}m {estimated % 60}s")
+```
+
+### Screenshots Fill Up Context Window Fast
+
+Severity: HIGH
+
+Situation: Long-running computer use tasks
+
+Symptoms:
+Agent forgets earlier steps. Starts repeating actions.
+Errors increase as task progresses. Costs explode.
+
+Why this breaks:
+Each screenshot is ~1500-3000 tokens. A task with 30 screenshots
+uses 45,000-90,000 tokens just for images - before any text.
+
+Claude's context window is finite. When full:
+- Older context gets dropped
+- Agent loses memory of earlier steps
+- Task coherence decreases
+
+"Getting agents to make consistent progress across multiple context
+windows remains an open problem. The core challenge is that they must
+work in discrete sessions, and each new session begins with no memory
+of what came before." - Anthropic engineering blog
+
+Recommended fix:
+
+## Implement context management
+
+```python
+class ContextManager:
+ """Manage context window usage for computer use."""
+
+ MAX_SCREENSHOTS = 10 # Keep only recent screenshots
+ MAX_TOKENS = 100000
+
+ def __init__(self):
+ self.messages = []
+ self.screenshot_count = 0
+
+ def add_screenshot(self, screenshot_b64: str, description: str):
+ """Add screenshot with automatic pruning."""
+ self.screenshot_count += 1
+
+ # Keep only recent screenshots
+ if self.screenshot_count > self.MAX_SCREENSHOTS:
+ self._prune_old_screenshots()
+
+ # Store with description for context
+ self.messages.append({
+ "role": "user",
+ "content": [
+ {"type": "text", "text": description},
+ {"type": "image", "source": {...}}
+ ]
+ })
+
+ def _prune_old_screenshots(self):
+ """Remove old screenshots, keep text summaries."""
+ new_messages = []
+ screenshots_kept = 0
+
+ for msg in reversed(self.messages):
+ if self._has_image(msg):
+ if screenshots_kept < self.MAX_SCREENSHOTS:
+ new_messages.insert(0, msg)
+ screenshots_kept += 1
+ else:
+ # Convert to text summary
+ summary = self._summarize_screenshot(msg)
+ new_messages.insert(0, {
+ "role": msg["role"],
+ "content": summary
+ })
+ else:
+ new_messages.insert(0, msg)
+
+ self.messages = new_messages
+
+ def _summarize_screenshot(self, msg) -> str:
+ """Summarize screenshot to text."""
+ # Extract any text description
+ for content in msg.get("content", []):
+ if content.get("type") == "text":
+ return f"[Previous screenshot: {content['text']}]"
+ return "[Previous screenshot - details pruned]"
+
+ def add_checkpoint(self):
+ """Create a checkpoint summary."""
+ summary = self._create_progress_summary()
+ self.messages.append({
+ "role": "user",
+ "content": f"CHECKPOINT: {summary}"
+ })
+```
+
+## Use checkpointing for long tasks
+
+```python
+async def run_with_checkpoints(task: str, checkpoint_every: int = 10):
+ """Run task with periodic checkpoints."""
+ context = ContextManager()
+ step = 0
+
+ while not task_complete:
+ step += 1
+
+ # Take action...
+
+ if step % checkpoint_every == 0:
+ # Create checkpoint
+ context.add_checkpoint()
+
+ # Optional: persist to disk
+ save_checkpoint(context, step)
+```
+
+## Break into subtasks
+
+```python
+# Instead of one 50-step task:
+subtasks = [
+ "Navigate to the website and login",
+ "Find the settings page",
+ "Update the email address to ...",
+ "Save and verify the change"
+]
+
+for subtask in subtasks:
+ result = await agent.run(subtask)
+ if not result["success"]:
+ handle_error(subtask, result)
+ break
+```
+
+### Costs Can Explode Quickly
+
+Severity: HIGH
+
+Situation: Running computer use at scale
+
+Symptoms:
+API bill is 10x higher than expected. Single task costs $5+ instead of $0.50.
+Monthly costs reach thousands of dollars quickly.
+
+Why this breaks:
+Vision tokens are expensive. Each screenshot:
+- ~2000-3000 tokens per image
+- At $10/million tokens, that's $0.02-0.03 per screenshot
+- Task with 30 screenshots = $0.60-0.90 just for images
+
+But it compounds:
+- Screenshots accumulate in context
+- Model sees ALL previous screenshots each turn
+- Turn 10 processes 10 screenshots = $0.20-0.30
+- Turn 20 processes 20 screenshots = $0.40-0.60
+- Quadratic growth!
+
+Complex task: 50 turns × average 25 images in context = 1,250 image inputs processed.
+Plus text = could easily hit $5-10 per task.
+
+Recommended fix:
+
+## Monitor and limit costs
+
+```python
+class CostTracker:
+ """Track and limit computer use costs."""
+
+ # Anthropic pricing (approximate)
+ INPUT_COST_PER_1K = 0.003 # Text
+ OUTPUT_COST_PER_1K = 0.015
+ IMAGE_COST_PER_1K = 0.01 # Roughly
+
+ def __init__(self, max_cost_per_task: float = 1.0):
+ self.max_cost = max_cost_per_task
+ self.current_cost = 0.0
+ self.total_tokens = 0
+
+ def add_turn(
+ self,
+ input_tokens: int,
+ output_tokens: int,
+ image_tokens: int
+ ):
+ """Track cost of a single turn."""
+ cost = (
+ input_tokens / 1000 * self.INPUT_COST_PER_1K +
+ output_tokens / 1000 * self.OUTPUT_COST_PER_1K +
+ image_tokens / 1000 * self.IMAGE_COST_PER_1K
+ )
+ self.current_cost += cost
+ self.total_tokens += input_tokens + output_tokens + image_tokens
+
+ if self.current_cost > self.max_cost:
+ raise CostLimitExceeded(
+ f"Cost limit exceeded: ${self.current_cost:.2f} > ${self.max_cost:.2f}"
+ )
+
+ return cost
+
+class CostLimitExceeded(Exception):
+ pass
+
+# Usage
+tracker = CostTracker(max_cost_per_task=2.0)
+
+try:
+ for turn in turns:
+ tracker.add_turn(turn.input, turn.output, turn.images)
+except CostLimitExceeded:
+ print("Task aborted due to cost limit")
+```
+
+## Reduce image costs
+
+```python
+# 1. Lower resolution
+SCREEN_SIZE = (1024, 768) # Smaller = fewer tokens
+
+# 2. JPEG instead of PNG (when quality ok)
+screenshot.save(buffer, format="JPEG", quality=70)
+
+# 3. Crop to relevant region
+def crop_relevant(screenshot: Image, focus_area: tuple):
+ """Crop to area of interest."""
+ return screenshot.crop(focus_area)
+
+# 4. Don't include screenshot every turn
+if not needs_visual_update:
+ # Text-only turn
+ messages.append({"role": "user", "content": "Continue..."})
+```
+
+## Use cheaper models strategically
+
+```python
+async def tiered_model_selection(task_complexity: str):
+ """Use appropriate model for task."""
+ if task_complexity == "simple":
+ return "claude-haiku-..." # Cheapest
+ elif task_complexity == "medium":
+ return "claude-sonnet-4-20250514" # Balanced
+ else:
+ return "claude-opus-4-5-..." # Best but expensive
+```
+
+### Running Agent on Your Actual Computer
+
+Severity: CRITICAL
+
+Situation: Testing or deploying computer use
+
+Symptoms:
+Agent deletes important files. Sends emails from your account.
+Posts on social media. Accesses sensitive documents.
+
+Why this breaks:
+Computer use agents make mistakes. They can:
+- Misinterpret instructions
+- Click wrong buttons
+- Type in wrong fields
+- Follow prompt injection attacks
+
+Without sandboxing, these mistakes happen on your real system.
+There's no undo for "agent sent email to all contacts" or
+"agent deleted project folder."
+
+"Autonomous agents that can access external systems and APIs
+introduce new security risks. They may be vulnerable to prompt
+injection attacks, unauthorized access to sensitive data, or
+manipulation by malicious actors."
+
+Recommended fix:
+
+## ALWAYS use sandboxing
+
+```bash
+# Minimum viable sandbox: Docker with restrictions
+
+docker run -it --rm \
+ --security-opt no-new-privileges \
+ --cap-drop ALL \
+ --network none \
+ --read-only \
+ --tmpfs /tmp \
+ --memory 2g \
+ --cpus 1 \
+ computer-use-sandbox
+```
+
+## Layer your defenses
+
+```python
+# Defense 1: Docker isolation
+# Defense 2: Non-root user
+# Defense 3: Network restrictions
+# Defense 4: Filesystem restrictions
+# Defense 5: Resource limits
+# Defense 6: Action confirmation
+# Defense 7: Action logging
+
+@dataclass
+class SandboxConfig:
+ docker_image: str = "computer-use-sandbox:latest"
+ network: str = "none" # or specific allowlist
+ readonly_root: bool = True
+ max_memory_mb: int = 2048
+ max_cpu: float = 1.0
+ max_runtime_seconds: int = 300
+ require_confirmation: list = field(default_factory=lambda: [
+ "download", "submit", "login", "delete"
+ ])
+ log_all_actions: bool = True
+```
+
+## Test in isolated environment first
+
+```python
+class SandboxedTestRunner:
+ """Run tests in throwaway containers."""
+
+ async def run_test(self, test_task: str) -> dict:
+ # Spin up fresh container
+ container_id = await self.create_container()
+
+ try:
+ # Run task
+ result = await self.execute_in_container(container_id, test_task)
+
+ # Capture state for verification
+ state = await self.capture_container_state(container_id)
+
+ return {
+ "result": result,
+ "final_state": state,
+ "logs": await self.get_logs(container_id)
+ }
+ finally:
+ # Always destroy container
+ await self.destroy_container(container_id)
+```
+
+## Validation Checks
+
+### Computer Use Without Sandbox
+
+Severity: ERROR
+
+Computer use agents MUST run in sandboxed environments
+
+Message: Computer use without sandboxing detected. Use Docker containers with restrictions.
+
+### Sandbox With Full Network Access
+
+Severity: ERROR
+
+Sandboxed agents should have restricted network access
+
+Message: Sandbox has full network access. Use --network=none or specific allowlist.
+
+### Running as Root in Container
+
+Severity: ERROR
+
+Container agents should run as non-root user
+
+Message: Container running as root. Add --user flag or USER directive in Dockerfile.
+
+### Container Without Capability Drops
+
+Severity: WARNING
+
+Containers should drop unnecessary capabilities
+
+Message: Container has full capabilities. Add --cap-drop ALL.
+
+### Container Without Seccomp Profile
+
+Severity: WARNING
+
+Containers should use seccomp profiles for syscall filtering
+
+Message: No security options set. Consider --security-opt seccomp:profile.json
+
+### No Maximum Step Limit
+
+Severity: WARNING
+
+Computer use loops should have maximum step limits
+
+Message: Infinite loop risk. Add max_steps limit (recommended: 50).
+
+### No Execution Timeout
+
+Severity: WARNING
+
+Computer use should have timeout limits
+
+Message: No timeout on execution. Add timeout (recommended: 5-10 minutes).
+
+### Container Without Memory Limit
+
+Severity: WARNING
+
+Containers should have memory limits to prevent DoS
+
+Message: No memory limit on container. Add --memory 2g or similar.
+
+### No Cost Tracking
+
+Severity: WARNING
+
+Computer use should track API costs
+
+Message: No cost tracking. Monitor token usage to prevent bill surprises.
+
+### No Maximum Cost Limit
+
+Severity: INFO
+
+Consider adding cost limits per task
+
+Message: Consider adding max_cost_per_task to prevent expensive runaway tasks.
+
+## Collaboration
+
+### Delegation Triggers
+
+- user needs web-only automation -> browser-automation (Playwright/Selenium more efficient for web)
+- user needs security review -> security-specialist (Review sandboxing, prompt injection defenses)
+- user needs container orchestration -> devops (Kubernetes, Docker Swarm for scaling)
+- user needs vision model optimization -> llm-architect (Model selection, prompt engineering)
+- user needs multi-agent coordination -> multi-agent-orchestration (Multiple computer use agents working together)
## When to Use
-This skill is applicable to execute the workflow or actions described in the overview.
+
+- User mentions or implies: computer use
+- User mentions or implies: desktop automation agent
+- User mentions or implies: screen control AI
+- User mentions or implies: vision-based agent
+- User mentions or implies: GUI automation
+- User mentions or implies: Claude computer
+- User mentions or implies: OpenAI Operator
+- User mentions or implies: browser agent
+- User mentions or implies: visual agent
+- User mentions or implies: RPA with AI
diff --git a/plugins/antigravity-awesome-skills-claude/skills/context-window-management/SKILL.md b/plugins/antigravity-awesome-skills-claude/skills/context-window-management/SKILL.md
index fa4717dd..e42fe233 100644
--- a/plugins/antigravity-awesome-skills-claude/skills/context-window-management/SKILL.md
+++ b/plugins/antigravity-awesome-skills-claude/skills/context-window-management/SKILL.md
@@ -1,23 +1,15 @@
---
name: context-window-management
-description: "You're a context engineering specialist who has optimized LLM applications handling millions of conversations. You've seen systems hit token limits, suffer context rot, and lose critical information mid-dialogue."
+description: Strategies for managing LLM context windows including
+ summarization, trimming, routing, and avoiding context rot
risk: unknown
-source: "vibeship-spawner-skills (Apache 2.0)"
-date_added: "2026-02-27"
+source: vibeship-spawner-skills (Apache 2.0)
+date_added: 2026-02-27
---
# Context Window Management
-You're a context engineering specialist who has optimized LLM applications handling
-millions of conversations. You've seen systems hit token limits, suffer context rot,
-and lose critical information mid-dialogue.
-
-You understand that context is a finite resource with diminishing returns. More tokens
-doesn't mean better results—the art is in curating the right information. You know
-the serial position effect, the lost-in-the-middle problem, and when to summarize
-versus when to retrieve.
-
-Your cor
+Strategies for managing LLM context windows including summarization, trimming, routing, and avoiding context rot
## Capabilities
@@ -28,31 +20,292 @@ Your cor
- token-counting
- context-prioritization
+## Prerequisites
+
+- Knowledge: LLM fundamentals, Tokenization basics, Prompt engineering
+- Skills_recommended: prompt-engineering
+
+## Scope
+
+- Does_not_cover: RAG implementation details, Model fine-tuning, Embedding models
+- Boundaries: Focus is context optimization, Covers strategies not specific implementations
+
+## Ecosystem
+
+### Primary_tools
+
+- tiktoken - OpenAI's tokenizer for counting tokens
+- LangChain - Framework with context management utilities
+- Claude API - 200K+ context with caching support
+
## Patterns
### Tiered Context Strategy
Different strategies based on context size
+**When to use**: Building any multi-turn conversation system
+
+interface ContextTier {
+ maxTokens: number;
+ strategy: 'full' | 'summarize' | 'rag';
+ model: string;
+}
+
+const TIERS: ContextTier[] = [
+ { maxTokens: 8000, strategy: 'full', model: 'claude-3-haiku' },
+ { maxTokens: 32000, strategy: 'full', model: 'claude-3-5-sonnet' },
+ { maxTokens: 100000, strategy: 'summarize', model: 'claude-3-5-sonnet' },
+ { maxTokens: Infinity, strategy: 'rag', model: 'claude-3-5-sonnet' }
+];
+
+async function selectStrategy(messages: Message[]): Promise<ContextTier> {
+ const tokens = await countTokens(messages);
+
+ for (const tier of TIERS) {
+ if (tokens <= tier.maxTokens) {
+ return tier;
+ }
+ }
+ return TIERS[TIERS.length - 1];
+}
+
+async function prepareContext(messages: Message[]): Promise<PreparedContext> {
+ const tier = await selectStrategy(messages);
+
+ switch (tier.strategy) {
+ case 'full':
+ return { messages, model: tier.model };
+
+ case 'summarize':
+ const summary = await summarizeOldMessages(messages);
+ return { messages: [summary, ...recentMessages(messages)], model: tier.model };
+
+ case 'rag':
+ const relevant = await retrieveRelevant(messages);
+ return { messages: [...relevant, ...recentMessages(messages)], model: tier.model };
+ }
+}
+
### Serial Position Optimization
Place important content at start and end
+**When to use**: Constructing prompts with significant context
+
+// LLMs weight beginning and end more heavily
+// Structure prompts to leverage this
+
+function buildOptimalPrompt(components: {
+ systemPrompt: string;
+ criticalContext: string;
+ conversationHistory: Message[];
+ currentQuery: string;
+}): string {
+ // START: System instructions (always first)
+ const parts = [components.systemPrompt];
+
+ // CRITICAL CONTEXT: Right after system (high primacy)
+ if (components.criticalContext) {
+ parts.push(`## Key Context\n${components.criticalContext}`);
+ }
+
+ // MIDDLE: Conversation history (lower weight)
+ // Summarize if long, keep recent messages full
+ const history = components.conversationHistory;
+ if (history.length > 10) {
+ const oldSummary = summarize(history.slice(0, -5));
+ const recent = history.slice(-5);
+ parts.push(`## Earlier Conversation (Summary)\n${oldSummary}`);
+ parts.push(`## Recent Messages\n${formatMessages(recent)}`);
+ } else {
+ parts.push(`## Conversation\n${formatMessages(history)}`);
+ }
+
+ // END: Current query (high recency)
+ // Restate critical requirements here
+ parts.push(`## Current Request\n${components.currentQuery}`);
+
+ // FINAL: Reminder of key constraints
+ parts.push(`Remember: ${extractKeyConstraints(components.systemPrompt)}`);
+
+ return parts.join('\n\n');
+}
+
### Intelligent Summarization
Summarize by importance, not just recency
-## Anti-Patterns
+**When to use**: Context exceeds optimal size
-### ❌ Naive Truncation
+interface MessageWithMetadata extends Message {
+ importance: number; // 0-1 score
+ hasCriticalInfo: boolean; // User preferences, decisions
+ referenced: boolean; // Was this referenced later?
+}
-### ❌ Ignoring Token Costs
+async function smartSummarize(
+ messages: MessageWithMetadata[],
+ targetTokens: number
+): Promise<Message[]> {
+ // Sort by importance, preserve order for tied scores
+ const sorted = [...messages].sort((a, b) =>
+ (b.importance + (b.hasCriticalInfo ? 0.5 : 0) + (b.referenced ? 0.3 : 0)) -
+ (a.importance + (a.hasCriticalInfo ? 0.5 : 0) + (a.referenced ? 0.3 : 0))
+ );
-### ❌ One-Size-Fits-All
+ const keep: Message[] = [];
+ const summarizePool: Message[] = [];
+ let currentTokens = 0;
+
+ for (const msg of sorted) {
+ const msgTokens = await countTokens([msg]);
+ if (currentTokens + msgTokens < targetTokens * 0.7) {
+ keep.push(msg);
+ currentTokens += msgTokens;
+ } else {
+ summarizePool.push(msg);
+ }
+ }
+
+ // Summarize the low-importance messages
+ if (summarizePool.length > 0) {
+ const summary = await llm.complete(`
+ Summarize these messages, preserving:
+ - Any user preferences or decisions
+ - Key facts that might be referenced later
+ - The overall flow of conversation
+
+ Messages:
+ ${formatMessages(summarizePool)}
+ `);
+
+ keep.unshift({ role: 'system', content: `[Earlier context: ${summary}]` });
+ }
+
+ // Restore original order
+ return keep.sort((a, b) => a.timestamp - b.timestamp);
+}
+
+### Token Budget Allocation
+
+Allocate token budget across context components
+
+**When to use**: Need predictable context management
+
+interface TokenBudget {
+ system: number; // System prompt
+ criticalContext: number; // User prefs, key info
+ history: number; // Conversation history
+ query: number; // Current query
+ response: number; // Reserved for response
+}
+
+function allocateBudget(totalTokens: number): TokenBudget {
+ return {
+ system: Math.floor(totalTokens * 0.10), // 10%
+ criticalContext: Math.floor(totalTokens * 0.15), // 15%
+ history: Math.floor(totalTokens * 0.40), // 40%
+ query: Math.floor(totalTokens * 0.10), // 10%
+ response: Math.floor(totalTokens * 0.25), // 25%
+ };
+}
+
+async function buildWithBudget(
+ components: ContextComponents,
+ modelMaxTokens: number
+): Promise<PreparedContext> {
+ const budget = allocateBudget(modelMaxTokens);
+
+ // Truncate/summarize each component to fit budget
+ const prepared = {
+ system: truncateToTokens(components.system, budget.system),
+ criticalContext: truncateToTokens(
+ components.criticalContext, budget.criticalContext
+ ),
+ history: await summarizeToTokens(components.history, budget.history),
+ query: truncateToTokens(components.query, budget.query),
+ };
+
+ // Reallocate unused budget
+ const used = await countTokens(Object.values(prepared).join('\n'));
+ const remaining = modelMaxTokens - used - budget.response;
+
+ if (remaining > 0) {
+ // Give extra to history (most valuable for conversation)
+ prepared.history = await summarizeToTokens(
+ components.history,
+ budget.history + remaining
+ );
+ }
+
+ return prepared;
+}
+
+## Validation Checks
+
+### No Token Counting
+
+Severity: WARNING
+
+Message: Building context without token counting. May exceed model limits.
+
+Fix action: Count tokens before sending, implement budget allocation
+
+### Naive Message Truncation
+
+Severity: WARNING
+
+Message: Truncating messages without summarization. Critical context may be lost.
+
+Fix action: Summarize old messages instead of simply removing them
+
+### Hardcoded Token Limit
+
+Severity: INFO
+
+Message: Hardcoded token limit. Consider making configurable per model.
+
+Fix action: Use model-specific limits from configuration
+
+### No Context Management Strategy
+
+Severity: WARNING
+
+Message: LLM calls without context management strategy.
+
+Fix action: Implement context management: budgets, summarization, or RAG
+
+## Collaboration
+
+### Delegation Triggers
+
+- retrieval|rag|search -> rag-implementation (Need retrieval system)
+- memory|persistence|remember -> conversation-memory (Need memory storage)
+- cache|caching -> prompt-caching (Need caching optimization)
+
+### Complete Context System
+
+Skills: context-window-management, rag-implementation, conversation-memory, prompt-caching
+
+Workflow:
+
+```
+1. Design context strategy
+2. Implement RAG for large corpuses
+3. Set up memory persistence
+4. Add caching for performance
+```
## Related Skills
Works well with: `rag-implementation`, `conversation-memory`, `prompt-caching`, `llm-npc-dialogue`
## When to Use
-This skill is applicable to execute the workflow or actions described in the overview.
+
+- User mentions or implies: context window
+- User mentions or implies: token limit
+- User mentions or implies: context management
+- User mentions or implies: context engineering
+- User mentions or implies: long context
+- User mentions or implies: context overflow
diff --git a/plugins/antigravity-awesome-skills-claude/skills/conversation-memory/SKILL.md b/plugins/antigravity-awesome-skills-claude/skills/conversation-memory/SKILL.md
index 3a57f20b..e081bdf7 100644
--- a/plugins/antigravity-awesome-skills-claude/skills/conversation-memory/SKILL.md
+++ b/plugins/antigravity-awesome-skills-claude/skills/conversation-memory/SKILL.md
@@ -1,23 +1,15 @@
---
name: conversation-memory
-description: "Persistent memory systems for LLM conversations including short-term, long-term, and entity-based memory Use when: conversation memory, remember, memory persistence, long-term memory, chat history."
+description: Persistent memory systems for LLM conversations including
+ short-term, long-term, and entity-based memory
risk: unknown
-source: "vibeship-spawner-skills (Apache 2.0)"
-date_added: "2026-02-27"
+source: vibeship-spawner-skills (Apache 2.0)
+date_added: 2026-02-27
---
# Conversation Memory
-You're a memory systems specialist who has built AI assistants that remember
-users across months of interactions. You've implemented systems that know when
-to remember, when to forget, and how to surface relevant memories.
-
-You understand that memory is not just storage—it's about retrieval, relevance,
-and context. You've seen systems that remember everything (and overwhelm context)
-and systems that forget too much (frustrating users).
-
-Your core principles:
-1. Memory types differ—short-term, lo
+Persistent memory systems for LLM conversations including short-term, long-term, and entity-based memory
## Capabilities
@@ -28,39 +20,476 @@ Your core principles:
- memory-retrieval
- memory-consolidation
+## Prerequisites
+
+- Knowledge: LLM conversation patterns, Database basics, Key-value stores
+- Skills_recommended: context-window-management, rag-implementation
+
+## Scope
+
+- Does_not_cover: Knowledge graph construction, Semantic search implementation, Database administration
+- Boundaries: Focus is memory patterns for LLMs, Covers storage and retrieval strategies
+
+## Ecosystem
+
+### Primary_tools
+
+- Mem0 - Memory layer for AI applications
+- LangChain Memory - Memory utilities in LangChain
+- Redis - In-memory data store for session memory
+
## Patterns
### Tiered Memory System
Different memory tiers for different purposes
+**When to use**: Building any conversational AI
+
+interface MemorySystem {
+ // Buffer: Current conversation (in context)
+ buffer: ConversationBuffer;
+
+ // Short-term: Recent interactions (session)
+ shortTerm: ShortTermMemory;
+
+ // Long-term: Persistent across sessions
+ longTerm: LongTermMemory;
+
+ // Entity: Facts about people, places, things
+ entity: EntityMemory;
+}
+
+class TieredMemory implements MemorySystem {
+  async addMessage(message: Message): Promise<void> {
+ // Always add to buffer
+ this.buffer.add(message);
+
+ // Extract entities
+ const entities = await extractEntities(message);
+ for (const entity of entities) {
+ await this.entity.upsert(entity);
+ }
+
+ // Check for memorable content
+ if (await isMemoryWorthy(message)) {
+ await this.shortTerm.add({
+ content: message.content,
+ timestamp: Date.now(),
+ importance: await scoreImportance(message)
+ });
+ }
+ }
+
+  async consolidate(): Promise<void> {
+ // Move important short-term to long-term
+ const memories = await this.shortTerm.getOld(24 * 60 * 60 * 1000);
+ for (const memory of memories) {
+ if (memory.importance > 0.7 || memory.referenced > 2) {
+ await this.longTerm.add(memory);
+ }
+ await this.shortTerm.remove(memory.id);
+ }
+ }
+
+  async buildContext(query: string): Promise<string> {
+ const parts: string[] = [];
+
+ // Relevant long-term memories
+ const longTermRelevant = await this.longTerm.search(query, 3);
+ if (longTermRelevant.length) {
+ parts.push('## Relevant Memories\n' +
+ longTermRelevant.map(m => `- ${m.content}`).join('\n'));
+ }
+
+ // Relevant entities
+ const entities = await this.entity.getRelevant(query);
+ if (entities.length) {
+ parts.push('## Known Entities\n' +
+ entities.map(e => `- ${e.name}: ${e.facts.join(', ')}`).join('\n'));
+ }
+
+ // Recent conversation
+ const recent = this.buffer.getRecent(10);
+ parts.push('## Recent Conversation\n' + formatMessages(recent));
+
+ return parts.join('\n\n');
+ }
+}
+
### Entity Memory
Store and update facts about entities
+**When to use**: Need to remember details about people, places, things
+
+interface Entity {
+ id: string;
+ name: string;
+ type: 'person' | 'place' | 'thing' | 'concept';
+ facts: Fact[];
+ lastMentioned: number;
+ mentionCount: number;
+}
+
+interface Fact {
+ content: string;
+ confidence: number;
+ source: string; // Which message this came from
+ timestamp: number;
+}
+
+class EntityMemory {
+  async extractAndStore(message: Message): Promise<void> {
+ // Use LLM to extract entities and facts
+ const extraction = await llm.complete(`
+ Extract entities and facts from this message.
+ Return JSON: { "entities": [
+ { "name": "...", "type": "...", "facts": ["..."] }
+ ]}
+
+ Message: "${message.content}"
+ `);
+
+ const { entities } = JSON.parse(extraction);
+ for (const entity of entities) {
+ await this.upsert(entity, message.id);
+ }
+ }
+
+  async upsert(entity: ExtractedEntity, sourceId: string): Promise<void> {
+ const existing = await this.store.get(entity.name.toLowerCase());
+
+ if (existing) {
+ // Merge facts, avoiding duplicates
+ for (const fact of entity.facts) {
+ if (!this.hasSimilarFact(existing.facts, fact)) {
+ existing.facts.push({
+ content: fact,
+ confidence: 0.9,
+ source: sourceId,
+ timestamp: Date.now()
+ });
+ }
+ }
+ existing.lastMentioned = Date.now();
+ existing.mentionCount++;
+ await this.store.set(existing.id, existing);
+ } else {
+ // Create new entity
+ await this.store.set(entity.name.toLowerCase(), {
+ id: generateId(),
+ name: entity.name,
+ type: entity.type,
+ facts: entity.facts.map(f => ({
+ content: f,
+ confidence: 0.9,
+ source: sourceId,
+ timestamp: Date.now()
+ })),
+ lastMentioned: Date.now(),
+ mentionCount: 1
+ });
+ }
+ }
+}
+
### Memory-Aware Prompting
Include relevant memories in prompts
-## Anti-Patterns
+**When to use**: Making LLM calls with memory context
-### ❌ Remember Everything
+async function promptWithMemory(
+ query: string,
+ memory: MemorySystem,
+ systemPrompt: string
+): Promise<string> {
+ // Retrieve relevant memories
+ const relevantMemories = await memory.longTerm.search(query, 5);
+ const entities = await memory.entity.getRelevant(query);
+ const recentContext = memory.buffer.getRecent(5);
-### ❌ No Memory Retrieval
+ // Build memory-augmented prompt
+ const prompt = `
+${systemPrompt}
-### ❌ Single Memory Store
+## User Context
+${entities.length ? `Known about user:\n${entities.map(e =>
+ `- ${e.name}: ${e.facts.map(f => f.content).join('; ')}`
+).join('\n')}` : ''}
-## ⚠️ Sharp Edges
+${relevantMemories.length ? `Relevant past interactions:\n${relevantMemories.map(m =>
+ `- [${formatDate(m.timestamp)}] ${m.content}`
+).join('\n')}` : ''}
-| Issue | Severity | Solution |
-|-------|----------|----------|
-| Memory store grows unbounded, system slows | high | // Implement memory lifecycle management |
-| Retrieved memories not relevant to current query | high | // Intelligent memory retrieval |
-| Memories from one user accessible to another | critical | // Strict user isolation in memory |
+## Recent Conversation
+${formatMessages(recentContext)}
+
+## Current Query
+${query}
+ `.trim();
+
+ const response = await llm.complete(prompt);
+
+ // Extract any new memories from response
+ await memory.addMessage({ role: 'assistant', content: response });
+
+ return response;
+}
+
+## Sharp Edges
+
+### Memory store grows unbounded, system slows
+
+Severity: HIGH
+
+Situation: System slows over time, costs increase
+
+Symptoms:
+- Slow memory retrieval
+- High storage costs
+- Increasing latency over time
+
+Why this breaks:
+Every message stored as memory.
+No cleanup or consolidation.
+Retrieval over millions of items.
+
+Recommended fix:
+
+// Implement memory lifecycle management
+
+class ManagedMemory {
+ // Limits
+ private readonly SHORT_TERM_MAX = 100;
+ private readonly LONG_TERM_MAX = 10000;
+ private readonly CONSOLIDATION_INTERVAL = 24 * 60 * 60 * 1000;
+
+ async add(memory: Memory): Promise<void> {
+ // Score importance before storing
+ const score = await this.scoreImportance(memory);
+ if (score < 0.3) return; // Don't store low-importance
+
+ memory.importance = score;
+ await this.shortTerm.add(memory);
+
+ // Check limits
+ await this.enforceShortTermLimit();
+ }
+
+ async enforceShortTermLimit(): Promise<void> {
+ const count = await this.shortTerm.count();
+ if (count > this.SHORT_TERM_MAX) {
+ // Consolidate: move important to long-term, delete rest
+ const memories = await this.shortTerm.getAll();
+ memories.sort((a, b) => b.importance - a.importance);
+
+ const toKeep = memories.slice(0, this.SHORT_TERM_MAX * 0.7);
+ const toConsolidate = memories.slice(this.SHORT_TERM_MAX * 0.7);
+
+ for (const m of toConsolidate) {
+ if (m.importance > 0.7) {
+ await this.longTerm.add(m);
+ }
+ await this.shortTerm.remove(m.id);
+ }
+ }
+ }
+
+ async scoreImportance(memory: Memory): Promise<number> {
+ const factors = {
+ hasUserPreference: /prefer|like|don't like|hate|love/i.test(memory.content) ? 0.3 : 0,
+ hasDecision: /decided|chose|will do|won't do/i.test(memory.content) ? 0.3 : 0,
+ hasFactAboutUser: /my|I am|I have|I work/i.test(memory.content) ? 0.2 : 0,
+ length: memory.content.length > 100 ? 0.1 : 0,
+ userMessage: memory.role === 'user' ? 0.1 : 0,
+ };
+
+ return Object.values(factors).reduce((a, b) => a + b, 0);
+ }
+}
+
+### Retrieved memories not relevant to current query
+
+Severity: HIGH
+
+Situation: Memories included in context but don't help
+
+Symptoms:
+- Memories in context seem random
+- User asks about things already in memory
+- Confusion from irrelevant context
+
+Why this breaks:
+Simple keyword matching.
+No relevance scoring.
+Including all retrieved memories.
+
+Recommended fix:
+
+// Intelligent memory retrieval
+
+async function retrieveRelevant(
+ query: string,
+ memories: MemoryStore,
+ maxResults: number = 5
+): Promise<Memory[]> {
+ // 1. Semantic search
+ const candidates = await memories.semanticSearch(query, maxResults * 3);
+
+ // 2. Score relevance with context
+ const scored = await Promise.all(candidates.map(async (m) => {
+ const relevanceScore = await llm.complete(`
+ Rate 0-1 how relevant this memory is to the query.
+ Query: "${query}"
+ Memory: "${m.content}"
+ Return just the number.
+ `);
+ return { ...m, relevance: parseFloat(relevanceScore) };
+ }));
+
+ // 3. Filter low relevance
+ const relevant = scored.filter(m => m.relevance > 0.5);
+
+ // 4. Sort and limit
+ return relevant
+ .sort((a, b) => b.relevance - a.relevance)
+ .slice(0, maxResults);
+}
+
+### Memories from one user accessible to another
+
+Severity: CRITICAL
+
+Situation: User sees information from another user's sessions
+
+Symptoms:
+- User sees other user's information
+- Privacy complaints
+- Compliance violations
+
+Why this breaks:
+No user isolation in memory store.
+Shared memory namespace.
+Cross-user retrieval.
+
+Recommended fix:
+
+// Strict user isolation in memory
+
+class IsolatedMemory {
+ private getKey(userId: string, memoryId: string): string {
+ // Namespace all keys by user
+ return `user:${userId}:memory:${memoryId}`;
+ }
+
+ async add(userId: string, memory: Memory): Promise<void> {
+ // Validate userId is authenticated
+ if (!isValidUserId(userId)) {
+ throw new Error('Invalid user ID');
+ }
+
+ const key = this.getKey(userId, memory.id);
+ memory.userId = userId; // Tag with user
+ await this.store.set(key, memory);
+ }
+
+ async search(userId: string, query: string): Promise<Memory[]> {
+ // CRITICAL: Filter by user in query
+ return await this.store.search({
+ query,
+ filter: { userId: userId }, // Mandatory filter
+ limit: 10
+ });
+ }
+
+ async delete(userId: string, memoryId: string): Promise<void> {
+ const memory = await this.get(userId, memoryId);
+ // Verify ownership before delete
+ if (memory.userId !== userId) {
+ throw new Error('Access denied');
+ }
+ await this.store.delete(this.getKey(userId, memoryId));
+ }
+
+ // User data export (GDPR compliance)
+ async exportUserData(userId: string): Promise<Memory[]> {
+ return await this.store.getAll({ userId });
+ }
+
+ // User data deletion (GDPR compliance)
+ async deleteUserData(userId: string): Promise<void> {
+ const memories = await this.exportUserData(userId);
+ for (const m of memories) {
+ await this.store.delete(this.getKey(userId, m.id));
+ }
+ }
+}
+
+## Validation Checks
+
+### No User Isolation in Memory
+
+Severity: CRITICAL
+
+Message: Memory operations without user isolation. Privacy vulnerability.
+
+Fix action: Add userId to all memory operations, filter by user on retrieval
+
+### No Importance Filtering
+
+Severity: WARNING
+
+Message: Storing memories without importance filtering. May cause memory explosion.
+
+Fix action: Score importance before storing, filter low-importance content
+
+### Memory Storage Without Retrieval
+
+Severity: WARNING
+
+Message: Storing memories but no retrieval logic. Memories won't be used.
+
+Fix action: Implement memory retrieval and include in prompts
+
+### No Memory Cleanup
+
+Severity: INFO
+
+Message: No memory cleanup mechanism. Storage will grow unbounded.
+
+Fix action: Implement consolidation and cleanup based on age/importance
+
+## Collaboration
+
+### Delegation Triggers
+
+- context window|token -> context-window-management (Need context optimization)
+- rag|retrieval|vector -> rag-implementation (Need retrieval system)
+- cache|caching -> prompt-caching (Need caching strategies)
+
+### Complete Memory System
+
+Skills: conversation-memory, context-window-management, rag-implementation
+
+Workflow:
+
+```
+1. Design memory tiers
+2. Implement storage and retrieval
+3. Integrate with context management
+4. Add consolidation and cleanup
+```
## Related Skills
Works well with: `context-window-management`, `rag-implementation`, `prompt-caching`, `llm-npc-dialogue`
## When to Use
-This skill is applicable to execute the workflow or actions described in the overview.
+
+- User mentions or implies: conversation memory
+- User mentions or implies: remember
+- User mentions or implies: memory persistence
+- User mentions or implies: long-term memory
+- User mentions or implies: chat history
diff --git a/plugins/antigravity-awesome-skills-claude/skills/crewai/SKILL.md b/plugins/antigravity-awesome-skills-claude/skills/crewai/SKILL.md
index 0fa51972..9e3acada 100644
--- a/plugins/antigravity-awesome-skills-claude/skills/crewai/SKILL.md
+++ b/plugins/antigravity-awesome-skills-claude/skills/crewai/SKILL.md
@@ -1,13 +1,19 @@
---
name: crewai
-description: "You are an expert in designing collaborative AI agent teams with CrewAI. You think in terms of roles, responsibilities, and delegation. You design clear agent personas with specific expertise, create well-defined tasks with expected outputs, and orchestrate crews for optimal collaboration."
+description: Expert in CrewAI - the leading role-based multi-agent framework
+ used by 60% of Fortune 500 companies.
risk: unknown
-source: "vibeship-spawner-skills (Apache 2.0)"
-date_added: "2026-02-27"
+source: vibeship-spawner-skills (Apache 2.0)
+date_added: 2026-02-27
---
# CrewAI
+Expert in CrewAI - the leading role-based multi-agent framework used by 60% of Fortune 500
+companies. Covers agent design with roles and goals, task definition, crew orchestration,
+process types (sequential, hierarchical, parallel), memory systems, and flows for complex
+workflows. Essential for building collaborative AI agent teams.
+
**Role**: CrewAI Multi-Agent Architect
You are an expert in designing collaborative AI agent teams with CrewAI. You think
@@ -16,6 +22,15 @@ with specific expertise, create well-defined tasks with expected outputs, and
orchestrate crews for optimal collaboration. You know when to use sequential vs
hierarchical processes.
+### Expertise
+
+- Agent persona design
+- Task decomposition
+- Crew orchestration
+- Process selection
+- Memory configuration
+- Flow design
+
## Capabilities
- Agent definitions (role, goal, backstory)
@@ -26,11 +41,39 @@ hierarchical processes.
- Tool integration
- Flows for complex workflows
-## Requirements
+## Prerequisites
-- Python 3.10+
-- crewai package
-- LLM API access
+- Python proficiency
+- Multi-agent concepts
+- Understanding of delegation
+- Required skills: Python 3.10+, crewai package, LLM API access
+
+## Scope
+
+- Python-only
+- Best for structured workflows
+- Can be verbose for simple cases
+- Flows are newer feature
+
+## Ecosystem
+
+### Primary
+
+- CrewAI framework
+- CrewAI Tools
+
+### Common Integrations
+
+- OpenAI / Anthropic / Ollama
+- SerperDev (search)
+- FileReadTool, DirectoryReadTool
+- Custom tools
+
+### Platforms
+
+- Python applications
+- FastAPI backends
+- Enterprise deployments
## Patterns
@@ -40,7 +83,6 @@ Define agents and tasks in YAML (recommended)
**When to use**: Any CrewAI project
-```python
# config/agents.yaml
researcher:
role: "Senior Research Analyst"
@@ -119,8 +161,20 @@ class ContentCrew:
@task
def writing_task(self) -> Task:
- return Task(config
-```
+ return Task(config=self.tasks_config['writing_task'])
+
+ @crew
+ def crew(self) -> Crew:
+ return Crew(
+ agents=self.agents,
+ tasks=self.tasks,
+ process=Process.sequential,
+ verbose=True
+ )
+
+# main.py
+crew = ContentCrew()
+result = crew.crew().kickoff(inputs={"topic": "AI Agents in 2025"})
### Hierarchical Process
@@ -128,7 +182,6 @@ Manager agent delegates to workers
**When to use**: Complex tasks needing coordination
-```python
from crewai import Crew, Process
# Define specialized agents
@@ -165,7 +218,6 @@ crew = Crew(
# - How to combine results
result = crew.kickoff()
-```
### Planning Feature
@@ -173,7 +225,6 @@ Generate execution plan before running
**When to use**: Complex workflows needing structure
-```python
from crewai import Crew, Process
# Enable planning
@@ -195,54 +246,209 @@ result = crew.kickoff()
# Access the plan
print(crew.plan)
+
+### Memory Configuration
+
+Enable agent memory for context
+
+**When to use**: Multi-turn or complex workflows
+
+from crewai import Crew
+
+# Memory types:
+# - Short-term: Within task execution
+# - Long-term: Across executions
+# - Entity: About specific entities
+
+crew = Crew(
+ agents=[...],
+ tasks=[...],
+ memory=True, # Enable all memory types
+ verbose=True
+)
+
+# Custom memory config
+from crewai.memory import LongTermMemory, ShortTermMemory
+
+crew = Crew(
+ agents=[...],
+ tasks=[...],
+ memory=True,
+ long_term_memory=LongTermMemory(
+ storage=CustomStorage() # Custom backend
+ ),
+ short_term_memory=ShortTermMemory(
+ storage=CustomStorage()
+ ),
+ embedder={
+ "provider": "openai",
+ "config": {"model": "text-embedding-3-small"}
+ }
+)
+
+# Memory helps agents:
+# - Remember previous interactions
+# - Build on past work
+# - Maintain consistency
+
+### Flows for Complex Workflows
+
+Event-driven orchestration with state
+
+**When to use**: Complex, multi-stage workflows
+
+from crewai.flow.flow import Flow, listen, start, and_, or_, router
+
+class ContentFlow(Flow):
+ # State persists across steps
+ model_config = {"extra": "allow"}
+
+ @start()
+ def gather_requirements(self):
+ """First step - gather inputs."""
+ self.topic = self.inputs.get("topic", "AI")
+ self.style = self.inputs.get("style", "professional")
+ return {"topic": self.topic}
+
+ @listen(gather_requirements)
+ def research(self, requirements):
+ """Research after requirements gathered."""
+ research_crew = ResearchCrew()
+ result = research_crew.crew().kickoff(
+ inputs={"topic": requirements["topic"]}
+ )
+ self.research = result.raw
+ return result
+
+ @listen(research)
+ def write_content(self, research_result):
+ """Write after research complete."""
+ writing_crew = WritingCrew()
+ result = writing_crew.crew().kickoff(
+ inputs={
+ "research": self.research,
+ "style": self.style
+ }
+ )
+ return result
+
+ @router(write_content)
+ def quality_check(self, content):
+ """Route based on quality."""
+ if self.needs_revision(content):
+ return "revise"
+ return "publish"
+
+ @listen("revise")
+ def revise_content(self):
+ """Revision flow."""
+ # Re-run writing with feedback
+ pass
+
+ @listen("publish")
+ def publish_content(self):
+ """Final publishing."""
+ return {"status": "published", "content": self.content}
+
+# Run flow
+flow = ContentFlow()
+result = flow.kickoff(inputs={"topic": "AI Agents"})
+
+### Custom Tools
+
+Create tools for agents
+
+**When to use**: Agents need external capabilities
+
+from crewai.tools import BaseTool
+from pydantic import BaseModel, Field
+
+# Method 1: Class-based tool
+class SearchInput(BaseModel):
+ query: str = Field(..., description="Search query")
+
+class WebSearchTool(BaseTool):
+ name: str = "web_search"
+ description: str = "Search the web for information"
+ args_schema: type[BaseModel] = SearchInput
+
+ def _run(self, query: str) -> str:
+ # Implementation
+ results = search_api.search(query)
+ return format_results(results)
+
+# Method 2: Function decorator
+from crewai import tool
+
+@tool("Database Query")
+def query_database(sql: str) -> str:
+ """Execute SQL query and return results."""
+ return db.execute(sql)
+
+# Assign tools to agents
+researcher = Agent(
+ role="Researcher",
+ goal="Find information",
+ backstory="...",
+ tools=[WebSearchTool(), query_database]
+)
+
+## Collaboration
+
+### Delegation Triggers
+
+- langgraph|state machine|graph -> langgraph (Need explicit state management)
+- observability|tracing -> langfuse (Need LLM observability)
+- structured output|json schema -> structured-output (Need structured responses)
+
+### Research and Writing Crew
+
+Skills: crewai, structured-output
+
+Workflow:
+
+```
+1. Define researcher and writer agents
+2. Create research → analysis → writing pipeline
+3. Use structured output for research format
+4. Chain tasks with context
```
-## Anti-Patterns
+### Observable Agent Team
-### ❌ Vague Agent Roles
+Skills: crewai, langfuse
-**Why bad**: Agent doesn't know its specialty.
-Overlapping responsibilities.
-Poor task delegation.
+Workflow:
-**Instead**: Be specific:
-- "Senior React Developer" not "Developer"
-- "Financial Analyst specializing in crypto" not "Analyst"
-Include specific skills in backstory.
+```
+1. Build crew with agents and tasks
+2. Add Langfuse callback handler
+3. Monitor agent interactions
+4. Evaluate output quality
+```
-### ❌ Missing Expected Outputs
+### Complex Workflow with Flows
-**Why bad**: Agent doesn't know done criteria.
-Inconsistent outputs.
-Hard to chain tasks.
+Skills: crewai, langgraph
-**Instead**: Always specify expected_output:
-expected_output: |
- A JSON object with:
- - summary: string (100 words max)
- - key_points: list of strings
- - confidence: float 0-1
+Workflow:
-### ❌ Too Many Agents
-
-**Why bad**: Coordination overhead.
-Inconsistent communication.
-Slower execution.
-
-**Instead**: 3-5 agents with clear roles.
-One agent can handle multiple related tasks.
-Use tools instead of agents for simple actions.
-
-## Limitations
-
-- Python-only
-- Best for structured workflows
-- Can be verbose for simple cases
-- Flows are newer feature
+```
+1. Design workflow with CrewAI Flows
+2. Use LangGraph patterns for state
+3. Combine crews in flow steps
+4. Handle branching and routing
+```
## Related Skills
Works well with: `langgraph`, `autonomous-agents`, `langfuse`, `structured-output`
## When to Use
-This skill is applicable to execute the workflow or actions described in the overview.
+
+- User mentions or implies: crewai
+- User mentions or implies: multi-agent team
+- User mentions or implies: agent roles
+- User mentions or implies: crew of agents
+- User mentions or implies: role-based agents
+- User mentions or implies: collaborative agents
diff --git a/plugins/antigravity-awesome-skills-claude/skills/discord-bot-architect/SKILL.md b/plugins/antigravity-awesome-skills-claude/skills/discord-bot-architect/SKILL.md
index 48e98cf1..4c887f46 100644
--- a/plugins/antigravity-awesome-skills-claude/skills/discord-bot-architect/SKILL.md
+++ b/plugins/antigravity-awesome-skills-claude/skills/discord-bot-architect/SKILL.md
@@ -1,22 +1,37 @@
---
name: discord-bot-architect
-description: "Specialized skill for building production-ready Discord bots. Covers Discord.js (JavaScript) and Pycord (Python), gateway intents, slash commands, interactive components, rate limiting, and sharding."
+description: Specialized skill for building production-ready Discord bots.
+ Covers Discord.js (JavaScript) and Pycord (Python), gateway intents, slash
+ commands, interactive components, rate limiting, and sharding.
risk: unknown
-source: "vibeship-spawner-skills (Apache 2.0)"
-date_added: "2026-02-27"
+source: vibeship-spawner-skills (Apache 2.0)
+date_added: 2026-02-27
---
# Discord Bot Architect
+Specialized skill for building production-ready Discord bots.
+Covers Discord.js (JavaScript) and Pycord (Python), gateway intents,
+slash commands, interactive components, rate limiting, and sharding.
+
+## Principles
+
+- Slash commands over message parsing (Message Content Intent deprecated)
+- Acknowledge interactions within 3 seconds, always
+- Request only required intents (minimize privileged intents)
+- Handle rate limits gracefully with exponential backoff
+- Plan for sharding from the start (required at 2500+ guilds)
+- Use components (buttons, selects, modals) for rich UX
+- Test with guild commands first, deploy global when ready
+
## Patterns
### Discord.js v14 Foundation
Modern Discord bot setup with Discord.js v14 and slash commands
-**When to use**: ['Building Discord bots with JavaScript/TypeScript', 'Need full gateway connection with events', 'Building bots with complex interactions']
+**When to use**: Building Discord bots with JavaScript/TypeScript; Need full gateway connection with events; Building bots with complex interactions
-```javascript
```javascript
// src/index.js
const { Client, Collection, GatewayIntentBits, Events } = require('discord.js');
@@ -90,16 +105,96 @@ module.exports = {
const { Events } = require('discord.js');
module.exports = {
- name: Event
+ name: Events.InteractionCreate,
+ async execute(interaction) {
+ if (!interaction.isChatInputCommand()) return;
+
+ const command = interaction.client.commands.get(interaction.commandName);
+ if (!command) {
+ console.error(`No command matching ${interaction.commandName}`);
+ return;
+ }
+
+ try {
+ await command.execute(interaction);
+ } catch (error) {
+ console.error(error);
+ const reply = {
+ content: 'There was an error executing this command!',
+ ephemeral: true
+ };
+
+ if (interaction.replied || interaction.deferred) {
+ await interaction.followUp(reply);
+ } else {
+ await interaction.reply(reply);
+ }
+ }
+ }
+};
```
+```javascript
+// src/deploy-commands.js
+const { REST, Routes } = require('discord.js');
+const fs = require('node:fs');
+const path = require('node:path');
+require('dotenv').config();
+
+const commands = [];
+const commandsPath = path.join(__dirname, 'commands');
+const commandFiles = fs.readdirSync(commandsPath).filter(f => f.endsWith('.js'));
+
+for (const file of commandFiles) {
+ const command = require(path.join(commandsPath, file));
+ commands.push(command.data.toJSON());
+}
+
+const rest = new REST().setToken(process.env.DISCORD_TOKEN);
+
+(async () => {
+ try {
+ console.log(`Refreshing ${commands.length} commands...`);
+
+ // Guild commands (instant, for testing)
+ // const data = await rest.put(
+ // Routes.applicationGuildCommands(CLIENT_ID, GUILD_ID),
+ // { body: commands }
+ // );
+
+ // Global commands (can take up to 1 hour to propagate)
+ const data = await rest.put(
+ Routes.applicationCommands(process.env.CLIENT_ID),
+ { body: commands }
+ );
+
+ console.log(`Successfully registered ${data.length} commands`);
+ } catch (error) {
+ console.error(error);
+ }
+})();
+```
+
+### Structure
+
+discord-bot/
+├── src/
+│ ├── index.js # Main entry point
+│ ├── deploy-commands.js # Command registration script
+│ ├── commands/ # Slash command handlers
+│ │ └── ping.js
+│ └── events/ # Event handlers
+│ ├── ready.js
+│ └── interactionCreate.js
+├── .env
+└── package.json
+
### Pycord Bot Foundation
Discord bot with Pycord (Python) and application commands
-**When to use**: ['Building Discord bots with Python', 'Prefer async/await patterns', 'Need good slash command support']
+**When to use**: Building Discord bots with Python; Prefer async/await patterns; Need good slash command support
-```python
```python
# main.py
import os
@@ -169,16 +264,32 @@ class General(commands.Cog):
embed.add_field(name="Latency", value=f"{round(self.bot.latency * 1000)}ms")
await ctx.respond(embed=embed)
- @commands.Cog.
+ @commands.Cog.listener()
+ async def on_member_join(self, member: discord.Member):
+ # Requires Members intent (PRIVILEGED)
+ channel = member.guild.system_channel
+ if channel:
+ await channel.send(f"Welcome {member.mention}!")
+
+def setup(bot):
+ bot.add_cog(General(bot))
```
+### Structure
+
+discord-bot/
+├── main.py # Main bot file
+├── cogs/ # Command groups
+│ └── general.py
+├── .env
+└── requirements.txt
+
### Interactive Components Pattern
Using buttons, select menus, and modals for rich UX
-**When to use**: ['Need interactive user interfaces', 'Collecting user input beyond slash command options', 'Building menus, confirmations, or forms']
+**When to use**: Need interactive user interfaces; Collecting user input beyond slash command options; Building menus, confirmations, or forms
-```python
```javascript
// Discord.js - Buttons and Select Menus
const {
@@ -245,38 +356,1100 @@ module.exports = {
if (i.customId === 'confirm') {
await i.update({ content: 'Confirmed!', components: [] });
collector.stop();
- } else if (i.custo
+ } else if (i.customId === 'cancel') {
+ await i.update({ content: 'Cancelled', components: [] });
+ collector.stop();
+ } else if (i.customId === 'select-role') {
+ await i.update({ content: `You selected: ${i.values.join(', ')}` });
+ }
+ });
+ }
+};
```
-## Anti-Patterns
+```javascript
+// Modals (forms)
+module.exports = {
+ data: new SlashCommandBuilder()
+ .setName('feedback')
+ .setDescription('Submit feedback'),
-### ❌ Message Content for Commands
+ async execute(interaction) {
+ const modal = new ModalBuilder()
+ .setCustomId('feedback-modal')
+ .setTitle('Submit Feedback');
-**Why bad**: Message Content Intent is privileged and deprecated for bot commands.
-Slash commands are the intended approach.
+ const titleInput = new TextInputBuilder()
+ .setCustomId('feedback-title')
+ .setLabel('Title')
+ .setStyle(TextInputStyle.Short)
+ .setRequired(true)
+ .setMaxLength(100);
-### ❌ Syncing Commands on Every Start
+ const bodyInput = new TextInputBuilder()
+ .setCustomId('feedback-body')
+ .setLabel('Your feedback')
+ .setStyle(TextInputStyle.Paragraph)
+ .setRequired(true)
+ .setMaxLength(1000)
+ .setPlaceholder('Describe your feedback...');
-**Why bad**: Command registration is rate limited. Global commands take up to 1 hour
-to propagate. Syncing on every start wastes API calls and can hit limits.
+ modal.addComponents(
+ new ActionRowBuilder().addComponents(titleInput),
+ new ActionRowBuilder().addComponents(bodyInput)
+ );
-### ❌ Blocking the Event Loop
+ // Show modal - MUST be first response
+ await interaction.showModal(modal);
+ }
+};
-**Why bad**: Discord gateway requires regular heartbeats. Blocking operations
-cause missed heartbeats and disconnections.
+// Handle modal submission in interactionCreate
+if (interaction.isModalSubmit()) {
+ if (interaction.customId === 'feedback-modal') {
+ const title = interaction.fields.getTextInputValue('feedback-title');
+ const body = interaction.fields.getTextInputValue('feedback-body');
-## ⚠️ Sharp Edges
+ await interaction.reply({
+ content: `Thanks for your feedback!\n**${title}**\n${body}`,
+ ephemeral: true
+ });
+ }
+}
+```
-| Issue | Severity | Solution |
-|-------|----------|----------|
-| Issue | critical | ## Acknowledge immediately, process later |
-| Issue | critical | ## Step 1: Enable in Developer Portal |
-| Issue | high | ## Use a separate deploy script (not on startup) |
-| Issue | critical | ## Never hardcode tokens |
-| Issue | high | ## Generate correct invite URL |
-| Issue | medium | ## Development: Use guild commands |
-| Issue | medium | ## Never block the event loop |
-| Issue | medium | ## Show modal immediately |
+```python
+# Pycord - Buttons and Views
+import discord
+
+class ConfirmView(discord.ui.View):
+ def __init__(self):
+ super().__init__(timeout=60)
+ self.value = None
+
+ @discord.ui.button(label="Confirm", style=discord.ButtonStyle.green)
+ async def confirm(self, button, interaction):
+ self.value = True
+ await interaction.response.edit_message(content="Confirmed!", view=None)
+ self.stop()
+
+ @discord.ui.button(label="Cancel", style=discord.ButtonStyle.red)
+ async def cancel(self, button, interaction):
+ self.value = False
+ await interaction.response.edit_message(content="Cancelled", view=None)
+ self.stop()
+
+@bot.slash_command(name="confirm")
+async def confirm_cmd(ctx: discord.ApplicationContext):
+ view = ConfirmView()
+ await ctx.respond("Are you sure?", view=view)
+
+ await view.wait() # Wait for user interaction
+ if view.value is None:
+ await ctx.followup.send("Timed out")
+
+# Select Menu
+class RoleSelect(discord.ui.Select):
+ def __init__(self):
+ options = [
+ discord.SelectOption(label="Developer", value="dev", emoji="💻"),
+ discord.SelectOption(label="Designer", value="design", emoji="🎨"),
+ ]
+ super().__init__(
+ placeholder="Select roles...",
+ min_values=1,
+ max_values=2,
+ options=options
+ )
+
+ async def callback(self, interaction):
+ await interaction.response.send_message(
+ f"You selected: {', '.join(self.values)}",
+ ephemeral=True
+ )
+
+class RoleView(discord.ui.View):
+ def __init__(self):
+ super().__init__()
+ self.add_item(RoleSelect())
+
+# Modal
+class FeedbackModal(discord.ui.Modal):
+ def __init__(self):
+ super().__init__(title="Submit Feedback")
+
+ self.add_item(discord.ui.InputText(
+ label="Title",
+ style=discord.InputTextStyle.short,
+ required=True,
+ max_length=100
+ ))
+ self.add_item(discord.ui.InputText(
+ label="Feedback",
+ style=discord.InputTextStyle.long,
+ required=True,
+ max_length=1000
+ ))
+
+ async def callback(self, interaction):
+ title = self.children[0].value
+ body = self.children[1].value
+ await interaction.response.send_message(
+ f"Thanks!\n**{title}**\n{body}",
+ ephemeral=True
+ )
+
+@bot.slash_command(name="feedback")
+async def feedback(ctx: discord.ApplicationContext):
+ await ctx.send_modal(FeedbackModal())
+```
+
+### Limits
+
+- 5 ActionRows per message/modal
+- 5 buttons per ActionRow
+- 1 select menu per ActionRow (takes all 5 slots)
+- 5 select menus max per message
+- 25 options per select menu
+- Modal must be first response (cannot defer first)
+
+### Deferred Response Pattern
+
+Handle slow operations without timing out
+
+**When to use**: Operation takes more than 3 seconds; Database queries, API calls, LLM responses; File processing or generation
+
+```javascript
+// Discord.js - Deferred response
+module.exports = {
+ data: new SlashCommandBuilder()
+ .setName('slow-task')
+ .setDescription('Performs a slow operation'),
+
+ async execute(interaction) {
+ // Defer immediately - you have 3 seconds!
+ await interaction.deferReply();
+ // For ephemeral: await interaction.deferReply({ ephemeral: true });
+
+ try {
+ // Now you have 15 minutes to complete
+ const result = await slowDatabaseQuery();
+ const aiResponse = await callOpenAI(result);
+
+ // Edit the deferred reply
+ await interaction.editReply({
+ content: `Result: ${aiResponse}`,
+ embeds: [resultEmbed]
+ });
+ } catch (error) {
+ await interaction.editReply({
+ content: 'An error occurred while processing your request.'
+ });
+ }
+ }
+};
+
+// For components (buttons, select menus)
+collector.on('collect', async i => {
+ await i.deferUpdate(); // Acknowledge without visual change
+ // Or: await i.deferReply({ ephemeral: true });
+
+ const result = await slowOperation();
+ await i.editReply({ content: result });
+});
+```
+
+```python
+# Pycord - Deferred response
+@bot.slash_command(name="slow-task")
+async def slow_task(ctx: discord.ApplicationContext):
+ # Defer immediately
+ await ctx.defer()
+ # For ephemeral: await ctx.defer(ephemeral=True)
+
+ try:
+ result = await slow_database_query()
+ ai_response = await call_openai(result)
+
+ await ctx.followup.send(f"Result: {ai_response}")
+ except Exception as e:
+ await ctx.followup.send("An error occurred")
+```
+
+### Timing
+
+- Initial response: 3 seconds
+- Deferred follow-up: 15 minutes
+- Ephemeral note: Can only be set on initial response, not changed later
+
+### Embed Builder Pattern
+
+Rich embedded messages for professional-looking content
+
+**When to use**: Displaying formatted information; Status updates, help menus, logs; Data with structure (fields, images)
+
+```javascript
+const { EmbedBuilder, Colors } = require('discord.js');
+
+// Basic embed
+const embed = new EmbedBuilder()
+ .setColor(Colors.Blue)
+ .setTitle('Bot Status')
+ .setURL('https://example.com')
+ .setAuthor({
+ name: 'Bot Name',
+ iconURL: client.user.displayAvatarURL()
+ })
+ .setDescription('Current status and statistics')
+ .addFields(
+ { name: 'Servers', value: `${client.guilds.cache.size}`, inline: true },
+ { name: 'Users', value: `${client.users.cache.size}`, inline: true },
+ { name: 'Uptime', value: formatUptime(), inline: true }
+ )
+ .setThumbnail(client.user.displayAvatarURL())
+ .setImage('https://example.com/banner.png')
+ .setTimestamp()
+ .setFooter({
+ text: 'Requested by User',
+ iconURL: interaction.user.displayAvatarURL()
+ });
+
+await interaction.reply({ embeds: [embed] });
+
+// Multiple embeds (max 10)
+await interaction.reply({ embeds: [embed1, embed2, embed3] });
+```
+
+```python
+# Pycord
+embed = discord.Embed(
+ title="Bot Status",
+ description="Current status and statistics",
+ color=discord.Color.blue(),
+ url="https://example.com"
+)
+embed.set_author(
+ name="Bot Name",
+ icon_url=bot.user.display_avatar.url
+)
+embed.add_field(name="Servers", value=len(bot.guilds), inline=True)
+embed.add_field(name="Users", value=len(bot.users), inline=True)
+embed.set_thumbnail(url=bot.user.display_avatar.url)
+embed.set_image(url="https://example.com/banner.png")
+embed.set_footer(text="Requested by User", icon_url=ctx.author.display_avatar.url)
+embed.timestamp = discord.utils.utcnow()
+
+await ctx.respond(embed=embed)
+```
+
+### Limits
+
+- 10 embeds per message
+- 6000 characters total across all embeds
+- 256 characters for title
+- 4096 characters for description
+- 25 fields per embed
+- 256 characters per field name
+- 1024 characters per field value
+
+### Rate Limit Handling Pattern
+
+Gracefully handle Discord API rate limits
+
+**When to use**: High-volume operations, Bulk messaging or role assignments, Any repeated API calls
+
+```javascript
+// Discord.js handles rate limits automatically, but for custom handling:
+const { REST } = require('discord.js');
+
+const rest = new REST({ version: '10' })
+ .setToken(process.env.DISCORD_TOKEN);
+
+rest.on('rateLimited', (info) => {
+ console.log(`Rate limited! Retry after ${info.retryAfter}ms`);
+ console.log(`Route: ${info.route}`);
+ console.log(`Global: ${info.global}`);
+});
+
+// Queue pattern for bulk operations
+class RateLimitQueue {
+ constructor() {
+ this.queue = [];
+ this.processing = false;
+ this.requestsPerSecond = 40; // Safe margin below 50
+ }
+
+ async add(operation) {
+ return new Promise((resolve, reject) => {
+ this.queue.push({ operation, resolve, reject });
+ this.process();
+ });
+ }
+
+ async process() {
+ if (this.processing || this.queue.length === 0) return;
+ this.processing = true;
+
+ while (this.queue.length > 0) {
+ const { operation, resolve, reject } = this.queue.shift();
+
+ try {
+ const result = await operation();
+ resolve(result);
+ } catch (error) {
+ reject(error);
+ }
+
+ // Throttle: ~40 requests per second
+ await new Promise(r => setTimeout(r, 1000 / this.requestsPerSecond));
+ }
+
+ this.processing = false;
+ }
+}
+
+const queue = new RateLimitQueue();
+
+// Usage: Send 200 messages without hitting rate limits
+for (const user of users) {
+ await queue.add(() => user.send('Welcome!'));
+}
+```
+
+```python
+# Pycord/discord.py handles rate limits automatically
+# For custom handling:
+import asyncio
+from collections import deque
+
+class RateLimitQueue:
+ def __init__(self, requests_per_second=40):
+ self.queue = deque()
+ self.processing = False
+ self.delay = 1 / requests_per_second
+
+ async def add(self, coro):
+ future = asyncio.Future()
+ self.queue.append((coro, future))
+ if not self.processing:
+ asyncio.create_task(self._process())
+ return await future
+
+ async def _process(self):
+ self.processing = True
+ while self.queue:
+ coro, future = self.queue.popleft()
+ try:
+ result = await coro
+ future.set_result(result)
+ except Exception as e:
+ future.set_exception(e)
+ await asyncio.sleep(self.delay)
+ self.processing = False
+
+queue = RateLimitQueue()
+
+# Usage
+for member in guild.members:
+ await queue.add(member.send("Welcome!"))
+```
+
+### Rate Limits
+
+- Global: 50 requests per second
+- Gateway: 120 requests per 60 seconds
+- Specific: Messages to same channel: 5/5s, Bulk delete: 1/1s, Guild member requests: varies by guild size
+
+### Sharding Pattern
+
+Scale bots to 2500+ servers with sharding
+
+**When to use**: Bot approaching 2500 guilds (required), Want horizontal scaling, Memory optimization for large bots
+
+```javascript
+// Discord.js Sharding Manager
+// shard.js (main entry)
+const { ShardingManager } = require('discord.js');
+
+const manager = new ShardingManager('./bot.js', {
+ token: process.env.DISCORD_TOKEN,
+ totalShards: 'auto', // Discord determines optimal count
+ // Or specify: totalShards: 4
+});
+
+manager.on('shardCreate', shard => {
+ console.log(`Launched shard ${shard.id}`);
+
+ shard.on('ready', () => {
+ console.log(`Shard ${shard.id} ready`);
+ });
+
+ shard.on('disconnect', () => {
+ console.log(`Shard ${shard.id} disconnected`);
+ });
+});
+
+manager.spawn();
+
+// bot.js - Modified for sharding
+const { Client } = require('discord.js');
+
+const client = new Client({ intents: [...] });
+
+// Get shard info
+client.on('ready', () => {
+ console.log(`Shard ${client.shard.ids[0]} ready with ${client.guilds.cache.size} guilds`);
+});
+
+// Cross-shard data
+async function getTotalGuilds() {
+ const results = await client.shard.fetchClientValues('guilds.cache.size');
+ return results.reduce((acc, count) => acc + count, 0);
+}
+
+// Broadcast to all shards
+async function broadcastMessage(channelId, message) {
+ await client.shard.broadcastEval(
+ (c, { channelId, message }) => {
+ const channel = c.channels.cache.get(channelId);
+ if (channel) channel.send(message);
+ },
+ { context: { channelId, message } }
+ );
+}
+```
+
+```python
+# Pycord - AutoShardedBot
+import discord
+from discord.ext import commands
+
+# Automatically handles sharding
+bot = commands.AutoShardedBot(
+ command_prefix="!",
+ intents=discord.Intents.default(),
+ shard_count=None # Auto-determine
+)
+
+@bot.event
+async def on_ready():
+ print(f"Logged in on {len(bot.shards)} shards")
+ for shard_id, shard in bot.shards.items():
+ print(f"Shard {shard_id}: {shard.latency * 1000:.2f}ms")
+
+@bot.event
+async def on_shard_ready(shard_id):
+ print(f"Shard {shard_id} is ready")
+
+# Get guilds per shard
+for shard_id, guilds in bot.guilds_by_shard().items():
+ print(f"Shard {shard_id}: {len(guilds)} guilds")
+```
+
+### Scaling Guide
+
+- 1-2500 guilds: No sharding required
+- 2500+ guilds: Sharding required by Discord
+- Recommended: ~1000 guilds per shard
+- Memory: Each shard runs in separate process
+
+## Sharp Edges
+
+### Interaction Timeout (3 Second Rule)
+
+Severity: CRITICAL
+
+Situation: Handling slash commands, buttons, select menus, or modals
+
+Symptoms:
+User sees "This interaction failed" or "The application did not respond."
+Command works locally but fails in production.
+Slow operations never complete.
+
+Why this breaks:
+Discord requires ALL interactions to be acknowledged within 3 seconds:
+- Slash commands
+- Button clicks
+- Select menu selections
+- Context menu commands
+
+If you do ANY slow operation (database, API, file I/O) before responding,
+you'll miss the window. Discord shows an error even if your bot processes
+the request correctly afterward.
+
+After acknowledgment, you have 15 minutes for follow-up responses.
+
+Recommended fix:
+
+## Acknowledge immediately, process later
+
+```javascript
+// Discord.js - Defer for slow operations
+module.exports = {
+ async execute(interaction) {
+ // DEFER IMMEDIATELY - before any slow operation
+ await interaction.deferReply();
+ // For ephemeral: await interaction.deferReply({ ephemeral: true });
+
+ // Now you have 15 minutes
+ const result = await slowDatabaseQuery();
+ const aiResponse = await callLLM(result);
+
+ // Edit the deferred reply
+ await interaction.editReply(`Result: ${aiResponse}`);
+ }
+};
+```
+
+```python
+# Pycord
+@bot.slash_command()
+async def slow_command(ctx):
+ await ctx.defer() # Acknowledge immediately
+ # await ctx.defer(ephemeral=True) # For private response
+
+ result = await slow_operation()
+ await ctx.followup.send(f"Result: {result}")
+```
+
+## For components (buttons, menus)
+
+```javascript
+// If you're updating the message
+await interaction.deferUpdate();
+
+// If you're sending a new response
+await interaction.deferReply({ ephemeral: true });
+```
+
+### Missing Privileged Intent Configuration
+
+Severity: CRITICAL
+
+Situation: Bot needs member data, presences, or message content
+
+Symptoms:
+Members intent: member lists empty, on_member_join doesn't fire
+Presences intent: statuses always unknown/offline
+Message content intent: message.content is empty string
+
+Why this breaks:
+Discord has 3 privileged intents that require manual enablement:
+1. **GUILD_MEMBERS** - Member join/leave, member lists
+2. **GUILD_PRESENCES** - Online status, activities
+3. **MESSAGE_CONTENT** - Read message text (deprecated for commands)
+
+These must be:
+1. Enabled in Discord Developer Portal > Bot > Privileged Gateway Intents
+2. Requested in your bot code
+
+At 100+ servers, you need Discord verification to keep using them.
+
+Recommended fix:
+
+## Step 1: Enable in Developer Portal
+
+```
+1. Go to https://discord.com/developers/applications
+2. Select your application
+3. Go to Bot section
+4. Scroll to Privileged Gateway Intents
+5. Toggle ON the intents you need
+```
+
+## Step 2: Request in code
+
+```javascript
+// Discord.js
+const { Client, GatewayIntentBits } = require('discord.js');
+
+const client = new Client({
+ intents: [
+ GatewayIntentBits.Guilds,
+ GatewayIntentBits.GuildMembers, // PRIVILEGED
+ // GatewayIntentBits.GuildPresences, // PRIVILEGED
+ // GatewayIntentBits.MessageContent, // PRIVILEGED - avoid!
+ ]
+});
+```
+
+```python
+# Pycord
+intents = discord.Intents.default()
+intents.members = True # PRIVILEGED
+# intents.presences = True # PRIVILEGED
+# intents.message_content = True # PRIVILEGED - avoid!
+
+bot = commands.Bot(intents=intents)
+```
+
+## Avoid Message Content Intent if possible
+
+Use slash commands, buttons, and modals instead of message parsing.
+These don't require the Message Content intent.
+
+### Command Registration Rate Limited
+
+Severity: HIGH
+
+Situation: Registering slash commands
+
+Symptoms:
+Commands not appearing. 429 errors when deploying.
+"You are being rate limited" messages.
+Commands appear for some guilds but not others.
+
+Why this breaks:
+Command registration is rate limited:
+- Global commands: 200 creates/day, updates take up to 1 hour to propagate
+- Guild commands: 200 creates/day per guild, instant update
+
+Common mistakes:
+- Registering commands on every bot startup
+- Registering in every guild separately
+- Making changes in a loop without delays
+
+Recommended fix:
+
+## Use a separate deploy script (not on startup)
+
+```javascript
+// deploy-commands.js - Run manually, not on bot start
+const { REST, Routes } = require('discord.js');
+
+const rest = new REST().setToken(process.env.DISCORD_TOKEN);
+
+async function deploy() {
+ // For development: Guild commands (instant)
+ if (process.env.GUILD_ID) {
+ await rest.put(
+ Routes.applicationGuildCommands(
+ process.env.CLIENT_ID,
+ process.env.GUILD_ID
+ ),
+ { body: commands }
+ );
+ console.log('Guild commands deployed instantly');
+ }
+
+ // For production: Global commands (up to 1 hour)
+ else {
+ await rest.put(
+ Routes.applicationCommands(process.env.CLIENT_ID),
+ { body: commands }
+ );
+ console.log('Global commands deployed (may take up to 1 hour)');
+ }
+}
+
+deploy();
+```
+
+```python
+# Pycord - Don't sync on every startup
+@bot.event
+async def on_ready():
+ # DON'T DO THIS:
+ # await bot.sync_commands()
+
+ print(f"Ready! Commands should already be registered.")
+
+# Instead, sync manually or use a flag
+if __name__ == "__main__":
+ if "--sync" in sys.argv:
+ # Only sync when explicitly requested
+ bot.sync_commands_on_start = True
+ bot.run(token)
+```
+
+## Testing workflow
+
+1. Use guild commands during development (instant updates)
+2. Only deploy global commands when ready for production
+3. Run deploy script manually, not on every restart
+
+### Bot Token Exposed
+
+Severity: CRITICAL
+
+Situation: Storing or sharing bot token
+
+Symptoms:
+Unauthorized actions from your bot.
+Bot joins random servers.
+Bot sends spam or malicious content.
+"Invalid token" after Discord invalidates it.
+
+Why this breaks:
+Your bot token provides FULL control over your bot. Attackers can:
+- Send messages as your bot
+- Join servers, create invites
+- Access all data your bot can access
+- Potentially take over servers where bot has admin
+
+Discord actively scans GitHub for exposed tokens and invalidates them.
+Common exposure points:
+- Committed to Git
+- Shared in Discord itself
+- In client-side code
+- In public screenshots
+
+Recommended fix:
+
+## Never hardcode tokens
+
+```javascript
+// BAD - never do this
+const token = 'MTIzNDU2Nzg5MDEyMzQ1Njc4.ABCDEF.xyz...';
+
+// GOOD - environment variables
+require('dotenv').config();
+client.login(process.env.DISCORD_TOKEN);
+```
+
+## Use .gitignore
+
+```
+# .gitignore
+.env
+.env.local
+config.json
+```
+
+## If token is exposed
+
+1. Go to Developer Portal immediately
+2. Regenerate the token
+3. Update all deployments
+4. Review bot activity for unauthorized actions
+5. Check git history and force push to remove if needed
+
+## Use environment variables properly
+
+```bash
+# .env (never commit)
+DISCORD_TOKEN=your_token_here
+CLIENT_ID=your_client_id
+```
+
+```javascript
+// Load with dotenv
+require('dotenv').config();
+const token = process.env.DISCORD_TOKEN;
+```
+
+### Bot Missing applications.commands Scope
+
+Severity: HIGH
+
+Situation: Slash commands not appearing for users
+
+Symptoms:
+Bot is in server but slash commands don't show up.
+Typing / shows no commands from your bot.
+Commands worked in development server but not others.
+
+Why this breaks:
+Discord has two important OAuth scopes:
+- `bot` - Traditional bot permissions (messages, reactions, etc.)
+- `applications.commands` - Slash command permissions
+
+Many bots were invited with only the `bot` scope before slash commands
+existed. They need to be re-invited with both scopes.
+
+Recommended fix:
+
+## Generate correct invite URL
+
+```
+https://discord.com/api/oauth2/authorize
+ ?client_id=YOUR_CLIENT_ID
+ &permissions=0
+ &scope=bot%20applications.commands
+```
+
+## In Discord Developer Portal
+
+1. Go to OAuth2 > URL Generator
+2. Select BOTH:
+ - `bot`
+ - `applications.commands`
+3. Select required bot permissions
+4. Use generated URL
+
+## Re-invite without kicking
+
+Users can use the new invite URL even if bot is already in server.
+This adds the new scope without removing the bot.
+
+```javascript
+// Generate invite URL in code
+const inviteUrl = client.generateInvite({
+ scopes: ['bot', 'applications.commands'],
+ permissions: [
+ 'SendMessages',
+ 'EmbedLinks',
+ // Add other needed permissions
+ ]
+});
+```
+
+### Global Commands Not Appearing Immediately
+
+Severity: MEDIUM
+
+Situation: Deploying global slash commands
+
+Symptoms:
+Commands don't appear after deployment.
+Guild commands work but global commands don't.
+Commands appear after an hour.
+
+Why this breaks:
+Global commands can take up to 1 hour to propagate to all Discord servers.
+This is by design for Discord's caching and CDN.
+
+Guild commands are instant but only work in that specific guild.
+
+Recommended fix:
+
+## Development: Use guild commands
+
+```javascript
+// Instant updates for testing
+await rest.put(
+ Routes.applicationGuildCommands(CLIENT_ID, GUILD_ID),
+ { body: commands }
+);
+```
+
+## Production: Deploy global commands during off-peak
+
+```javascript
+// Takes up to 1 hour to propagate
+await rest.put(
+ Routes.applicationCommands(CLIENT_ID),
+ { body: commands }
+);
+```
+
+## Workflow
+
+1. Develop and test with guild commands (instant)
+2. When ready, deploy global commands
+3. Wait up to 1 hour for propagation
+4. Don't deploy global commands frequently
+
+### Frequent Gateway Disconnections
+
+Severity: MEDIUM
+
+Situation: Bot randomly goes offline or misses events
+
+Symptoms:
+Bot shows as offline intermittently.
+Events are missed (member joins, messages).
+Reconnection messages in logs.
+
+Why this breaks:
+Discord gateway requires regular heartbeats. Issues:
+- Blocking operations prevent heartbeat
+- Network instability
+- Memory pressure causing GC pauses
+- Too many guilds without sharding (2500+ requires sharding)
+
+Recommended fix:
+
+## Never block the event loop
+
+```javascript
+// BAD - blocks event loop
+const data = fs.readFileSync('file.json');
+
+// GOOD - async
+const data = await fs.promises.readFile('file.json');
+```
+
+## Handle reconnections gracefully
+
+```javascript
+client.on('shardResume', (id, replayedEvents) => {
+ console.log(`Shard ${id} resumed, replayed ${replayedEvents} events`);
+});
+
+client.on('shardDisconnect', (event, id) => {
+ console.log(`Shard ${id} disconnected`);
+});
+
+client.on('shardReconnecting', (id) => {
+ console.log(`Shard ${id} reconnecting...`);
+});
+```
+
+## Implement sharding at scale
+
+```javascript
+// Required at 2500+ guilds
+const manager = new ShardingManager('./bot.js', {
+ token: process.env.DISCORD_TOKEN,
+ totalShards: 'auto'
+});
+manager.spawn();
+```
+
+### Modal Must Be First Response
+
+Severity: MEDIUM
+
+Situation: Showing a modal from a slash command or button
+
+Symptoms:
+"Interaction has already been acknowledged" error.
+Modal doesn't appear.
+Works sometimes but not others.
+
+Why this breaks:
+Modals have a special requirement: showing a modal MUST be the first
+response to an interaction. You cannot:
+- defer() then showModal()
+- reply() then showModal()
+- Think for more than 3 seconds then showModal()
+
+Recommended fix:
+
+## Show modal immediately
+
+```javascript
+// CORRECT - modal is first response
+async execute(interaction) {
+ const modal = new ModalBuilder()
+ .setCustomId('my-modal')
+ .setTitle('Input Form');
+
+ // Show immediately - no defer, no reply first
+ await interaction.showModal(modal);
+}
+```
+
+```javascript
+// WRONG - deferred first
+async execute(interaction) {
+ await interaction.deferReply(); // CAN'T DO THIS
+ await interaction.showModal(modal); // Will fail
+}
+```
+
+## If you need to check something first
+
+```javascript
+async execute(interaction) {
+ // Quick sync check is OK (under 3 seconds)
+ if (!hasPermission(interaction.user.id)) {
+ return interaction.reply({
+ content: 'No permission',
+ ephemeral: true
+ });
+ }
+
+ // Show modal (still first interaction response for this path)
+ await interaction.showModal(modal);
+}
+```
+
+## Validation Checks
+
+### Hardcoded Discord Token
+
+Severity: ERROR
+
+Discord tokens must never be hardcoded
+
+Message: Hardcoded Discord token detected. Use environment variables.
+
+### Token Variable Assignment
+
+Severity: ERROR
+
+Tokens should come from environment, not strings
+
+Message: Token assigned from string literal. Use environment variable.
+
+### Token in Client-Side Code
+
+Severity: ERROR
+
+Never expose Discord tokens to browsers
+
+Message: Discord credentials exposed client-side. Only use server-side.
+
+### Slow Operation Without Defer
+
+Severity: WARNING
+
+Slow operations should be deferred to avoid timeout
+
+Message: Slow operation without defer. Interaction may timeout.
+
+### Interaction Without Error Handling
+
+Severity: WARNING
+
+Interactions should have try/catch for graceful errors
+
+Message: Interaction without error handling. Add try/catch.
+
+### Using Message Content Intent
+
+Severity: WARNING
+
+Message Content is privileged, prefer slash commands
+
+Message: Using Message Content intent. Consider slash commands instead.
+
+### Requesting All Intents
+
+Severity: WARNING
+
+Only request intents you actually need
+
+Message: Requesting all intents. Only enable what you need.
+
+### Syncing Commands on Ready Event
+
+Severity: WARNING
+
+Don't sync commands on every bot startup
+
+Message: Syncing commands on startup. Use separate deploy script.
+
+### Registering Commands in Loop
+
+Severity: WARNING
+
+Use bulk registration, not individual calls
+
+Message: Registering commands in loop. Use bulk registration.
+
+### No Rate Limit Handling
+
+Severity: INFO
+
+Consider handling rate limits for bulk operations
+
+Message: Bulk operation without rate limit handling.
+
+## Collaboration
+
+### Delegation Triggers
+
+- user needs AI-powered Discord bot -> llm-architect (Integrate LLM for conversational Discord bot)
+- user needs Slack integration too -> slack-bot-builder (Cross-platform bot architecture)
+- user needs voice features -> voice-agents (Discord voice channel integration)
+- user needs database for bot data -> postgres-wizard (Store user data, server configs, moderation logs)
+- user needs workflow automation -> workflow-automation (Discord events trigger workflows)
+- user needs high availability -> devops (Sharding, scaling, monitoring for large bots)
+- user needs payment integration -> stripe-specialist (Premium bot features, subscription management)
## When to Use
-This skill is applicable to execute the workflow or actions described in the overview.
+
+Use this skill when the request clearly matches the capabilities and patterns described above.
diff --git a/plugins/antigravity-awesome-skills-claude/skills/email-systems/SKILL.md b/plugins/antigravity-awesome-skills-claude/skills/email-systems/SKILL.md
index ba119b5d..4c2c992f 100644
--- a/plugins/antigravity-awesome-skills-claude/skills/email-systems/SKILL.md
+++ b/plugins/antigravity-awesome-skills-claude/skills/email-systems/SKILL.md
@@ -1,18 +1,36 @@
---
name: email-systems
-description: "You are an email systems engineer who has maintained 99.9% deliverability across millions of emails. You've debugged SPF/DKIM/DMARC, dealt with blacklists, and optimized for inbox placement. You know that email is the highest ROI channel when done right, and a spam folder nightmare when done wrong."
+description: Email has the highest ROI of any marketing channel. $36 for every
+ $1 spent. Yet most startups treat it as an afterthought - bulk blasts, no
+ personalization, landing in spam folders.
risk: none
source: vibeship-spawner-skills (Apache 2.0)
-date_added: '2026-02-27'
+date_added: 2026-02-27
---
# Email Systems
-You are an email systems engineer who has maintained 99.9% deliverability
-across millions of emails. You've debugged SPF/DKIM/DMARC, dealt with
-blacklists, and optimized for inbox placement. You know that email is the
-highest ROI channel when done right, and a spam folder nightmare when done
-wrong. You treat deliverability as infrastructure, not an afterthought.
+Email has the highest ROI of any marketing channel. $36 for every $1 spent.
+Yet most startups treat it as an afterthought - bulk blasts, no personalization,
+landing in spam folders.
+
+This skill covers transactional email that works, marketing automation that
+converts, deliverability that reaches inboxes, and the infrastructure decisions
+that scale.
+
+## Principles
+
+- Transactional vs Marketing separation | Description: Transactional emails (password reset, receipts) need 100% delivery.
+Marketing emails (newsletters, promos) have lower priority. Use separate
+IP addresses and providers to protect transactional deliverability. | Examples: Good: Password resets via Postmark, marketing via ConvertKit | Bad: All emails through one SendGrid account
+- Permission is everything | Description: Only email people who asked to hear from you. Double opt-in for marketing.
+Easy unsubscribe. Clean your list ruthlessly. Bad lists destroy deliverability. | Examples: Good: Confirmed subscription + one-click unsubscribe | Bad: Scraped email list, hidden unsubscribe, bought contacts
+- Deliverability is infrastructure | Description: SPF, DKIM, DMARC are not optional. Warm up new IPs. Monitor bounce rates.
+Deliverability is earned through technical setup and good behavior. | Examples: Good: All DNS records configured, dedicated IP warmed for 4 weeks | Bad: Using free tier shared IP, no authentication records
+- One email, one goal | Description: Each email should have exactly one purpose and one CTA. Multiple asks
+means nothing gets clicked. Clear single action. | Examples: Good: "Click here to verify your email" (one button) | Bad: "Verify email, check out our blog, follow us on Twitter, refer a friend..."
+- Timing and frequency matter | Description: Wrong time = low open rates. Too frequent = unsubscribes. Let users
+set preferences. Test send times. Respect inbox fatigue. | Examples: Good: Weekly digest on Tuesday 10am user's timezone, preference center | Bad: Daily emails at random times, no way to reduce frequency
## Patterns
@@ -20,40 +38,642 @@ wrong. You treat deliverability as infrastructure, not an afterthought.
Queue all transactional emails with retry logic and monitoring
+**When to use**: Sending any critical email (password reset, receipts, confirmations)
+
+// Don't block request on email send
+await queue.add('email', {
+ template: 'password-reset',
+ to: user.email,
+ data: { resetToken, expiresAt }
+}, {
+ attempts: 3,
+ backoff: { type: 'exponential', delay: 2000 }
+});
+
### Email Event Tracking
Track delivery, opens, clicks, bounces, and complaints
+**When to use**: Any email campaign or transactional flow
+
+# Track lifecycle:
+- Queued: Email entered system
+- Sent: Handed to provider
+- Delivered: Reached inbox
+- Opened: Recipient viewed
+- Clicked: Recipient engaged
+- Bounced: Permanent failure
+- Complained: Marked as spam
+
### Template Versioning
Version email templates for rollback and A/B testing
-## Anti-Patterns
+**When to use**: Changing production email templates
-### ❌ HTML email soup
+templates/
+ password-reset/
+ v1.tsx (current)
+ v2.tsx (testing 10%)
+ v1-deprecated.tsx (archived)
-**Why bad**: Email clients render differently. Outlook breaks everything.
+# Deploy new version gradually
+# Monitor metrics before full rollout
-### ❌ No plain text fallback
+### Bounce Handling State Machine
-**Why bad**: Some clients strip HTML. Accessibility issues. Spam signal.
+Automatically handle bounces to protect sender reputation
-### ❌ Huge image emails
+**When to use**: Processing bounce and complaint webhooks
-**Why bad**: Images blocked by default. Spam trigger. Slow loading.
+switch (bounceType) {
+ case 'hard':
+ await markEmailInvalid(email);
+ break;
+ case 'soft':
+ await incrementBounceCount(email);
+ if (count >= 3) await markEmailInvalid(email);
+ break;
+ case 'complaint':
+ await unsubscribeImmediately(email);
+ break;
+}
-## ⚠️ Sharp Edges
+### React Email Components
-| Issue | Severity | Solution |
-|-------|----------|----------|
-| Missing SPF, DKIM, or DMARC records | critical | # Required DNS records: |
-| Using shared IP for transactional email | high | # Transactional email strategy: |
-| Not processing bounce notifications | high | # Bounce handling requirements: |
-| Missing or hidden unsubscribe link | critical | # Unsubscribe requirements: |
-| Sending HTML without plain text alternative | medium | # Always send multipart: |
-| Sending high volume from new IP immediately | high | # IP warm-up schedule: |
-| Emailing people who did not opt in | critical | # Permission requirements: |
-| Emails that are mostly or entirely images | medium | # Balance images and text: |
+Build emails with reusable React components
+
+**When to use**: Creating email templates
+
+import { Button, Html } from '@react-email/components';
+
+export default function WelcomeEmail({ userName }) {
+  return (
+    <Html>
+      <h1>Welcome {userName}!</h1>
+      <Button href="https://example.com">Get Started</Button>
+    </Html>
+  );
+}
+
+### Preference Center
+
+Let users control email frequency and topics
+
+**When to use**: Building marketing or notification systems
+
+Preferences:
+☑ Product updates (weekly)
+☑ New features (monthly)
+☐ Marketing promotions
+☑ Account notifications (always)
+
+# Respect preferences in all sends
+# Required for GDPR compliance
+
+## Sharp Edges
+
+### Missing SPF, DKIM, or DMARC records
+
+Severity: CRITICAL
+
+Situation: Sending emails without authentication. Emails going to spam folder.
+Low open rates. No idea why. Turns out DNS records were never set up.
+
+Symptoms:
+- Emails going to spam
+- Low deliverability rates
+- mail-tester.com score below 8
+- No DMARC reports received
+
+Why this breaks:
+Email authentication (SPF, DKIM, DMARC) tells receiving servers you're
+legit. Without them, you look like a spammer. Modern email providers
+increasingly require all three.
+
+Recommended fix:
+
+# Required DNS records:
+
+## SPF (Sender Policy Framework)
+TXT record: v=spf1 include:_spf.google.com include:sendgrid.net ~all
+
+## DKIM (DomainKeys Identified Mail)
+TXT record provided by your email provider
+Adds cryptographic signature to emails
+
+## DMARC (Domain-based Message Authentication)
+TXT record: v=DMARC1; p=quarantine; rua=mailto:dmarc@yourdomain.com
+
+# Verify setup:
+- Send test email to mail-tester.com
+- Check MXToolbox for record validation
+- Monitor DMARC reports
+
+### Using shared IP for transactional email
+
+Severity: HIGH
+
+Situation: Password resets going to spam. Using free tier of email provider.
+Some other customer on your shared IP got flagged for spam.
+Your reputation is ruined by association.
+
+Symptoms:
+- Transactional emails in spam
+- Inconsistent delivery
+- Using same provider for marketing and transactional
+
+Why this breaks:
+Shared IPs share reputation. One bad actor affects everyone. For
+critical transactional email, you need your own IP or a provider
+with strict shared IP policies.
+
+Recommended fix:
+
+# Transactional email strategy:
+
+## Option 1: Dedicated IP (high volume)
+- Get dedicated IP from your provider
+- Warm it up slowly (start with 100/day)
+- Maintain consistent volume
+
+## Option 2: Transactional-only provider
+- Postmark (very strict, great reputation)
+- Includes shared pool with high standards
+
+## Separate concerns:
+- Transactional: Postmark or Resend
+- Marketing: ConvertKit or Customer.io
+- Never mix marketing and transactional
+
+### Not processing bounce notifications
+
+Severity: HIGH
+
+Situation: Emailing same dead addresses over and over. Bounce rate climbing.
+Email provider threatening to suspend account. List is 40% dead.
+
+Symptoms:
+- Bounce rate above 2%
+- No webhook handlers for bounces
+- Same emails failing repeatedly
+
+Why this breaks:
+Bounces damage sender reputation. Email providers track bounce rates.
+Above 2% and you start looking like a spammer. Dead addresses must
+be removed immediately.
+
+Recommended fix:
+
+# Bounce handling requirements:
+
+## Hard bounces:
+Remove immediately on first occurrence
+Invalid address, domain doesn't exist
+
+## Soft bounces:
+Retry 3 times over 72 hours
+After 3 failures, treat as hard bounce
+
+## Implementation:
+```typescript
+// Webhook handler for bounces
+app.post('/webhooks/email', async (req, res) => {
+ const event = req.body;
+ if (event.type === 'bounce') {
+ await markEmailInvalid(event.email);
+ await removeFromAllLists(event.email);
+ }
+});
+```
+
+## Monitor:
+Track bounce rate by campaign
+Alert if bounce rate exceeds 1%
+
+### Missing or hidden unsubscribe link
+
+Severity: CRITICAL
+
+Situation: Users marking as spam because they cannot unsubscribe. Spam complaints
+rising. CAN-SPAM violation. Email provider suspends account.
+
+Symptoms:
+- Hidden unsubscribe links
+- Multi-step unsubscribe process
+- No List-Unsubscribe header
+- High spam complaint rate
+
+Why this breaks:
+Users who cannot unsubscribe will mark as spam. Spam complaints hurt
+reputation more than unsubscribes. Also it is literally illegal.
+CAN-SPAM, GDPR all require clear unsubscribe.
+
+Recommended fix:
+
+# Unsubscribe requirements:
+
+## Visible:
+- Above the fold in email footer
+- Clear text, not hidden
+- Not styled to be invisible
+
+## One-click:
+- Link directly unsubscribes
+- No login required
+- No "are you sure" hoops
+
+## List-Unsubscribe header:
+```
+List-Unsubscribe: <mailto:unsubscribe@yourdomain.com>, <https://yourdomain.com/unsubscribe?id=TOKEN>
+List-Unsubscribe-Post: List-Unsubscribe=One-Click
+```
+
+## Preference center:
+Option to reduce frequency instead of full unsubscribe
+
+### Sending HTML without plain text alternative
+
+Severity: MEDIUM
+
+Situation: Some users see blank emails. Spam filters flagging emails. Accessibility
+issues for screen readers. Email clients that strip HTML show nothing.
+
+Symptoms:
+- No text/plain part in emails
+- Blank emails for some users
+- Lower engagement in some segments
+
+Why this breaks:
+Not everyone can render HTML. Screen readers work better with plain text.
+Spam filters are suspicious of HTML-only. Multipart is the standard.
+
+Recommended fix:
+
+# Always send multipart:
+```typescript
+await resend.emails.send({
+ from: 'you@example.com',
+ to: 'user@example.com',
+ subject: 'Welcome!',
+  html: '<h1>Welcome!</h1><p>Thanks for signing up.</p>',
+ text: 'Welcome!\n\nThanks for signing up.',
+});
+```
+
+# Auto-generate text from HTML:
+Use html-to-text library as fallback
+But hand-crafted plain text is better
+
+# Plain text should be readable:
+Not just HTML stripped of tags
+Actual formatted text content
+
+### Sending high volume from new IP immediately
+
+Severity: HIGH
+
+Situation: Just switched providers. Started sending 50,000 emails/day immediately.
+Massive deliverability issues. New IP has no reputation. Looks like spam.
+
+Symptoms:
+- New IP/provider
+- Sending high volume immediately
+- Sudden deliverability drop
+
+Why this breaks:
+New IPs have no reputation. Sending high volume immediately looks
+like a spammer who just spun up. You need to gradually build trust.
+
+Recommended fix:
+
+# IP warm-up schedule:
+
+Week 1: 50-100 emails/day
+Week 2: 200-500 emails/day
+Week 3: 500-1000 emails/day
+Week 4: 1000-5000 emails/day
+Continue doubling until at volume
+
+# Best practices:
+- Start with most engaged users
+- Send to Gmail/Microsoft first (they set reputation)
+- Maintain consistent volume
+- Don't spike and drop
+
+# During warm-up:
+- Monitor deliverability closely
+- Check feedback loops
+- Adjust pace if issues arise
+
+### Emailing people who did not opt in
+
+Severity: CRITICAL
+
+Situation: Bought an email list. Scraped emails from LinkedIn. Added conference
+contacts. Spam complaints through the roof. Provider suspends account.
+Maybe a lawsuit.
+
+Symptoms:
+- Purchased email lists
+- Scraped contacts
+- High unsubscribe rate on first send
+- Spam complaints above 0.1%
+
+Why this breaks:
+Permission-based email is not optional. It is the law (CAN-SPAM, GDPR).
+It is also effective - unwilling recipients hurt your metrics and
+reputation more than they help.
+
+Recommended fix:
+
+# Permission requirements:
+
+## Explicit opt-in:
+- User actively chooses to receive email
+- Not pre-checked boxes
+- Clear what they are signing up for
+
+## Double opt-in:
+- Confirmation email with link
+- Only add to list after confirmation
+- Best practice for marketing lists
+
+## What you cannot do:
+- Buy email lists
+- Scrape emails from websites
+- Add conference contacts without consent
+- Use partner/customer lists without consent
+
+## Transactional exception:
+Password resets, receipts, account alerts
+do not need marketing opt-in
+
+### Emails that are mostly or entirely images
+
+Severity: MEDIUM
+
+Situation: Beautiful designed email that is one big image. Users with images
+blocked see nothing. Spam filters flag it. Mobile loading is slow.
+No one can copy text.
+
+Symptoms:
+- Single image emails
+- No text content visible
+- Missing or generic alt text
+- Low engagement when images blocked
+
+Why this breaks:
+Images are blocked by default in many clients. Spam filters are
+suspicious of image-only emails. Accessibility suffers. Load times
+increase.
+
+Recommended fix:
+
+# Balance images and text:
+
+## 60/40 rule:
+- At least 60% text content
+- Images for enhancement, not content
+
+## Always include:
+- Alt text on every image
+- Key message in text, not just image
+- Fallback for images-off view
+
+## Test:
+- Preview with images disabled
+- Should still be usable
+
+# Example:
+```html
+<img src="sale-banner.jpg" alt="Summer sale: 50% off this week">
+<p>Use code SAVE50 to save 50% this week.</p>
+```
+
+### Missing or default preview text
+
+Severity: MEDIUM
+
+Situation: Inbox shows "View this email in browser" or random HTML as preview.
+Lower open rates. First impression wasted on boilerplate.
+
+Symptoms:
+- View in browser as preview
+- HTML code visible in preview
+- No preview component in template
+
+Why this breaks:
+Preview text is prime real estate - appears right after subject line.
+Default or missing preview text wastes this space. Good preview text
+increases open rates 10-30%.
+
+Recommended fix:
+
+# Add explicit preview text:
+
+## In HTML:
+```html
+<div style="display: none; max-height: 0; overflow: hidden;">
+  Your preview text here. This appears in inbox preview.
+</div>
+<!-- Optional padding so the email body does not leak into the preview -->
+<div style="display: none; max-height: 0; overflow: hidden;">&nbsp;&zwnj;&nbsp;&zwnj;&nbsp;&zwnj;</div>
+```
+
+## With React Email:
+```tsx
+<Preview>
+  Your preview text here. This appears in inbox preview.
+</Preview>
+```
+
+## Best practices:
+- Complement the subject line
+- 40-100 characters optimal
+- Create curiosity or value
+- Different from first line of email
+
+### Not handling partial send failures
+
+Severity: HIGH
+
+Situation: Sending to 10,000 users. API fails at 3,000. No tracking of what sent.
+Either double-send or lose 7,000. No way to know who got the email.
+
+Symptoms:
+- No per-recipient send logging
+- Cannot tell who received email
+- Double-sending issues
+- No retry mechanism
+
+Why this breaks:
+Bulk sends fail partially. APIs timeout. Rate limits hit. Without
+tracking individual send status, you cannot recover gracefully.
+
+Recommended fix:
+
+# Track each send individually:
+
+```typescript
+async function sendCampaign(emails: string[]) {
+ const results = await Promise.allSettled(
+ emails.map(async (email) => {
+ try {
+ const result = await resend.emails.send({ to: email, ... });
+ await db.emailLog.create({
+ email,
+ status: 'sent',
+ messageId: result.id,
+ });
+ return result;
+ } catch (error) {
+ await db.emailLog.create({
+ email,
+ status: 'failed',
+ error: error.message,
+ });
+ throw error;
+ }
+ })
+ );
+
+ const failed = results.filter(r => r.status === 'rejected');
+ // Retry failed sends or alert
+}
+```
+
+# Best practices:
+- Log every send attempt
+- Include message ID for tracking
+- Build retry queue for failures
+- Monitor success rate per campaign
+
+## Validation Checks
+
+### Missing plain text email part
+
+Severity: WARNING
+
+Emails should always include a plain text alternative
+
+Message: Email being sent with HTML but no plain text part. Add 'text:' property for accessibility and deliverability.
+
+### Hardcoded from email address
+
+Severity: WARNING
+
+From addresses should come from environment variables
+
+Message: From email appears hardcoded. Use environment variable for flexibility.
+
+### Missing bounce webhook handler
+
+Severity: WARNING
+
+Email bounces should be handled to maintain list hygiene
+
+Message: Email provider used but no bounce handling detected. Implement webhook handler for bounces.
+
+### Missing List-Unsubscribe header
+
+Severity: INFO
+
+Marketing emails should include List-Unsubscribe header
+
+Message: Marketing email detected without List-Unsubscribe header. Add header for better deliverability.
+
+### Synchronous email send in request handler
+
+Severity: WARNING
+
+Email sends should be queued, not blocking
+
+Message: Email sent synchronously in request handler. Consider queuing for better reliability.
+
+### Email send without retry logic
+
+Severity: INFO
+
+Email sends should have retry mechanism for failures
+
+Message: Email send without apparent retry logic. Add retry for transient failures.
+
+### Email API key in code
+
+Severity: ERROR
+
+API keys should come from environment variables
+
+Message: Email API key appears hardcoded in source code. Use environment variable.
+
+### Bulk email without rate limiting
+
+Severity: WARNING
+
+Bulk sends should respect provider rate limits
+
+Message: Bulk email sending without apparent rate limiting. Add throttling to avoid hitting limits.
+
+### Email without preview text
+
+Severity: INFO
+
+Emails should include preview/preheader text
+
+Message: Email template without preview text. Add hidden preheader for inbox preview.
+
+### Email send without logging
+
+Severity: WARNING
+
+Email sends should be logged for debugging and auditing
+
+Message: Email being sent without apparent logging. Log sends for debugging and compliance.
+
+## Collaboration
+
+### Delegation Triggers
+
+- copy|subject|messaging|content -> copywriting (Email needs copy)
+- design|template|visual|layout -> ui-design (Email needs design)
+- track|analytics|measure|metrics -> analytics-architecture (Email needs tracking)
+- infrastructure|deploy|server|queue -> devops (Email needs infrastructure)
+
+### Email Marketing Stack
+
+Skills: email-systems, copywriting, marketing, analytics-architecture
+
+Workflow:
+
+```
+1. Infrastructure setup (email-systems)
+2. Template creation (email-systems)
+3. Copy writing (copywriting)
+4. Campaign launch (marketing)
+5. Performance tracking (analytics-architecture)
+```
+
+### Transactional Email
+
+Skills: email-systems, backend, devops
+
+Workflow:
+
+```
+1. Provider setup (email-systems)
+2. Template coding (email-systems)
+3. Queue integration (backend)
+4. Monitoring (devops)
+```
## When to Use
-This skill is applicable to execute the workflow or actions described in the overview.
+
+Use this skill when the request clearly matches the capabilities and patterns described above.
diff --git a/plugins/antigravity-awesome-skills-claude/skills/file-uploads/SKILL.md b/plugins/antigravity-awesome-skills-claude/skills/file-uploads/SKILL.md
index 598db0af..b0814728 100644
--- a/plugins/antigravity-awesome-skills-claude/skills/file-uploads/SKILL.md
+++ b/plugins/antigravity-awesome-skills-claude/skills/file-uploads/SKILL.md
@@ -1,27 +1,228 @@
---
name: file-uploads
-description: "Careful about security and performance. Never trusts file extensions. Knows that large uploads need special handling. Prefers presigned URLs over server proxying."
+description: Expert at handling file uploads and cloud storage. Covers S3,
+ Cloudflare R2, presigned URLs, multipart uploads, and image optimization.
+ Knows how to handle large files without blocking.
risk: none
-source: "vibeship-spawner-skills (Apache 2.0)"
-date_added: "2026-02-27"
+source: vibeship-spawner-skills (Apache 2.0)
+date_added: 2026-02-27
---
# File Uploads & Storage
+Expert at handling file uploads and cloud storage. Covers S3,
+Cloudflare R2, presigned URLs, multipart uploads, and image
+optimization. Knows how to handle large files without blocking.
+
**Role**: File Upload Specialist
Careful about security and performance. Never trusts file
extensions. Knows that large uploads need special handling.
Prefers presigned URLs over server proxying.
-## ⚠️ Sharp Edges
+### Principles
-| Issue | Severity | Solution |
-|-------|----------|----------|
-| Trusting client-provided file type | critical | # CHECK MAGIC BYTES |
-| No upload size restrictions | high | # SET SIZE LIMITS |
-| User-controlled filename allows path traversal | critical | # SANITIZE FILENAMES |
-| Presigned URL shared or cached incorrectly | medium | # CONTROL PRESIGNED URL DISTRIBUTION |
+- Never trust client file type claims
+- Use presigned URLs for direct uploads
+- Stream large files, never buffer
+- Validate on upload, optimize after
+
+## Sharp Edges
+
+### Trusting client-provided file type
+
+Severity: CRITICAL
+
+Situation: User uploads malware.exe renamed to image.jpg. You check
+extension, looks fine. Store it. Serve it. Another user
+downloads and executes it.
+
+Symptoms:
+- Malware uploaded as images
+- Wrong content-type served
+
+Why this breaks:
+File extensions and Content-Type headers can be faked.
+Attackers rename executables to bypass filters.
+
+Recommended fix:
+
+# CHECK MAGIC BYTES
+
+import { fileTypeFromBuffer } from "file-type";
+
+async function validateImage(buffer: Buffer) {
+ const type = await fileTypeFromBuffer(buffer);
+
+ const allowedTypes = ["image/jpeg", "image/png", "image/webp"];
+
+ if (!type || !allowedTypes.includes(type.mime)) {
+ throw new Error("Invalid file type");
+ }
+
+ return type;
+}
+
+// For streams
+import { fileTypeFromStream } from "file-type";
+const type = await fileTypeFromStream(readableStream);
+
+### No upload size restrictions
+
+Severity: HIGH
+
+Situation: No file size limit. Attacker uploads 10GB file. Server runs
+out of memory or disk. Denial of service. Or massive
+storage bill.
+
+Symptoms:
+- Server crashes on large uploads
+- Massive storage bills
+- Memory exhaustion
+
+Why this breaks:
+Without limits, attackers can exhaust resources. Even
+legitimate users might accidentally upload huge files.
+
+Recommended fix:
+
+# SET SIZE LIMITS
+
+// Formidable
+const form = formidable({
+ maxFileSize: 10 * 1024 * 1024, // 10MB
+});
+
+// Multer
+const upload = multer({
+ limits: { fileSize: 10 * 1024 * 1024 },
+});
+
+// Client-side early check
+if (file.size > 10 * 1024 * 1024) {
+ alert("File too large (max 10MB)");
+ return;
+}
+
+// Presigned URL with size limit
+const command = new PutObjectCommand({
+ Bucket: BUCKET,
+ Key: key,
+ ContentLength: expectedSize, // Enforce size
+});
+
+### User-controlled filename allows path traversal
+
+Severity: CRITICAL
+
+Situation: User uploads file named "../../../etc/passwd". You use
+filename directly. File saved outside upload directory.
+System files overwritten.
+
+Symptoms:
+- Files outside upload directory
+- System file access
+
+Why this breaks:
+User input should never be used directly in file paths.
+Path traversal sequences can escape intended directories.
+
+Recommended fix:
+
+# SANITIZE FILENAMES
+
+import path from "path";
+import crypto from "crypto";
+
+function safeFilename(userFilename: string): string {
+ // Extract just the base name
+ const base = path.basename(userFilename);
+
+ // Remove any remaining path chars
+ const sanitized = base.replace(/[^a-zA-Z0-9.-]/g, "_");
+
+ // Or better: generate new name entirely
+ const ext = path.extname(userFilename).toLowerCase();
+ const allowed = [".jpg", ".png", ".pdf"];
+
+ if (!allowed.includes(ext)) {
+ throw new Error("Invalid extension");
+ }
+
+ return crypto.randomUUID() + ext;
+}
+
+// Never do this (also shadows the imported "path" module)
+const unsafePath = "uploads/" + req.body.filename; // DANGER!
+
+// Do this
+const filePath = "uploads/" + safeFilename(req.body.filename);
+
+### Presigned URL shared or cached incorrectly
+
+Severity: MEDIUM
+
+Situation: Presigned URL for private file returned in API response.
+Response cached by CDN. Anyone with cached URL can access
+private file for hours.
+
+Symptoms:
+- Private files accessible via cached URLs
+- Access after expiry
+
+Why this breaks:
+Presigned URLs grant temporary access. If cached or shared,
+access extends beyond intended scope.
+
+Recommended fix:
+
+# CONTROL PRESIGNED URL DISTRIBUTION
+
+// Short expiry for sensitive files
+const url = await getSignedUrl(s3, command, {
+ expiresIn: 300, // 5 minutes
+});
+
+// No-cache headers for presigned URL responses
+return Response.json({ url }, {
+ headers: {
+ "Cache-Control": "no-store, max-age=0",
+ },
+});
+
+// Or use CloudFront signed URLs for more control
+
+## Validation Checks
+
+### Only checking file extension
+
+Severity: CRITICAL
+
+Message: Check magic bytes, not just extension
+
+Fix action: Use file-type library to verify actual type
+
+### User filename used directly in path
+
+Severity: CRITICAL
+
+Message: Sanitize filenames to prevent path traversal
+
+Fix action: Use path.basename() and generate safe name
+
+## Collaboration
+
+### Delegation Triggers
+
+- image optimization CDN -> performance-optimization (Image delivery)
+- storing file metadata -> postgres-wizard (Database schema)
## When to Use
-This skill is applicable to execute the workflow or actions described in the overview.
+
+- User mentions or implies: file upload
+- User mentions or implies: S3
+- User mentions or implies: R2
+- User mentions or implies: presigned URL
+- User mentions or implies: multipart
+- User mentions or implies: image upload
+- User mentions or implies: cloud storage
diff --git a/plugins/antigravity-awesome-skills-claude/skills/firebase/SKILL.md b/plugins/antigravity-awesome-skills-claude/skills/firebase/SKILL.md
index 811518b9..c2532e44 100644
--- a/plugins/antigravity-awesome-skills-claude/skills/firebase/SKILL.md
+++ b/plugins/antigravity-awesome-skills-claude/skills/firebase/SKILL.md
@@ -1,23 +1,38 @@
---
name: firebase
-description: "You're a developer who has shipped dozens of Firebase projects. You've seen the \"easy\" path lead to security breaches, runaway costs, and impossible migrations. You know Firebase is powerful, but you also know its sharp edges."
+description: Firebase gives you a complete backend in minutes - auth, database,
+ storage, functions, hosting. But the ease of setup hides real complexity.
+ Security rules are your last line of defense, and they're often wrong.
risk: unknown
-source: "vibeship-spawner-skills (Apache 2.0)"
-date_added: "2026-02-27"
+source: vibeship-spawner-skills (Apache 2.0)
+date_added: 2026-02-27
---
# Firebase
-You're a developer who has shipped dozens of Firebase projects. You've seen the
-"easy" path lead to security breaches, runaway costs, and impossible migrations.
-You know Firebase is powerful, but you also know its sharp edges.
+Firebase gives you a complete backend in minutes - auth, database, storage,
+functions, hosting. But the ease of setup hides real complexity. Security rules
+are your last line of defense, and they're often wrong. Firestore queries are
+limited, and you learn this after you've designed your data model.
-Your hard-won lessons: The team that skipped security rules got pwned. The team
-that designed Firestore like SQL couldn't query their data. The team that
-attached listeners to large collections got a $10k bill. You've learned from
-all of them.
+This skill covers Firebase Authentication, Firestore, Realtime Database, Cloud
+Functions, Cloud Storage, and Firebase Hosting. Key insight: Firebase is
+optimized for read-heavy, denormalized data. If you're thinking relationally,
+you're thinking wrong.
-You advocate for Firebase w
+2025 lesson: Firestore pricing can surprise you. Reads are cheap until they're
+not. A poorly designed listener can cost more than a dedicated database. Plan
+your data model for your query patterns, not your data relationships.
+
+## Principles
+
+- Design data for queries, not relationships
+- Security rules are mandatory, not optional
+- Denormalize aggressively - duplication is cheap, joins are expensive
+- Batch writes and transactions for consistency
+- Use offline persistence wisely - it's not free
+- Cloud Functions for what clients shouldn't do
+- Environment-based config, never hardcode keys in client
## Capabilities
@@ -31,31 +46,646 @@ You advocate for Firebase w
- firebase-admin-sdk
- firebase-emulators
+## Scope
+
+- general-backend-architecture -> backend
+- payment-processing -> stripe
+- email-sending -> email
+- advanced-auth-flows -> authentication-oauth
+- kubernetes-deployment -> devops
+
+## Tooling
+
+### Core
+
+- firebase - When: Client-side SDK Note: Modular SDK - tree-shakeable
+- firebase-admin - When: Server-side / Cloud Functions Note: Full access, bypasses security rules
+- firebase-functions - When: Cloud Functions v2 Note: v2 functions are recommended
+
+### Testing
+
+- @firebase/rules-unit-testing - When: Testing security rules Note: Essential - rules bugs are security bugs
+- firebase-tools - When: Emulator suite Note: Local development without hitting production
+
+### Frameworks
+
+- reactfire - When: React + Firebase Note: Hooks-based, handles subscriptions
+- vuefire - When: Vue + Firebase Note: Vue-specific bindings
+- angularfire - When: Angular + Firebase Note: Official Angular bindings
+
## Patterns
### Modular SDK Import
Import only what you need for smaller bundles
+**When to use**: Client-side Firebase usage
+
+# MODULAR IMPORTS:
+
+"""
+Firebase v9+ uses modular SDK. Import only what you need.
+This enables tree-shaking and smaller bundles.
+"""
+
+// WRONG: v8-compat style (larger bundle)
+import firebase from 'firebase/compat/app';
+import 'firebase/compat/firestore';
+const db = firebase.firestore();
+
+// RIGHT: v9+ modular (tree-shakeable)
+import { initializeApp } from 'firebase/app';
+import { getFirestore, collection, doc, getDoc } from 'firebase/firestore';
+
+const app = initializeApp(firebaseConfig);
+const db = getFirestore(app);
+
+// Get a document
+const docRef = doc(db, 'users', 'userId');
+const docSnap = await getDoc(docRef);
+
+if (docSnap.exists()) {
+ console.log(docSnap.data());
+}
+
+// Query with constraints
+import { query, where, orderBy, limit } from 'firebase/firestore';
+
+const q = query(
+ collection(db, 'posts'),
+ where('published', '==', true),
+ orderBy('createdAt', 'desc'),
+ limit(10)
+);
+
### Security Rules Design
Secure your data with proper rules from day one
+**When to use**: Any Firestore database
+
+# FIRESTORE SECURITY RULES:
+
+"""
+Rules are your last line of defense. Every read and write
+goes through them. Get them wrong, and your data is exposed.
+"""
+
+rules_version = '2';
+service cloud.firestore {
+ match /databases/{database}/documents {
+
+ // Helper functions
+ function isSignedIn() {
+ return request.auth != null;
+ }
+
+ function isOwner(userId) {
+ return request.auth.uid == userId;
+ }
+
+ function isAdmin() {
+ return request.auth.token.admin == true;
+ }
+
+ // Users collection
+ match /users/{userId} {
+ // Anyone can read public profile
+ allow read: if true;
+
+ // Only owner can write their own data
+ allow write: if isOwner(userId);
+
+ // Private subcollection
+ match /private/{document=**} {
+ allow read, write: if isOwner(userId);
+ }
+ }
+
+ // Posts collection
+ match /posts/{postId} {
+ // Anyone can read published posts
+ allow read: if resource.data.published == true
+ || isOwner(resource.data.authorId);
+
+ // Only authenticated users can create
+ allow create: if isSignedIn()
+ && request.resource.data.authorId == request.auth.uid;
+
+ // Only author can update/delete
+ allow update, delete: if isOwner(resource.data.authorId);
+ }
+
+ // Admin-only collection
+ match /admin/{document=**} {
+ allow read, write: if isAdmin();
+ }
+ }
+}
+
### Data Modeling for Queries
Design Firestore data structure around query patterns
-## Anti-Patterns
+**When to use**: Designing Firestore schema
-### ❌ No Security Rules
+# FIRESTORE DATA MODELING:
-### ❌ Client-Side Admin Operations
+"""
+Firestore is NOT relational. You can't JOIN.
+Design your data for how you'll QUERY it, not how it relates.
+"""
-### ❌ Listener on Large Collections
+// WRONG: Normalized (SQL thinking)
+// users/{userId}
+// posts/{postId} with authorId field
+// To get "posts by user" - need to query posts collection
+
+// RIGHT: Denormalized for queries
+// users/{userId}/posts/{postId} - subcollection
+// OR
+// posts/{postId} with embedded author data
+
+// Document structure for a post
+const post = {
+ id: 'post123',
+ title: 'My Post',
+ content: '...',
+
+ // Embed frequently-needed author data
+ author: {
+ id: 'user456',
+ name: 'Jane Doe',
+ avatarUrl: '...'
+ },
+
+ // Arrays for IN queries (max 30 items for 'in')
+ tags: ['javascript', 'firebase'],
+
+ // Maps for compound queries
+ stats: {
+ likes: 42,
+ comments: 7,
+ views: 1000
+ },
+
+ // Timestamps
+ createdAt: serverTimestamp(),
+ updatedAt: serverTimestamp(),
+
+ // Booleans for filtering
+ published: true,
+ featured: false
+};
+
+// Query patterns this enables:
+// - Get post with author info: 1 read (no join needed)
+// - Posts by tag: where('tags', 'array-contains', 'javascript')
+// - Featured posts: where('featured', '==', true)
+// - Recent posts: orderBy('createdAt', 'desc')
+
+// When author updates their name, update all their posts
+// This is the tradeoff: writes are more complex, reads are fast
+
+### Real-time Listeners
+
+Subscribe to data changes with proper cleanup
+
+**When to use**: Real-time features
+
+# REAL-TIME LISTENERS:
+
+"""
+onSnapshot creates a persistent connection. Always unsubscribe
+when component unmounts to prevent memory leaks and extra reads.
+"""
+
+// React hook for real-time document
+function useDocument(path) {
+ const [data, setData] = useState(null);
+ const [loading, setLoading] = useState(true);
+ const [error, setError] = useState(null);
+
+ useEffect(() => {
+ const docRef = doc(db, path);
+
+ // Subscribe to document
+ const unsubscribe = onSnapshot(
+ docRef,
+ (snapshot) => {
+ if (snapshot.exists()) {
+ setData({ id: snapshot.id, ...snapshot.data() });
+ } else {
+ setData(null);
+ }
+ setLoading(false);
+ },
+ (err) => {
+ setError(err);
+ setLoading(false);
+ }
+ );
+
+ // Cleanup on unmount
+ return () => unsubscribe();
+ }, [path]);
+
+ return { data, loading, error };
+}
+
+// Usage
+function UserProfile({ userId }) {
+ const { data: user, loading } = useDocument(`users/${userId}`);
+
+  if (loading) return <Spinner />;
+  return <div>{user?.name}</div>;
+}
+
+// Collection with query
+function usePosts(max = 10) {
+  const [posts, setPosts] = useState([]);
+
+  useEffect(() => {
+    // Note: the parameter must not be named "limit" - that would shadow
+    // the Firestore limit() function imported above.
+    const q = query(
+      collection(db, 'posts'),
+      where('published', '==', true),
+      orderBy('createdAt', 'desc'),
+      limit(max)
+    );
+
+    const unsubscribe = onSnapshot(q, (snapshot) => {
+      const results = snapshot.docs.map(doc => ({
+        id: doc.id,
+        ...doc.data()
+      }));
+      setPosts(results);
+    });
+
+    return () => unsubscribe();
+  }, [max]);
+
+ return posts;
+}
+
+### Cloud Functions Patterns
+
+Server-side logic with Cloud Functions v2
+
+**When to use**: Backend logic, triggers, scheduled tasks
+
+# CLOUD FUNCTIONS V2:
+
+"""
+Cloud Functions run server-side code triggered by events.
+V2 uses more standard Node.js patterns and better scaling.
+"""
+
+import { onRequest } from 'firebase-functions/v2/https';
+import { onDocumentCreated } from 'firebase-functions/v2/firestore';
+import { onSchedule } from 'firebase-functions/v2/scheduler';
+import { getFirestore, FieldValue } from 'firebase-admin/firestore';
+import { getAuth } from 'firebase-admin/auth';
+import { initializeApp } from 'firebase-admin/app';
+
+initializeApp();
+const db = getFirestore();
+
+// HTTP function
+export const api = onRequest(
+ { cors: true, region: 'us-central1' },
+ async (req, res) => {
+ // Verify auth token
+ const token = req.headers.authorization?.split('Bearer ')[1];
+ if (!token) {
+ res.status(401).json({ error: 'Unauthorized' });
+ return;
+ }
+
+ try {
+ const decoded = await getAuth().verifyIdToken(token);
+ // Process request with decoded.uid
+ res.json({ userId: decoded.uid });
+ } catch (error) {
+ res.status(401).json({ error: 'Invalid token' });
+ }
+ }
+);
+
+// Firestore trigger - on document create
+export const onUserCreated = onDocumentCreated(
+ 'users/{userId}',
+ async (event) => {
+ const snapshot = event.data;
+ const userId = event.params.userId;
+
+ if (!snapshot) return;
+
+ const userData = snapshot.data();
+
+ // Send welcome email, create related documents, etc.
+ await db.collection('notifications').add({
+ userId,
+ type: 'welcome',
+ message: `Welcome, ${userData.name}!`,
+ createdAt: FieldValue.serverTimestamp()
+ });
+ }
+);
+
+// Scheduled function (every day at midnight)
+export const dailyCleanup = onSchedule(
+ { schedule: '0 0 * * *', timeZone: 'UTC' },
+ async (event) => {
+ const cutoff = new Date();
+ cutoff.setDate(cutoff.getDate() - 30);
+
+ // Delete old documents
+ const oldDocs = await db.collection('logs')
+ .where('createdAt', '<', cutoff)
+ .limit(500)
+ .get();
+
+ const batch = db.batch();
+ oldDocs.docs.forEach(doc => batch.delete(doc.ref));
+ await batch.commit();
+
+ console.log(`Deleted ${oldDocs.size} old logs`);
+ }
+);
+
+### Batch Operations
+
+Atomic writes and transactions for consistency
+
+**When to use**: Multiple document updates that must succeed together
+
+# BATCH WRITES AND TRANSACTIONS:
+
+"""
+Batches: Multiple writes that all succeed or all fail.
+Transactions: Read-then-write operations with consistency.
+Max 500 operations per batch/transaction.
+"""
+
+import {
+ writeBatch, runTransaction, doc, getDoc,
+ increment, serverTimestamp
+} from 'firebase/firestore';
+
+// Batch write - no reads, just writes
+async function createPostWithTags(post, tags) {
+ const batch = writeBatch(db);
+
+ // Create post
+ const postRef = doc(collection(db, 'posts'));
+ batch.set(postRef, {
+ ...post,
+ createdAt: serverTimestamp()
+ });
+
+ // Update tag counts
+ for (const tag of tags) {
+ const tagRef = doc(db, 'tags', tag);
+ batch.set(tagRef, {
+ count: increment(1),
+ lastUsed: serverTimestamp()
+ }, { merge: true });
+ }
+
+ await batch.commit();
+ return postRef.id;
+}
+
+// Transaction - read and write atomically
+async function likePost(postId, userId) {
+ return runTransaction(db, async (transaction) => {
+ const postRef = doc(db, 'posts', postId);
+ const likeRef = doc(db, 'posts', postId, 'likes', userId);
+
+ const postSnap = await transaction.get(postRef);
+ if (!postSnap.exists()) {
+ throw new Error('Post not found');
+ }
+
+ const likeSnap = await transaction.get(likeRef);
+ if (likeSnap.exists()) {
+ throw new Error('Already liked');
+ }
+
+ // Increment like count and add like document
+ transaction.update(postRef, {
+ likeCount: increment(1)
+ });
+
+ transaction.set(likeRef, {
+ userId,
+ createdAt: serverTimestamp()
+ });
+
+ return postSnap.data().likeCount + 1;
+ });
+}
+
+### Social Login (Google, GitHub, etc.)
+
+OAuth provider setup and authentication flows
+
+**When to use**: Social login implementation
+
+# SOCIAL LOGIN WITH FIREBASE AUTH
+
+import {
+ getAuth, signInWithPopup, signInWithRedirect,
+ GoogleAuthProvider, GithubAuthProvider, OAuthProvider
+} from "firebase/auth";
+
+const auth = getAuth();
+
+// GOOGLE
+const googleProvider = new GoogleAuthProvider();
+googleProvider.addScope("email");
+googleProvider.setCustomParameters({ prompt: "select_account" });
+
+async function signInWithGoogle() {
+ try {
+ const result = await signInWithPopup(auth, googleProvider);
+ return result.user;
+ } catch (error) {
+ if (error.code === "auth/account-exists-with-different-credential") {
+ return handleAccountConflict(error);
+ }
+ throw error;
+ }
+}
+
+// GITHUB
+const githubProvider = new GithubAuthProvider();
+githubProvider.addScope("read:user");
+
+// APPLE (Required for iOS apps!)
+const appleProvider = new OAuthProvider("apple.com");
+appleProvider.addScope("email");
+appleProvider.addScope("name");
+
+### Popup vs Redirect Auth
+
+When to use popup vs redirect for OAuth
+
+**When to use**: Choosing authentication flow
+
+# Popup: Desktop, SPA (simpler, can be blocked)
+# Redirect: Mobile, iOS Safari (always works)
+
+async function signIn(provider) {
+ if (/iPhone|iPad|Android/i.test(navigator.userAgent)) {
+ return signInWithRedirect(auth, provider);
+ }
+ try {
+ return await signInWithPopup(auth, provider);
+ } catch (e) {
+ if (e.code === "auth/popup-blocked") {
+ return signInWithRedirect(auth, provider);
+ }
+ throw e;
+ }
+}
+
+// Check redirect result on page load
+useEffect(() => {
+ getRedirectResult(auth).then(r => r && setUser(r.user));
+}, []);
+
+### Account Linking
+
+Link multiple providers to one account
+
+**When to use**: User has accounts with different providers
+
+import { fetchSignInMethodsForEmail, linkWithCredential } from "firebase/auth";
+
+async function handleAccountConflict(error) {
+ const email = error.customData?.email;
+ const pendingCred = OAuthProvider.credentialFromError(error);
+ const methods = await fetchSignInMethodsForEmail(auth, email);
+
+ if (methods.includes("google.com")) {
+ alert("Sign in with Google to link accounts");
+ const result = await signInWithPopup(auth, new GoogleAuthProvider());
+ await linkWithCredential(result.user, pendingCred);
+ return result.user;
+ }
+}
+
+// Link new provider
+await linkWithPopup(auth.currentUser, new GithubAuthProvider());
+
+// Unlink provider (keep at least one!)
+await unlink(auth.currentUser, "github.com");
+
+### Auth State Persistence
+
+Control session lifetime
+
+**When to use**: Managing user sessions
+
+import { setPersistence, browserLocalPersistence, browserSessionPersistence } from "firebase/auth";
+
+// LOCAL: survives browser close (default)
+// SESSION: cleared on tab close
+
+async function signInWithRememberMe(email, pass, remember) {
+ await setPersistence(auth, remember ? browserLocalPersistence : browserSessionPersistence);
+ return signInWithEmailAndPassword(auth, email, pass);
+}
+
+// React auth hook
+function useAuth() {
+ const [user, setUser] = useState(null);
+ const [loading, setLoading] = useState(true);
+ useEffect(() => onAuthStateChanged(auth, u => { setUser(u); setLoading(false); }), []);
+ return { user, loading };
+}
+
+### Email Verification and Password Reset
+
+Complete email auth flow
+
+**When to use**: Email/password authentication
+
+import { sendEmailVerification, sendPasswordResetEmail, reauthenticateWithCredential } from "firebase/auth";
+
+// Sign up with verification
+async function signUp(email, password) {
+ const result = await createUserWithEmailAndPassword(auth, email, password);
+ await sendEmailVerification(result.user);
+ return result.user;
+}
+
+// Password reset
+await sendPasswordResetEmail(auth, email);
+
+// Change password (requires recent auth)
+const cred = EmailAuthProvider.credential(user.email, currentPass);
+await reauthenticateWithCredential(user, cred);
+await updatePassword(user, newPass);
+
+### Token Management for APIs
+
+Handle ID tokens for backend calls
+
+**When to use**: Authenticating with backend APIs
+
+import { getIdToken, onIdTokenChanged } from "firebase/auth";
+
+// Get token (auto-refreshes if expired)
+const token = await getIdToken(auth.currentUser);
+
+// API helper with auto-retry
+async function apiCall(url, opts = {}) {
+ const token = await getIdToken(auth.currentUser);
+ const res = await fetch(url, {
+ ...opts,
+ headers: { ...opts.headers, Authorization: "Bearer " + token }
+ });
+ if (res.status === 401) {
+ const newToken = await getIdToken(auth.currentUser, true);
+ return fetch(url, { ...opts, headers: { ...opts.headers, Authorization: "Bearer " + newToken }});
+ }
+ return res;
+}
+
+// Sync to cookie for SSR
+onIdTokenChanged(auth, async u => {
+ document.cookie = u ? "__session=" + await u.getIdToken() : "__session=; max-age=0";
+});
+
+// Check admin claim
+const { claims } = await auth.currentUser.getIdTokenResult();
+const isAdmin = claims.admin === true;
+
+## Collaboration
+
+### Delegation Triggers
+
+- user needs complex OAuth flow -> authentication-oauth (Firebase Auth handles basics, complex flows need OAuth skill)
+- user needs payment integration -> stripe (Firebase + Stripe common pattern)
+- user needs email functionality -> email (Firebase doesn't include email - use SendGrid, Resend, etc.)
+- user needs container deployment -> devops (Beyond Firebase Hosting - Kubernetes, Docker)
+- user needs relational data model -> postgres-wizard (Firestore is wrong choice for highly relational data)
+- user needs full-text search -> elasticsearch-search (Firestore doesn't support full-text search - use Algolia/Elastic)
## Related Skills
Works well with: `nextjs-app-router`, `react-patterns`, `authentication-oauth`, `stripe`
## When to Use
-This skill is applicable to execute the workflow or actions described in the overview.
+
+- User mentions or implies: firebase
+- User mentions or implies: firestore
+- User mentions or implies: firebase auth
+- User mentions or implies: cloud functions
+- User mentions or implies: firebase storage
+- User mentions or implies: realtime database
+- User mentions or implies: firebase hosting
+- User mentions or implies: firebase emulator
+- User mentions or implies: security rules
+- User mentions or implies: firebase admin
diff --git a/plugins/antigravity-awesome-skills-claude/skills/gcp-cloud-run/SKILL.md b/plugins/antigravity-awesome-skills-claude/skills/gcp-cloud-run/SKILL.md
index 71749529..8a24ac02 100644
--- a/plugins/antigravity-awesome-skills-claude/skills/gcp-cloud-run/SKILL.md
+++ b/plugins/antigravity-awesome-skills-claude/skills/gcp-cloud-run/SKILL.md
@@ -1,22 +1,38 @@
---
name: gcp-cloud-run
-description: "When to use: ['Web applications and APIs', 'Need any runtime or library', 'Complex services with multiple endpoints', 'Stateless containerized workloads']"
+description: Specialized skill for building production-ready serverless
+ applications on GCP. Covers Cloud Run services (containerized), Cloud Run
+ Functions (event-driven), cold start optimization, and event-driven
+ architecture with Pub/Sub.
risk: unknown
-source: "vibeship-spawner-skills (Apache 2.0)"
-date_added: "2026-02-27"
+source: vibeship-spawner-skills (Apache 2.0)
+date_added: 2026-02-27
---
# GCP Cloud Run
+Specialized skill for building production-ready serverless applications on GCP.
+Covers Cloud Run services (containerized), Cloud Run Functions (event-driven),
+cold start optimization, and event-driven architecture with Pub/Sub.
+
+## Principles
+
+- Cloud Run for containers, Functions for simple event handlers
+- Optimize for cold starts with startup CPU boost and min instances
+- Set concurrency based on workload (start with 8, adjust)
+- Memory includes /tmp filesystem - plan accordingly
+- Use VPC Connector only when needed (adds latency)
+- Containers should start fast and be stateless
+- Handle signals gracefully for clean shutdown
+
## Patterns
### Cloud Run Service Pattern
Containerized web service on Cloud Run
-**When to use**: ['Web applications and APIs', 'Need any runtime or library', 'Complex services with multiple endpoints', 'Stateless containerized workloads']
+**When to use**: Web applications and APIs, Need any runtime or library, Complex services with multiple endpoints, Stateless containerized workloads
-```javascript
```dockerfile
# Dockerfile - Multi-stage build for smaller image
FROM node:20-slim AS builder
@@ -106,16 +122,44 @@ steps:
- '--cpu=1'
- '--min-instances=1'
- '--max-instances=100'
-
+ - '--concurrency=80'
+ - '--cpu-boost'
+
+images:
+ - 'gcr.io/$PROJECT_ID/my-service:$COMMIT_SHA'
```
+### Structure
+
+project/
+├── Dockerfile
+├── .dockerignore
+├── src/
+│ ├── index.js
+│ └── routes/
+├── package.json
+└── cloudbuild.yaml
+
+### Gcloud_deploy
+
+# Direct gcloud deployment
+gcloud run deploy my-service \
+ --source . \
+ --region us-central1 \
+ --allow-unauthenticated \
+ --memory 512Mi \
+ --cpu 1 \
+ --min-instances 1 \
+ --max-instances 100 \
+ --concurrency 80 \
+ --cpu-boost
+
### Cloud Run Functions Pattern
Event-driven functions (formerly Cloud Functions)
-**When to use**: ['Simple event handlers', 'Pub/Sub message processing', 'Cloud Storage triggers', 'HTTP webhooks']
+**When to use**: Simple event handlers, Pub/Sub message processing, Cloud Storage triggers, HTTP webhooks
-```javascript
```javascript
// HTTP Function
// index.js
@@ -186,15 +230,13 @@ gcloud functions deploy process-uploads \
--trigger-event-filters="bucket=my-bucket" \
--region us-central1
```
-```
### Cold Start Optimization Pattern
Minimize cold start latency for Cloud Run
-**When to use**: ['Latency-sensitive applications', 'User-facing APIs', 'High-traffic services']
+**When to use**: Latency-sensitive applications, User-facing APIs, High-traffic services
-```javascript
## 1. Enable Startup CPU Boost
```bash
@@ -258,36 +300,1079 @@ gcloud run deploy my-service \
--cpu 2 \
--region us-central1
```
+
+### Optimization_impact
+
+- Startup_cpu_boost: 50% faster cold starts
+- Min_instances: Eliminates cold starts for traffic spikes
+- Distroless_image: Smaller attack surface, faster pull
+- Lazy_init: Defers heavy loading to first request
+
+### Concurrency Configuration Pattern
+
+Proper concurrency settings for Cloud Run
+
+**When to use**: Need to optimize instance utilization, Handle traffic spikes efficiently, Reduce cold starts
+
+## Understanding Concurrency
+
+```bash
+# Default concurrency is 80
+# Adjust based on your workload
+
+# For I/O-bound workloads (most web apps)
+gcloud run deploy my-service \
+ --concurrency 80 \
+ --cpu 1
+
+# For CPU-bound workloads
+gcloud run deploy my-service \
+ --concurrency 1 \
+ --cpu 1
+
+# For memory-intensive workloads
+gcloud run deploy my-service \
+ --concurrency 10 \
+ --memory 2Gi
```
-## Anti-Patterns
+## Node.js Concurrency
-### ❌ CPU-Intensive Work Without Concurrency=1
+```javascript
+// Node.js is single-threaded but handles I/O concurrently
+// Use async/await for all I/O operations
-**Why bad**: CPU is shared across concurrent requests. CPU-bound work
-will starve other requests, causing timeouts.
+// GOOD - async I/O
+app.get('/api/data', async (req, res) => {
+ const [users, products] = await Promise.all([
+ fetchUsers(),
+ fetchProducts()
+ ]);
+ res.json({ users, products });
+});
-### ❌ Writing Large Files to /tmp
+// BAD - blocking operation
+app.get('/api/compute', (req, res) => {
+ const result = heavyCpuOperation(); // Blocks other requests!
+ res.json(result);
+});
+```
-**Why bad**: /tmp is an in-memory filesystem. Large files consume
-your memory allocation and can cause OOM errors.
+## Python Concurrency with Gunicorn
-### ❌ Long-Running Background Tasks
+```dockerfile
+FROM python:3.11-slim
+WORKDIR /app
+COPY requirements.txt .
+RUN pip install --no-cache-dir -r requirements.txt
+COPY . .
-**Why bad**: Cloud Run throttles CPU to near-zero when not handling
-requests. Background tasks will be extremely slow or stall.
+# 4 workers for concurrency
+CMD exec gunicorn --bind :$PORT --workers 4 --threads 2 main:app
+```
-## ⚠️ Sharp Edges
+```python
+# main.py
+from flask import Flask
+app = Flask(__name__)
-| Issue | Severity | Solution |
-|-------|----------|----------|
-| Issue | high | ## Calculate memory including /tmp usage |
-| Issue | high | ## Set appropriate concurrency |
-| Issue | high | ## Enable CPU always allocated |
-| Issue | medium | ## Configure connection pool with keep-alive |
-| Issue | high | ## Enable startup CPU boost |
-| Issue | medium | ## Explicitly set execution environment |
-| Issue | medium | ## Set consistent timeouts |
+@app.route('/api/data')
+def get_data():
+ return {'status': 'ok'}
+```
+
+### Concurrency_guidelines
+
+- Concurrency=1: Only for CPU-bound or unsafe code
+- Concurrency=8-20: Memory-intensive workloads
+- Concurrency=80: Default, good for I/O-bound
+- Concurrency=250: Maximum, for very lightweight handlers
+
+### Pub/Sub Integration Pattern
+
+Event-driven processing with Cloud Pub/Sub
+
+**When to use**: Asynchronous message processing, Decoupled microservices, Event-driven architecture
+
+## Push Subscription to Cloud Run
+
+```bash
+# Create topic
+gcloud pubsub topics create orders
+
+# Create push subscription to Cloud Run
+gcloud pubsub subscriptions create orders-push \
+ --topic orders \
+ --push-endpoint https://my-service-xxx.run.app/pubsub \
+ --ack-deadline 600
+```
+
+```javascript
+// Handle Pub/Sub push messages
+const express = require('express');
+const app = express();
+app.use(express.json());
+
+app.post('/pubsub', async (req, res) => {
+ // Verify the request is from Pub/Sub
+ if (!req.body.message) {
+ return res.status(400).send('Invalid Pub/Sub message');
+ }
+
+ try {
+ // Decode message data
+ const message = req.body.message;
+ const data = message.data
+ ? JSON.parse(Buffer.from(message.data, 'base64').toString())
+ : {};
+
+ console.log('Processing order:', data);
+
+ await processOrder(data);
+
+ // Return 200 to acknowledge
+ res.status(200).send('OK');
+ } catch (error) {
+ console.error('Processing failed:', error);
+ // Return 500 to trigger retry
+ res.status(500).send('Processing failed');
+ }
+});
+```
+
+## Publishing Messages
+
+```javascript
+const { PubSub } = require('@google-cloud/pubsub');
+const pubsub = new PubSub();
+
+async function publishOrder(order) {
+ const topic = pubsub.topic('orders');
+ const messageBuffer = Buffer.from(JSON.stringify(order));
+
+ const messageId = await topic.publishMessage({
+ data: messageBuffer,
+ attributes: {
+ type: 'order_created',
+ priority: 'high'
+ }
+ });
+
+ console.log(`Published message ${messageId}`);
+ return messageId;
+}
+```
+
+## Dead Letter Queue
+
+```bash
+# Create DLQ topic
+gcloud pubsub topics create orders-dlq
+
+# Update subscription with DLQ
+gcloud pubsub subscriptions update orders-push \
+ --dead-letter-topic orders-dlq \
+ --max-delivery-attempts 5
+```
+
+### Cloud SQL Connection Pattern
+
+Connect Cloud Run to Cloud SQL securely
+
+**When to use**: Need relational database, Migrating existing applications, Complex queries and transactions
+
+```bash
+# Deploy with Cloud SQL connection
+gcloud run deploy my-service \
+ --add-cloudsql-instances PROJECT:REGION:INSTANCE \
+ --set-env-vars INSTANCE_CONNECTION_NAME="PROJECT:REGION:INSTANCE" \
+ --set-env-vars DB_NAME="mydb" \
+ --set-env-vars DB_USER="myuser"
+```
+
+```javascript
+// Using Unix socket connection
+const { Pool } = require('pg');
+
+const pool = new Pool({
+ user: process.env.DB_USER,
+ password: process.env.DB_PASS,
+ database: process.env.DB_NAME,
+ // Cloud SQL connector uses Unix socket
+ host: `/cloudsql/${process.env.INSTANCE_CONNECTION_NAME}`,
+ max: 5, // Connection pool size
+ idleTimeoutMillis: 30000,
+ connectionTimeoutMillis: 10000,
+});
+
+app.get('/api/users', async (req, res) => {
+ const client = await pool.connect();
+ try {
+ const result = await client.query('SELECT * FROM users LIMIT 100');
+ res.json(result.rows);
+ } finally {
+ client.release();
+ }
+});
+```
+
+```python
+# Python with SQLAlchemy
+import os
+from sqlalchemy import create_engine
+
+def get_engine():
+ instance_connection_name = os.environ["INSTANCE_CONNECTION_NAME"]
+ db_user = os.environ["DB_USER"]
+ db_pass = os.environ["DB_PASS"]
+ db_name = os.environ["DB_NAME"]
+
+ engine = create_engine(
+ f"postgresql+pg8000://{db_user}:{db_pass}@/{db_name}",
+ connect_args={
+ "unix_sock": f"/cloudsql/{instance_connection_name}/.s.PGSQL.5432"
+ },
+ pool_size=5,
+ max_overflow=2,
+ pool_timeout=30,
+ pool_recycle=1800,
+ )
+ return engine
+```
+
+### Best_practices
+
+- Use connection pooling (max 5-10 per instance)
+- Set appropriate idle timeouts
+- Handle connection errors gracefully
+- Consider Cloud SQL Proxy for local development
+
+### Secret Manager Integration
+
+Securely manage secrets in Cloud Run
+
+**When to use**: API keys, Database passwords, Service account keys, Any sensitive configuration
+
+```bash
+# Create secret
+echo -n "my-secret-value" | gcloud secrets create my-secret --data-file=-
+
+# Mount as environment variable
+gcloud run deploy my-service \
+ --update-secrets=API_KEY=my-secret:latest
+
+# Mount as file volume
+gcloud run deploy my-service \
+ --update-secrets=/secrets/api-key=my-secret:latest
+```
+
+```javascript
+// Access mounted as environment variable
+const apiKey = process.env.API_KEY;
+
+// Access mounted as file
+const fs = require('fs');
+const apiKeyFromFile = fs.readFileSync('/secrets/api-key', 'utf8');
+
+// Access via Secret Manager API (when not mounted)
+const { SecretManagerServiceClient } = require('@google-cloud/secret-manager');
+const client = new SecretManagerServiceClient();
+
+async function getSecret(name) {
+ const [version] = await client.accessSecretVersion({
+ name: `projects/${projectId}/secrets/${name}/versions/latest`
+ });
+ return version.payload.data.toString();
+}
+```
+
+## Sharp Edges
+
+### /tmp Filesystem Counts Against Memory
+
+Severity: HIGH
+
+Situation: Writing files to /tmp directory in Cloud Run
+
+Symptoms:
+Container killed with OOM error.
+Memory usage spikes unexpectedly.
+File operations cause container restarts.
+"Container memory limit exceeded" in logs.
+
+Why this breaks:
+Cloud Run uses an in-memory filesystem for /tmp. Any files written
+to /tmp consume memory from your container's allocation.
+
+Common scenarios:
+- Downloading files temporarily
+- Creating temp processing files
+- Libraries caching to /tmp
+- Large log buffers
+
+A 512MB container that downloads a 200MB file to /tmp only has
+~300MB left for the application.
+
+Recommended fix:
+
+## Calculate memory including /tmp usage
+
+```yaml
+# cloudbuild.yaml
+steps:
+ - name: 'gcr.io/cloud-builders/gcloud'
+ args:
+ - 'run'
+ - 'deploy'
+ - 'my-service'
+ - '--memory=1Gi' # Include /tmp overhead
+ - '--image=gcr.io/$PROJECT_ID/my-service'
+```
+
+## Stream instead of buffering
+
+```python
+# BAD - buffers entire file in /tmp
+def process_large_file(bucket_name, blob_name):
+ blob = bucket.blob(blob_name)
+ blob.download_to_filename('/tmp/large_file')
+ with open('/tmp/large_file', 'rb') as f:
+ process(f.read())
+
+# GOOD - stream processing
+def process_large_file(bucket_name, blob_name):
+ blob = bucket.blob(blob_name)
+ with blob.open('rb') as f:
+ for chunk in iter(lambda: f.read(8192), b''):
+ process_chunk(chunk)
+```
+
+## Use Cloud Storage for large files
+
+```python
+from google.cloud import storage
+
+def process_with_gcs(bucket_name, input_blob, output_blob):
+ client = storage.Client()
+ bucket = client.bucket(bucket_name)
+
+ # Process directly to/from GCS
+ input_blob = bucket.blob(input_blob)
+ output_blob = bucket.blob(output_blob)
+
+ with input_blob.open('rb') as reader:
+ with output_blob.open('wb') as writer:
+ for chunk in iter(lambda: reader.read(65536), b''):
+ processed = transform(chunk)
+ writer.write(processed)
+```
+
+## Monitor memory usage
+
+```python
+import psutil
+import logging
+
+def log_memory():
+ memory = psutil.virtual_memory()
+ logging.info(f"Memory: {memory.percent}% used, "
+ f"{memory.available / 1024 / 1024:.0f}MB available")
+```
+
+### Concurrency=1 Causes Scaling Bottlenecks
+
+Severity: HIGH
+
+Situation: Setting concurrency to 1 for request isolation
+
+Symptoms:
+Auto-scaling creates many container instances.
+High latency during traffic spikes.
+Increased cold starts.
+Higher costs from more instances.
+
+Why this breaks:
+Setting concurrency to 1 means each container handles only one
+request at a time. During traffic spikes:
+
+- 100 concurrent requests = 100 container instances
+- Each instance has cold start overhead
+- More instances = higher costs
+- Scaling takes time, requests queue up
+
+This should only be used when:
+- Processing is truly single-threaded
+- Memory-heavy per-request processing
+- Using thread-unsafe libraries
+
+Recommended fix:
+
+## Set appropriate concurrency
+
+```bash
+# For I/O-bound workloads (most web apps)
+gcloud run deploy my-service \
+ --concurrency=80 \
+ --max-instances=100
+
+# For CPU-bound workloads
+gcloud run deploy my-service \
+ --concurrency=4 \
+ --cpu=2
+
+# Only use 1 when absolutely necessary
+gcloud run deploy my-service \
+ --concurrency=1 \
+ --max-instances=1000 # Be prepared for many instances
+```
+
+## Node.js - use async properly
+
+```javascript
+// With high concurrency, ensure async operations
+const express = require('express');
+const app = express();
+
+app.get('/api/data', async (req, res) => {
+ // All I/O should be async
+ const data = await fetchFromDatabase();
+ const enriched = await enrichData(data);
+ res.json(enriched);
+});
+
+// Concurrency 80+ is safe for async I/O workloads
+```
+
+## Python - use async framework
+
+```python
+from fastapi import FastAPI
+import asyncio
+import httpx
+
+app = FastAPI()
+
+@app.get("/api/data")
+async def get_data():
+ # Async I/O allows high concurrency
+ async with httpx.AsyncClient() as client:
+ response = await client.get("https://api.example.com/data")
+ return response.json()
+
+# Concurrency 80+ safe with async framework
+```
+
+## Calculate concurrency
+
+```
+concurrency = memory_limit / per_request_memory
+
+Example:
+- 512MB container
+- 20MB per request overhead
+- Safe concurrency: ~25
+```
+
+### CPU Throttled When Not Handling Requests
+
+Severity: HIGH
+
+Situation: Running background tasks or processing between requests
+
+Symptoms:
+Background tasks run extremely slowly.
+Scheduled work doesn't complete.
+Metrics collection fails.
+Connection keep-alive breaks.
+
+Why this breaks:
+By default, Cloud Run throttles CPU to near-zero when not actively
+handling a request. This is "CPU only during requests" mode.
+
+Affected operations:
+- Background threads
+- Connection pool maintenance
+- Metrics/telemetry emission
+- Scheduled tasks within container
+- Cleanup operations after response
+
+Recommended fix:
+
+## Enable CPU always allocated
+
+```bash
+# CPU allocated even outside requests
+gcloud run deploy my-service \
+ --cpu-throttling=false \
+ --min-instances=1
+
+# Note: This increases costs but enables background work
+```
+
+## Use startup CPU boost for initialization
+
+```bash
+# Boost CPU during cold start only
+gcloud run deploy my-service \
+ --cpu-boost \
+ --cpu-throttling=true # Default, throttle after request
+```
+
+## Move background work to Cloud Tasks
+
+```python
+from google.cloud import tasks_v2
+import json
+
+def create_background_task(payload):
+ client = tasks_v2.CloudTasksClient()
+ parent = client.queue_path(
+ "my-project", "us-central1", "my-queue"
+ )
+
+ task = {
+ "http_request": {
+ "http_method": tasks_v2.HttpMethod.POST,
+ "url": "https://my-service.run.app/process",
+ "body": json.dumps(payload).encode(),
+ "headers": {"Content-Type": "application/json"}
+ }
+ }
+
+ client.create_task(parent=parent, task=task)
+
+# Handle response immediately, background via Cloud Tasks
+@app.post("/api/order")
+async def create_order(order: Order):
+ order_id = await save_order(order)
+
+ # Queue background processing
+ create_background_task({"order_id": order_id})
+
+ return {"order_id": order_id, "status": "processing"}
+```
+
+## Use Pub/Sub for async processing
+
+```yaml
+# Move heavy processing to separate service
+steps:
+ # Main service - responds quickly
+ - name: 'gcr.io/cloud-builders/gcloud'
+ args: ['run', 'deploy', 'api-service',
+ '--cpu-throttling=true']
+
+ # Worker service - processes messages
+ - name: 'gcr.io/cloud-builders/gcloud'
+ args: ['run', 'deploy', 'worker-service',
+ '--cpu-throttling=false',
+ '--min-instances=1']
+```
+
+### VPC Connector 10-Minute Idle Timeout
+
+Severity: MEDIUM
+
+Situation: Cloud Run service connecting to VPC resources
+
+Symptoms:
+Connection errors after period of inactivity.
+"Connection reset" or "Connection refused" errors.
+Sporadic failures to VPC resources.
+Database connections drop unexpectedly.
+
+Why this breaks:
+Cloud Run's VPC connector has a 10-minute idle timeout on connections.
+If a connection is idle for 10 minutes, it's silently closed.
+
+Affects:
+- Database connection pools
+- Redis connections
+- Internal API connections
+- Any persistent VPC connection
+
+Recommended fix:
+
+## Configure connection pool with keep-alive
+
+```python
+# SQLAlchemy with connection recycling
+from sqlalchemy import create_engine
+
+engine = create_engine(
+ DATABASE_URL,
+ pool_size=5,
+ max_overflow=2,
+ pool_recycle=300, # Recycle connections every 5 minutes
+ pool_pre_ping=True # Validate connection before use
+)
+```
+
+## TCP keep-alive for custom connections
+
+```python
+import socket
+
+sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
+sock.setsockopt(socket.SOL_SOCKET, socket.SO_KEEPALIVE, 1)
+sock.setsockopt(socket.IPPROTO_TCP, socket.TCP_KEEPIDLE, 60)
+sock.setsockopt(socket.IPPROTO_TCP, socket.TCP_KEEPINTVL, 60)
+sock.setsockopt(socket.IPPROTO_TCP, socket.TCP_KEEPCNT, 5)
+```
+
+## Redis with connection validation
+
+```python
+import socket
+import redis
+pool = redis.ConnectionPool(
+ host=REDIS_HOST,
+ port=6379,
+ socket_keepalive=True,
+ socket_keepalive_options={
+ socket.TCP_KEEPIDLE: 60,
+ socket.TCP_KEEPINTVL: 60,
+ socket.TCP_KEEPCNT: 5
+ },
+ health_check_interval=30
+)
+client = redis.Redis(connection_pool=pool)
+```
+
+## Use Cloud SQL Proxy sidecar
+
+```yaml
+# Use Cloud SQL connector which handles reconnection
+# requirements.txt
+cloud-sql-python-connector[pg8000]
+```
+
+```python
+from google.cloud.sql.connector import Connector
+import sqlalchemy
+
+connector = Connector()
+
+def getconn():
+ return connector.connect(
+ "project:region:instance",
+ "pg8000",
+ user="user",
+ password="password",
+ db="database"
+ )
+
+engine = sqlalchemy.create_engine(
+ "postgresql+pg8000://",
+ creator=getconn
+)
+```
+
+### Container Startup Timeout (4 minutes max)
+
+Severity: HIGH
+
+Situation: Deploying containers with slow initialization
+
+Symptoms:
+Deployment fails with "Container failed to start".
+Service never becomes healthy.
+"Revision failed to become ready" errors.
+Works locally but fails on Cloud Run.
+
+Why this breaks:
+Cloud Run expects your container to start listening on PORT within
+4 minutes (240 seconds). If it doesn't, the instance is killed.
+
+Common causes:
+- Heavy framework initialization (ML models, etc.)
+- Waiting for external dependencies at startup
+- Large dependency loading
+- Database migrations on startup
+
+Recommended fix:
+
+## Enable startup CPU boost
+
+```bash
+gcloud run deploy my-service \
+ --cpu-boost \
+ --region us-central1
+```
+
+## Lazy initialization
+
+```python
+from functools import lru_cache
+from fastapi import FastAPI
+
+app = FastAPI()
+
+# Don't load at import time
+model = None
+
+@lru_cache()
+def get_model():
+ global model
+ if model is None:
+ # Load on first request, not at startup
+ model = load_heavy_model()
+ return model
+
+@app.get("/predict")
+async def predict(data: dict):
+ model = get_model() # Loads on first call only
+ return model.predict(data)
+
+# Startup is fast - model loads on first request
+```
+
+## Start listening immediately
+
+```python
+import asyncio
+from fastapi import FastAPI, HTTPException
+import uvicorn
+
+app = FastAPI()
+
+# Global state for async initialization
+initialized = asyncio.Event()
+
+@app.on_event("startup")
+async def startup():
+ # Start background initialization
+ asyncio.create_task(async_init())
+
+async def async_init():
+ # Heavy initialization happens after server starts
+ await load_models()
+ await warm_up_connections()
+ initialized.set()
+
+@app.get("/ready")
+async def ready():
+ if not initialized.is_set():
+ raise HTTPException(503, "Still initializing")
+ return {"status": "ready"}
+
+@app.get("/health")
+async def health():
+ # Always respond - health check passes
+ return {"status": "healthy"}
+```
+
+## Use multi-stage builds
+
+```dockerfile
+# Build stage - slow
+FROM python:3.11 as builder
+WORKDIR /app
+COPY requirements.txt .
+RUN pip wheel --no-cache-dir --wheel-dir /wheels -r requirements.txt
+
+# Runtime stage - fast startup
+FROM python:3.11-slim
+WORKDIR /app
+COPY --from=builder /wheels /wheels
+RUN pip install --no-cache-dir /wheels/* && rm -rf /wheels
+COPY . .
+CMD ["uvicorn", "main:app", "--host", "0.0.0.0", "--port", "8080"]
+```
+
+## Run migrations separately
+
+```bash
+# Don't migrate on startup - use Cloud Build
+steps:
+ # Run migrations first
+ - name: 'gcr.io/cloud-builders/gcloud'
+ entrypoint: 'bash'
+ args:
+ - '-c'
+ - |
+ gcloud run jobs execute migrate-job --wait
+
+ # Then deploy
+ - name: 'gcr.io/cloud-builders/gcloud'
+ args: ['run', 'deploy', 'my-service', ...]
+```
+
+### Second Generation Execution Environment Differences
+
+Severity: MEDIUM
+
+Situation: Migrating to or using Cloud Run second-gen execution environment
+
+Symptoms:
+Network behavior changes.
+Different syscall support.
+File system behavior differences.
+Container behaves differently than in first-gen.
+
+Why this breaks:
+Cloud Run's second-generation execution environment uses a different
+sandbox (gVisor) with different characteristics:
+
+- More Linux syscalls supported
+- Full /proc and /sys access
+- Different network stack
+- No automatic HTTPS redirect
+- Different tmp filesystem behavior
+
+Recommended fix:
+
+## Explicitly set execution environment
+
+```bash
+# First generation (legacy)
+gcloud run deploy my-service \
+ --execution-environment=gen1
+
+# Second generation (recommended for most)
+gcloud run deploy my-service \
+ --execution-environment=gen2
+```
+
+## Handle network differences
+
+```python
+# Second-gen doesn't auto-redirect HTTP to HTTPS
+from fastapi import FastAPI, Request
+from fastapi.responses import RedirectResponse
+
+app = FastAPI()
+
+@app.middleware("http")
+async def redirect_https(request: Request, call_next):
+ # Check X-Forwarded-Proto header
+ if request.headers.get("X-Forwarded-Proto") == "http":
+ url = request.url.replace(scheme="https")
+ return RedirectResponse(url, status_code=301)
+ return await call_next(request)
+```
+
+## GPU access (second-gen only)
+
+```bash
+# GPUs only available in second-gen
+gcloud run deploy ml-service \
+ --execution-environment=gen2 \
+ --gpu=1 \
+ --gpu-type=nvidia-l4
+```
+
+## Check execution environment
+
+```python
+import os
+
+def get_execution_environment():
+ # Second-gen has different /proc structure
+ try:
+ with open('/proc/version', 'r') as f:
+ version = f.read()
+ if 'gVisor' in version:
+ return 'gen2'
+ except:
+ pass
+ return 'gen1'
+```
+
+### Request Timeout Configuration Mismatch
+
+Severity: MEDIUM
+
+Situation: Long-running requests or background processing
+
+Symptoms:
+Requests terminated before completion.
+504 Gateway Timeout errors.
+Processing stops unexpectedly.
+Inconsistent timeout behavior.
+
+Why this breaks:
+Cloud Run has multiple timeout configurations that must align:
+- Request timeout (default 300s, max 3600s for HTTP, 60m for gRPC)
+- Client timeout
+- Downstream service timeouts
+- Load balancer timeout (for external access)
+
+Recommended fix:
+
+## Set consistent timeouts
+
+```bash
+# Increase request timeout (max 3600s for HTTP)
+gcloud run deploy my-service \
+ --timeout=900 # 15 minutes
+```
+
+## Handle long-running with webhooks
+
+```python
+from fastapi import FastAPI, BackgroundTasks
+import httpx
+
+app = FastAPI()
+
+@app.post("/process")
+async def process(data: dict, background_tasks: BackgroundTasks):
+ task_id = create_task_id()
+
+ # Start background processing
+ background_tasks.add_task(
+ long_running_process,
+ task_id,
+ data,
+ data.get("callback_url")
+ )
+
+ # Return immediately
+ return {"task_id": task_id, "status": "processing"}
+
+async def long_running_process(task_id, data, callback_url):
+ result = await heavy_computation(data)
+
+ # Callback when done
+ if callback_url:
+ async with httpx.AsyncClient() as client:
+ await client.post(callback_url, json={
+ "task_id": task_id,
+ "result": result
+ })
+```
+
+## Use Cloud Tasks for reliable long-running
+
+```python
+from google.cloud import tasks_v2
+
+def create_long_running_task(data):
+ client = tasks_v2.CloudTasksClient()
+ parent = client.queue_path(PROJECT, REGION, "long-tasks")
+
+ task = {
+ "http_request": {
+ "http_method": tasks_v2.HttpMethod.POST,
+ "url": "https://worker.run.app/process",
+ "body": json.dumps(data).encode(),
+ "headers": {"Content-Type": "application/json"}
+ },
+ "dispatch_deadline": {"seconds": 1800} # 30 min
+ }
+
+ return client.create_task(parent=parent, task=task)
+```
+
+## Streaming for long responses
+
+```python
+from fastapi import FastAPI
+from fastapi.responses import StreamingResponse
+
+@app.get("/large-report")
+async def large_report():
+ async def generate():
+ for chunk in process_large_data():
+ yield chunk
+
+ return StreamingResponse(generate(), media_type="text/plain")
+```
+
+## Validation Checks
+
+### Hardcoded GCP Credentials
+
+Severity: ERROR
+
+GCP credentials must never be hardcoded in source code
+
+Message: Hardcoded GCP service account credentials. Use Secret Manager or Workload Identity.
+
+### GCP API Key in Source Code
+
+Severity: ERROR
+
+API keys should use Secret Manager
+
+Message: Hardcoded GCP API key. Use Secret Manager.
+
+### Credentials JSON File in Repository
+
+Severity: ERROR
+
+Service account JSON files should not be in source control
+
+Message: Credentials file detected. Add to .gitignore and use Secret Manager.
+
+### Running as Root User
+
+Severity: WARNING
+
+Containers should not run as root for security
+
+Message: Dockerfile runs as root. Add USER directive for security.
+
+### Missing Health Check in Dockerfile
+
+Severity: INFO
+
+Cloud Run uses HTTP health checks, Dockerfile HEALTHCHECK is optional
+
+Message: No HEALTHCHECK in Dockerfile. Cloud Run uses its own health checks.
+
+### Hardcoded Port in Application
+
+Severity: WARNING
+
+Port should come from PORT environment variable
+
+Message: Hardcoded port. Use PORT environment variable for Cloud Run.
+
+### Large File Writes to /tmp
+
+Severity: WARNING
+
+/tmp uses container memory, large writes can cause OOM
+
+Message: /tmp writes consume memory. Consider Cloud Storage for large files.
+
+### Synchronous File Operations
+
+Severity: WARNING
+
+Sync file ops block the event loop in async apps
+
+Message: Synchronous file operations. Use async versions for better concurrency.
+
+### Global Mutable State
+
+Severity: WARNING
+
+Global state issues with concurrent requests
+
+Message: Global mutable state may cause issues with concurrent requests.
+
+### Thread-Unsafe Singleton Pattern
+
+Severity: WARNING
+
+Singletons need thread safety for concurrency > 1
+
+Message: Singleton pattern - ensure thread safety if using concurrency > 1.
+
+## Collaboration
+
+### Delegation Triggers
+
+- user needs AWS serverless -> aws-serverless (Lambda, API Gateway, SAM)
+- user needs Azure containers -> azure-functions (Azure Container Apps, Functions)
+- user needs database design -> postgres-wizard (Cloud SQL design, AlloyDB)
+- user needs authentication -> auth-specialist (Firebase Auth, Identity Platform)
+- user needs AI integration -> llm-architect (Vertex AI, Cloud Run + LLM)
+- user needs workflow orchestration -> workflow-automation (Cloud Workflows, Eventarc)
## When to Use
-This skill is applicable to execute the workflow or actions described in the overview.
+
+Use this skill when the request clearly matches the capabilities and patterns described above.
diff --git a/plugins/antigravity-awesome-skills-claude/skills/graphql/SKILL.md b/plugins/antigravity-awesome-skills-claude/skills/graphql/SKILL.md
index 52c15622..08aa2b36 100644
--- a/plugins/antigravity-awesome-skills-claude/skills/graphql/SKILL.md
+++ b/plugins/antigravity-awesome-skills-claude/skills/graphql/SKILL.md
@@ -1,22 +1,39 @@
---
name: graphql
-description: "You're a developer who has built GraphQL APIs at scale. You've seen the N+1 query problem bring down production servers. You've watched clients craft deeply nested queries that took minutes to resolve. You know that GraphQL's power is also its danger."
+description: GraphQL gives clients exactly the data they need - no more, no
+ less. One endpoint, typed schema, introspection. But the flexibility that
+ makes it powerful also makes it dangerous. Without proper controls, clients
+ can craft queries that bring down your server.
risk: safe
-source: "vibeship-spawner-skills (Apache 2.0)"
-date_added: "2026-02-27"
+source: vibeship-spawner-skills (Apache 2.0)
+date_added: 2026-02-27
---
# GraphQL
-You're a developer who has built GraphQL APIs at scale. You've seen the
-N+1 query problem bring down production servers. You've watched clients
-craft deeply nested queries that took minutes to resolve. You know that
-GraphQL's power is also its danger.
+GraphQL gives clients exactly the data they need - no more, no less. One
+endpoint, typed schema, introspection. But the flexibility that makes it
+powerful also makes it dangerous. Without proper controls, clients can
+craft queries that bring down your server.
-Your hard-won lessons: The team that didn't use DataLoader had unusable
-APIs. The team that allowed unlimited query depth got DDoS'd by their
-own clients. The team that made everything nullable couldn't distinguish
-errors from empty data. You've l
+This skill covers schema design, resolvers, DataLoader for N+1 prevention,
+federation for microservices, and client integration with Apollo/urql.
+Key insight: GraphQL is a contract. The schema is the API documentation.
+Design it carefully.
+
+2025 lesson: GraphQL isn't always the answer. For simple CRUD, REST is
+simpler. For high-performance public APIs, REST with caching wins. Use
+GraphQL when you have complex data relationships and diverse client needs.
+
+## Principles
+
+- Schema-first design - the schema is the contract
+- Prevent N+1 queries with DataLoader
+- Limit query depth and complexity
+- Use fragments for reusable selections
+- Mutations should be specific, not generic update operations
+- Errors are data - use union types for expected failures
+- Nullability is meaningful - design it intentionally
## Capabilities
@@ -30,44 +47,1026 @@ errors from empty data. You've l
- apollo-client
- urql
+## Scope
+
+- database-queries -> postgres-wizard
+- authentication -> authentication-oauth
+- rest-api-design -> backend
+- websocket-infrastructure -> backend
+
+## Tooling
+
+### Server
+
+- @apollo/server - When: Apollo Server v4 Note: Most popular GraphQL server
+- graphql-yoga - When: Lightweight alternative Note: Good for serverless
+- mercurius - When: Fastify integration Note: Fast, uses JIT
+
+### Client
+
+- @apollo/client - When: Full-featured client Note: Caching, state management
+- urql - When: Lightweight alternative Note: Smaller, simpler
+- graphql-request - When: Simple requests Note: Minimal, no caching
+
+### Tools
+
+- graphql-codegen - When: Type generation Note: Essential for TypeScript
+- dataloader - When: N+1 prevention Note: Batches and caches
+
## Patterns
### Schema Design
Type-safe schema with proper nullability
+**When to use**: Designing any GraphQL API
+
+# SCHEMA DESIGN:
+
+"""
+The schema is your API contract. Design nullability
+intentionally - non-null fields must always resolve.
+"""
+
+type Query {
+ # Non-null - will always return user or throw
+ user(id: ID!): User!
+
+ # Nullable - returns null if not found
+ userByEmail(email: String!): User
+
+ # Non-null list with non-null items
+ users(limit: Int = 10, offset: Int = 0): [User!]!
+
+ # Search with pagination
+ searchUsers(
+ query: String!
+ first: Int
+ after: String
+ ): UserConnection!
+}
+
+type Mutation {
+ # Input types for complex mutations
+ createUser(input: CreateUserInput!): CreateUserPayload!
+ updateUser(id: ID!, input: UpdateUserInput!): UpdateUserPayload!
+ deleteUser(id: ID!): DeleteUserPayload!
+}
+
+type Subscription {
+ userCreated: User!
+ messageReceived(roomId: ID!): Message!
+}
+
+# Input types
+input CreateUserInput {
+ email: String!
+ name: String!
+ role: Role = USER
+}
+
+input UpdateUserInput {
+ email: String
+ name: String
+ role: Role
+}
+
+# Payload types (for errors as data)
+type CreateUserPayload {
+ user: User
+ errors: [Error!]!
+}
+
+union UpdateUserPayload = UpdateUserSuccess | NotFoundError | ValidationError
+
+type UpdateUserSuccess {
+ user: User!
+}
+
+# Enums
+enum Role {
+ USER
+ ADMIN
+ MODERATOR
+}
+
+# Types with relationships
+type User {
+ id: ID!
+ email: String!
+ name: String!
+ role: Role!
+ posts(limit: Int = 10): [Post!]!
+ createdAt: DateTime!
+}
+
+type Post {
+ id: ID!
+ title: String!
+ content: String!
+ author: User!
+ comments: [Comment!]!
+ published: Boolean!
+}
+
+# Pagination (Relay-style)
+type UserConnection {
+ edges: [UserEdge!]!
+ pageInfo: PageInfo!
+ totalCount: Int!
+}
+
+type UserEdge {
+ node: User!
+ cursor: String!
+}
+
+type PageInfo {
+ hasNextPage: Boolean!
+ hasPreviousPage: Boolean!
+ startCursor: String
+ endCursor: String
+}
+
### DataLoader for N+1 Prevention
Batch and cache database queries
+**When to use**: Resolving relationships
+
+# DATALOADER:
+
+"""
+Without DataLoader, fetching 10 posts with authors
+makes 11 queries (1 for posts + 10 for each author).
+DataLoader batches into 2 queries.
+"""
+
+import DataLoader from 'dataloader';
+
+// Create loaders per request
+function createLoaders(db) {
+ return {
+ userLoader: new DataLoader(async (ids) => {
+ // Single query for all users
+ const users = await db.user.findMany({
+ where: { id: { in: ids } }
+ });
+
+ // Return in same order as ids
+ const userMap = new Map(users.map(u => [u.id, u]));
+ return ids.map(id => userMap.get(id) || null);
+ }),
+
+ postsByAuthorLoader: new DataLoader(async (authorIds) => {
+ const posts = await db.post.findMany({
+ where: { authorId: { in: authorIds } }
+ });
+
+ // Group by author
+ const postsByAuthor = new Map();
+ posts.forEach(post => {
+ const existing = postsByAuthor.get(post.authorId) || [];
+ postsByAuthor.set(post.authorId, [...existing, post]);
+ });
+
+ return authorIds.map(id => postsByAuthor.get(id) || []);
+ })
+ };
+}
+
+// Attach to context
+const server = new ApolloServer({
+ typeDefs,
+ resolvers,
+});
+
+app.use('/graphql', expressMiddleware(server, {
+ context: async ({ req }) => ({
+ db,
+ loaders: createLoaders(db),
+ user: req.user
+ })
+}));
+
+// Use in resolvers
+const resolvers = {
+ Post: {
+ author: (post, _, { loaders }) => {
+ return loaders.userLoader.load(post.authorId);
+ }
+ },
+ User: {
+ posts: (user, _, { loaders }) => {
+ return loaders.postsByAuthorLoader.load(user.id);
+ }
+ }
+};
+
### Apollo Client Caching
Normalized cache with type policies
-## Anti-Patterns
+**When to use**: Client-side data management
-### ❌ No DataLoader
+# APOLLO CLIENT CACHING:
-### ❌ No Query Depth Limiting
+"""
+Apollo Client normalizes responses into a flat cache.
+Configure type policies for custom cache behavior.
+"""
-### ❌ Authorization in Schema
+import { ApolloClient, InMemoryCache } from '@apollo/client';
-## ⚠️ Sharp Edges
+const cache = new InMemoryCache({
+ typePolicies: {
+ Query: {
+ fields: {
+ // Paginated field
+ users: {
+ keyArgs: ['query'], // Cache separately per query
+ merge(existing = { edges: [] }, incoming, { args }) {
+ // Append for infinite scroll
+ if (args?.after) {
+ return {
+ ...incoming,
+ edges: [...existing.edges, ...incoming.edges]
+ };
+ }
+ return incoming;
+ }
+ }
+ }
+ },
+ User: {
+ keyFields: ['id'], // How to identify users
+ fields: {
+ fullName: {
+ read(_, { readField }) {
+ // Computed field
+ return `${readField('firstName')} ${readField('lastName')}`;
+ }
+ }
+ }
+ }
+ }
+});
-| Issue | Severity | Solution |
-|-------|----------|----------|
-| Each resolver makes separate database queries | critical | # USE DATALOADER |
-| Deeply nested queries can DoS your server | critical | # LIMIT QUERY DEPTH AND COMPLEXITY |
-| Introspection enabled in production exposes your schema | high | # DISABLE INTROSPECTION IN PRODUCTION |
-| Authorization only in schema directives, not resolvers | high | # AUTHORIZE IN RESOLVERS |
-| Authorization on queries but not on fields | high | # FIELD-LEVEL AUTHORIZATION |
-| Non-null field failure nullifies entire parent | medium | # DESIGN NULLABILITY INTENTIONALLY |
-| Expensive queries treated same as cheap ones | medium | # QUERY COST ANALYSIS |
-| Subscriptions not properly cleaned up | medium | # PROPER SUBSCRIPTION CLEANUP |
+const client = new ApolloClient({
+ uri: '/graphql',
+ cache,
+ defaultOptions: {
+ watchQuery: {
+ fetchPolicy: 'cache-and-network'
+ }
+ }
+});
+
+// Queries with hooks
+import { useQuery, useMutation } from '@apollo/client';
+
+const GET_USER = gql`
+ query GetUser($id: ID!) {
+ user(id: $id) {
+ id
+ name
+ email
+ }
+ }
+`;
+
+function UserProfile({ userId }) {
+ const { data, loading, error } = useQuery(GET_USER, {
+ variables: { id: userId }
+ });
+
+ if (loading) return ;
+ if (error) return ;
+
+ return
+ );
+}
+
+### Anti_patterns
+
+- Pattern: Using Admin API key in frontend code | Why: Admin key exposes full index control including deletion | Fix: Use search-only API key with restrictions
+- Pattern: Not using /lite client for frontend | Why: Full client includes unnecessary code for search | Fix: Import from algoliasearch/lite for smaller bundle
+
+### References
+
+- https://www.algolia.com/doc/api-reference/widgets/react
+- https://www.algolia.com/doc/libraries/javascript/v5/methods/search/
+
### Next.js Server-Side Rendering
SSR integration for Next.js with react-instantsearch-nextjs package.
@@ -36,6 +117,73 @@ Key considerations:
- Handle URL synchronization with routing prop
- Use getServerState for initial state
+### Code_example
+
+// app/search/page.tsx
+import { InstantSearchNext } from 'react-instantsearch-nextjs';
+import { searchClient, INDEX_NAME } from '@/lib/algolia';
+import { SearchBox, Hits, RefinementList } from 'react-instantsearch';
+
+// Force dynamic rendering for fresh search results
+export const dynamic = 'force-dynamic';
+
+export default function SearchPage() {
+ return (
+
+
+
+
+
+
+
+
+
+ );
+}
+
+// For custom routing (URL synchronization)
+import { history } from 'instantsearch.js/es/lib/routers';
+import { simple } from 'instantsearch.js/es/lib/stateMappings';
+
+
+ typeof window === 'undefined'
+ ? new URL(url) as unknown as Location
+ : window.location,
+ }),
+ stateMapping: simple(),
+ }}
+>
+ {/* widgets */}
+
+
+### Anti_patterns
+
+- Pattern: Using InstantSearch component for Next.js SSR | Why: Regular component doesn't support server-side rendering | Fix: Use InstantSearchNext from react-instantsearch-nextjs
+- Pattern: Static rendering for search pages | Why: Search results must be fresh for each request | Fix: Set export const dynamic = 'force-dynamic'
+
+### References
+
+- https://www.npmjs.com/package/react-instantsearch-nextjs
+- https://www.algolia.com/developers/code-exchange/instantsearch-and-next-js-starter
+
### Data Synchronization and Indexing
Indexing strategies for keeping Algolia in sync with your data.
@@ -51,18 +199,722 @@ Best practices:
- partialUpdateObjects for attribute-only changes
- Avoid deleteBy (computationally expensive)
-## ⚠️ Sharp Edges
+### Code_example
-| Issue | Severity | Solution |
-|-------|----------|----------|
-| Issue | critical | See docs |
-| Issue | high | See docs |
-| Issue | medium | See docs |
-| Issue | medium | See docs |
-| Issue | medium | See docs |
-| Issue | medium | See docs |
-| Issue | medium | See docs |
-| Issue | medium | See docs |
+// lib/algolia-admin.ts (SERVER ONLY)
+import algoliasearch from 'algoliasearch';
+
+// Admin client - NEVER expose to frontend
+const adminClient = algoliasearch(
+ process.env.ALGOLIA_APP_ID!,
+ process.env.ALGOLIA_ADMIN_KEY! // Admin key for indexing
+);
+
+const index = adminClient.initIndex('products');
+
+// Batch indexing (recommended approach)
+export async function indexProducts(products: Product[]) {
+ const records = products.map((p) => ({
+ objectID: p.id, // Required unique identifier
+ name: p.name,
+ description: p.description,
+ price: p.price,
+ category: p.category,
+ inStock: p.inventory > 0,
+ createdAt: p.createdAt.getTime(), // Use timestamps for sorting
+ }));
+
+ // Batch in chunks of ~1000-5000 records
+ const BATCH_SIZE = 1000;
+ for (let i = 0; i < records.length; i += BATCH_SIZE) {
+ const batch = records.slice(i, i + BATCH_SIZE);
+ await index.saveObjects(batch);
+ }
+}
+
+// Partial update - update only specific fields
+export async function updateProductPrice(productId: string, price: number) {
+ await index.partialUpdateObject({
+ objectID: productId,
+ price,
+ updatedAt: Date.now(),
+ });
+}
+
+// Partial update with operations
+export async function incrementViewCount(productId: string) {
+ await index.partialUpdateObject({
+ objectID: productId,
+ viewCount: {
+ _operation: 'Increment',
+ value: 1,
+ },
+ });
+}
+
+// Delete records (prefer this over deleteBy)
+export async function deleteProducts(productIds: string[]) {
+ await index.deleteObjects(productIds);
+}
+
+// Full reindex with zero-downtime (atomic swap)
+export async function fullReindex(products: Product[]) {
+ const tempIndex = adminClient.initIndex('products_temp');
+
+ // Index to temp index
+ await tempIndex.saveObjects(
+ products.map((p) => ({
+ objectID: p.id,
+ ...p,
+ }))
+ );
+
+ // Copy settings from main index
+ await adminClient.copyIndex('products', 'products_temp', {
+ scope: ['settings', 'synonyms', 'rules'],
+ });
+
+ // Atomic swap
+ await adminClient.moveIndex('products_temp', 'products');
+}
+
+### Anti_patterns
+
+- Pattern: Using deleteBy for bulk deletions | Why: deleteBy is computationally expensive and rate limited | Fix: Use deleteObjects with array of objectIDs
+- Pattern: Indexing one record at a time | Why: Creates indexing queue, slows down process | Fix: Batch records in groups of 1K-10K
+- Pattern: Full reindex for small changes | Why: Wastes operations, slower than incremental | Fix: Use partialUpdateObject for attribute changes
+
+### References
+
+- https://www.algolia.com/doc/guides/sending-and-managing-data/send-and-update-your-data/in-depth/the-different-synchronization-strategies
+- https://www.algolia.com/blog/engineering/search-indexing-best-practices-for-top-performance-with-code-samples
+
+### API Key Security and Restrictions
+
+Secure API key configuration for Algolia.
+
+Key types:
+- Admin API Key: Full control (indexing, settings, deletion)
+- Search-Only API Key: Safe for frontend
+- Secured API Keys: Generated from base key with restrictions
+
+Restrictions available:
+- Indices: Limit accessible indices
+- Rate limit: Limit API calls per hour per IP
+- Validity: Set expiration time
+- HTTP referrers: Restrict to specific URLs
+- Query parameters: Enforce search parameters
+
+### Code_example
+
+// NEVER do this - admin key in frontend
+// const client = algoliasearch(appId, ADMIN_KEY); // WRONG!
+
+// Correct: Use search-only key in frontend
+const searchClient = algoliasearch(
+ process.env.NEXT_PUBLIC_ALGOLIA_APP_ID!,
+ process.env.NEXT_PUBLIC_ALGOLIA_SEARCH_KEY!
+);
+
+// Server-side: Generate secured API key
+// lib/algolia-secured-key.ts
+import algoliasearch from 'algoliasearch';
+
+const adminClient = algoliasearch(
+ process.env.ALGOLIA_APP_ID!,
+ process.env.ALGOLIA_ADMIN_KEY!
+);
+
+// Generate user-specific secured key
+export function generateSecuredKey(userId: string) {
+ const searchKey = process.env.ALGOLIA_SEARCH_KEY!;
+
+ return adminClient.generateSecuredApiKey(searchKey, {
+ // User can only see their own data
+ filters: `userId:${userId}`,
+ // Key expires in 1 hour
+ validUntil: Math.floor(Date.now() / 1000) + 3600,
+ // Restrict to specific index
+ restrictIndices: ['user_documents'],
+ });
+}
+
+// Rate-limited key for public APIs
+export async function createRateLimitedKey() {
+ const { key } = await adminClient.addApiKey({
+ acl: ['search'],
+ indexes: ['products'],
+ description: 'Public search with rate limit',
+ maxQueriesPerIPPerHour: 1000,
+ referers: ['https://mysite.com/*'],
+ validity: 0, // Never expires
+ });
+
+ return key;
+}
+
+// API endpoint to get user's secured key
+// app/api/search-key/route.ts
+import { auth } from '@/lib/auth';
+import { generateSecuredKey } from '@/lib/algolia-secured-key';
+
+export async function GET() {
+ const session = await auth();
+ if (!session?.user) {
+ return Response.json({ error: 'Unauthorized' }, { status: 401 });
+ }
+
+ const securedKey = generateSecuredKey(session.user.id);
+
+ return Response.json({ key: securedKey });
+}
+
+### Anti_patterns
+
+- Pattern: Hardcoding Admin API key in client code | Why: Exposes full index control to attackers | Fix: Use search-only key with restrictions
+- Pattern: Using same key for all users | Why: Can't restrict data access per user | Fix: Generate secured API keys with user filters
+- Pattern: No rate limiting on public search | Why: Bots can exhaust your search quota | Fix: Set maxQueriesPerIPPerHour on API key
+
+### References
+
+- https://www.algolia.com/doc/guides/security/api-keys
+- https://support.algolia.com/hc/en-us/articles/14339249272977-What-are-the-best-practices-to-manage-Algolia-API-keys-in-my-code-and-protect-them
+
+### Custom Ranking and Relevance Tuning
+
+Configure searchable attributes and custom ranking for relevance.
+
+Searchable attributes (order matters):
+1. Most important fields first (title, name)
+2. Secondary fields next (description, tags)
+3. Exclude non-searchable fields (image_url, id)
+
+Custom ranking:
+- Add business metrics (popularity, rating, date)
+- Use desc() for descending, asc() for ascending
+
+### Code_example
+
+// scripts/configure-index.ts
+import algoliasearch from 'algoliasearch';
+
+const adminClient = algoliasearch(
+ process.env.ALGOLIA_APP_ID!,
+ process.env.ALGOLIA_ADMIN_KEY!
+);
+
+const index = adminClient.initIndex('products');
+
+async function configureIndex() {
+ await index.setSettings({
+ // Searchable attributes in order of importance
+ searchableAttributes: [
+ 'name', // Most important
+ 'brand',
+ 'category',
+ 'description', // Least important
+ ],
+
+ // Attributes for faceting/filtering
+ attributesForFaceting: [
+ 'category',
+ 'brand',
+ 'filterOnly(inStock)', // Filter only, not displayed
+ 'searchable(tags)', // Searchable facet
+ ],
+
+ // Custom ranking (after text relevance)
+ customRanking: [
+ 'desc(popularity)', // Most popular first
+ 'desc(rating)', // Then by rating
+ 'desc(createdAt)', // Then by recency
+ ],
+
+ // Typo tolerance
+ typoTolerance: true,
+ minWordSizefor1Typo: 4,
+ minWordSizefor2Typos: 8,
+
+ // Query settings
+ queryLanguages: ['en'],
+ removeStopWords: ['en'],
+
+ // Highlighting
+ attributesToHighlight: ['name', 'description'],
+ highlightPreTag: '',
+ highlightPostTag: '',
+
+ // Pagination
+ hitsPerPage: 20,
+ paginationLimitedTo: 1000,
+
+ // Distinct (deduplication)
+ attributeForDistinct: 'productFamily',
+ distinct: true,
+ });
+
+ // Add synonyms
+ await index.saveSynonyms([
+ {
+ objectID: 'phone-mobile',
+ type: 'synonym',
+ synonyms: ['phone', 'mobile', 'cell', 'smartphone'],
+ },
+ {
+ objectID: 'laptop-notebook',
+ type: 'oneWaySynonym',
+ input: 'laptop',
+ synonyms: ['notebook', 'portable computer'],
+ },
+ ]);
+
+ // Add rules (query-based customization)
+ await index.saveRules([
+ {
+ objectID: 'boost-sale-items',
+ condition: {
+ anchoring: 'contains',
+ pattern: 'sale',
+ },
+ consequence: {
+ params: {
+ filters: 'onSale:true',
+ optionalFilters: ['featured:true'],
+ },
+ },
+ },
+ ]);
+
+ console.log('Index configured successfully');
+}
+
+configureIndex();
+
+### Anti_patterns
+
+- Pattern: Searching all attributes equally | Why: Reduces relevance, matches in descriptions rank same as titles | Fix: Order searchableAttributes by importance
+- Pattern: No custom ranking | Why: Relies only on text matching, ignores business value | Fix: Add popularity, rating, or recency to customRanking
+- Pattern: Indexing raw dates as strings | Why: Can't sort by date correctly | Fix: Use timestamps (getTime()) for date sorting
+
+### References
+
+- https://www.algolia.com/doc/guides/managing-results/relevance-overview
+- https://www.algolia.com/doc/guides/managing-results/must-do/custom-ranking
+
+### Faceted Search and Filtering
+
+Implement faceted navigation with refinement lists, range sliders,
+and hierarchical menus.
+
+Widget types:
+- RefinementList: Multi-select checkboxes
+- Menu: Single-select list
+- HierarchicalMenu: Nested categories
+- RangeInput/RangeSlider: Numeric ranges
+- ToggleRefinement: Boolean filters
+
+### Code_example
+
+'use client';
+import {
+ InstantSearch,
+ SearchBox,
+ Hits,
+ RefinementList,
+ HierarchicalMenu,
+ RangeInput,
+ ToggleRefinement,
+ ClearRefinements,
+ CurrentRefinements,
+ Stats,
+ SortBy,
+} from 'react-instantsearch';
+import { searchClient, INDEX_NAME } from '@/lib/algolia';
+
+export function ProductSearch() {
+ return (
+
+
;
+}
+
+### Anti_patterns
+
+- Pattern: Not checking isLoaded | Why: Auth state undefined during hydration | Fix: Always check isLoaded before accessing user/auth state
+- Pattern: Using hooks in Server Components | Why: Hooks only work in Client Components | Fix: Use auth() and currentUser() in Server Components
+
+### References
+
+- https://clerk.com/docs/references/react/use-user
+
+### Organizations and Multi-Tenancy
+
+Implement B2B multi-tenancy with Clerk Organizations.
+
+Features:
+- Multiple orgs per user
+- Roles and permissions
+- Organization-scoped data
+- Enterprise SSO per organization
+
+### Code_example
+
+// Organization creation UI
+// app/create-org/page.tsx
+import { CreateOrganization } from '@clerk/nextjs';
+
+export default function CreateOrgPage() {
+ return (
+
+
+ );
+
+ // Or manual check
+ if (membership?.role !== 'org:admin') {
+ return
Admin access required
;
+ }
+
+ return
Admin content here
;
+}
+
+### Anti_patterns
+
+- Pattern: Not scoping data by orgId | Why: Data leaks between organizations | Fix: Always filter queries by orgId from auth()
+- Pattern: Hardcoding role strings | Why: Typos cause access issues | Fix: Define role constants or use TypeScript enums
+
+### References
+
+- https://clerk.com/docs/guides/organizations
+- https://clerk.com/articles/multi-tenancy-in-react-applications-guide
+
+### Webhook User Sync
+
+Sync Clerk users to your database using webhooks.
+
+Key webhooks:
+- user.created: New user signed up
+- user.updated: User profile changed
+- user.deleted: User deleted account
+
+Uses svix for signature verification.
+
+### Code_example
+
+// app/api/webhooks/clerk/route.ts
+import { Webhook } from 'svix';
+import { headers } from 'next/headers';
+import { WebhookEvent } from '@clerk/nextjs/server';
+import { prisma } from '@/lib/prisma';
+
+export async function POST(req: Request) {
+ const WEBHOOK_SECRET = process.env.CLERK_WEBHOOK_SECRET;
+
+ if (!WEBHOOK_SECRET) {
+ throw new Error('Missing CLERK_WEBHOOK_SECRET');
+ }
+
+ // Get headers
+ const headerPayload = await headers();
+ const svix_id = headerPayload.get('svix-id');
+ const svix_timestamp = headerPayload.get('svix-timestamp');
+ const svix_signature = headerPayload.get('svix-signature');
+
+ if (!svix_id || !svix_timestamp || !svix_signature) {
+ return new Response('Missing svix headers', { status: 400 });
+ }
+
+ // Get body
+ const payload = await req.json();
+ const body = JSON.stringify(payload);
+
+ // Verify webhook
+ const wh = new Webhook(WEBHOOK_SECRET);
+ let evt: WebhookEvent;
+
+ try {
+ evt = wh.verify(body, {
+ 'svix-id': svix_id,
+ 'svix-timestamp': svix_timestamp,
+ 'svix-signature': svix_signature,
+ }) as WebhookEvent;
+ } catch (err) {
+ console.error('Webhook verification failed:', err);
+ return new Response('Verification failed', { status: 400 });
+ }
+
+ // Handle events
+ const eventType = evt.type;
+
+ if (eventType === 'user.created') {
+ const { id, email_addresses, first_name, last_name, image_url } = evt.data;
+
+ await prisma.user.create({
+ data: {
+ clerkId: id,
+ email: email_addresses[0]?.email_address,
+ firstName: first_name,
+ lastName: last_name,
+ imageUrl: image_url,
+ },
+ });
+ }
+
+ if (eventType === 'user.updated') {
+ const { id, email_addresses, first_name, last_name, image_url } = evt.data;
+
+ await prisma.user.update({
+ where: { clerkId: id },
+ data: {
+ email: email_addresses[0]?.email_address,
+ firstName: first_name,
+ lastName: last_name,
+ imageUrl: image_url,
+ },
+ });
+ }
+
+ if (eventType === 'user.deleted') {
+ const { id } = evt.data;
+
+ await prisma.user.delete({
+ where: { clerkId: id! },
+ });
+ }
+
+ return new Response('Webhook processed', { status: 200 });
+}
+
+// Prisma schema
+// prisma/schema.prisma
+model User {
+ id String @id @default(cuid())
+ clerkId String @unique
+ email String @unique
+ firstName String?
+ lastName String?
+ imageUrl String?
+ createdAt DateTime @default(now())
+ updatedAt DateTime @updatedAt
+
+ posts Post[]
+ @@index([clerkId])
+}
+
+### Anti_patterns
+
+- Pattern: Not verifying webhook signature | Why: Anyone can hit your endpoint with fake data | Fix: Always verify with svix
+- Pattern: Blocking middleware for webhook routes | Why: Webhooks come from Clerk, not authenticated users | Fix: Add '/api/webhooks(.*)' to public routes
+- Pattern: Not handling race conditions | Why: user.created might arrive after user.updated | Fix: Use upsert instead of create, handle missing records
+
+### References
+
+- https://clerk.com/docs/webhooks/sync-data
+- https://clerk.com/articles/how-to-sync-clerk-user-data-to-your-database
+
+### API Route Protection
+
+Protect API routes using auth() from Clerk.
+
+Route Handlers in App Router use auth() for authentication.
+Middleware provides initial protection, auth() provides in-handler verification.
+
+### Code_example
+
+// app/api/projects/route.ts
+import { auth } from '@clerk/nextjs/server';
+import { prisma } from '@/lib/prisma';
+import { NextResponse } from 'next/server';
+
+export async function GET() {
+ const { userId, orgId } = await auth();
+
+ if (!userId) {
+ return NextResponse.json({ error: 'Unauthorized' }, { status: 401 });
+ }
+
+ // User's personal projects or org projects
+ const projects = await prisma.project.findMany({
+ where: orgId
+ ? { organizationId: orgId }
+ : { userId, organizationId: null },
+ });
+
+ return NextResponse.json(projects);
+}
+
+export async function POST(req: Request) {
+ const { userId, orgId } = await auth();
+
+ if (!userId) {
+ return NextResponse.json({ error: 'Unauthorized' }, { status: 401 });
+ }
+
+ const body = await req.json();
+
+ const project = await prisma.project.create({
+ data: {
+ name: body.name,
+ userId,
+ organizationId: orgId ?? null,
+ },
+ });
+
+ return NextResponse.json(project, { status: 201 });
+}
+
+// Protected with role check
+// app/api/admin/users/route.ts
+export async function GET() {
+ const { userId, orgRole } = await auth();
+
+ if (!userId) {
+ return NextResponse.json({ error: 'Unauthorized' }, { status: 401 });
+ }
+
+ if (orgRole !== 'org:admin') {
+ return NextResponse.json({ error: 'Forbidden' }, { status: 403 });
+ }
+
+ // Admin-only logic
+ const users = await prisma.user.findMany();
+ return NextResponse.json(users);
+}
+
+// Using getAuth in older patterns (not recommended)
+// For backwards compatibility only
+import { getAuth } from '@clerk/nextjs/server';
+
+export async function GET(req: Request) {
+ const { userId } = getAuth(req);
+ // ...
+}
+
+### Anti_patterns
+
+- Pattern: Trusting middleware alone | Why: Middleware can be bypassed (CVE-2025-29927) | Fix: Always verify auth in route handler too
+- Pattern: Not checking orgId for multi-tenant | Why: Users might access other org's data | Fix: Always filter by orgId from auth()
+
+### References
+
+- https://clerk.com/docs/guides/protecting-pages
+
+## Sharp Edges
+
+### CVE-2025-29927 Middleware Bypass Vulnerability
+
+Severity: CRITICAL
+
+### Multiple Middleware Files Cause Conflicts
+
+Severity: HIGH
+
+### 4KB Session Token Cookie Limit
+
+Severity: HIGH
+
+### auth() Requires clerkMiddleware Configuration
+
+Severity: HIGH
+
+### Webhook Race Conditions
+
+Severity: MEDIUM
+
+### auth() is Async in App Router
+
+Severity: MEDIUM
+
+### Middleware Blocks Webhook Endpoints
+
+Severity: MEDIUM
+
+### Accessing Auth State Before isLoaded
+
+Severity: MEDIUM
+
+### Manual Redirects Cause Double Redirects
+
+Severity: MEDIUM
+
+### Organization Data Not Scoped by orgId
+
+Severity: HIGH
+
+## Validation Checks
+
+### Clerk Secret Key in Client Code
+
+Severity: ERROR
+
+CLERK_SECRET_KEY must only be used server-side
+
+Message: Clerk secret key exposed to client. Use CLERK_SECRET_KEY without NEXT_PUBLIC prefix.
+
+### Protected Route Without Middleware
+
+Severity: ERROR
+
+API routes should have middleware protection
+
+Message: API route without auth check. Add middleware protection or auth() check.
+
+### Hardcoded Clerk API Keys
+
+Severity: ERROR
+
+Clerk keys should use environment variables
+
+Message: Hardcoded Clerk keys. Use environment variables.
+
+### Missing Await on auth()
+
+Severity: ERROR
+
+auth() is async in App Router and must be awaited
+
+Message: auth() not awaited. Use 'await auth()' in App Router.
+
+### Multiple Middleware Files
+
+Severity: WARNING
+
+Only one middleware.ts file should exist
+
+Message: Multiple middleware files detected. Use single middleware.ts.
+
+### Webhook Route Not Excluded from Protection
+
+Severity: WARNING
+
+Webhook routes should be public
+
+Message: Webhook route may be blocked by middleware. Add to public routes.
+
+### Accessing Auth Without isLoaded Check
+
+Severity: WARNING
+
+Check isLoaded before accessing user state in client components
+
+Message: Accessing user without isLoaded check. Check isLoaded first.
+
+### Clerk Hooks in Server Component
+
+Severity: ERROR
+
+Clerk hooks only work in Client Components
+
+Message: Clerk hooks in Server Component. Add 'use client' or use auth().
+
+### Multi-Tenant Query Without orgId
+
+Severity: WARNING
+
+Organization data should be scoped by orgId
+
+Message: Query without organization scope. Filter by orgId for multi-tenancy.
+
+### Webhook Without Signature Verification
+
+Severity: ERROR
+
+Clerk webhooks must verify svix signature
+
+Message: Webhook without signature verification. Use svix to verify.
+
+## Collaboration
+
+### Delegation Triggers
+
+- user needs database -> postgres-wizard (User table with clerkId)
+- user needs payments -> stripe-integration (Customer linked to Clerk user)
+- user needs search -> algolia-search (Secured API keys per user)
+- user needs analytics -> segment-cdp (User identification)
+- user needs email -> resend-email (Transactional emails)
## When to Use
-This skill is applicable to execute the workflow or actions described in the overview.
+
+- User mentions or implies: adding authentication
+- User mentions or implies: clerk auth
+- User mentions or implies: user authentication
+- User mentions or implies: sign in
+- User mentions or implies: sign up
+- User mentions or implies: user management
+- User mentions or implies: multi-tenancy
+- User mentions or implies: organizations
+- User mentions or implies: sso
+- User mentions or implies: single sign-on
diff --git a/plugins/antigravity-awesome-skills/skills/computer-use-agents/SKILL.md b/plugins/antigravity-awesome-skills/skills/computer-use-agents/SKILL.md
index 4ad1afbc..9647697d 100644
--- a/plugins/antigravity-awesome-skills/skills/computer-use-agents/SKILL.md
+++ b/plugins/antigravity-awesome-skills/skills/computer-use-agents/SKILL.md
@@ -1,13 +1,20 @@
---
name: computer-use-agents
-description: "The fundamental architecture of computer use agents: observe screen, reason about next action, execute action, repeat. This loop integrates vision models with action execution through an iterative pipeline."
+description: Build AI agents that interact with computers like humans do -
+ viewing screens, moving cursors, clicking buttons, and typing text. Covers
+ Anthropic's Computer Use, OpenAI's Operator/CUA, and open-source alternatives.
risk: unknown
-source: "vibeship-spawner-skills (Apache 2.0)"
-date_added: "2026-02-27"
+source: vibeship-spawner-skills (Apache 2.0)
+date_added: 2026-02-27
---
# Computer Use Agents
+Build AI agents that interact with computers like humans do - viewing screens,
+moving cursors, clicking buttons, and typing text. Covers Anthropic's Computer
+Use, OpenAI's Operator/CUA, and open-source alternatives. Critical focus on
+sandboxing, security, and handling the unique challenges of vision-based control.
+
## Patterns
### Perception-Reasoning-Action Loop
@@ -25,10 +32,8 @@ Key components:
Critical insight: Vision agents are completely still during "thinking"
phase (1-5 seconds), creating a detectable pause pattern.
+**When to use**: Building any computer use agent from scratch, Integrating vision models with desktop control, Understanding agent behavior patterns
-**When to use**: ['Building any computer use agent from scratch', 'Integrating vision models with desktop control', 'Understanding agent behavior patterns']
-
-```python
from anthropic import Anthropic
from PIL import Image
import base64
@@ -83,8 +88,116 @@ class ComputerUseAgent:
amount = action.get("amount", 3)
scroll = -amount if direction == "down" else amount
pyautogui.scroll(scroll)
- return {"success": True, "action": f"scrolled {dir
-```
+ return {"success": True, "action": f"scrolled {direction}"}
+
+ elif action_type == "move":
+ x, y = action["x"], action["y"]
+ pyautogui.moveTo(x, y)
+ return {"success": True, "action": f"moved to ({x}, {y})"}
+
+ else:
+ return {"success": False, "error": f"Unknown action: {action_type}"}
+
+ def run(self, task: str) -> dict:
+ """
+ Run perception-reasoning-action loop until task complete.
+
+ The loop:
+ 1. Screenshot current state
+ 2. Send to vision model with task context
+ 3. Parse action from response
+ 4. Execute action
+ 5. Repeat until done or max steps
+ """
+ messages = []
+ step_count = 0
+
+ system_prompt = """You are a computer use agent. You can see the screen
+ and control mouse/keyboard.
+
+ Available actions (respond with JSON):
+ - {"type": "click", "x": 100, "y": 200, "button": "left"}
+ - {"type": "type", "text": "hello world"}
+ - {"type": "key", "key": "enter"}
+ - {"type": "scroll", "direction": "down", "amount": 3}
+ - {"type": "done", "result": "task completed successfully"}
+
+ Always respond with ONLY a JSON action object.
+ Be precise with coordinates - click exactly where needed.
+ If you see an error, try to recover.
+ """
+
+ while step_count < self.max_steps:
+ step_count += 1
+
+ # 1. PERCEPTION: Capture current screen
+ screenshot_b64 = self.capture_screenshot()
+
+ # 2. REASONING: Send to vision model
+ user_content = [
+ {"type": "text", "text": f"Task: {task}\n\nStep {step_count}. What action should I take?"},
+ {"type": "image", "source": {
+ "type": "base64",
+ "media_type": "image/png",
+ "data": screenshot_b64
+ }}
+ ]
+
+ messages.append({"role": "user", "content": user_content})
+
+ response = self.client.messages.create(
+ model=self.model,
+ max_tokens=1024,
+ system=system_prompt,
+ messages=messages
+ )
+
+ assistant_message = response.content[0].text
+ messages.append({"role": "assistant", "content": assistant_message})
+
+ # 3. Parse action from response
+ import json
+ try:
+ action = json.loads(assistant_message)
+ except json.JSONDecodeError:
+ # Try to extract JSON from response
+ import re
+ match = re.search(r'\{[^}]+\}', assistant_message)
+ if match:
+ action = json.loads(match.group())
+ else:
+ continue
+
+ # Check if done
+ if action.get("type") == "done":
+ return {
+ "success": True,
+ "result": action.get("result"),
+ "steps": step_count
+ }
+
+ # 4. ACTION: Execute
+ result = self.execute_action(action)
+
+ # Small delay for UI to update
+ time.sleep(self.action_delay)
+
+ return {
+ "success": False,
+ "error": "Max steps reached",
+ "steps": step_count
+ }
+
+# Usage
+agent = ComputerUseAgent(Anthropic())
+result = agent.run("Open Chrome and search for 'weather today'")
+
+### Anti_patterns
+
+- Running without step limits (infinite loops)
+- No delay between actions (UI can't keep up)
+- Screenshots at full resolution (token explosion)
+- Ignoring action failures (no recovery)
### Sandboxed Environment Pattern
@@ -102,10 +215,8 @@ Key isolation requirements:
The goal is "blast radius minimization" - if the agent goes wrong,
damage is contained to the sandbox.
+**When to use**: Deploying any computer use agent, Testing agent behavior safely, Running untrusted automation tasks
-**When to use**: ['Deploying any computer use agent', 'Testing agent behavior safely', 'Running untrusted automation tasks']
-
-```python
# Dockerfile for sandboxed computer use environment
# Based on Anthropic's reference implementation pattern
@@ -208,8 +319,89 @@ volumes:
# Python wrapper with additional runtime sandboxing
import subprocess
import os
-from dataclasses im
-```
+from dataclasses import dataclass
+from typing import Optional
+
+@dataclass
+class SandboxConfig:
+ """Configuration for agent sandbox."""
+ network_allowed: list[str] = None # Allowed domains
+ max_runtime_seconds: int = 300
+ max_memory_mb: int = 2048
+ allow_downloads: bool = False
+ allow_clipboard: bool = False
+
+class SandboxedAgent:
+ """
+ Run computer use agent in Docker sandbox.
+ """
+
+ def __init__(self, config: SandboxConfig):
+ self.config = config
+ self.container_id: Optional[str] = None
+
+ def start(self):
+ """Start sandboxed environment."""
+ # Build network rules
+ network_rules = ""
+ if self.config.network_allowed:
+ for domain in self.config.network_allowed:
+ network_rules += f"--add-host={domain}:$(dig +short {domain}) "
+ else:
+ network_rules = "--network=none"
+
+ cmd = f"""
+ docker run -d \
+ --name computer-use-sandbox-$$ \
+ --security-opt no-new-privileges \
+ --cap-drop ALL \
+ --memory {self.config.max_memory_mb}m \
+ --cpus 2 \
+ --read-only \
+ --tmpfs /tmp \
+ {network_rules} \
+ computer-use-agent:latest
+ """
+
+ result = subprocess.run(cmd, shell=True, capture_output=True)
+ self.container_id = result.stdout.decode().strip()
+
+ # Set up kill timer
+ subprocess.Popen([
+ "sh", "-c",
+ f"sleep {self.config.max_runtime_seconds} && docker kill {self.container_id}"
+ ])
+
+ return self.container_id
+
+ def execute_task(self, task: str) -> dict:
+ """Execute task in sandbox."""
+ if not self.container_id:
+ self.start()
+
+ # Send task to agent via API
+ import requests
+ response = requests.post(
+ f"http://localhost:8080/task",
+ json={"task": task},
+ timeout=self.config.max_runtime_seconds
+ )
+
+ return response.json()
+
+ def stop(self):
+ """Stop and remove sandbox."""
+ if self.container_id:
+ subprocess.run(f"docker rm -f {self.container_id}", shell=True)
+ self.container_id = None
+
+### Anti_patterns
+
+- Running agents on host system directly
+- Giving sandbox full network access
+- Running as root in container
+- No resource limits (denial of service)
+- Persistent storage (data can leak between runs)
### Anthropic Computer Use Implementation
@@ -231,10 +423,8 @@ Tool versions:
Critical limitation: "Some UI elements (like dropdowns and scrollbars)
might be tricky for Claude to manipulate" - Anthropic docs
+**When to use**: Building production computer use agents, Need highest quality vision understanding, Full desktop control (not just browser)
-**When to use**: ['Building production computer use agents', 'Need highest quality vision understanding', 'Full desktop control (not just browser)']
-
-```python
from anthropic import Anthropic
from anthropic.types.beta import (
BetaToolComputerUse20241022,
@@ -301,20 +491,1672 @@ class AnthropicComputerUse:
subprocess.run(["scrot", "/tmp/screenshot.png"])
with open("/tmp/screenshot.png", "rb") as f:
-
+ img_data = f.read()
+
+ # Resize for efficiency
+ img = Image.open(io.BytesIO(img_data))
+ img = img.resize(self.screen_size, Image.LANCZOS)
+
+ buffer = io.BytesIO()
+ img.save(buffer, format="PNG")
+
+ return {
+ "type": "image",
+ "source": {
+ "type": "base64",
+ "media_type": "image/png",
+ "data": base64.b64encode(buffer.getvalue()).decode()
+ }
+ }
+
+ elif action == "mouse_move":
+ x, y = input.get("coordinate", [0, 0])
+ subprocess.run(["xdotool", "mousemove", str(x), str(y)])
+ return {"success": True}
+
+ elif action == "left_click":
+ subprocess.run(["xdotool", "click", "1"])
+ return {"success": True}
+
+ elif action == "right_click":
+ subprocess.run(["xdotool", "click", "3"])
+ return {"success": True}
+
+ elif action == "double_click":
+ subprocess.run(["xdotool", "click", "--repeat", "2", "1"])
+ return {"success": True}
+
+ elif action == "type":
+ text = input.get("text", "")
+ # Use xdotool type with delay for reliability
+ subprocess.run(["xdotool", "type", "--delay", "50", text])
+ return {"success": True}
+
+ elif action == "key":
+ key = input.get("key", "")
+ # Map common key names
+ key_map = {
+ "return": "Return",
+ "enter": "Return",
+ "tab": "Tab",
+ "escape": "Escape",
+ "backspace": "BackSpace",
+ }
+ xdotool_key = key_map.get(key.lower(), key)
+ subprocess.run(["xdotool", "key", xdotool_key])
+ return {"success": True}
+
+ elif action == "scroll":
+ direction = input.get("direction", "down")
+ amount = input.get("amount", 3)
+ button = "5" if direction == "down" else "4"
+ for _ in range(amount):
+ subprocess.run(["xdotool", "click", button])
+ return {"success": True}
+
+ return {"error": f"Unknown action: {action}"}
+
+ def _handle_bash(self, input: dict) -> dict:
+ """Execute bash command."""
+ command = input.get("command", "")
+
+ # Security: Sanitize and limit commands
+ dangerous_patterns = ["rm -rf", "mkfs", "dd if=", "> /dev/"]
+ for pattern in dangerous_patterns:
+ if pattern in command:
+ return {"error": "Dangerous command blocked"}
+
+ try:
+ result = subprocess.run(
+ command,
+ shell=True,
+ capture_output=True,
+ text=True,
+ timeout=30
+ )
+ return {
+ "stdout": result.stdout[:10000], # Limit output
+ "stderr": result.stderr[:1000],
+ "returncode": result.returncode
+ }
+ except subprocess.TimeoutExpired:
+ return {"error": "Command timed out"}
+
+ def _handle_editor(self, input: dict) -> dict:
+ """Handle text editor operations."""
+ command = input.get("command")
+ path = input.get("path")
+
+ if command == "view":
+ try:
+ with open(path, "r") as f:
+ content = f.read()
+ return {"content": content[:50000]} # Limit size
+ except Exception as e:
+ return {"error": str(e)}
+
+ elif command == "str_replace":
+ old_str = input.get("old_str")
+ new_str = input.get("new_str")
+ try:
+ with open(path, "r") as f:
+ content = f.read()
+ if old_str not in content:
+ return {"error": "old_str not found in file"}
+ content = content.replace(old_str, new_str, 1)
+ with open(path, "w") as f:
+ f.write(content)
+ return {"success": True}
+ except Exception as e:
+ return {"error": str(e)}
+
+ return {"error": f"Unknown editor command: {command}"}
+
+ def run_task(self, task: str, max_steps: int = 50) -> dict:
+ """Run computer use task with agentic loop."""
+ messages = [{"role": "user", "content": task}]
+ tools = self.get_tools()
+
+ for step in range(max_steps):
+ response = self.client.beta.messages.create(
+ model=self.model,
+ max_tokens=4096,
+ tools=tools,
+ messages=messages,
+ betas=["computer-use-2024-10-22"]
+ )
+
+ # Check for completion
+ if response.stop_reason == "end_turn":
+ return {
+ "success": True,
+ "result": response.content[0].text if response.content else "",
+ "steps": step + 1
+ }
+
+ # Handle tool use
+ if response.stop_reason == "tool_use":
+ messages.append({"role": "assistant", "content": response.content})
+
+ tool_results = []
+ for block in response.content:
+ if block.type == "tool_use":
+ result = self.execute_tool(block.name, block.input)
+ tool_results.append({
+ "type": "tool_result",
+ "tool_use_id": block.id,
+ "content": result
+ })
+
+ messages.append({"role": "user", "content": tool_results})
+
+ return {"success": False, "error": "Max steps reached"}
+
+### Anti_patterns
+
+- Not using betas=['computer-use-2024-10-22'] flag
+- Full resolution screenshots (wasteful)
+- No command sanitization for bash tool
+- Unbounded execution time
+
+### Browser-Use Pattern (Playwright-based)
+
+For browser-only automation, using structured DOM access is more efficient
+than pixel-based computer use. Playwright MCP allows LLMs to control
+browsers using accessibility snapshots rather than screenshots.
+
+Advantages over vision-based:
+- Faster: No image processing required
+- Cheaper: Text tokens vs image tokens
+- More precise: Direct element targeting
+- More reliable: No coordinate drift
+
+When to use vision vs structured:
+- Vision: Desktop apps, complex UIs, visual verification
+- Structured: Web automation, form filling, data extraction
+
+**When to use**: Browser-only automation tasks, Form filling and web interactions, When speed and cost matter more than visual understanding
+
+from playwright.async_api import async_playwright
+from dataclasses import dataclass
+from typing import Optional
+import asyncio
+
+@dataclass
+class BrowserAction:
+ """Structured browser action."""
+ action: str # click, type, navigate, scroll, extract
+ selector: Optional[str] = None
+ text: Optional[str] = None
+ url: Optional[str] = None
+
+class BrowserUseAgent:
+ """
+ Browser automation using Playwright with structured commands.
+ More efficient than pixel-based for web tasks.
+ """
+
+ def __init__(self):
+ self.browser = None
+ self.page = None
+
+ async def start(self, headless: bool = True):
+ """Start browser session."""
+ self.playwright = await async_playwright().start()
+ self.browser = await self.playwright.chromium.launch(headless=headless)
+ self.page = await self.browser.new_page()
+
+ async def get_page_snapshot(self) -> dict:
+ """
+ Get structured snapshot of page for LLM.
+ Uses accessibility tree for efficiency.
+ """
+ # Get accessibility tree
+ snapshot = await self.page.accessibility.snapshot()
+
+ # Get simplified DOM info
+ elements = await self.page.evaluate('''() => {
+ const interactable = [];
+ const selector = 'a, button, input, select, textarea, [role="button"]';
+ document.querySelectorAll(selector).forEach((el, i) => {
+ const rect = el.getBoundingClientRect();
+ if (rect.width > 0 && rect.height > 0) {
+ interactable.push({
+ index: i,
+ tag: el.tagName.toLowerCase(),
+ text: el.textContent?.trim().slice(0, 100),
+ type: el.type,
+ placeholder: el.placeholder,
+ name: el.name,
+ id: el.id,
+ class: el.className
+ });
+ }
+ });
+ return interactable;
+ }''')
+
+ return {
+ "url": self.page.url,
+ "title": await self.page.title(),
+ "accessibility_tree": snapshot,
+ "interactable_elements": elements[:50] # Limit for token efficiency
+ }
+
+ async def execute_action(self, action: BrowserAction) -> dict:
+ """Execute structured browser action."""
+
+ try:
+ if action.action == "navigate":
+ await self.page.goto(action.url, wait_until="domcontentloaded")
+ return {"success": True, "url": self.page.url}
+
+ elif action.action == "click":
+ await self.page.click(action.selector, timeout=5000)
+ await self.page.wait_for_load_state("networkidle", timeout=5000)
+ return {"success": True}
+
+ elif action.action == "type":
+ await self.page.fill(action.selector, action.text)
+ return {"success": True}
+
+ elif action.action == "scroll":
+ direction = action.text or "down"
+ distance = 500 if direction == "down" else -500
+ await self.page.evaluate(f"window.scrollBy(0, {distance})")
+ return {"success": True}
+
+ elif action.action == "extract":
+ # Extract text content
+ if action.selector:
+ text = await self.page.text_content(action.selector)
+ else:
+ text = await self.page.text_content("body")
+ return {"success": True, "text": text[:5000]}
+
+ elif action.action == "screenshot":
+ # Fall back to vision when needed
+ screenshot = await self.page.screenshot(type="png")
+ import base64
+ return {
+ "success": True,
+ "image": base64.b64encode(screenshot).decode()
+ }
+
+ except Exception as e:
+ return {"success": False, "error": str(e)}
+
+ return {"success": False, "error": f"Unknown action: {action.action}"}
+
+ async def run_with_llm(self, task: str, llm_client, max_steps: int = 20):
+ """
+ Run browser task with LLM decision making.
+ Uses structured DOM instead of screenshots.
+ """
+
+ system_prompt = """You are a browser automation agent. You receive
+ page snapshots with interactable elements and decide actions.
+
+ Respond with JSON action:
+ - {"action": "navigate", "url": "https://..."}
+ - {"action": "click", "selector": "button.submit"}
+ - {"action": "type", "selector": "input[name='email']", "text": "..."}
+ - {"action": "scroll", "text": "down"}
+ - {"action": "extract", "selector": ".results"}
+ - {"action": "done", "result": "task completed"}
+
+ Use CSS selectors based on the element info provided.
+ Prefer id > name > class > text content for selectors.
+ """
+
+ messages = []
+
+ for step in range(max_steps):
+ # Get current page state
+ snapshot = await self.get_page_snapshot()
+
+ user_message = f"""Task: {task}
+
+ Current page:
+ URL: {snapshot['url']}
+ Title: {snapshot['title']}
+
+ Interactable elements:
+ {snapshot['interactable_elements']}
+
+ What action should I take?"""
+
+ messages.append({"role": "user", "content": user_message})
+
+ # Get LLM decision
+ response = llm_client.messages.create(
+ model="claude-sonnet-4-20250514",
+ max_tokens=1024,
+ system=system_prompt,
+ messages=messages
+ )
+
+ assistant_text = response.content[0].text
+ messages.append({"role": "assistant", "content": assistant_text})
+
+ # Parse and execute
+ import json
+ action_dict = json.loads(assistant_text)
+
+ if action_dict.get("action") == "done":
+ return {"success": True, "result": action_dict.get("result")}
+
+ action = BrowserAction(**action_dict)
+ result = await self.execute_action(action)
+
+ if not result.get("success"):
+ messages.append({
+ "role": "user",
+ "content": f"Action failed: {result.get('error')}"
+ })
+
+ await asyncio.sleep(0.5) # Rate limit
+
+ return {"success": False, "error": "Max steps reached"}
+
+ async def close(self):
+ """Clean up browser."""
+ if self.browser:
+ await self.browser.close()
+ if hasattr(self, 'playwright'):
+ await self.playwright.stop()
+
+# Usage
+async def main():
+ agent = BrowserUseAgent()
+ await agent.start(headless=False)
+
+ from anthropic import Anthropic
+ result = await agent.run_with_llm(
+ "Go to weather.com and find the weather for New York",
+ Anthropic()
+ )
+
+ print(result)
+ await agent.close()
+
+asyncio.run(main())
+
+### Anti_patterns
+
+- Using screenshots when DOM access works
+- Not waiting for page loads
+- Hardcoded selectors that break
+- No error recovery for stale elements
+
+### User Confirmation Pattern
+
+For sensitive actions, agents should pause and ask for human confirmation.
+"ChatGPT agent also pauses and asks for confirmation prior to taking
+sensitive steps such as completing a purchase."
+
+Sensitivity levels:
+1. LOW: Navigation, reading (auto-approve)
+2. MEDIUM: Form filling, clicking (log, maybe confirm)
+3. HIGH: Purchases, authentication, file operations (always confirm)
+4. CRITICAL: Credential entry, financial transactions (confirm + review)
+
+**When to use**: Actions with real-world consequences, Financial transactions, Authentication flows, File modifications
+
+from enum import Enum
+from dataclasses import dataclass
+from typing import Callable, Optional
+import asyncio
+
+class ActionSeverity(Enum):
+ LOW = "low" # Auto-approve
+ MEDIUM = "medium" # Log, optional confirm
+ HIGH = "high" # Always confirm
+ CRITICAL = "critical" # Confirm + review details
+
+@dataclass
+class SensitiveAction:
+ """Action that may need user confirmation."""
+ action_type: str
+ description: str
+ severity: ActionSeverity
+ details: dict
+
+class ConfirmationGate:
+ """
+ Gate sensitive actions through user confirmation.
+ """
+
+ # Action type -> severity mapping
+ ACTION_SEVERITY = {
+ # LOW - auto-approve
+ "navigate": ActionSeverity.LOW,
+ "scroll": ActionSeverity.LOW,
+ "read": ActionSeverity.LOW,
+ "screenshot": ActionSeverity.LOW,
+
+ # MEDIUM - log and maybe confirm
+ "click": ActionSeverity.MEDIUM,
+ "type": ActionSeverity.MEDIUM,
+ "search": ActionSeverity.MEDIUM,
+
+ # HIGH - always confirm
+ "download": ActionSeverity.HIGH,
+ "submit_form": ActionSeverity.HIGH,
+ "login": ActionSeverity.HIGH,
+ "file_write": ActionSeverity.HIGH,
+
+ # CRITICAL - confirm with full review
+ "purchase": ActionSeverity.CRITICAL,
+ "enter_password": ActionSeverity.CRITICAL,
+ "enter_credit_card": ActionSeverity.CRITICAL,
+ "send_money": ActionSeverity.CRITICAL,
+ "delete": ActionSeverity.CRITICAL,
+ }
+
+ def __init__(
+ self,
+ confirm_callback: Callable[[SensitiveAction], bool] = None,
+ auto_confirm_low: bool = True,
+ auto_confirm_medium: bool = False
+ ):
+ self.confirm_callback = confirm_callback or self._default_confirm
+ self.auto_confirm_low = auto_confirm_low
+ self.auto_confirm_medium = auto_confirm_medium
+ self.action_log = []
+
+ def _default_confirm(self, action: SensitiveAction) -> bool:
+ """Default confirmation via CLI prompt."""
+ print(f"\n{'='*60}")
+ print(f"ACTION CONFIRMATION REQUIRED")
+ print(f"{'='*60}")
+ print(f"Type: {action.action_type}")
+ print(f"Severity: {action.severity.value.upper()}")
+ print(f"Description: {action.description}")
+ print(f"Details: {action.details}")
+ print(f"{'='*60}")
+
+ while True:
+ response = input("Allow this action? [y/n]: ").lower().strip()
+ if response in ['y', 'yes']:
+ return True
+ elif response in ['n', 'no']:
+ return False
+
+ def classify_action(self, action_type: str, context: dict) -> ActionSeverity:
+ """Classify action severity, considering context."""
+ base_severity = self.ACTION_SEVERITY.get(action_type, ActionSeverity.MEDIUM)
+
+ # Escalate based on context
+ if context.get("involves_credentials"):
+ return ActionSeverity.CRITICAL
+ if context.get("involves_money"):
+ return ActionSeverity.CRITICAL
+ if context.get("irreversible"):
+ return max(base_severity, ActionSeverity.HIGH, key=lambda x: x.value)
+
+ return base_severity
+
+ def check_action(
+ self,
+ action_type: str,
+ description: str,
+ details: dict = None
+ ) -> tuple[bool, str]:
+ """
+ Check if action should proceed.
+ Returns (approved, reason).
+ """
+ details = details or {}
+ severity = self.classify_action(action_type, details)
+
+ action = SensitiveAction(
+ action_type=action_type,
+ description=description,
+ severity=severity,
+ details=details
+ )
+
+ # Log all actions
+ self.action_log.append({
+ "action": action,
+ "timestamp": __import__('datetime').datetime.now().isoformat()
+ })
+
+ # Auto-approve low severity
+ if severity == ActionSeverity.LOW and self.auto_confirm_low:
+ return True, "auto-approved (low severity)"
+
+ # Maybe auto-approve medium
+ if severity == ActionSeverity.MEDIUM and self.auto_confirm_medium:
+ return True, "auto-approved (medium severity)"
+
+ # Request confirmation
+ approved = self.confirm_callback(action)
+
+ if approved:
+ return True, "user approved"
+ else:
+ return False, "user rejected"
+
+class ConfirmedComputerUseAgent:
+ """
+ Computer use agent with confirmation gates.
+ """
+
+ def __init__(self, base_agent, confirmation_gate: ConfirmationGate):
+ self.agent = base_agent
+ self.gate = confirmation_gate
+
+ def execute_action(self, action: dict) -> dict:
+ """Execute action with confirmation check."""
+ action_type = action.get("type", "unknown")
+
+ # Build description
+ if action_type == "click":
+ desc = f"Click at ({action.get('x')}, {action.get('y')})"
+ elif action_type == "type":
+ text = action.get('text', '')
+ # Mask if looks like password
+ if self._looks_sensitive(text):
+ desc = f"Type sensitive text ({len(text)} chars)"
+ else:
+ desc = f"Type: {text[:50]}..."
+ else:
+ desc = f"Execute: {action_type}"
+
+ # Context for severity classification
+ context = {
+ "involves_credentials": self._looks_sensitive(action.get("text", "")),
+ "involves_money": self._mentions_money(action),
+ }
+
+ # Check with gate
+ approved, reason = self.gate.check_action(
+ action_type, desc, context
+ )
+
+ if not approved:
+ return {
+ "success": False,
+ "error": f"Action blocked: {reason}",
+ "action": action_type
+ }
+
+ # Execute if approved
+ return self.agent.execute_action(action)
+
+ def _looks_sensitive(self, text: str) -> bool:
+ """Check if text looks like sensitive data."""
+ if not text:
+ return False
+ # Common patterns
+ patterns = [
+ r'\b\d{16}\b', # Credit card
+ r'\b\d{3,4}\b.*\b\d{3,4}\b', # CVV-like
+ r'password',
+ r'secret',
+ r'api.?key',
+ r'token'
+ ]
+ import re
+ return any(re.search(p, text.lower()) for p in patterns)
+
+ def _mentions_money(self, action: dict) -> bool:
+ """Check if action involves money."""
+ text = str(action)
+ money_patterns = [
+ r'\$\d+', r'pay', r'purchase', r'buy', r'checkout',
+ r'credit', r'debit', r'invoice', r'payment'
+ ]
+ import re
+ return any(re.search(p, text.lower()) for p in money_patterns)
+
+# Usage
+gate = ConfirmationGate(
+ auto_confirm_low=True,
+ auto_confirm_medium=False # Confirm clicks, typing
+)
+
+agent = ConfirmedComputerUseAgent(base_agent, gate)
+result = agent.execute_action({"type": "click", "x": 500, "y": 300})
+
+### Anti_patterns
+
+- Auto-approving all actions
+- Not logging rejected actions
+- Showing full passwords in confirmation
+- No timeout on confirmation (hangs forever)
+
+### Action Logging Pattern
+
+All computer use agent actions should be logged for:
+1. Debugging failed automations
+2. Security auditing
+3. Reproducibility
+4. Compliance requirements
+
+Log format should capture:
+- Timestamp
+- Action type and parameters
+- Screenshot before/after
+- Success/failure status
+- Model reasoning (if available)
+
+**When to use**: Production computer use deployments, Debugging automation failures, Security-sensitive environments
+
+from dataclasses import dataclass, field
+from datetime import datetime
+from typing import Optional, Any
+import json
+import os
+
+@dataclass
+class ActionLogEntry:
+ """Single action log entry."""
+ timestamp: datetime
+ action_type: str
+ parameters: dict
+ success: bool
+ error: Optional[str] = None
+ screenshot_before: Optional[str] = None # Path to screenshot
+ screenshot_after: Optional[str] = None
+ model_reasoning: Optional[str] = None
+ duration_ms: Optional[int] = None
+
+ def to_dict(self) -> dict:
+ return {
+ "timestamp": self.timestamp.isoformat(),
+ "action_type": self.action_type,
+ "parameters": self._sanitize_params(self.parameters),
+ "success": self.success,
+ "error": self.error,
+ "screenshot_before": self.screenshot_before,
+ "screenshot_after": self.screenshot_after,
+ "model_reasoning": self.model_reasoning,
+ "duration_ms": self.duration_ms
+ }
+
+ def _sanitize_params(self, params: dict) -> dict:
+ """Remove sensitive data from params."""
+ sanitized = {}
+ sensitive_keys = ['password', 'secret', 'token', 'key', 'credit_card']
+
+ for k, v in params.items():
+ if any(s in k.lower() for s in sensitive_keys):
+ sanitized[k] = "[REDACTED]"
+ elif isinstance(v, str) and len(v) > 100:
+ sanitized[k] = v[:100] + "...[truncated]"
+ else:
+ sanitized[k] = v
+
+ return sanitized
+
+@dataclass
+class TaskSession:
+ """A complete task execution session."""
+ session_id: str
+ task: str
+ start_time: datetime
+ end_time: Optional[datetime] = None
+ actions: list[ActionLogEntry] = field(default_factory=list)
+ success: bool = False
+ final_result: Optional[str] = None
+
+class ActionLogger:
+ """
+ Comprehensive action logging for computer use agents.
+ """
+
+ def __init__(self, log_dir: str = "./agent_logs"):
+ self.log_dir = log_dir
+ self.screenshot_dir = os.path.join(log_dir, "screenshots")
+ os.makedirs(self.screenshot_dir, exist_ok=True)
+
+ self.current_session: Optional[TaskSession] = None
+
+ def start_session(self, task: str) -> str:
+ """Start a new task session."""
+ import uuid
+ session_id = str(uuid.uuid4())[:8]
+
+ self.current_session = TaskSession(
+ session_id=session_id,
+ task=task,
+ start_time=datetime.now()
+ )
+
+ return session_id
+
+ def log_action(
+ self,
+ action_type: str,
+ parameters: dict,
+ success: bool,
+ error: Optional[str] = None,
+ screenshot_before: bytes = None,
+ screenshot_after: bytes = None,
+ model_reasoning: str = None,
+ duration_ms: int = None
+ ):
+ """Log a single action."""
+ if not self.current_session:
+ raise RuntimeError("No active session")
+
+ # Save screenshots if provided
+ screenshot_paths = {}
+ timestamp_str = datetime.now().strftime("%Y%m%d_%H%M%S_%f")
+
+ if screenshot_before:
+ path = os.path.join(
+ self.screenshot_dir,
+ f"{self.current_session.session_id}_{timestamp_str}_before.png"
+ )
+ with open(path, "wb") as f:
+ f.write(screenshot_before)
+ screenshot_paths["before"] = path
+
+ if screenshot_after:
+ path = os.path.join(
+ self.screenshot_dir,
+ f"{self.current_session.session_id}_{timestamp_str}_after.png"
+ )
+ with open(path, "wb") as f:
+ f.write(screenshot_after)
+ screenshot_paths["after"] = path
+
+ # Create log entry
+ entry = ActionLogEntry(
+ timestamp=datetime.now(),
+ action_type=action_type,
+ parameters=parameters,
+ success=success,
+ error=error,
+ screenshot_before=screenshot_paths.get("before"),
+ screenshot_after=screenshot_paths.get("after"),
+ model_reasoning=model_reasoning,
+ duration_ms=duration_ms
+ )
+
+ self.current_session.actions.append(entry)
+
+ # Also append to running log file
+ self._append_to_log(entry)
+
+ def _append_to_log(self, entry: ActionLogEntry):
+ """Append entry to JSONL log file."""
+ log_file = os.path.join(
+ self.log_dir,
+ f"session_{self.current_session.session_id}.jsonl"
+ )
+
+ with open(log_file, "a") as f:
+ f.write(json.dumps(entry.to_dict()) + "\n")
+
+ def end_session(self, success: bool, result: str = None):
+ """End current session."""
+ if not self.current_session:
+ return
+
+ self.current_session.end_time = datetime.now()
+ self.current_session.success = success
+ self.current_session.final_result = result
+
+ # Write session summary
+ summary_file = os.path.join(
+ self.log_dir,
+ f"session_{self.current_session.session_id}_summary.json"
+ )
+
+ summary = {
+ "session_id": self.current_session.session_id,
+ "task": self.current_session.task,
+ "start_time": self.current_session.start_time.isoformat(),
+ "end_time": self.current_session.end_time.isoformat(),
+ "duration_seconds": (
+ self.current_session.end_time -
+ self.current_session.start_time
+ ).total_seconds(),
+ "total_actions": len(self.current_session.actions),
+ "successful_actions": sum(
+ 1 for a in self.current_session.actions if a.success
+ ),
+ "failed_actions": sum(
+ 1 for a in self.current_session.actions if not a.success
+ ),
+ "success": success,
+ "final_result": result
+ }
+
+ with open(summary_file, "w") as f:
+ json.dump(summary, f, indent=2)
+
+ self.current_session = None
+
+ def get_session_replay(self, session_id: str) -> list[dict]:
+ """Get all actions from a session for replay/debugging."""
+ log_file = os.path.join(self.log_dir, f"session_{session_id}.jsonl")
+
+ actions = []
+ with open(log_file, "r") as f:
+ for line in f:
+ actions.append(json.loads(line))
+
+ return actions
+
+# Integration with agent
+class LoggedComputerUseAgent:
+ """Computer use agent with comprehensive logging."""
+
+ def __init__(self, base_agent, logger: ActionLogger):
+ self.agent = base_agent
+ self.logger = logger
+
+ def run_task(self, task: str) -> dict:
+ """Run task with full logging."""
+ session_id = self.logger.start_session(task)
+
+ try:
+ result = self._run_with_logging(task)
+ self.logger.end_session(
+ success=result.get("success", False),
+ result=result.get("result")
+ )
+ return result
+ except Exception as e:
+ self.logger.end_session(success=False, result=str(e))
+ raise
+
+ def _run_with_logging(self, task: str) -> dict:
+ """Internal run with action logging."""
+ # This would wrap the base agent's run method
+ # and log each action
+ pass
+
+### Anti_patterns
+
+- Not sanitizing sensitive data in logs
+- Storing screenshots indefinitely (storage costs)
+- Not rotating log files
+- Logging synchronously (blocks agent)
+
+## Sharp Edges
+
+### Web Content Can Hijack Your Agent
+
+Severity: CRITICAL
+
+Situation: Computer use agent browsing the web
+
+Symptoms:
+Agent suddenly performs unexpected actions. Clicks malicious links.
+Enters credentials on phishing sites. Downloads files it shouldn't.
+Ignores your instructions and follows embedded commands instead.
+
+Why this breaks:
+"While all agents that process untrusted content are subject to prompt
+injection risks, browser use amplifies this risk in two ways. First,
+the attack surface is vast: every webpage, embedded document, advertisement,
+and dynamically loaded script represents a potential vector for malicious
+instructions. Second, browser agents can take many different actions—
+navigating to URLs, filling forms, clicking buttons, downloading files—
+that attackers can exploit."
+
+Real attacks have already happened:
+- "Microsoft Copilot agents were hijacked with emails containing malicious
+ instructions, which allowed attackers to extract entire CRM databases."
+- "Google's Workspace services were manipulated—hidden prompts inside
+ calendar invites and emails tricked Gemini agents into deleting events
+ and exposing sensitive messages."
+
+Even a 1% attack success rate represents meaningful risk at scale.
+
+Recommended fix:
+
+## Defense in depth - no single solution works
+
+1. Sandboxing (most effective):
+ ```bash
+ # Docker with strict isolation
+ # --network none disables internet access entirely
+ docker run \
+ --security-opt no-new-privileges \
+ --cap-drop ALL \
+ --network none \
+ --read-only \
+ computer-use-agent
+ ```
+
+2. Classifier-based detection:
+ ```python
+ def scan_for_injection(content: str) -> bool:
+ """Detect prompt injection attempts."""
+ patterns = [
+ r"ignore.*instructions",
+ r"disregard.*previous",
+ r"new.*instructions",
+ r"you are now",
+ r"act as if",
+ r"pretend to be",
+ ]
+ return any(re.search(p, content.lower()) for p in patterns)
+
+ # Check page content before processing
+ page_text = await page.text_content("body")
+ if scan_for_injection(page_text):
+ return {"error": "Potential injection detected"}
+ ```
+
+3. User confirmation for sensitive actions:
+ ```python
+ SENSITIVE_ACTIONS = {"download", "submit", "login", "purchase"}
+
+ if action_type in SENSITIVE_ACTIONS:
+ if not await get_user_confirmation(action):
+ return {"error": "User rejected action"}
+ ```
+
+4. Scoped credentials:
+ - Never give agent access to all credentials
+ - Use temporary, limited tokens
+ - Revoke after task completion
+
+### Vision Agents Click Exact Centers
+
+Severity: MEDIUM
+
+Situation: Agent clicking on UI elements
+
+Symptoms:
+Agent's clicks are detectable as non-human. Websites may block or
+CAPTCHA the agent. Anti-bot systems flag the interaction.
+
+Why this breaks:
+"When a vision model identifies a button, it calculates the center.
+Click coordinates land at mathematically precise positions—often exact
+element centers or grid-aligned pixel values. Humans don't click centers;
+their click distributions follow a Gaussian pattern around targets."
+
+The screenshot loop also creates detectable patterns:
+"Predictable pauses. Vision agents are completely still during their
+'thinking' phase. The pattern looks like: Action → Complete stillness
+(1-5 seconds) → Action → Complete stillness → Action."
+
+Sophisticated anti-bot systems detect:
+- Perfect center clicks
+- No mouse movement during "thinking"
+- Consistent timing between actions
+- Lack of micro-movements and hesitation
+
+Recommended fix:
+
+## Add human-like variance to actions
+
+```python
+import random
+import time
+
+def humanized_click(x: int, y: int) -> tuple[int, int]:
+ """Add human-like variance to click coordinates."""
+ # Gaussian distribution around target
+ # Humans typically land within ~10px of target
+ x_offset = int(random.gauss(0, 5))
+ y_offset = int(random.gauss(0, 5))
+
+ return (x + x_offset, y + y_offset)
+
+def humanized_delay():
+ """Add human-like delay between actions."""
+ # Humans have variable reaction times
+ base_delay = random.uniform(0.3, 0.8)
+ # Occasionally longer pauses (reading, thinking)
+ if random.random() < 0.2:
+ base_delay += random.uniform(0.5, 2.0)
+ time.sleep(base_delay)
+
+def humanized_movement(from_pos: tuple, to_pos: tuple):
+ """Move mouse in curved path like human."""
+ # Bezier curve or similar
+ # Humans don't move in straight lines
+ steps = random.randint(10, 20)
+ for i in range(steps):
+ t = i / steps
+ # Simple curve approximation
+ x = from_pos[0] + (to_pos[0] - from_pos[0]) * t
+ y = from_pos[1] + (to_pos[1] - from_pos[1]) * t
+ # Add wobble
+ x += random.gauss(0, 2)
+ y += random.gauss(0, 2)
+ pyautogui.moveTo(int(x), int(y))
+ time.sleep(0.01)
```
-## ⚠️ Sharp Edges
+## Rotate user agents and fingerprints
-| Issue | Severity | Solution |
-|-------|----------|----------|
-| Issue | critical | ## Defense in depth - no single solution works |
-| Issue | medium | ## Add human-like variance to actions |
-| Issue | high | ## Use keyboard alternatives when possible |
-| Issue | medium | ## Accept the tradeoff |
-| Issue | high | ## Implement context management |
-| Issue | high | ## Monitor and limit costs |
-| Issue | critical | ## ALWAYS use sandboxing |
+```python
+USER_AGENTS = [
+ "Mozilla/5.0 (Windows NT 10.0; Win64; x64) Chrome/120...",
+ "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) Safari/...",
+ # ... more realistic agents
+]
+
+await page.set_extra_http_headers({
+ "User-Agent": random.choice(USER_AGENTS)
+})
+```
+
+### Dropdowns, Scrollbars, and Drags Are Unreliable
+
+Severity: HIGH
+
+Situation: Agent interacting with complex UI elements
+
+Symptoms:
+Agent fails to select dropdown options. Scroll doesn't work as expected.
+Drag and drop completely fails. Hover menus disappear before clicking.
+
+Why this breaks:
+"Computer Use currently struggles with certain interface interactions,
+particularly scrolling, dragging, and zooming operations. Some UI elements
+(like dropdowns and scrollbars) might be tricky for Claude to manipulate."
+- Anthropic documentation
+
+Why these are hard:
+1. Dropdowns: Options appear after click, need second click to select
+2. Scrollbars: Small targets, need precise positioning
+3. Drag: Requires coordinated mouse down, move, mouse up
+4. Hover menus: Disappear when mouse moves away
+5. Canvas elements: No semantic information visible
+
+Vision models see pixels, not DOM structure. They don't "know" that
+a dropdown is a dropdown - they have to infer from visual cues.
+
+Recommended fix:
+
+## Use keyboard alternatives when possible
+
+```python
+# Instead of clicking dropdown, use keyboard
+async def select_dropdown_option(page, dropdown_selector, option_text):
+ # Focus the dropdown
+ await page.click(dropdown_selector)
+ await asyncio.sleep(0.3)
+
+ # Use keyboard to find option
+ await page.keyboard.type(option_text[:3]) # Type first letters
+ await asyncio.sleep(0.2)
+ await page.keyboard.press("Enter")
+```
+
+## Break complex actions into steps
+
+```python
+# Instead of drag-and-drop
+async def reliable_drag(page, source, target):
+ # Step 1: Click and hold
+ await page.mouse.move(source["x"], source["y"])
+ await page.mouse.down()
+ await asyncio.sleep(0.2)
+
+ # Step 2: Move in steps
+ steps = 10
+ for i in range(steps):
+ x = source["x"] + (target["x"] - source["x"]) * i / steps
+ y = source["y"] + (target["y"] - source["y"]) * i / steps
+ await page.mouse.move(x, y)
+ await asyncio.sleep(0.05)
+
+ # Step 3: Release
+ await page.mouse.move(target["x"], target["y"])
+ await asyncio.sleep(0.1)
+ await page.mouse.up()
+```
+
+## Fall back to DOM access for web
+
+```python
+# If vision fails, try direct DOM manipulation
+async def robust_select(page, select_selector, value):
+ try:
+ # Try vision approach first
+ await vision_agent.select(select_selector, value)
+ except Exception:
+ # Fall back to direct DOM
+ await page.select_option(select_selector, value=value)
+```
+
+## Add verification after action
+
+```python
+async def verified_scroll(page, direction):
+ # Get current scroll position
+ before = await page.evaluate("window.scrollY")
+
+ # Attempt scroll
+ await page.mouse.wheel(0, 500 if direction == "down" else -500)
+ await asyncio.sleep(0.3)
+
+ # Verify it worked
+ after = await page.evaluate("window.scrollY")
+ if before == after:
+ # Try alternative method
+ await page.keyboard.press("PageDown" if direction == "down" else "PageUp")
+```
+
+### Agents Are 2-5x Slower Than Humans
+
+Severity: MEDIUM
+
+Situation: Automating any computer task
+
+Symptoms:
+Task that takes human 1 minute takes agent 3-5 minutes.
+Users complain about speed. Timeouts occur.
+
+Why this breaks:
+"The technology can be slow compared to human operators, often requiring
+multiple screenshots and analysis cycles."
+
+Why so slow:
+1. Screenshot capture: 100-500ms
+2. Vision model inference: 1-5 seconds per screenshot
+3. Action execution: 200-500ms
+4. Wait for UI update: 500-1000ms
+5. Total per action: 2-7 seconds
+
+A task requiring 20 actions takes 40-140 seconds minimum.
+Humans do the same actions in 20-30 seconds.
+
+Recommended fix:
+
+## Accept the tradeoff
+
+Computer use is for:
+- Tasks humans don't want to do (repetitive)
+- Tasks that can run in background
+- Tasks where accuracy > speed
+
+## Optimize where possible
+
+```python
+# 1. Reduce screenshot resolution
+SCREEN_SIZE = (1280, 800) # Not 4K
+
+# 2. Batch similar actions
+# Instead of: type "hello", wait, type " world"
+await page.type("hello world")
+
+# 3. Parallelize independent tasks
+# Run multiple sandboxed agents concurrently
+
+# 4. Cache repeated computations
+# If same screenshot, reuse analysis
+
+# 5. Use smaller models for simple decisions
+simple_model = "claude-haiku-..." # For "is task done?"
+complex_model = "claude-sonnet-..." # For complex reasoning
+```
+
+## Set realistic expectations
+
+```python
+# Estimate task duration
+def estimate_duration(task_complexity: str) -> int:
+ """Estimate task duration in seconds."""
+ estimates = {
+ "simple": 30, # Single page, few actions
+ "medium": 120, # Multi-page, moderate actions
+ "complex": 300, # Many pages, complex interactions
+ }
+ return estimates.get(task_complexity, 120)
+
+# Inform users
+estimated = estimate_duration("medium")
+print(f"Estimated completion: {estimated // 60}m {estimated % 60}s")
+```
+
+### Screenshots Fill Up Context Window Fast
+
+Severity: HIGH
+
+Situation: Long-running computer use tasks
+
+Symptoms:
+Agent forgets earlier steps. Starts repeating actions.
+Errors increase as task progresses. Costs explode.
+
+Why this breaks:
+Each screenshot is ~1500-3000 tokens. A task with 30 screenshots
+uses 45,000-90,000 tokens just for images - before any text.
+
+Claude's context window is finite. When full:
+- Older context gets dropped
+- Agent loses memory of earlier steps
+- Task coherence decreases
+
+"Getting agents to make consistent progress across multiple context
+windows remains an open problem. The core challenge is that they must
+work in discrete sessions, and each new session begins with no memory
+of what came before." - Anthropic engineering blog
+
+Recommended fix:
+
+## Implement context management
+
+```python
+class ContextManager:
+ """Manage context window usage for computer use."""
+
+ MAX_SCREENSHOTS = 10 # Keep only recent screenshots
+ MAX_TOKENS = 100000
+
+ def __init__(self):
+ self.messages = []
+ self.screenshot_count = 0
+
+ def add_screenshot(self, screenshot_b64: str, description: str):
+ """Add screenshot with automatic pruning."""
+ self.screenshot_count += 1
+
+ # Keep only recent screenshots
+ if self.screenshot_count > self.MAX_SCREENSHOTS:
+ self._prune_old_screenshots()
+
+ # Store with description for context
+ self.messages.append({
+ "role": "user",
+ "content": [
+ {"type": "text", "text": description},
+ {"type": "image", "source": {...}}
+ ]
+ })
+
+ def _prune_old_screenshots(self):
+ """Remove old screenshots, keep text summaries."""
+ new_messages = []
+ screenshots_kept = 0
+
+ for msg in reversed(self.messages):
+ if self._has_image(msg):
+ if screenshots_kept < self.MAX_SCREENSHOTS:
+ new_messages.insert(0, msg)
+ screenshots_kept += 1
+ else:
+ # Convert to text summary
+ summary = self._summarize_screenshot(msg)
+ new_messages.insert(0, {
+ "role": msg["role"],
+ "content": summary
+ })
+ else:
+ new_messages.insert(0, msg)
+
+ self.messages = new_messages
+
+ def _summarize_screenshot(self, msg) -> str:
+ """Summarize screenshot to text."""
+ # Extract any text description
+ for content in msg.get("content", []):
+ if content.get("type") == "text":
+ return f"[Previous screenshot: {content['text']}]"
+ return "[Previous screenshot - details pruned]"
+
+ def add_checkpoint(self):
+ """Create a checkpoint summary."""
+ summary = self._create_progress_summary()
+ self.messages.append({
+ "role": "user",
+ "content": f"CHECKPOINT: {summary}"
+ })
+```
+
+## Use checkpointing for long tasks
+
+```python
+async def run_with_checkpoints(task: str, checkpoint_every: int = 10):
+ """Run task with periodic checkpoints."""
+ context = ContextManager()
+ step = 0
+
+ while not task_complete:
+ step += 1
+
+ # Take action...
+
+ if step % checkpoint_every == 0:
+ # Create checkpoint
+ context.add_checkpoint()
+
+ # Optional: persist to disk
+ save_checkpoint(context, step)
+```
+
+## Break into subtasks
+
+```python
+# Instead of one 50-step task:
+subtasks = [
+ "Navigate to the website and login",
+ "Find the settings page",
+ "Update the email address to ...",
+ "Save and verify the change"
+]
+
+for subtask in subtasks:
+ result = await agent.run(subtask)
+ if not result["success"]:
+ handle_error(subtask, result)
+ break
+```
+
+### Costs Can Explode Quickly
+
+Severity: HIGH
+
+Situation: Running computer use at scale
+
+Symptoms:
+API bill is 10x higher than expected. Single task costs $5+ instead of $0.50.
+Monthly costs reach thousands of dollars quickly.
+
+Why this breaks:
+Vision tokens are expensive. Each screenshot:
+- ~2000-3000 tokens per image
+- At $10/million tokens, that's $0.02-0.03 per screenshot
+- Task with 30 screenshots = $0.60-0.90 just for images
+
+But it compounds:
+- Screenshots accumulate in context
+- Model sees ALL previous screenshots each turn
+- Turn 10 processes 10 screenshots = $0.20-0.30
+- Turn 20 processes 20 screenshots = $0.40-0.60
+- Quadratic growth!
+
+Complex task: 50 turns × average 25 images in context = 1,250 cumulative image passes
+Plus text = could easily hit $5-10 per task.
+
+Recommended fix:
+
+## Monitor and limit costs
+
+```python
+class CostTracker:
+ """Track and limit computer use costs."""
+
+ # Anthropic pricing (approximate)
+ INPUT_COST_PER_1K = 0.003 # Text
+ OUTPUT_COST_PER_1K = 0.015
+ IMAGE_COST_PER_1K = 0.01 # Roughly
+
+ def __init__(self, max_cost_per_task: float = 1.0):
+ self.max_cost = max_cost_per_task
+ self.current_cost = 0.0
+ self.total_tokens = 0
+
+ def add_turn(
+ self,
+ input_tokens: int,
+ output_tokens: int,
+ image_tokens: int
+ ):
+ """Track cost of a single turn."""
+ cost = (
+ input_tokens / 1000 * self.INPUT_COST_PER_1K +
+ output_tokens / 1000 * self.OUTPUT_COST_PER_1K +
+ image_tokens / 1000 * self.IMAGE_COST_PER_1K
+ )
+ self.current_cost += cost
+ self.total_tokens += input_tokens + output_tokens + image_tokens
+
+ if self.current_cost > self.max_cost:
+ raise CostLimitExceeded(
+ f"Cost limit exceeded: ${self.current_cost:.2f} > ${self.max_cost:.2f}"
+ )
+
+ return cost
+
+class CostLimitExceeded(Exception):
+ pass
+
+# Usage
+tracker = CostTracker(max_cost_per_task=2.0)
+
+try:
+ for turn in turns:
+ tracker.add_turn(turn.input, turn.output, turn.images)
+except CostLimitExceeded:
+ print("Task aborted due to cost limit")
+```
+
+## Reduce image costs
+
+```python
+# 1. Lower resolution
+SCREEN_SIZE = (1024, 768) # Smaller = fewer tokens
+
+# 2. JPEG instead of PNG (when quality ok)
+screenshot.save(buffer, format="JPEG", quality=70)
+
+# 3. Crop to relevant region
+def crop_relevant(screenshot: Image, focus_area: tuple):
+ """Crop to area of interest."""
+ return screenshot.crop(focus_area)
+
+# 4. Don't include screenshot every turn
+if not needs_visual_update:
+ # Text-only turn
+ messages.append({"role": "user", "content": "Continue..."})
+```
+
+## Use cheaper models strategically
+
+```python
+async def tiered_model_selection(task_complexity: str):
+ """Use appropriate model for task."""
+ if task_complexity == "simple":
+ return "claude-haiku-..." # Cheapest
+ elif task_complexity == "medium":
+ return "claude-sonnet-4-20250514" # Balanced
+ else:
+ return "claude-opus-4-5-..." # Best but expensive
+```
+
+### Running Agent on Your Actual Computer
+
+Severity: CRITICAL
+
+Situation: Testing or deploying computer use
+
+Symptoms:
+Agent deletes important files. Sends emails from your account.
+Posts on social media. Accesses sensitive documents.
+
+Why this breaks:
+Computer use agents make mistakes. They can:
+- Misinterpret instructions
+- Click wrong buttons
+- Type in wrong fields
+- Follow prompt injection attacks
+
+Without sandboxing, these mistakes happen on your real system.
+There's no undo for "agent sent email to all contacts" or
+"agent deleted project folder."
+
+"Autonomous agents that can access external systems and APIs
+introduce new security risks. They may be vulnerable to prompt
+injection attacks, unauthorized access to sensitive data, or
+manipulation by malicious actors."
+
+Recommended fix:
+
+## ALWAYS use sandboxing
+
+```bash
+# Minimum viable sandbox: Docker with restrictions
+
+docker run -it --rm \
+ --security-opt no-new-privileges \
+ --cap-drop ALL \
+ --network none \
+ --read-only \
+ --tmpfs /tmp \
+ --memory 2g \
+ --cpus 1 \
+ computer-use-sandbox
+```
+
+## Layer your defenses
+
+```python
+# Defense 1: Docker isolation
+# Defense 2: Non-root user
+# Defense 3: Network restrictions
+# Defense 4: Filesystem restrictions
+# Defense 5: Resource limits
+# Defense 6: Action confirmation
+# Defense 7: Action logging
+
+@dataclass
+class SandboxConfig:
+ docker_image: str = "computer-use-sandbox:latest"
+ network: str = "none" # or specific allowlist
+ readonly_root: bool = True
+ max_memory_mb: int = 2048
+ max_cpu: float = 1.0
+ max_runtime_seconds: int = 300
+ require_confirmation: list = field(default_factory=lambda: [
+ "download", "submit", "login", "delete"
+ ])
+ log_all_actions: bool = True
+```
+
+## Test in isolated environment first
+
+```python
+class SandboxedTestRunner:
+ """Run tests in throwaway containers."""
+
+ async def run_test(self, test_task: str) -> dict:
+ # Spin up fresh container
+ container_id = await self.create_container()
+
+ try:
+ # Run task
+ result = await self.execute_in_container(container_id, test_task)
+
+ # Capture state for verification
+ state = await self.capture_container_state(container_id)
+
+ return {
+ "result": result,
+ "final_state": state,
+ "logs": await self.get_logs(container_id)
+ }
+ finally:
+ # Always destroy container
+ await self.destroy_container(container_id)
+```
+
+## Validation Checks
+
+### Computer Use Without Sandbox
+
+Severity: ERROR
+
+Computer use agents MUST run in sandboxed environments
+
+Message: Computer use without sandboxing detected. Use Docker containers with restrictions.
+
+### Sandbox With Full Network Access
+
+Severity: ERROR
+
+Sandboxed agents should have restricted network access
+
+Message: Sandbox has full network access. Use --network=none or specific allowlist.
+
+### Running as Root in Container
+
+Severity: ERROR
+
+Container agents should run as non-root user
+
+Message: Container running as root. Add --user flag or USER directive in Dockerfile.
+
+### Container Without Capability Drops
+
+Severity: WARNING
+
+Containers should drop unnecessary capabilities
+
+Message: Container has full capabilities. Add --cap-drop ALL.
+
+### Container Without Seccomp Profile
+
+Severity: WARNING
+
+Containers should use seccomp profiles for syscall filtering
+
+Message: No security options set. Consider --security-opt seccomp:profile.json
+
+### No Maximum Step Limit
+
+Severity: WARNING
+
+Computer use loops should have maximum step limits
+
+Message: Infinite loop risk. Add max_steps limit (recommended: 50).
+
+### No Execution Timeout
+
+Severity: WARNING
+
+Computer use should have timeout limits
+
+Message: No timeout on execution. Add timeout (recommended: 5-10 minutes).
+
+### Container Without Memory Limit
+
+Severity: WARNING
+
+Containers should have memory limits to prevent DoS
+
+Message: No memory limit on container. Add --memory 2g or similar.
+
+### No Cost Tracking
+
+Severity: WARNING
+
+Computer use should track API costs
+
+Message: No cost tracking. Monitor token usage to prevent bill surprises.
+
+### No Maximum Cost Limit
+
+Severity: INFO
+
+Consider adding cost limits per task
+
+Message: Consider adding max_cost_per_task to prevent expensive runaway tasks.
+
+## Collaboration
+
+### Delegation Triggers
+
+- user needs web-only automation -> browser-automation (Playwright/Selenium more efficient for web)
+- user needs security review -> security-specialist (Review sandboxing, prompt injection defenses)
+- user needs container orchestration -> devops (Kubernetes, Docker Swarm for scaling)
+- user needs vision model optimization -> llm-architect (Model selection, prompt engineering)
+- user needs multi-agent coordination -> multi-agent-orchestration (Multiple computer use agents working together)
## When to Use
-This skill is applicable to execute the workflow or actions described in the overview.
+
+- User mentions or implies: computer use
+- User mentions or implies: desktop automation agent
+- User mentions or implies: screen control AI
+- User mentions or implies: vision-based agent
+- User mentions or implies: GUI automation
+- User mentions or implies: Claude computer
+- User mentions or implies: OpenAI Operator
+- User mentions or implies: browser agent
+- User mentions or implies: visual agent
+- User mentions or implies: RPA with AI
diff --git a/plugins/antigravity-awesome-skills/skills/context-window-management/SKILL.md b/plugins/antigravity-awesome-skills/skills/context-window-management/SKILL.md
index fa4717dd..e42fe233 100644
--- a/plugins/antigravity-awesome-skills/skills/context-window-management/SKILL.md
+++ b/plugins/antigravity-awesome-skills/skills/context-window-management/SKILL.md
@@ -1,23 +1,15 @@
---
name: context-window-management
-description: "You're a context engineering specialist who has optimized LLM applications handling millions of conversations. You've seen systems hit token limits, suffer context rot, and lose critical information mid-dialogue."
+description: Strategies for managing LLM context windows including
+ summarization, trimming, routing, and avoiding context rot
risk: unknown
-source: "vibeship-spawner-skills (Apache 2.0)"
-date_added: "2026-02-27"
+source: vibeship-spawner-skills (Apache 2.0)
+date_added: 2026-02-27
---
# Context Window Management
-You're a context engineering specialist who has optimized LLM applications handling
-millions of conversations. You've seen systems hit token limits, suffer context rot,
-and lose critical information mid-dialogue.
-
-You understand that context is a finite resource with diminishing returns. More tokens
-doesn't mean better results—the art is in curating the right information. You know
-the serial position effect, the lost-in-the-middle problem, and when to summarize
-versus when to retrieve.
-
-Your cor
+Strategies for managing LLM context windows including summarization, trimming, routing, and avoiding context rot
## Capabilities
@@ -28,31 +20,292 @@ Your cor
- token-counting
- context-prioritization
+## Prerequisites
+
+- Knowledge: LLM fundamentals, Tokenization basics, Prompt engineering
+- Skills_recommended: prompt-engineering
+
+## Scope
+
+- Does_not_cover: RAG implementation details, Model fine-tuning, Embedding models
+- Boundaries: Focus is context optimization, Covers strategies not specific implementations
+
+## Ecosystem
+
+### Primary_tools
+
+- tiktoken - OpenAI's tokenizer for counting tokens
+- LangChain - Framework with context management utilities
+- Claude API - 200K+ context with caching support
+
## Patterns
### Tiered Context Strategy
Different strategies based on context size
+**When to use**: Building any multi-turn conversation system
+
+interface ContextTier {
+ maxTokens: number;
+ strategy: 'full' | 'summarize' | 'rag';
+ model: string;
+}
+
+const TIERS: ContextTier[] = [
+ { maxTokens: 8000, strategy: 'full', model: 'claude-3-haiku' },
+ { maxTokens: 32000, strategy: 'full', model: 'claude-3-5-sonnet' },
+ { maxTokens: 100000, strategy: 'summarize', model: 'claude-3-5-sonnet' },
+ { maxTokens: Infinity, strategy: 'rag', model: 'claude-3-5-sonnet' }
+];
+
+async function selectStrategy(messages: Message[]): Promise<ContextTier> {
+ const tokens = await countTokens(messages);
+
+ for (const tier of TIERS) {
+ if (tokens <= tier.maxTokens) {
+ return tier;
+ }
+ }
+ return TIERS[TIERS.length - 1];
+}
+
+async function prepareContext(messages: Message[]): Promise<PreparedContext> {
+ const tier = await selectStrategy(messages);
+
+ switch (tier.strategy) {
+ case 'full':
+ return { messages, model: tier.model };
+
+ case 'summarize':
+ const summary = await summarizeOldMessages(messages);
+ return { messages: [summary, ...recentMessages(messages)], model: tier.model };
+
+ case 'rag':
+ const relevant = await retrieveRelevant(messages);
+ return { messages: [...relevant, ...recentMessages(messages)], model: tier.model };
+ }
+}
+
### Serial Position Optimization
Place important content at start and end
+**When to use**: Constructing prompts with significant context
+
+// LLMs weight beginning and end more heavily
+// Structure prompts to leverage this
+
+function buildOptimalPrompt(components: {
+ systemPrompt: string;
+ criticalContext: string;
+ conversationHistory: Message[];
+ currentQuery: string;
+}): string {
+ // START: System instructions (always first)
+ const parts = [components.systemPrompt];
+
+ // CRITICAL CONTEXT: Right after system (high primacy)
+ if (components.criticalContext) {
+ parts.push(`## Key Context\n${components.criticalContext}`);
+ }
+
+ // MIDDLE: Conversation history (lower weight)
+ // Summarize if long, keep recent messages full
+ const history = components.conversationHistory;
+ if (history.length > 10) {
+ const oldSummary = summarize(history.slice(0, -5));
+ const recent = history.slice(-5);
+ parts.push(`## Earlier Conversation (Summary)\n${oldSummary}`);
+ parts.push(`## Recent Messages\n${formatMessages(recent)}`);
+ } else {
+ parts.push(`## Conversation\n${formatMessages(history)}`);
+ }
+
+ // END: Current query (high recency)
+ // Restate critical requirements here
+ parts.push(`## Current Request\n${components.currentQuery}`);
+
+ // FINAL: Reminder of key constraints
+ parts.push(`Remember: ${extractKeyConstraints(components.systemPrompt)}`);
+
+ return parts.join('\n\n');
+}
+
### Intelligent Summarization
Summarize by importance, not just recency
-## Anti-Patterns
+**When to use**: Context exceeds optimal size
-### ❌ Naive Truncation
+interface MessageWithMetadata extends Message {
+ importance: number; // 0-1 score
+ hasCriticalInfo: boolean; // User preferences, decisions
+ referenced: boolean; // Was this referenced later?
+}
-### ❌ Ignoring Token Costs
+async function smartSummarize(
+ messages: MessageWithMetadata[],
+ targetTokens: number
+): Promise<Message[]> {
+ // Sort by importance, preserve order for tied scores
+ const sorted = [...messages].sort((a, b) =>
+ (b.importance + (b.hasCriticalInfo ? 0.5 : 0) + (b.referenced ? 0.3 : 0)) -
+ (a.importance + (a.hasCriticalInfo ? 0.5 : 0) + (a.referenced ? 0.3 : 0))
+ );
-### ❌ One-Size-Fits-All
+ const keep: Message[] = [];
+ const summarizePool: Message[] = [];
+ let currentTokens = 0;
+
+ for (const msg of sorted) {
+ const msgTokens = await countTokens([msg]);
+ if (currentTokens + msgTokens < targetTokens * 0.7) {
+ keep.push(msg);
+ currentTokens += msgTokens;
+ } else {
+ summarizePool.push(msg);
+ }
+ }
+
+ // Summarize the low-importance messages
+ if (summarizePool.length > 0) {
+ const summary = await llm.complete(`
+ Summarize these messages, preserving:
+ - Any user preferences or decisions
+ - Key facts that might be referenced later
+ - The overall flow of conversation
+
+ Messages:
+ ${formatMessages(summarizePool)}
+ `);
+
+ keep.unshift({ role: 'system', content: `[Earlier context: ${summary}]` });
+ }
+
+ // Restore original order
+ return keep.sort((a, b) => a.timestamp - b.timestamp);
+}
+
+### Token Budget Allocation
+
+Allocate token budget across context components
+
+**When to use**: Need predictable context management
+
+interface TokenBudget {
+ system: number; // System prompt
+ criticalContext: number; // User prefs, key info
+ history: number; // Conversation history
+ query: number; // Current query
+ response: number; // Reserved for response
+}
+
+function allocateBudget(totalTokens: number): TokenBudget {
+ return {
+ system: Math.floor(totalTokens * 0.10), // 10%
+ criticalContext: Math.floor(totalTokens * 0.15), // 15%
+ history: Math.floor(totalTokens * 0.40), // 40%
+ query: Math.floor(totalTokens * 0.10), // 10%
+ response: Math.floor(totalTokens * 0.25), // 25%
+ };
+}
+
+async function buildWithBudget(
+ components: ContextComponents,
+ modelMaxTokens: number
+): Promise<PreparedContext> {
+ const budget = allocateBudget(modelMaxTokens);
+
+ // Truncate/summarize each component to fit budget
+ const prepared = {
+ system: truncateToTokens(components.system, budget.system),
+ criticalContext: truncateToTokens(
+ components.criticalContext, budget.criticalContext
+ ),
+ history: await summarizeToTokens(components.history, budget.history),
+ query: truncateToTokens(components.query, budget.query),
+ };
+
+ // Reallocate unused budget
+ const used = await countTokens(Object.values(prepared).join('\n'));
+ const remaining = modelMaxTokens - used - budget.response;
+
+ if (remaining > 0) {
+ // Give extra to history (most valuable for conversation)
+ prepared.history = await summarizeToTokens(
+ components.history,
+ budget.history + remaining
+ );
+ }
+
+ return prepared;
+}
+
+## Validation Checks
+
+### No Token Counting
+
+Severity: WARNING
+
+Message: Building context without token counting. May exceed model limits.
+
+Fix action: Count tokens before sending, implement budget allocation
+
+### Naive Message Truncation
+
+Severity: WARNING
+
+Message: Truncating messages without summarization. Critical context may be lost.
+
+Fix action: Summarize old messages instead of simply removing them
+
+### Hardcoded Token Limit
+
+Severity: INFO
+
+Message: Hardcoded token limit. Consider making configurable per model.
+
+Fix action: Use model-specific limits from configuration
+
+### No Context Management Strategy
+
+Severity: WARNING
+
+Message: LLM calls without context management strategy.
+
+Fix action: Implement context management: budgets, summarization, or RAG
+
+## Collaboration
+
+### Delegation Triggers
+
+- retrieval|rag|search -> rag-implementation (Need retrieval system)
+- memory|persistence|remember -> conversation-memory (Need memory storage)
+- cache|caching -> prompt-caching (Need caching optimization)
+
+### Complete Context System
+
+Skills: context-window-management, rag-implementation, conversation-memory, prompt-caching
+
+Workflow:
+
+```
+1. Design context strategy
+2. Implement RAG for large corpuses
+3. Set up memory persistence
+4. Add caching for performance
+```
## Related Skills
Works well with: `rag-implementation`, `conversation-memory`, `prompt-caching`, `llm-npc-dialogue`
## When to Use
-This skill is applicable to execute the workflow or actions described in the overview.
+
+- User mentions or implies: context window
+- User mentions or implies: token limit
+- User mentions or implies: context management
+- User mentions or implies: context engineering
+- User mentions or implies: long context
+- User mentions or implies: context overflow
diff --git a/plugins/antigravity-awesome-skills/skills/conversation-memory/SKILL.md b/plugins/antigravity-awesome-skills/skills/conversation-memory/SKILL.md
index 3a57f20b..e081bdf7 100644
--- a/plugins/antigravity-awesome-skills/skills/conversation-memory/SKILL.md
+++ b/plugins/antigravity-awesome-skills/skills/conversation-memory/SKILL.md
@@ -1,23 +1,15 @@
---
name: conversation-memory
-description: "Persistent memory systems for LLM conversations including short-term, long-term, and entity-based memory Use when: conversation memory, remember, memory persistence, long-term memory, chat history."
+description: Persistent memory systems for LLM conversations including
+ short-term, long-term, and entity-based memory
risk: unknown
-source: "vibeship-spawner-skills (Apache 2.0)"
-date_added: "2026-02-27"
+source: vibeship-spawner-skills (Apache 2.0)
+date_added: 2026-02-27
---
# Conversation Memory
-You're a memory systems specialist who has built AI assistants that remember
-users across months of interactions. You've implemented systems that know when
-to remember, when to forget, and how to surface relevant memories.
-
-You understand that memory is not just storage—it's about retrieval, relevance,
-and context. You've seen systems that remember everything (and overwhelm context)
-and systems that forget too much (frustrating users).
-
-Your core principles:
-1. Memory types differ—short-term, lo
+Persistent memory systems for LLM conversations including short-term, long-term, and entity-based memory
## Capabilities
@@ -28,39 +20,476 @@ Your core principles:
- memory-retrieval
- memory-consolidation
+## Prerequisites
+
+- Knowledge: LLM conversation patterns, Database basics, Key-value stores
+- Skills_recommended: context-window-management, rag-implementation
+
+## Scope
+
+- Does_not_cover: Knowledge graph construction, Semantic search implementation, Database administration
+- Boundaries: Focus is memory patterns for LLMs, Covers storage and retrieval strategies
+
+## Ecosystem
+
+### Primary_tools
+
+- Mem0 - Memory layer for AI applications
+- LangChain Memory - Memory utilities in LangChain
+- Redis - In-memory data store for session memory
+
## Patterns
### Tiered Memory System
Different memory tiers for different purposes
+**When to use**: Building any conversational AI
+
+interface MemorySystem {
+ // Buffer: Current conversation (in context)
+ buffer: ConversationBuffer;
+
+ // Short-term: Recent interactions (session)
+ shortTerm: ShortTermMemory;
+
+ // Long-term: Persistent across sessions
+ longTerm: LongTermMemory;
+
+ // Entity: Facts about people, places, things
+ entity: EntityMemory;
+}
+
+class TieredMemory implements MemorySystem {
+ async addMessage(message: Message): Promise {
+ // Always add to buffer
+ this.buffer.add(message);
+
+ // Extract entities
+ const entities = await extractEntities(message);
+ for (const entity of entities) {
+ await this.entity.upsert(entity);
+ }
+
+ // Check for memorable content
+ if (await isMemoryWorthy(message)) {
+ await this.shortTerm.add({
+ content: message.content,
+ timestamp: Date.now(),
+ importance: await scoreImportance(message)
+ });
+ }
+ }
+
+ async consolidate(): Promise {
+ // Move important short-term to long-term
+ const memories = await this.shortTerm.getOld(24 * 60 * 60 * 1000);
+ for (const memory of memories) {
+ if (memory.importance > 0.7 || memory.referenced > 2) {
+ await this.longTerm.add(memory);
+ }
+ await this.shortTerm.remove(memory.id);
+ }
+ }
+
+ async buildContext(query: string): Promise {
+ const parts: string[] = [];
+
+ // Relevant long-term memories
+ const longTermRelevant = await this.longTerm.search(query, 3);
+ if (longTermRelevant.length) {
+ parts.push('## Relevant Memories\n' +
+ longTermRelevant.map(m => `- ${m.content}`).join('\n'));
+ }
+
+ // Relevant entities
+ const entities = await this.entity.getRelevant(query);
+ if (entities.length) {
+ parts.push('## Known Entities\n' +
+ entities.map(e => `- ${e.name}: ${e.facts.join(', ')}`).join('\n'));
+ }
+
+ // Recent conversation
+ const recent = this.buffer.getRecent(10);
+ parts.push('## Recent Conversation\n' + formatMessages(recent));
+
+ return parts.join('\n\n');
+ }
+}
+
### Entity Memory
Store and update facts about entities
+**When to use**: Need to remember details about people, places, things
+
+interface Entity {
+ id: string;
+ name: string;
+ type: 'person' | 'place' | 'thing' | 'concept';
+ facts: Fact[];
+ lastMentioned: number;
+ mentionCount: number;
+}
+
+interface Fact {
+ content: string;
+ confidence: number;
+ source: string; // Which message this came from
+ timestamp: number;
+}
+
+class EntityMemory {
+ async extractAndStore(message: Message): Promise {
+ // Use LLM to extract entities and facts
+ const extraction = await llm.complete(`
+ Extract entities and facts from this message.
+ Return JSON: { "entities": [
+ { "name": "...", "type": "...", "facts": ["..."] }
+ ]}
+
+ Message: "${message.content}"
+ `);
+
+ const { entities } = JSON.parse(extraction);
+ for (const entity of entities) {
+ await this.upsert(entity, message.id);
+ }
+ }
+
+ async upsert(entity: ExtractedEntity, sourceId: string): Promise {
+ const existing = await this.store.get(entity.name.toLowerCase());
+
+ if (existing) {
+ // Merge facts, avoiding duplicates
+ for (const fact of entity.facts) {
+ if (!this.hasSimilarFact(existing.facts, fact)) {
+ existing.facts.push({
+ content: fact,
+ confidence: 0.9,
+ source: sourceId,
+ timestamp: Date.now()
+ });
+ }
+ }
+ existing.lastMentioned = Date.now();
+ existing.mentionCount++;
+ await this.store.set(existing.id, existing);
+ } else {
+ // Create new entity
+ await this.store.set(entity.name.toLowerCase(), {
+ id: generateId(),
+ name: entity.name,
+ type: entity.type,
+ facts: entity.facts.map(f => ({
+ content: f,
+ confidence: 0.9,
+ source: sourceId,
+ timestamp: Date.now()
+ })),
+ lastMentioned: Date.now(),
+ mentionCount: 1
+ });
+ }
+ }
+}
+
### Memory-Aware Prompting
Include relevant memories in prompts
-## Anti-Patterns
+**When to use**: Making LLM calls with memory context
-### ❌ Remember Everything
+async function promptWithMemory(
+ query: string,
+ memory: MemorySystem,
+ systemPrompt: string
+): Promise {
+ // Retrieve relevant memories
+ const relevantMemories = await memory.longTerm.search(query, 5);
+ const entities = await memory.entity.getRelevant(query);
+ const recentContext = memory.buffer.getRecent(5);
-### ❌ No Memory Retrieval
+ // Build memory-augmented prompt
+ const prompt = `
+${systemPrompt}
-### ❌ Single Memory Store
+## User Context
+${entities.length ? `Known about user:\n${entities.map(e =>
+ `- ${e.name}: ${e.facts.map(f => f.content).join('; ')}`
+).join('\n')}` : ''}
-## ⚠️ Sharp Edges
+${relevantMemories.length ? `Relevant past interactions:\n${relevantMemories.map(m =>
+ `- [${formatDate(m.timestamp)}] ${m.content}`
+).join('\n')}` : ''}
-| Issue | Severity | Solution |
-|-------|----------|----------|
-| Memory store grows unbounded, system slows | high | // Implement memory lifecycle management |
-| Retrieved memories not relevant to current query | high | // Intelligent memory retrieval |
-| Memories from one user accessible to another | critical | // Strict user isolation in memory |
+## Recent Conversation
+${formatMessages(recentContext)}
+
+## Current Query
+${query}
+ `.trim();
+
+ const response = await llm.complete(prompt);
+
+ // Extract any new memories from response
+ await memory.addMessage({ role: 'assistant', content: response });
+
+ return response;
+}
+
+## Sharp Edges
+
+### Memory store grows unbounded, system slows
+
+Severity: HIGH
+
+Situation: System slows over time, costs increase
+
+Symptoms:
+- Slow memory retrieval
+- High storage costs
+- Increasing latency over time
+
+Why this breaks:
+Every message stored as memory.
+No cleanup or consolidation.
+Retrieval over millions of items.
+
+Recommended fix:
+
+// Implement memory lifecycle management
+
+class ManagedMemory {
+ // Limits
+ private readonly SHORT_TERM_MAX = 100;
+ private readonly LONG_TERM_MAX = 10000;
+ private readonly CONSOLIDATION_INTERVAL = 24 * 60 * 60 * 1000;
+
+ async add(memory: Memory): Promise {
+ // Score importance before storing
+ const score = await this.scoreImportance(memory);
+ if (score < 0.3) return; // Don't store low-importance
+
+ memory.importance = score;
+ await this.shortTerm.add(memory);
+
+ // Check limits
+ await this.enforceShortTermLimit();
+ }
+
+ async enforceShortTermLimit(): Promise {
+ const count = await this.shortTerm.count();
+ if (count > this.SHORT_TERM_MAX) {
+ // Consolidate: move important to long-term, delete rest
+ const memories = await this.shortTerm.getAll();
+ memories.sort((a, b) => b.importance - a.importance);
+
+ const toKeep = memories.slice(0, this.SHORT_TERM_MAX * 0.7);
+ const toConsolidate = memories.slice(this.SHORT_TERM_MAX * 0.7);
+
+ for (const m of toConsolidate) {
+ if (m.importance > 0.7) {
+ await this.longTerm.add(m);
+ }
+ await this.shortTerm.remove(m.id);
+ }
+ }
+ }
+
+ async scoreImportance(memory: Memory): Promise {
+ const factors = {
+ hasUserPreference: /prefer|like|don't like|hate|love/i.test(memory.content) ? 0.3 : 0,
+ hasDecision: /decided|chose|will do|won't do/i.test(memory.content) ? 0.3 : 0,
+ hasFactAboutUser: /my|I am|I have|I work/i.test(memory.content) ? 0.2 : 0,
+ length: memory.content.length > 100 ? 0.1 : 0,
+ userMessage: memory.role === 'user' ? 0.1 : 0,
+ };
+
+ return Object.values(factors).reduce((a, b) => a + b, 0);
+ }
+}
+
+### Retrieved memories not relevant to current query
+
+Severity: HIGH
+
+Situation: Memories included in context but don't help
+
+Symptoms:
+- Memories in context seem random
+- User asks about things already in memory
+- Confusion from irrelevant context
+
+Why this breaks:
+Simple keyword matching.
+No relevance scoring.
+Including all retrieved memories.
+
+Recommended fix:
+
+// Intelligent memory retrieval
+
+async function retrieveRelevant(
+ query: string,
+ memories: MemoryStore,
+ maxResults: number = 5
+): Promise {
+ // 1. Semantic search
+ const candidates = await memories.semanticSearch(query, maxResults * 3);
+
+ // 2. Score relevance with context
+ const scored = await Promise.all(candidates.map(async (m) => {
+ const relevanceScore = await llm.complete(`
+ Rate 0-1 how relevant this memory is to the query.
+ Query: "${query}"
+ Memory: "${m.content}"
+ Return just the number.
+ `);
+ return { ...m, relevance: parseFloat(relevanceScore) };
+ }));
+
+ // 3. Filter low relevance
+ const relevant = scored.filter(m => m.relevance > 0.5);
+
+ // 4. Sort and limit
+ return relevant
+ .sort((a, b) => b.relevance - a.relevance)
+ .slice(0, maxResults);
+}
+
+### Memories from one user accessible to another
+
+Severity: CRITICAL
+
+Situation: User sees information from another user's sessions
+
+Symptoms:
+- User sees other user's information
+- Privacy complaints
+- Compliance violations
+
+Why this breaks:
+No user isolation in memory store.
+Shared memory namespace.
+Cross-user retrieval.
+
+Recommended fix:
+
+// Strict user isolation in memory
+
+class IsolatedMemory {
+ private getKey(userId: string, memoryId: string): string {
+ // Namespace all keys by user
+ return `user:${userId}:memory:${memoryId}`;
+ }
+
+ async add(userId: string, memory: Memory): Promise {
+ // Validate userId is authenticated
+ if (!isValidUserId(userId)) {
+ throw new Error('Invalid user ID');
+ }
+
+ const key = this.getKey(userId, memory.id);
+ memory.userId = userId; // Tag with user
+ await this.store.set(key, memory);
+ }
+
+ async search(userId: string, query: string): Promise {
+ // CRITICAL: Filter by user in query
+ return await this.store.search({
+ query,
+ filter: { userId: userId }, // Mandatory filter
+ limit: 10
+ });
+ }
+
+ async delete(userId: string, memoryId: string): Promise {
+ const memory = await this.get(userId, memoryId);
+ // Verify ownership before delete
+ if (memory.userId !== userId) {
+ throw new Error('Access denied');
+ }
+ await this.store.delete(this.getKey(userId, memoryId));
+ }
+
+ // User data export (GDPR compliance)
+ async exportUserData(userId: string): Promise {
+ return await this.store.getAll({ userId });
+ }
+
+ // User data deletion (GDPR compliance)
+ async deleteUserData(userId: string): Promise {
+ const memories = await this.exportUserData(userId);
+ for (const m of memories) {
+ await this.store.delete(this.getKey(userId, m.id));
+ }
+ }
+}
+
+## Validation Checks
+
+### No User Isolation in Memory
+
+Severity: CRITICAL
+
+Message: Memory operations without user isolation. Privacy vulnerability.
+
+Fix action: Add userId to all memory operations, filter by user on retrieval
+
+### No Importance Filtering
+
+Severity: WARNING
+
+Message: Storing memories without importance filtering. May cause memory explosion.
+
+Fix action: Score importance before storing, filter low-importance content
+
+### Memory Storage Without Retrieval
+
+Severity: WARNING
+
+Message: Storing memories but no retrieval logic. Memories won't be used.
+
+Fix action: Implement memory retrieval and include in prompts
+
+### No Memory Cleanup
+
+Severity: INFO
+
+Message: No memory cleanup mechanism. Storage will grow unbounded.
+
+Fix action: Implement consolidation and cleanup based on age/importance
+
+## Collaboration
+
+### Delegation Triggers
+
+- context window|token -> context-window-management (Need context optimization)
+- rag|retrieval|vector -> rag-implementation (Need retrieval system)
+- cache|caching -> prompt-caching (Need caching strategies)
+
+### Complete Memory System
+
+Skills: conversation-memory, context-window-management, rag-implementation
+
+Workflow:
+
+```
+1. Design memory tiers
+2. Implement storage and retrieval
+3. Integrate with context management
+4. Add consolidation and cleanup
+```
## Related Skills
Works well with: `context-window-management`, `rag-implementation`, `prompt-caching`, `llm-npc-dialogue`
## When to Use
-This skill is applicable to execute the workflow or actions described in the overview.
+
+- User mentions or implies: conversation memory
+- User mentions or implies: remember
+- User mentions or implies: memory persistence
+- User mentions or implies: long-term memory
+- User mentions or implies: chat history
diff --git a/plugins/antigravity-awesome-skills/skills/crewai/SKILL.md b/plugins/antigravity-awesome-skills/skills/crewai/SKILL.md
index 0fa51972..9e3acada 100644
--- a/plugins/antigravity-awesome-skills/skills/crewai/SKILL.md
+++ b/plugins/antigravity-awesome-skills/skills/crewai/SKILL.md
@@ -1,13 +1,19 @@
---
name: crewai
-description: "You are an expert in designing collaborative AI agent teams with CrewAI. You think in terms of roles, responsibilities, and delegation. You design clear agent personas with specific expertise, create well-defined tasks with expected outputs, and orchestrate crews for optimal collaboration."
+description: Expert in CrewAI - the leading role-based multi-agent framework
+ used by 60% of Fortune 500 companies.
risk: unknown
-source: "vibeship-spawner-skills (Apache 2.0)"
-date_added: "2026-02-27"
+source: vibeship-spawner-skills (Apache 2.0)
+date_added: 2026-02-27
---
# CrewAI
+Expert in CrewAI - the leading role-based multi-agent framework used by 60% of Fortune 500
+companies. Covers agent design with roles and goals, task definition, crew orchestration,
+process types (sequential, hierarchical, parallel), memory systems, and flows for complex
+workflows. Essential for building collaborative AI agent teams.
+
**Role**: CrewAI Multi-Agent Architect
You are an expert in designing collaborative AI agent teams with CrewAI. You think
@@ -16,6 +22,15 @@ with specific expertise, create well-defined tasks with expected outputs, and
orchestrate crews for optimal collaboration. You know when to use sequential vs
hierarchical processes.
+### Expertise
+
+- Agent persona design
+- Task decomposition
+- Crew orchestration
+- Process selection
+- Memory configuration
+- Flow design
+
## Capabilities
- Agent definitions (role, goal, backstory)
@@ -26,11 +41,39 @@ hierarchical processes.
- Tool integration
- Flows for complex workflows
-## Requirements
+## Prerequisites
-- Python 3.10+
-- crewai package
-- LLM API access
+- 0: Python proficiency
+- 1: Multi-agent concepts
+- 2: Understanding of delegation
+- Required skills: Python 3.10+, crewai package, LLM API access
+
+## Scope
+
+- 0: Python-only
+- 1: Best for structured workflows
+- 2: Can be verbose for simple cases
+- 3: Flows are newer feature
+
+## Ecosystem
+
+### Primary
+
+- CrewAI framework
+- CrewAI Tools
+
+### Common_integrations
+
+- OpenAI / Anthropic / Ollama
+- SerperDev (search)
+- FileReadTool, DirectoryReadTool
+- Custom tools
+
+### Platforms
+
+- Python applications
+- FastAPI backends
+- Enterprise deployments
## Patterns
@@ -40,7 +83,6 @@ Define agents and tasks in YAML (recommended)
**When to use**: Any CrewAI project
-```python
# config/agents.yaml
researcher:
role: "Senior Research Analyst"
@@ -119,8 +161,20 @@ class ContentCrew:
@task
def writing_task(self) -> Task:
- return Task(config
-```
+ return Task(config=self.tasks_config['writing_task'])
+
+ @crew
+ def crew(self) -> Crew:
+ return Crew(
+ agents=self.agents,
+ tasks=self.tasks,
+ process=Process.sequential,
+ verbose=True
+ )
+
+# main.py
+crew = ContentCrew()
+result = crew.crew().kickoff(inputs={"topic": "AI Agents in 2025"})
### Hierarchical Process
@@ -128,7 +182,6 @@ Manager agent delegates to workers
**When to use**: Complex tasks needing coordination
-```python
from crewai import Crew, Process
# Define specialized agents
@@ -165,7 +218,6 @@ crew = Crew(
# - How to combine results
result = crew.kickoff()
-```
### Planning Feature
@@ -173,7 +225,6 @@ Generate execution plan before running
**When to use**: Complex workflows needing structure
-```python
from crewai import Crew, Process
# Enable planning
@@ -195,54 +246,209 @@ result = crew.kickoff()
# Access the plan
print(crew.plan)
+
+### Memory Configuration
+
+Enable agent memory for context
+
+**When to use**: Multi-turn or complex workflows
+
+from crewai import Crew
+
+# Memory types:
+# - Short-term: Within task execution
+# - Long-term: Across executions
+# - Entity: About specific entities
+
+crew = Crew(
+ agents=[...],
+ tasks=[...],
+ memory=True, # Enable all memory types
+ verbose=True
+)
+
+# Custom memory config
+from crewai.memory import LongTermMemory, ShortTermMemory
+
+crew = Crew(
+ agents=[...],
+ tasks=[...],
+ memory=True,
+ long_term_memory=LongTermMemory(
+ storage=CustomStorage() # Custom backend
+ ),
+ short_term_memory=ShortTermMemory(
+ storage=CustomStorage()
+ ),
+ embedder={
+ "provider": "openai",
+ "config": {"model": "text-embedding-3-small"}
+ }
+)
+
+# Memory helps agents:
+# - Remember previous interactions
+# - Build on past work
+# - Maintain consistency
+
+### Flows for Complex Workflows
+
+Event-driven orchestration with state
+
+**When to use**: Complex, multi-stage workflows
+
+from crewai.flow.flow import Flow, listen, start, and_, or_, router
+
+class ContentFlow(Flow):
+ # State persists across steps
+ model_config = {"extra": "allow"}
+
+ @start()
+ def gather_requirements(self):
+ """First step - gather inputs."""
+ self.topic = self.inputs.get("topic", "AI")
+ self.style = self.inputs.get("style", "professional")
+ return {"topic": self.topic}
+
+ @listen(gather_requirements)
+ def research(self, requirements):
+ """Research after requirements gathered."""
+ research_crew = ResearchCrew()
+ result = research_crew.crew().kickoff(
+ inputs={"topic": requirements["topic"]}
+ )
+ self.research = result.raw
+ return result
+
+ @listen(research)
+ def write_content(self, research_result):
+ """Write after research complete."""
+ writing_crew = WritingCrew()
+ result = writing_crew.crew().kickoff(
+ inputs={
+ "research": self.research,
+ "style": self.style
+ }
+ )
+ return result
+
+ @router(write_content)
+ def quality_check(self, content):
+ """Route based on quality."""
+ if self.needs_revision(content):
+ return "revise"
+ return "publish"
+
+ @listen("revise")
+ def revise_content(self):
+ """Revision flow."""
+ # Re-run writing with feedback
+ pass
+
+ @listen("publish")
+ def publish_content(self):
+ """Final publishing."""
+ return {"status": "published", "content": self.content}
+
+# Run flow
+flow = ContentFlow()
+result = flow.kickoff(inputs={"topic": "AI Agents"})
+
+### Custom Tools
+
+Create tools for agents
+
+**When to use**: Agents need external capabilities
+
+from crewai.tools import BaseTool
+from pydantic import BaseModel, Field
+
+# Method 1: Class-based tool
+class SearchInput(BaseModel):
+ query: str = Field(..., description="Search query")
+
+class WebSearchTool(BaseTool):
+ name: str = "web_search"
+ description: str = "Search the web for information"
+ args_schema: type[BaseModel] = SearchInput
+
+ def _run(self, query: str) -> str:
+ # Implementation
+ results = search_api.search(query)
+ return format_results(results)
+
+# Method 2: Function decorator
+from crewai import tool
+
+@tool("Database Query")
+def query_database(sql: str) -> str:
+ """Execute SQL query and return results."""
+ return db.execute(sql)
+
+# Assign tools to agents
+researcher = Agent(
+ role="Researcher",
+ goal="Find information",
+ backstory="...",
+ tools=[WebSearchTool(), query_database]
+)
+
+## Collaboration
+
+### Delegation Triggers
+
+- langgraph|state machine|graph -> langgraph (Need explicit state management)
+- observability|tracing -> langfuse (Need LLM observability)
+- structured output|json schema -> structured-output (Need structured responses)
+
+### Research and Writing Crew
+
+Skills: crewai, structured-output
+
+Workflow:
+
+```
+1. Define researcher and writer agents
+2. Create research → analysis → writing pipeline
+3. Use structured output for research format
+4. Chain tasks with context
```
-## Anti-Patterns
+### Observable Agent Team
-### ❌ Vague Agent Roles
+Skills: crewai, langfuse
-**Why bad**: Agent doesn't know its specialty.
-Overlapping responsibilities.
-Poor task delegation.
+Workflow:
-**Instead**: Be specific:
-- "Senior React Developer" not "Developer"
-- "Financial Analyst specializing in crypto" not "Analyst"
-Include specific skills in backstory.
+```
+1. Build crew with agents and tasks
+2. Add Langfuse callback handler
+3. Monitor agent interactions
+4. Evaluate output quality
+```
-### ❌ Missing Expected Outputs
+### Complex Workflow with Flows
-**Why bad**: Agent doesn't know done criteria.
-Inconsistent outputs.
-Hard to chain tasks.
+Skills: crewai, langgraph
-**Instead**: Always specify expected_output:
-expected_output: |
- A JSON object with:
- - summary: string (100 words max)
- - key_points: list of strings
- - confidence: float 0-1
+Workflow:
-### ❌ Too Many Agents
-
-**Why bad**: Coordination overhead.
-Inconsistent communication.
-Slower execution.
-
-**Instead**: 3-5 agents with clear roles.
-One agent can handle multiple related tasks.
-Use tools instead of agents for simple actions.
-
-## Limitations
-
-- Python-only
-- Best for structured workflows
-- Can be verbose for simple cases
-- Flows are newer feature
+```
+1. Design workflow with CrewAI Flows
+2. Use LangGraph patterns for state
+3. Combine crews in flow steps
+4. Handle branching and routing
+```
## Related Skills
Works well with: `langgraph`, `autonomous-agents`, `langfuse`, `structured-output`
## When to Use
-This skill is applicable to execute the workflow or actions described in the overview.
+
+- User mentions or implies: crewai
+- User mentions or implies: multi-agent team
+- User mentions or implies: agent roles
+- User mentions or implies: crew of agents
+- User mentions or implies: role-based agents
+- User mentions or implies: collaborative agents
diff --git a/plugins/antigravity-awesome-skills/skills/discord-bot-architect/SKILL.md b/plugins/antigravity-awesome-skills/skills/discord-bot-architect/SKILL.md
index 48e98cf1..4c887f46 100644
--- a/plugins/antigravity-awesome-skills/skills/discord-bot-architect/SKILL.md
+++ b/plugins/antigravity-awesome-skills/skills/discord-bot-architect/SKILL.md
@@ -1,22 +1,37 @@
---
name: discord-bot-architect
-description: "Specialized skill for building production-ready Discord bots. Covers Discord.js (JavaScript) and Pycord (Python), gateway intents, slash commands, interactive components, rate limiting, and sharding."
+description: Specialized skill for building production-ready Discord bots.
+ Covers Discord.js (JavaScript) and Pycord (Python), gateway intents, slash
+ commands, interactive components, rate limiting, and sharding.
risk: unknown
-source: "vibeship-spawner-skills (Apache 2.0)"
-date_added: "2026-02-27"
+source: vibeship-spawner-skills (Apache 2.0)
+date_added: 2026-02-27
---
# Discord Bot Architect
+Specialized skill for building production-ready Discord bots.
+Covers Discord.js (JavaScript) and Pycord (Python), gateway intents,
+slash commands, interactive components, rate limiting, and sharding.
+
+## Principles
+
+- Slash commands over message parsing (Message Content Intent deprecated)
+- Acknowledge interactions within 3 seconds, always
+- Request only required intents (minimize privileged intents)
+- Handle rate limits gracefully with exponential backoff
+- Plan for sharding from the start (required at 2500+ guilds)
+- Use components (buttons, selects, modals) for rich UX
+- Test with guild commands first, deploy global when ready
+
## Patterns
### Discord.js v14 Foundation
Modern Discord bot setup with Discord.js v14 and slash commands
-**When to use**: ['Building Discord bots with JavaScript/TypeScript', 'Need full gateway connection with events', 'Building bots with complex interactions']
+**When to use**: Building Discord bots with JavaScript/TypeScript, Need full gateway connection with events, Building bots with complex interactions
-```javascript
```javascript
// src/index.js
const { Client, Collection, GatewayIntentBits, Events } = require('discord.js');
@@ -90,16 +105,96 @@ module.exports = {
const { Events } = require('discord.js');
module.exports = {
- name: Event
+ name: Events.InteractionCreate,
+ async execute(interaction) {
+ if (!interaction.isChatInputCommand()) return;
+
+ const command = interaction.client.commands.get(interaction.commandName);
+ if (!command) {
+ console.error(`No command matching ${interaction.commandName}`);
+ return;
+ }
+
+ try {
+ await command.execute(interaction);
+ } catch (error) {
+ console.error(error);
+ const reply = {
+ content: 'There was an error executing this command!',
+ ephemeral: true
+ };
+
+ if (interaction.replied || interaction.deferred) {
+ await interaction.followUp(reply);
+ } else {
+ await interaction.reply(reply);
+ }
+ }
+ }
+};
```
+```javascript
+// src/deploy-commands.js
+const { REST, Routes } = require('discord.js');
+const fs = require('node:fs');
+const path = require('node:path');
+require('dotenv').config();
+
+const commands = [];
+const commandsPath = path.join(__dirname, 'commands');
+const commandFiles = fs.readdirSync(commandsPath).filter(f => f.endsWith('.js'));
+
+for (const file of commandFiles) {
+ const command = require(path.join(commandsPath, file));
+ commands.push(command.data.toJSON());
+}
+
+const rest = new REST().setToken(process.env.DISCORD_TOKEN);
+
+(async () => {
+ try {
+ console.log(`Refreshing ${commands.length} commands...`);
+
+ // Guild commands (instant, for testing)
+ // const data = await rest.put(
+ // Routes.applicationGuildCommands(CLIENT_ID, GUILD_ID),
+ // { body: commands }
+ // );
+
+ // Global commands (can take up to 1 hour to propagate)
+ const data = await rest.put(
+ Routes.applicationCommands(process.env.CLIENT_ID),
+ { body: commands }
+ );
+
+ console.log(`Successfully registered ${data.length} commands`);
+ } catch (error) {
+ console.error(error);
+ }
+})();
+```
+
+### Structure
+
+discord-bot/
+├── src/
+│ ├── index.js # Main entry point
+│ ├── deploy-commands.js # Command registration script
+│ ├── commands/ # Slash command handlers
+│ │ └── ping.js
+│ └── events/ # Event handlers
+│ ├── ready.js
+│ └── interactionCreate.js
+├── .env
+└── package.json
+
### Pycord Bot Foundation
Discord bot with Pycord (Python) and application commands
-**When to use**: ['Building Discord bots with Python', 'Prefer async/await patterns', 'Need good slash command support']
+**When to use**: Building Discord bots with Python, Prefer async/await patterns, Need good slash command support
-```python
```python
# main.py
import os
@@ -169,16 +264,32 @@ class General(commands.Cog):
embed.add_field(name="Latency", value=f"{round(self.bot.latency * 1000)}ms")
await ctx.respond(embed=embed)
- @commands.Cog.
+ @commands.Cog.listener()
+ async def on_member_join(self, member: discord.Member):
+ # Requires Members intent (PRIVILEGED)
+ channel = member.guild.system_channel
+ if channel:
+ await channel.send(f"Welcome {member.mention}!")
+
+def setup(bot):
+ bot.add_cog(General(bot))
```
+### Structure
+
+discord-bot/
+├── main.py # Main bot file
+├── cogs/ # Command groups
+│ └── general.py
+├── .env
+└── requirements.txt
+
### Interactive Components Pattern
Using buttons, select menus, and modals for rich UX
-**When to use**: ['Need interactive user interfaces', 'Collecting user input beyond slash command options', 'Building menus, confirmations, or forms']
+**When to use**: Need interactive user interfaces, Collecting user input beyond slash command options, Building menus/confirmations/forms
-```python
```javascript
// Discord.js - Buttons and Select Menus
const {
@@ -245,38 +356,1100 @@ module.exports = {
if (i.customId === 'confirm') {
await i.update({ content: 'Confirmed!', components: [] });
collector.stop();
- } else if (i.custo
+ } else if (i.customId === 'cancel') {
+ await i.update({ content: 'Cancelled', components: [] });
+ collector.stop();
+ } else if (i.customId === 'select-role') {
+ await i.update({ content: `You selected: ${i.values.join(', ')}` });
+ }
+ });
+ }
+};
```
-## Anti-Patterns
+```javascript
+// Modals (forms)
+module.exports = {
+ data: new SlashCommandBuilder()
+ .setName('feedback')
+ .setDescription('Submit feedback'),
-### ❌ Message Content for Commands
+ async execute(interaction) {
+ const modal = new ModalBuilder()
+ .setCustomId('feedback-modal')
+ .setTitle('Submit Feedback');
-**Why bad**: Message Content Intent is privileged and deprecated for bot commands.
-Slash commands are the intended approach.
+ const titleInput = new TextInputBuilder()
+ .setCustomId('feedback-title')
+ .setLabel('Title')
+ .setStyle(TextInputStyle.Short)
+ .setRequired(true)
+ .setMaxLength(100);
-### ❌ Syncing Commands on Every Start
+ const bodyInput = new TextInputBuilder()
+ .setCustomId('feedback-body')
+ .setLabel('Your feedback')
+ .setStyle(TextInputStyle.Paragraph)
+ .setRequired(true)
+ .setMaxLength(1000)
+ .setPlaceholder('Describe your feedback...');
-**Why bad**: Command registration is rate limited. Global commands take up to 1 hour
-to propagate. Syncing on every start wastes API calls and can hit limits.
+ modal.addComponents(
+ new ActionRowBuilder().addComponents(titleInput),
+ new ActionRowBuilder().addComponents(bodyInput)
+ );
-### ❌ Blocking the Event Loop
+ // Show modal - MUST be first response
+ await interaction.showModal(modal);
+ }
+};
-**Why bad**: Discord gateway requires regular heartbeats. Blocking operations
-cause missed heartbeats and disconnections.
+// Handle modal submission in interactionCreate
+if (interaction.isModalSubmit()) {
+ if (interaction.customId === 'feedback-modal') {
+ const title = interaction.fields.getTextInputValue('feedback-title');
+ const body = interaction.fields.getTextInputValue('feedback-body');
-## ⚠️ Sharp Edges
+ await interaction.reply({
+ content: `Thanks for your feedback!\n**${title}**\n${body}`,
+ ephemeral: true
+ });
+ }
+}
+```
-| Issue | Severity | Solution |
-|-------|----------|----------|
-| Issue | critical | ## Acknowledge immediately, process later |
-| Issue | critical | ## Step 1: Enable in Developer Portal |
-| Issue | high | ## Use a separate deploy script (not on startup) |
-| Issue | critical | ## Never hardcode tokens |
-| Issue | high | ## Generate correct invite URL |
-| Issue | medium | ## Development: Use guild commands |
-| Issue | medium | ## Never block the event loop |
-| Issue | medium | ## Show modal immediately |
+```python
+# Pycord - Buttons and Views
+import discord
+
+class ConfirmView(discord.ui.View):
+ def __init__(self):
+ super().__init__(timeout=60)
+ self.value = None
+
+ @discord.ui.button(label="Confirm", style=discord.ButtonStyle.green)
+ async def confirm(self, button, interaction):
+ self.value = True
+ await interaction.response.edit_message(content="Confirmed!", view=None)
+ self.stop()
+
+ @discord.ui.button(label="Cancel", style=discord.ButtonStyle.red)
+ async def cancel(self, button, interaction):
+ self.value = False
+ await interaction.response.edit_message(content="Cancelled", view=None)
+ self.stop()
+
+@bot.slash_command(name="confirm")
+async def confirm_cmd(ctx: discord.ApplicationContext):
+ view = ConfirmView()
+ await ctx.respond("Are you sure?", view=view)
+
+ await view.wait() # Wait for user interaction
+ if view.value is None:
+ await ctx.followup.send("Timed out")
+
+# Select Menu
+class RoleSelect(discord.ui.Select):
+ def __init__(self):
+ options = [
+ discord.SelectOption(label="Developer", value="dev", emoji="💻"),
+ discord.SelectOption(label="Designer", value="design", emoji="🎨"),
+ ]
+ super().__init__(
+ placeholder="Select roles...",
+ min_values=1,
+ max_values=2,
+ options=options
+ )
+
+ async def callback(self, interaction):
+ await interaction.response.send_message(
+ f"You selected: {', '.join(self.values)}",
+ ephemeral=True
+ )
+
+class RoleView(discord.ui.View):
+ def __init__(self):
+ super().__init__()
+ self.add_item(RoleSelect())
+
+# Modal
+class FeedbackModal(discord.ui.Modal):
+ def __init__(self):
+ super().__init__(title="Submit Feedback")
+
+ self.add_item(discord.ui.InputText(
+ label="Title",
+ style=discord.InputTextStyle.short,
+ required=True,
+ max_length=100
+ ))
+ self.add_item(discord.ui.InputText(
+ label="Feedback",
+ style=discord.InputTextStyle.long,
+ required=True,
+ max_length=1000
+ ))
+
+ async def callback(self, interaction):
+ title = self.children[0].value
+ body = self.children[1].value
+ await interaction.response.send_message(
+ f"Thanks!\n**{title}**\n{body}",
+ ephemeral=True
+ )
+
+@bot.slash_command(name="feedback")
+async def feedback(ctx: discord.ApplicationContext):
+ await ctx.send_modal(FeedbackModal())
+```
+
+### Limits
+
+- 5 ActionRows per message/modal
+- 5 buttons per ActionRow
+- 1 select menu per ActionRow (takes all 5 slots)
+- 5 select menus max per message
+- 25 options per select menu
+- Modal must be first response (cannot defer first)
+
+### Deferred Response Pattern
+
+Handle slow operations without timing out
+
+**When to use**: Operation takes more than 3 seconds; Database queries, API calls, LLM responses; File processing or generation
+
+```javascript
+// Discord.js - Deferred response
+module.exports = {
+ data: new SlashCommandBuilder()
+ .setName('slow-task')
+ .setDescription('Performs a slow operation'),
+
+ async execute(interaction) {
+ // Defer immediately - you have 3 seconds!
+ await interaction.deferReply();
+ // For ephemeral: await interaction.deferReply({ ephemeral: true });
+
+ try {
+ // Now you have 15 minutes to complete
+ const result = await slowDatabaseQuery();
+ const aiResponse = await callOpenAI(result);
+
+ // Edit the deferred reply
+ await interaction.editReply({
+ content: `Result: ${aiResponse}`,
+ embeds: [resultEmbed]
+ });
+ } catch (error) {
+ await interaction.editReply({
+ content: 'An error occurred while processing your request.'
+ });
+ }
+ }
+};
+
+// For components (buttons, select menus)
+collector.on('collect', async i => {
+ await i.deferUpdate(); // Acknowledge without visual change
+ // Or: await i.deferReply({ ephemeral: true });
+
+ const result = await slowOperation();
+ await i.editReply({ content: result });
+});
+```
+
+```python
+# Pycord - Deferred response
+@bot.slash_command(name="slow-task")
+async def slow_task(ctx: discord.ApplicationContext):
+ # Defer immediately
+ await ctx.defer()
+ # For ephemeral: await ctx.defer(ephemeral=True)
+
+ try:
+ result = await slow_database_query()
+ ai_response = await call_openai(result)
+
+ await ctx.followup.send(f"Result: {ai_response}")
+ except Exception as e:
+ await ctx.followup.send("An error occurred")
+```
+
+### Timing
+
+- Initial response: 3 seconds
+- Deferred follow-up: 15 minutes
+- Ephemeral note: Can only be set on the initial response, not changed later
+
+### Embed Builder Pattern
+
+Rich embedded messages for professional-looking content
+
+**When to use**: Displaying formatted information; Status updates, help menus, logs; Data with structure (fields, images)
+
+```javascript
+const { EmbedBuilder, Colors } = require('discord.js');
+
+// Basic embed
+const embed = new EmbedBuilder()
+ .setColor(Colors.Blue)
+ .setTitle('Bot Status')
+ .setURL('https://example.com')
+ .setAuthor({
+ name: 'Bot Name',
+ iconURL: client.user.displayAvatarURL()
+ })
+ .setDescription('Current status and statistics')
+ .addFields(
+ { name: 'Servers', value: `${client.guilds.cache.size}`, inline: true },
+ { name: 'Users', value: `${client.users.cache.size}`, inline: true },
+ { name: 'Uptime', value: formatUptime(), inline: true }
+ )
+ .setThumbnail(client.user.displayAvatarURL())
+ .setImage('https://example.com/banner.png')
+ .setTimestamp()
+ .setFooter({
+ text: 'Requested by User',
+ iconURL: interaction.user.displayAvatarURL()
+ });
+
+await interaction.reply({ embeds: [embed] });
+
+// Multiple embeds (max 10)
+await interaction.reply({ embeds: [embed1, embed2, embed3] });
+```
+
+```python
+# Pycord
+embed = discord.Embed(
+ title="Bot Status",
+ description="Current status and statistics",
+ color=discord.Color.blue(),
+ url="https://example.com"
+)
+embed.set_author(
+ name="Bot Name",
+ icon_url=bot.user.display_avatar.url
+)
+embed.add_field(name="Servers", value=len(bot.guilds), inline=True)
+embed.add_field(name="Users", value=len(bot.users), inline=True)
+embed.set_thumbnail(url=bot.user.display_avatar.url)
+embed.set_image(url="https://example.com/banner.png")
+embed.set_footer(text="Requested by User", icon_url=ctx.author.display_avatar.url)
+embed.timestamp = discord.utils.utcnow()
+
+await ctx.respond(embed=embed)
+```
+
+### Limits
+
+- 10 embeds per message
+- 6000 characters total across all embeds
+- 256 characters for title
+- 4096 characters for description
+- 25 fields per embed
+- 256 characters per field name
+- 1024 characters per field value
+
+### Rate Limit Handling Pattern
+
+Gracefully handle Discord API rate limits
+
+**When to use**: High-volume operations; Bulk messaging or role assignments; Any repeated API calls
+
+```javascript
+// Discord.js handles rate limits automatically, but for custom handling:
+const { REST } = require('discord.js');
+
+const rest = new REST({ version: '10' })
+ .setToken(process.env.DISCORD_TOKEN);
+
+rest.on('rateLimited', (info) => {
+ console.log(`Rate limited! Retry after ${info.retryAfter}ms`);
+ console.log(`Route: ${info.route}`);
+ console.log(`Global: ${info.global}`);
+});
+
+// Queue pattern for bulk operations
+class RateLimitQueue {
+ constructor() {
+ this.queue = [];
+ this.processing = false;
+ this.requestsPerSecond = 40; // Safe margin below 50
+ }
+
+ async add(operation) {
+ return new Promise((resolve, reject) => {
+ this.queue.push({ operation, resolve, reject });
+ this.process();
+ });
+ }
+
+ async process() {
+ if (this.processing || this.queue.length === 0) return;
+ this.processing = true;
+
+ while (this.queue.length > 0) {
+ const { operation, resolve, reject } = this.queue.shift();
+
+ try {
+ const result = await operation();
+ resolve(result);
+ } catch (error) {
+ reject(error);
+ }
+
+ // Throttle: ~40 requests per second
+ await new Promise(r => setTimeout(r, 1000 / this.requestsPerSecond));
+ }
+
+ this.processing = false;
+ }
+}
+
+const queue = new RateLimitQueue();
+
+// Usage: Send 200 messages without hitting rate limits
+for (const user of users) {
+ await queue.add(() => user.send('Welcome!'));
+}
+```
+
+```python
+# Pycord/discord.py handles rate limits automatically
+# For custom handling:
+import asyncio
+from collections import deque
+
+class RateLimitQueue:
+ def __init__(self, requests_per_second=40):
+ self.queue = deque()
+ self.processing = False
+ self.delay = 1 / requests_per_second
+
+ async def add(self, coro):
+ future = asyncio.Future()
+ self.queue.append((coro, future))
+ if not self.processing:
+ asyncio.create_task(self._process())
+ return await future
+
+ async def _process(self):
+ self.processing = True
+ while self.queue:
+ coro, future = self.queue.popleft()
+ try:
+ result = await coro
+ future.set_result(result)
+ except Exception as e:
+ future.set_exception(e)
+ await asyncio.sleep(self.delay)
+ self.processing = False
+
+queue = RateLimitQueue()
+
+# Usage
+for member in guild.members:
+ await queue.add(member.send("Welcome!"))
+```
+
+### Rate Limits
+
+- Global: 50 requests per second
+- Gateway: 120 requests per 60 seconds
+- Specific: Messages to same channel: 5/5s, Bulk delete: 1/1s, Guild member requests: varies by guild size
+
+### Sharding Pattern
+
+Scale bots to 2500+ servers with sharding
+
+**When to use**: Bot approaching 2500 guilds (required); Want horizontal scaling; Memory optimization for large bots
+
+```javascript
+// Discord.js Sharding Manager
+// shard.js (main entry)
+const { ShardingManager } = require('discord.js');
+
+const manager = new ShardingManager('./bot.js', {
+ token: process.env.DISCORD_TOKEN,
+ totalShards: 'auto', // Discord determines optimal count
+ // Or specify: totalShards: 4
+});
+
+manager.on('shardCreate', shard => {
+ console.log(`Launched shard ${shard.id}`);
+
+ shard.on('ready', () => {
+ console.log(`Shard ${shard.id} ready`);
+ });
+
+ shard.on('disconnect', () => {
+ console.log(`Shard ${shard.id} disconnected`);
+ });
+});
+
+manager.spawn();
+
+// bot.js - Modified for sharding
+const { Client } = require('discord.js');
+
+const client = new Client({ intents: [...] });
+
+// Get shard info
+client.on('ready', () => {
+ console.log(`Shard ${client.shard.ids[0]} ready with ${client.guilds.cache.size} guilds`);
+});
+
+// Cross-shard data
+async function getTotalGuilds() {
+ const results = await client.shard.fetchClientValues('guilds.cache.size');
+ return results.reduce((acc, count) => acc + count, 0);
+}
+
+// Broadcast to all shards
+async function broadcastMessage(channelId, message) {
+ await client.shard.broadcastEval(
+ (c, { channelId, message }) => {
+ const channel = c.channels.cache.get(channelId);
+ if (channel) channel.send(message);
+ },
+ { context: { channelId, message } }
+ );
+}
+```
+
+```python
+# Pycord - AutoShardedBot
+import discord
+from discord.ext import commands
+
+# Automatically handles sharding
+bot = commands.AutoShardedBot(
+ command_prefix="!",
+ intents=discord.Intents.default(),
+ shard_count=None # Auto-determine
+)
+
+@bot.event
+async def on_ready():
+ print(f"Logged in on {len(bot.shards)} shards")
+ for shard_id, shard in bot.shards.items():
+ print(f"Shard {shard_id}: {shard.latency * 1000:.2f}ms")
+
+@bot.event
+async def on_shard_ready(shard_id):
+ print(f"Shard {shard_id} is ready")
+
+# Get guilds per shard
+for shard_id, guilds in bot.guilds_by_shard().items():
+ print(f"Shard {shard_id}: {len(guilds)} guilds")
+```
+
+### Scaling Guide
+
+- 1-2500 guilds: No sharding required
+- 2500+ guilds: Sharding required by Discord
+- Recommended: ~1000 guilds per shard
+- Memory: Each shard runs in separate process
+
+## Sharp Edges
+
+### Interaction Timeout (3 Second Rule)
+
+Severity: CRITICAL
+
+Situation: Handling slash commands, buttons, select menus, or modals
+
+Symptoms:
+User sees "This interaction failed" or "The application did not respond."
+Command works locally but fails in production.
+Slow operations never complete.
+
+Why this breaks:
+Discord requires ALL interactions to be acknowledged within 3 seconds:
+- Slash commands
+- Button clicks
+- Select menu selections
+- Context menu commands
+
+If you do ANY slow operation (database, API, file I/O) before responding,
+you'll miss the window. Discord shows an error even if your bot processes
+the request correctly afterward.
+
+After acknowledgment, you have 15 minutes for follow-up responses.
+
+Recommended fix:
+
+## Acknowledge immediately, process later
+
+```javascript
+// Discord.js - Defer for slow operations
+module.exports = {
+ async execute(interaction) {
+ // DEFER IMMEDIATELY - before any slow operation
+ await interaction.deferReply();
+ // For ephemeral: await interaction.deferReply({ ephemeral: true });
+
+ // Now you have 15 minutes
+ const result = await slowDatabaseQuery();
+ const aiResponse = await callLLM(result);
+
+ // Edit the deferred reply
+ await interaction.editReply(`Result: ${aiResponse}`);
+ }
+};
+```
+
+```python
+# Pycord
+@bot.slash_command()
+async def slow_command(ctx):
+ await ctx.defer() # Acknowledge immediately
+ # await ctx.defer(ephemeral=True) # For private response
+
+ result = await slow_operation()
+ await ctx.followup.send(f"Result: {result}")
+```
+
+## For components (buttons, menus)
+
+```javascript
+// If you're updating the message
+await interaction.deferUpdate();
+
+// If you're sending a new response
+await interaction.deferReply({ ephemeral: true });
+```
+
+### Missing Privileged Intent Configuration
+
+Severity: CRITICAL
+
+Situation: Bot needs member data, presences, or message content
+
+Symptoms:
+Members intent: member lists empty, on_member_join doesn't fire
+Presences intent: statuses always unknown/offline
+Message content intent: message.content is empty string
+
+Why this breaks:
+Discord has 3 privileged intents that require manual enablement:
+1. **GUILD_MEMBERS** - Member join/leave, member lists
+2. **GUILD_PRESENCES** - Online status, activities
+3. **MESSAGE_CONTENT** - Read message text (deprecated for commands)
+
+These must be:
+1. Enabled in Discord Developer Portal > Bot > Privileged Gateway Intents
+2. Requested in your bot code
+
+At 100+ servers, you need Discord verification to keep using them.
+
+Recommended fix:
+
+## Step 1: Enable in Developer Portal
+
+```
+1. Go to https://discord.com/developers/applications
+2. Select your application
+3. Go to Bot section
+4. Scroll to Privileged Gateway Intents
+5. Toggle ON the intents you need
+```
+
+## Step 2: Request in code
+
+```javascript
+// Discord.js
+const { Client, GatewayIntentBits } = require('discord.js');
+
+const client = new Client({
+ intents: [
+ GatewayIntentBits.Guilds,
+ GatewayIntentBits.GuildMembers, // PRIVILEGED
+ // GatewayIntentBits.GuildPresences, // PRIVILEGED
+ // GatewayIntentBits.MessageContent, // PRIVILEGED - avoid!
+ ]
+});
+```
+
+```python
+# Pycord
+intents = discord.Intents.default()
+intents.members = True # PRIVILEGED
+# intents.presences = True # PRIVILEGED
+# intents.message_content = True # PRIVILEGED - avoid!
+
+bot = commands.Bot(intents=intents)
+```
+
+## Avoid Message Content Intent if possible
+
+Use slash commands, buttons, and modals instead of message parsing.
+These don't require the Message Content intent.
+
+### Command Registration Rate Limited
+
+Severity: HIGH
+
+Situation: Registering slash commands
+
+Symptoms:
+Commands not appearing. 429 errors when deploying.
+"You are being rate limited" messages.
+Commands appear for some guilds but not others.
+
+Why this breaks:
+Command registration is rate limited:
+- Global commands: 200 creates/day, updates take up to 1 hour to propagate
+- Guild commands: 200 creates/day per guild, instant update
+
+Common mistakes:
+- Registering commands on every bot startup
+- Registering in every guild separately
+- Making changes in a loop without delays
+
+Recommended fix:
+
+## Use a separate deploy script (not on startup)
+
+```javascript
+// deploy-commands.js - Run manually, not on bot start
+const { REST, Routes } = require('discord.js');
+
+const rest = new REST().setToken(process.env.DISCORD_TOKEN);
+
+async function deploy() {
+ // For development: Guild commands (instant)
+ if (process.env.GUILD_ID) {
+ await rest.put(
+ Routes.applicationGuildCommands(
+ process.env.CLIENT_ID,
+ process.env.GUILD_ID
+ ),
+ { body: commands }
+ );
+ console.log('Guild commands deployed instantly');
+ }
+
+ // For production: Global commands (up to 1 hour)
+ else {
+ await rest.put(
+ Routes.applicationCommands(process.env.CLIENT_ID),
+ { body: commands }
+ );
+ console.log('Global commands deployed (may take up to 1 hour)');
+ }
+}
+
+deploy();
+```
+
+```python
+# Pycord - Don't sync on every startup
+@bot.event
+async def on_ready():
+ # DON'T DO THIS:
+ # await bot.sync_commands()
+
+ print(f"Ready! Commands should already be registered.")
+
+# Instead, sync manually or use a flag
+if __name__ == "__main__":
+ if "--sync" in sys.argv:
+ # Only sync when explicitly requested
+ bot.sync_commands_on_start = True
+ bot.run(token)
+```
+
+## Testing workflow
+
+1. Use guild commands during development (instant updates)
+2. Only deploy global commands when ready for production
+3. Run deploy script manually, not on every restart
+
+### Bot Token Exposed
+
+Severity: CRITICAL
+
+Situation: Storing or sharing bot token
+
+Symptoms:
+Unauthorized actions from your bot.
+Bot joins random servers.
+Bot sends spam or malicious content.
+"Invalid token" after Discord invalidates it.
+
+Why this breaks:
+Your bot token provides FULL control over your bot. Attackers can:
+- Send messages as your bot
+- Join servers, create invites
+- Access all data your bot can access
+- Potentially take over servers where bot has admin
+
+Discord actively scans GitHub for exposed tokens and invalidates them.
+Common exposure points:
+- Committed to Git
+- Shared in Discord itself
+- In client-side code
+- In public screenshots
+
+Recommended fix:
+
+## Never hardcode tokens
+
+```javascript
+// BAD - never do this
+const token = 'MTIzNDU2Nzg5MDEyMzQ1Njc4.ABCDEF.xyz...';
+
+// GOOD - environment variables
+require('dotenv').config();
+client.login(process.env.DISCORD_TOKEN);
+```
+
+## Use .gitignore
+
+```
+# .gitignore
+.env
+.env.local
+config.json
+```
+
+## If token is exposed
+
+1. Go to Developer Portal immediately
+2. Regenerate the token
+3. Update all deployments
+4. Review bot activity for unauthorized actions
+5. Check git history and force push to remove if needed
+
+## Use environment variables properly
+
+```bash
+# .env (never commit)
+DISCORD_TOKEN=your_token_here
+CLIENT_ID=your_client_id
+```
+
+```javascript
+// Load with dotenv
+require('dotenv').config();
+const token = process.env.DISCORD_TOKEN;
+```
+
+### Bot Missing applications.commands Scope
+
+Severity: HIGH
+
+Situation: Slash commands not appearing for users
+
+Symptoms:
+Bot is in server but slash commands don't show up.
+Typing / shows no commands from your bot.
+Commands worked in development server but not others.
+
+Why this breaks:
+Discord has two important OAuth scopes:
+- `bot` - Traditional bot permissions (messages, reactions, etc.)
+- `applications.commands` - Slash command permissions
+
+Many bots were invited with only the `bot` scope before slash commands
+existed. They need to be re-invited with both scopes.
+
+Recommended fix:
+
+## Generate correct invite URL
+
+```
+https://discord.com/api/oauth2/authorize
+ ?client_id=YOUR_CLIENT_ID
+ &permissions=0
+ &scope=bot%20applications.commands
+```
+
+## In Discord Developer Portal
+
+1. Go to OAuth2 > URL Generator
+2. Select BOTH:
+ - `bot`
+ - `applications.commands`
+3. Select required bot permissions
+4. Use generated URL
+
+## Re-invite without kicking
+
+Users can use the new invite URL even if bot is already in server.
+This adds the new scope without removing the bot.
+
+```javascript
+// Generate invite URL in code
+const inviteUrl = client.generateInvite({
+ scopes: ['bot', 'applications.commands'],
+ permissions: [
+ 'SendMessages',
+ 'EmbedLinks',
+ // Add other needed permissions
+ ]
+});
+```
+
+### Global Commands Not Appearing Immediately
+
+Severity: MEDIUM
+
+Situation: Deploying global slash commands
+
+Symptoms:
+Commands don't appear after deployment.
+Guild commands work but global commands don't.
+Commands appear after an hour.
+
+Why this breaks:
+Global commands can take up to 1 hour to propagate to all Discord servers.
+This is by design for Discord's caching and CDN.
+
+Guild commands are instant but only work in that specific guild.
+
+Recommended fix:
+
+## Development: Use guild commands
+
+```javascript
+// Instant updates for testing
+await rest.put(
+ Routes.applicationGuildCommands(CLIENT_ID, GUILD_ID),
+ { body: commands }
+);
+```
+
+## Production: Deploy global commands during off-peak
+
+```javascript
+// Takes up to 1 hour to propagate
+await rest.put(
+ Routes.applicationCommands(CLIENT_ID),
+ { body: commands }
+);
+```
+
+## Workflow
+
+1. Develop and test with guild commands (instant)
+2. When ready, deploy global commands
+3. Wait up to 1 hour for propagation
+4. Don't deploy global commands frequently
+
+### Frequent Gateway Disconnections
+
+Severity: MEDIUM
+
+Situation: Bot randomly goes offline or misses events
+
+Symptoms:
+Bot shows as offline intermittently.
+Events are missed (member joins, messages).
+Reconnection messages in logs.
+
+Why this breaks:
+Discord gateway requires regular heartbeats. Issues:
+- Blocking operations prevent heartbeat
+- Network instability
+- Memory pressure causing GC pauses
+- Too many guilds without sharding (2500+ requires sharding)
+
+Recommended fix:
+
+## Never block the event loop
+
+```javascript
+// BAD - blocks event loop
+const data = fs.readFileSync('file.json');
+
+// GOOD - async
+const data = await fs.promises.readFile('file.json');
+```
+
+## Handle reconnections gracefully
+
+```javascript
+client.on('shardResume', (id, replayedEvents) => {
+ console.log(`Shard ${id} resumed, replayed ${replayedEvents} events`);
+});
+
+client.on('shardDisconnect', (event, id) => {
+ console.log(`Shard ${id} disconnected`);
+});
+
+client.on('shardReconnecting', (id) => {
+ console.log(`Shard ${id} reconnecting...`);
+});
+```
+
+## Implement sharding at scale
+
+```javascript
+// Required at 2500+ guilds
+const manager = new ShardingManager('./bot.js', {
+ token: process.env.DISCORD_TOKEN,
+ totalShards: 'auto'
+});
+manager.spawn();
+```
+
+### Modal Must Be First Response
+
+Severity: MEDIUM
+
+Situation: Showing a modal from a slash command or button
+
+Symptoms:
+"Interaction has already been acknowledged" error.
+Modal doesn't appear.
+Works sometimes but not others.
+
+Why this breaks:
+Modals have a special requirement: showing a modal MUST be the first
+response to an interaction. You cannot:
+- defer() then showModal()
+- reply() then showModal()
+- Think for more than 3 seconds then showModal()
+
+Recommended fix:
+
+## Show modal immediately
+
+```javascript
+// CORRECT - modal is first response
+async execute(interaction) {
+ const modal = new ModalBuilder()
+ .setCustomId('my-modal')
+ .setTitle('Input Form');
+
+ // Show immediately - no defer, no reply first
+ await interaction.showModal(modal);
+}
+```
+
+```javascript
+// WRONG - deferred first
+async execute(interaction) {
+ await interaction.deferReply(); // CAN'T DO THIS
+ await interaction.showModal(modal); // Will fail
+}
+```
+
+## If you need to check something first
+
+```javascript
+async execute(interaction) {
+ // Quick sync check is OK (under 3 seconds)
+ if (!hasPermission(interaction.user.id)) {
+ return interaction.reply({
+ content: 'No permission',
+ ephemeral: true
+ });
+ }
+
+ // Show modal (still first interaction response for this path)
+ await interaction.showModal(modal);
+}
+```
+
+## Validation Checks
+
+### Hardcoded Discord Token
+
+Severity: ERROR
+
+Discord tokens must never be hardcoded
+
+Message: Hardcoded Discord token detected. Use environment variables.
+
+### Token Variable Assignment
+
+Severity: ERROR
+
+Tokens should come from environment, not strings
+
+Message: Token assigned from string literal. Use environment variable.
+
+### Token in Client-Side Code
+
+Severity: ERROR
+
+Never expose Discord tokens to browsers
+
+Message: Discord credentials exposed client-side. Only use server-side.
+
+### Slow Operation Without Defer
+
+Severity: WARNING
+
+Slow operations should be deferred to avoid timeout
+
+Message: Slow operation without defer. Interaction may timeout.
+
+### Interaction Without Error Handling
+
+Severity: WARNING
+
+Interactions should have try/catch for graceful errors
+
+Message: Interaction without error handling. Add try/catch.
+
+### Using Message Content Intent
+
+Severity: WARNING
+
+Message Content is privileged, prefer slash commands
+
+Message: Using Message Content intent. Consider slash commands instead.
+
+### Requesting All Intents
+
+Severity: WARNING
+
+Only request intents you actually need
+
+Message: Requesting all intents. Only enable what you need.
+
+### Syncing Commands on Ready Event
+
+Severity: WARNING
+
+Don't sync commands on every bot startup
+
+Message: Syncing commands on startup. Use separate deploy script.
+
+### Registering Commands in Loop
+
+Severity: WARNING
+
+Use bulk registration, not individual calls
+
+Message: Registering commands in loop. Use bulk registration.
+
+### No Rate Limit Handling
+
+Severity: INFO
+
+Consider handling rate limits for bulk operations
+
+Message: Bulk operation without rate limit handling.
+
+## Collaboration
+
+### Delegation Triggers
+
+- user needs AI-powered Discord bot -> llm-architect (Integrate LLM for conversational Discord bot)
+- user needs Slack integration too -> slack-bot-builder (Cross-platform bot architecture)
+- user needs voice features -> voice-agents (Discord voice channel integration)
+- user needs database for bot data -> postgres-wizard (Store user data, server configs, moderation logs)
+- user needs workflow automation -> workflow-automation (Discord events trigger workflows)
+- user needs high availability -> devops (Sharding, scaling, monitoring for large bots)
+- user needs payment integration -> stripe-specialist (Premium bot features, subscription management)
## When to Use
-This skill is applicable to execute the workflow or actions described in the overview.
+
+Use this skill when the request clearly matches the capabilities and patterns described above.
diff --git a/plugins/antigravity-awesome-skills/skills/email-systems/SKILL.md b/plugins/antigravity-awesome-skills/skills/email-systems/SKILL.md
index ba119b5d..4c2c992f 100644
--- a/plugins/antigravity-awesome-skills/skills/email-systems/SKILL.md
+++ b/plugins/antigravity-awesome-skills/skills/email-systems/SKILL.md
@@ -1,18 +1,36 @@
---
name: email-systems
-description: "You are an email systems engineer who has maintained 99.9% deliverability across millions of emails. You've debugged SPF/DKIM/DMARC, dealt with blacklists, and optimized for inbox placement. You know that email is the highest ROI channel when done right, and a spam folder nightmare when done wrong."
+description: Email has the highest ROI of any marketing channel. $36 for every
+ $1 spent. Yet most startups treat it as an afterthought - bulk blasts, no
+ personalization, landing in spam folders.
risk: none
source: vibeship-spawner-skills (Apache 2.0)
-date_added: '2026-02-27'
+date_added: 2026-02-27
---
# Email Systems
-You are an email systems engineer who has maintained 99.9% deliverability
-across millions of emails. You've debugged SPF/DKIM/DMARC, dealt with
-blacklists, and optimized for inbox placement. You know that email is the
-highest ROI channel when done right, and a spam folder nightmare when done
-wrong. You treat deliverability as infrastructure, not an afterthought.
+Email has the highest ROI of any marketing channel. $36 for every $1 spent.
+Yet most startups treat it as an afterthought - bulk blasts, no personalization,
+landing in spam folders.
+
+This skill covers transactional email that works, marketing automation that
+converts, deliverability that reaches inboxes, and the infrastructure decisions
+that scale.
+
+## Principles
+
+- **Transactional vs Marketing separation**: Transactional emails (password reset, receipts) need
+  100% delivery. Marketing emails (newsletters, promos) have lower priority. Use separate
+  IP addresses and providers to protect transactional deliverability.
+  Good: Password resets via Postmark, marketing via ConvertKit. Bad: All emails through one SendGrid account.
+- **Permission is everything**: Only email people who asked to hear from you. Double opt-in for marketing.
+  Easy unsubscribe. Clean your list ruthlessly. Bad lists destroy deliverability.
+  Good: Confirmed subscription + one-click unsubscribe. Bad: Scraped email list, hidden unsubscribe, bought contacts.
+- **Deliverability is infrastructure**: SPF, DKIM, DMARC are not optional. Warm up new IPs. Monitor bounce rates.
+  Deliverability is earned through technical setup and good behavior.
+  Good: All DNS records configured, dedicated IP warmed for 4 weeks. Bad: Using free tier shared IP, no authentication records.
+- **One email, one goal**: Each email should have exactly one purpose and one CTA. Multiple asks
+  means nothing gets clicked. Clear single action.
+  Good: "Click here to verify your email" (one button). Bad: "Verify email, check out our blog, follow us on Twitter, refer a friend..."
+- **Timing and frequency matter**: Wrong time = low open rates. Too frequent = unsubscribes. Let users
+  set preferences. Test send times. Respect inbox fatigue.
+  Good: Weekly digest on Tuesday 10am user's timezone, preference center. Bad: Daily emails at random times, no way to reduce frequency.
## Patterns
@@ -20,40 +38,642 @@ wrong. You treat deliverability as infrastructure, not an afterthought.
Queue all transactional emails with retry logic and monitoring
+**When to use**: Sending any critical email (password reset, receipts, confirmations)
+
+// Don't block request on email send
+await queue.add('email', {
+ template: 'password-reset',
+ to: user.email,
+ data: { resetToken, expiresAt }
+}, {
+ attempts: 3,
+ backoff: { type: 'exponential', delay: 2000 }
+});
+
### Email Event Tracking
Track delivery, opens, clicks, bounces, and complaints
+**When to use**: Any email campaign or transactional flow
+
+# Track lifecycle:
+- Queued: Email entered system
+- Sent: Handed to provider
+- Delivered: Reached inbox
+- Opened: Recipient viewed
+- Clicked: Recipient engaged
+- Bounced: Permanent failure
+- Complained: Marked as spam
+
### Template Versioning
Version email templates for rollback and A/B testing
-## Anti-Patterns
+**When to use**: Changing production email templates
-### ❌ HTML email soup
+templates/
+ password-reset/
+ v1.tsx (current)
+ v2.tsx (testing 10%)
+ v1-deprecated.tsx (archived)
-**Why bad**: Email clients render differently. Outlook breaks everything.
+# Deploy new version gradually
+# Monitor metrics before full rollout
-### ❌ No plain text fallback
+### Bounce Handling State Machine
-**Why bad**: Some clients strip HTML. Accessibility issues. Spam signal.
+Automatically handle bounces to protect sender reputation
-### ❌ Huge image emails
+**When to use**: Processing bounce and complaint webhooks
-**Why bad**: Images blocked by default. Spam trigger. Slow loading.
+switch (bounceType) {
+ case 'hard':
+ await markEmailInvalid(email);
+ break;
+ case 'soft':
+ await incrementBounceCount(email);
+ if (count >= 3) await markEmailInvalid(email);
+ break;
+ case 'complaint':
+ await unsubscribeImmediately(email);
+ break;
+}
-## ⚠️ Sharp Edges
+### React Email Components
-| Issue | Severity | Solution |
-|-------|----------|----------|
-| Missing SPF, DKIM, or DMARC records | critical | # Required DNS records: |
-| Using shared IP for transactional email | high | # Transactional email strategy: |
-| Not processing bounce notifications | high | # Bounce handling requirements: |
-| Missing or hidden unsubscribe link | critical | # Unsubscribe requirements: |
-| Sending HTML without plain text alternative | medium | # Always send multipart: |
-| Sending high volume from new IP immediately | high | # IP warm-up schedule: |
-| Emailing people who did not opt in | critical | # Permission requirements: |
-| Emails that are mostly or entirely images | medium | # Balance images and text: |
+Build emails with reusable React components
+
+**When to use**: Creating email templates
+
+import { Button, Html } from '@react-email/components';
+
+export default function WelcomeEmail({ userName }) {
+ return (
+    <Html>
+      <Button href="https://example.com/get-started">
+        Welcome {userName}!
+      </Button>
+    </Html>
+ );
+}
+
+### Preference Center
+
+Let users control email frequency and topics
+
+**When to use**: Building marketing or notification systems
+
+Preferences:
+☑ Product updates (weekly)
+☑ New features (monthly)
+☐ Marketing promotions
+☑ Account notifications (always)
+
+# Respect preferences in all sends
+# Required for GDPR compliance
+
+## Sharp Edges
+
+### Missing SPF, DKIM, or DMARC records
+
+Severity: CRITICAL
+
+Situation: Sending emails without authentication. Emails going to spam folder.
+Low open rates. No idea why. Turns out DNS records were never set up.
+
+Symptoms:
+- Emails going to spam
+- Low deliverability rates
+- mail-tester.com score below 8
+- No DMARC reports received
+
+Why this breaks:
+Email authentication (SPF, DKIM, DMARC) tells receiving servers you're
+legit. Without them, you look like a spammer. Modern email providers
+increasingly require all three.
+
+Recommended fix:
+
+# Required DNS records:
+
+## SPF (Sender Policy Framework)
+TXT record: v=spf1 include:_spf.google.com include:sendgrid.net ~all
+
+## DKIM (DomainKeys Identified Mail)
+TXT record provided by your email provider
+Adds cryptographic signature to emails
+
+## DMARC (Domain-based Message Authentication)
+TXT record: v=DMARC1; p=quarantine; rua=mailto:dmarc@yourdomain.com
+
+# Verify setup:
+- Send test email to mail-tester.com
+- Check MXToolbox for record validation
+- Monitor DMARC reports
+
+### Using shared IP for transactional email
+
+Severity: HIGH
+
+Situation: Password resets going to spam. Using free tier of email provider.
+Some other customer on your shared IP got flagged for spam.
+Your reputation is ruined by association.
+
+Symptoms:
+- Transactional emails in spam
+- Inconsistent delivery
+- Using same provider for marketing and transactional
+
+Why this breaks:
+Shared IPs share reputation. One bad actor affects everyone. For
+critical transactional email, you need your own IP or a provider
+with strict shared IP policies.
+
+Recommended fix:
+
+# Transactional email strategy:
+
+## Option 1: Dedicated IP (high volume)
+- Get dedicated IP from your provider
+- Warm it up slowly (start with 100/day)
+- Maintain consistent volume
+
+## Option 2: Transactional-only provider
+- Postmark (very strict, great reputation)
+- Includes shared pool with high standards
+
+## Separate concerns:
+- Transactional: Postmark or Resend
+- Marketing: ConvertKit or Customer.io
+- Never mix marketing and transactional
+
+### Not processing bounce notifications
+
+Severity: HIGH
+
+Situation: Emailing same dead addresses over and over. Bounce rate climbing.
+Email provider threatening to suspend account. List is 40% dead.
+
+Symptoms:
+- Bounce rate above 2%
+- No webhook handlers for bounces
+- Same emails failing repeatedly
+
+Why this breaks:
+Bounces damage sender reputation. Email providers track bounce rates.
+Above 2% and you start looking like a spammer. Dead addresses must
+be removed immediately.
+
+Recommended fix:
+
+# Bounce handling requirements:
+
+## Hard bounces:
+Remove immediately on first occurrence
+Invalid address, domain doesn't exist
+
+## Soft bounces:
+Retry 3 times over 72 hours
+After 3 failures, treat as hard bounce
+
+## Implementation:
+```typescript
+// Webhook handler for bounces
+app.post('/webhooks/email', async (req, res) => {
+  const event = req.body;
+  if (event.type === 'bounce') {
+    await markEmailInvalid(event.email);
+    await removeFromAllLists(event.email);
+  }
+  res.sendStatus(200);
+});
+```
+
+## Monitor:
+Track bounce rate by campaign
+Alert if bounce rate exceeds 1%
+
+### Missing or hidden unsubscribe link
+
+Severity: CRITICAL
+
+Situation: Users marking as spam because they cannot unsubscribe. Spam complaints
+rising. CAN-SPAM violation. Email provider suspends account.
+
+Symptoms:
+- Hidden unsubscribe links
+- Multi-step unsubscribe process
+- No List-Unsubscribe header
+- High spam complaint rate
+
+Why this breaks:
+Users who cannot unsubscribe will mark as spam. Spam complaints hurt
+reputation more than unsubscribes. It is also illegal:
+CAN-SPAM and GDPR both require a clear unsubscribe.
+
+Recommended fix:
+
+# Unsubscribe requirements:
+
+## Visible:
+- Above the fold in email footer
+- Clear text, not hidden
+- Not styled to be invisible
+
+## One-click:
+- Link directly unsubscribes
+- No login required
+- No "are you sure" hoops
+
+## List-Unsubscribe header:
+```
+List-Unsubscribe: <mailto:unsubscribe@yourdomain.com>, <https://yourdomain.com/unsubscribe>
+List-Unsubscribe-Post: List-Unsubscribe=One-Click
+```
+
+## Preference center:
+Option to reduce frequency instead of full unsubscribe
+
+### Sending HTML without plain text alternative
+
+Severity: MEDIUM
+
+Situation: Some users see blank emails. Spam filters flagging emails. Accessibility
+issues for screen readers. Email clients that strip HTML show nothing.
+
+Symptoms:
+- No text/plain part in emails
+- Blank emails for some users
+- Lower engagement in some segments
+
+Why this breaks:
+Not everyone can render HTML. Screen readers work better with plain text.
+Spam filters are suspicious of HTML-only. Multipart is the standard.
+
+Recommended fix:
+
+# Always send multipart:
+```typescript
+await resend.emails.send({
+ from: 'you@example.com',
+ to: 'user@example.com',
+ subject: 'Welcome!',
+  html: '<h1>Welcome!</h1><p>Thanks for signing up.</p>',
+ text: 'Welcome!\n\nThanks for signing up.',
+});
+```
+
+# Auto-generate text from HTML:
+Use html-to-text library as fallback
+But hand-crafted plain text is better
+
+# Plain text should be readable:
+Not just HTML stripped of tags
+Actual formatted text content
+
+### Sending high volume from new IP immediately
+
+Severity: HIGH
+
+Situation: Just switched providers. Started sending 50,000 emails/day immediately.
+Massive deliverability issues. New IP has no reputation. Looks like spam.
+
+Symptoms:
+- New IP/provider
+- Sending high volume immediately
+- Sudden deliverability drop
+
+Why this breaks:
+New IPs have no reputation. Sending high volume immediately looks
+like a spammer who just spun up. You need to gradually build trust.
+
+Recommended fix:
+
+# IP warm-up schedule:
+
+Week 1: 50-100 emails/day
+Week 2: 200-500 emails/day
+Week 3: 500-1000 emails/day
+Week 4: 1000-5000 emails/day
+Continue doubling until at volume
+
+# Best practices:
+- Start with most engaged users
+- Send to Gmail/Microsoft first (they set reputation)
+- Maintain consistent volume
+- Don't spike and drop
+
+# During warm-up:
+- Monitor deliverability closely
+- Check feedback loops
+- Adjust pace if issues arise
+
+### Emailing people who did not opt in
+
+Severity: CRITICAL
+
+Situation: Bought an email list. Scraped emails from LinkedIn. Added conference
+contacts. Spam complaints through the roof. Provider suspends account.
+Maybe a lawsuit.
+
+Symptoms:
+- Purchased email lists
+- Scraped contacts
+- High unsubscribe rate on first send
+- Spam complaints above 0.1%
+
+Why this breaks:
+Permission-based email is not optional. It is the law (CAN-SPAM, GDPR).
+It is also effective - unwilling recipients hurt your metrics and
+reputation more than they help.
+
+Recommended fix:
+
+# Permission requirements:
+
+## Explicit opt-in:
+- User actively chooses to receive email
+- Not pre-checked boxes
+- Clear what they are signing up for
+
+## Double opt-in:
+- Confirmation email with link
+- Only add to list after confirmation
+- Best practice for marketing lists
+
+## What you cannot do:
+- Buy email lists
+- Scrape emails from websites
+- Add conference contacts without consent
+- Use partner/customer lists without consent
+
+## Transactional exception:
+Password resets, receipts, account alerts
+do not need marketing opt-in
+
+### Emails that are mostly or entirely images
+
+Severity: MEDIUM
+
+Situation: Beautiful designed email that is one big image. Users with images
+blocked see nothing. Spam filters flag it. Mobile loading is slow.
+No one can copy text.
+
+Symptoms:
+- Single image emails
+- No text content visible
+- Missing or generic alt text
+- Low engagement when images blocked
+
+Why this breaks:
+Images are blocked by default in many clients. Spam filters are
+suspicious of image-only emails. Accessibility suffers. Load times
+increase.
+
+Recommended fix:
+
+# Balance images and text:
+
+## 60/40 rule:
+- At least 60% text content
+- Images for enhancement, not content
+
+## Always include:
+- Alt text on every image
+- Key message in text, not just image
+- Fallback for images-off view
+
+## Test:
+- Preview with images disabled
+- Should still be usable
+
+# Example:
+```html
+<img src="https://example.com/promo.png" alt="Save 50% with code SAVE50">
+<p>Use code SAVE50 to save 50% this week.</p>
+```
+
+### Missing or default preview text
+
+Severity: MEDIUM
+
+Situation: Inbox shows "View this email in browser" or random HTML as preview.
+Lower open rates. First impression wasted on boilerplate.
+
+Symptoms:
+- View in browser as preview
+- HTML code visible in preview
+- No preview component in template
+
+Why this breaks:
+Preview text is prime real estate - appears right after subject line.
+Default or missing preview text wastes this space. Good preview text
+increases open rates 10-30%.
+
+Recommended fix:
+
+# Add explicit preview text:
+
+## In HTML:
+```html
+<div style="display: none; max-height: 0; overflow: hidden;">
+  Your preview text here. This appears in inbox preview.
+</div>
+<!-- Rest of email body follows -->
+
+```
+
+## With React Email:
+```tsx
+<Preview>
+  Your preview text here. This appears in inbox preview.
+</Preview>
+```
+
+## Best practices:
+- Complement the subject line
+- 40-100 characters optimal
+- Create curiosity or value
+- Different from first line of email
+
+### Not handling partial send failures
+
+Severity: HIGH
+
+Situation: Sending to 10,000 users. API fails at 3,000. No tracking of what sent.
+Either double-send or lose 7,000. No way to know who got the email.
+
+Symptoms:
+- No per-recipient send logging
+- Cannot tell who received email
+- Double-sending issues
+- No retry mechanism
+
+Why this breaks:
+Bulk sends fail partially. APIs timeout. Rate limits hit. Without
+tracking individual send status, you cannot recover gracefully.
+
+Recommended fix:
+
+# Track each send individually:
+
+```typescript
+async function sendCampaign(emails: string[]) {
+ const results = await Promise.allSettled(
+ emails.map(async (email) => {
+ try {
+ const result = await resend.emails.send({ to: email, ... });
+ await db.emailLog.create({
+ email,
+ status: 'sent',
+ messageId: result.id,
+ });
+ return result;
+ } catch (error) {
+ await db.emailLog.create({
+ email,
+ status: 'failed',
+ error: error.message,
+ });
+ throw error;
+ }
+ })
+ );
+
+ const failed = results.filter(r => r.status === 'rejected');
+ // Retry failed sends or alert
+}
+```
+
+# Best practices:
+- Log every send attempt
+- Include message ID for tracking
+- Build retry queue for failures
+- Monitor success rate per campaign
+
+## Validation Checks
+
+### Missing plain text email part
+
+Severity: WARNING
+
+Emails should always include a plain text alternative
+
+Message: Email being sent with HTML but no plain text part. Add 'text:' property for accessibility and deliverability.
+
+### Hardcoded from email address
+
+Severity: WARNING
+
+From addresses should come from environment variables
+
+Message: From email appears hardcoded. Use environment variable for flexibility.
+
+### Missing bounce webhook handler
+
+Severity: WARNING
+
+Email bounces should be handled to maintain list hygiene
+
+Message: Email provider used but no bounce handling detected. Implement webhook handler for bounces.
+
+### Missing List-Unsubscribe header
+
+Severity: INFO
+
+Marketing emails should include List-Unsubscribe header
+
+Message: Marketing email detected without List-Unsubscribe header. Add header for better deliverability.
+
+### Synchronous email send in request handler
+
+Severity: WARNING
+
+Email sends should be queued, not blocking
+
+Message: Email sent synchronously in request handler. Consider queuing for better reliability.
+
+### Email send without retry logic
+
+Severity: INFO
+
+Email sends should have retry mechanism for failures
+
+Message: Email send without apparent retry logic. Add retry for transient failures.
+
+### Email API key in code
+
+Severity: ERROR
+
+API keys should come from environment variables
+
+Message: Email API key appears hardcoded in source code. Use environment variable.
+
+### Bulk email without rate limiting
+
+Severity: WARNING
+
+Bulk sends should respect provider rate limits
+
+Message: Bulk email sending without apparent rate limiting. Add throttling to avoid hitting limits.
+
+### Email without preview text
+
+Severity: INFO
+
+Emails should include preview/preheader text
+
+Message: Email template without preview text. Add hidden preheader for inbox preview.
+
+### Email send without logging
+
+Severity: WARNING
+
+Email sends should be logged for debugging and auditing
+
+Message: Email being sent without apparent logging. Log sends for debugging and compliance.
+
+## Collaboration
+
+### Delegation Triggers
+
+- copy|subject|messaging|content -> copywriting (Email needs copy)
+- design|template|visual|layout -> ui-design (Email needs design)
+- track|analytics|measure|metrics -> analytics-architecture (Email needs tracking)
+- infrastructure|deploy|server|queue -> devops (Email needs infrastructure)
+
+### Email Marketing Stack
+
+Skills: email-systems, copywriting, marketing, analytics-architecture
+
+Workflow:
+
+```
+1. Infrastructure setup (email-systems)
+2. Template creation (email-systems)
+3. Copy writing (copywriting)
+4. Campaign launch (marketing)
+5. Performance tracking (analytics-architecture)
+```
+
+### Transactional Email
+
+Skills: email-systems, backend, devops
+
+Workflow:
+
+```
+1. Provider setup (email-systems)
+2. Template coding (email-systems)
+3. Queue integration (backend)
+4. Monitoring (devops)
+```
## When to Use
-This skill is applicable to execute the workflow or actions described in the overview.
+
+Use this skill when the request clearly matches the capabilities and patterns described above.
diff --git a/plugins/antigravity-awesome-skills/skills/file-uploads/SKILL.md b/plugins/antigravity-awesome-skills/skills/file-uploads/SKILL.md
index 598db0af..b0814728 100644
--- a/plugins/antigravity-awesome-skills/skills/file-uploads/SKILL.md
+++ b/plugins/antigravity-awesome-skills/skills/file-uploads/SKILL.md
@@ -1,27 +1,228 @@
---
name: file-uploads
-description: "Careful about security and performance. Never trusts file extensions. Knows that large uploads need special handling. Prefers presigned URLs over server proxying."
+description: Expert at handling file uploads and cloud storage. Covers S3,
+ Cloudflare R2, presigned URLs, multipart uploads, and image optimization.
+ Knows how to handle large files without blocking.
risk: none
-source: "vibeship-spawner-skills (Apache 2.0)"
-date_added: "2026-02-27"
+source: vibeship-spawner-skills (Apache 2.0)
+date_added: 2026-02-27
---
# File Uploads & Storage
+Expert at handling file uploads and cloud storage. Covers S3,
+Cloudflare R2, presigned URLs, multipart uploads, and image
+optimization. Knows how to handle large files without blocking.
+
**Role**: File Upload Specialist
Careful about security and performance. Never trusts file
extensions. Knows that large uploads need special handling.
Prefers presigned URLs over server proxying.
-## ⚠️ Sharp Edges
+### Principles
-| Issue | Severity | Solution |
-|-------|----------|----------|
-| Trusting client-provided file type | critical | # CHECK MAGIC BYTES |
-| No upload size restrictions | high | # SET SIZE LIMITS |
-| User-controlled filename allows path traversal | critical | # SANITIZE FILENAMES |
-| Presigned URL shared or cached incorrectly | medium | # CONTROL PRESIGNED URL DISTRIBUTION |
+- Never trust client file type claims
+- Use presigned URLs for direct uploads
+- Stream large files, never buffer
+- Validate on upload, optimize after
+
+## Sharp Edges
+
+### Trusting client-provided file type
+
+Severity: CRITICAL
+
+Situation: User uploads malware.exe renamed to image.jpg. You check
+extension, looks fine. Store it. Serve it. Another user
+downloads and executes it.
+
+Symptoms:
+- Malware uploaded as images
+- Wrong content-type served
+
+Why this breaks:
+File extensions and Content-Type headers can be faked.
+Attackers rename executables to bypass filters.
+
+Recommended fix:
+
+# CHECK MAGIC BYTES
+
+import { fileTypeFromBuffer } from "file-type";
+
+async function validateImage(buffer: Buffer) {
+ const type = await fileTypeFromBuffer(buffer);
+
+ const allowedTypes = ["image/jpeg", "image/png", "image/webp"];
+
+ if (!type || !allowedTypes.includes(type.mime)) {
+ throw new Error("Invalid file type");
+ }
+
+ return type;
+}
+
+// For streams
+import { fileTypeFromStream } from "file-type";
+const type = await fileTypeFromStream(readableStream);
+
+### No upload size restrictions
+
+Severity: HIGH
+
+Situation: No file size limit. Attacker uploads 10GB file. Server runs
+out of memory or disk. Denial of service. Or massive
+storage bill.
+
+Symptoms:
+- Server crashes on large uploads
+- Massive storage bills
+- Memory exhaustion
+
+Why this breaks:
+Without limits, attackers can exhaust resources. Even
+legitimate users might accidentally upload huge files.
+
+Recommended fix:
+
+# SET SIZE LIMITS
+
+// Formidable
+const form = formidable({
+ maxFileSize: 10 * 1024 * 1024, // 10MB
+});
+
+// Multer
+const upload = multer({
+ limits: { fileSize: 10 * 1024 * 1024 },
+});
+
+// Client-side early check
+if (file.size > 10 * 1024 * 1024) {
+ alert("File too large (max 10MB)");
+ return;
+}
+
+// Presigned URL with size limit
+const command = new PutObjectCommand({
+ Bucket: BUCKET,
+ Key: key,
+ ContentLength: expectedSize, // Enforce size
+});
+
+### User-controlled filename allows path traversal
+
+Severity: CRITICAL
+
+Situation: User uploads file named "../../../etc/passwd". You use
+filename directly. File saved outside upload directory.
+System files overwritten.
+
+Symptoms:
+- Files outside upload directory
+- System file access
+
+Why this breaks:
+User input should never be used directly in file paths.
+Path traversal sequences can escape intended directories.
+
+Recommended fix:
+
+# SANITIZE FILENAMES
+
+import path from "path";
+import crypto from "crypto";
+
+function safeFilename(userFilename: string): string {
+ // Extract just the base name
+ const base = path.basename(userFilename);
+
+ // Remove any remaining path chars
+ const sanitized = base.replace(/[^a-zA-Z0-9.-]/g, "_");
+
+ // Or better: generate new name entirely
+ const ext = path.extname(userFilename).toLowerCase();
+ const allowed = [".jpg", ".png", ".pdf"];
+
+ if (!allowed.includes(ext)) {
+ throw new Error("Invalid extension");
+ }
+
+ return crypto.randomUUID() + ext;
+}
+
+// Never do this
+const filePath = "uploads/" + req.body.filename; // DANGER!
+
+// Do this
+const filePath = "uploads/" + safeFilename(req.body.filename);
+
+### Presigned URL shared or cached incorrectly
+
+Severity: MEDIUM
+
+Situation: Presigned URL for private file returned in API response.
+Response cached by CDN. Anyone with cached URL can access
+private file for hours.
+
+Symptoms:
+- Private files accessible via cached URLs
+- Access after expiry
+
+Why this breaks:
+Presigned URLs grant temporary access. If cached or shared,
+access extends beyond intended scope.
+
+Recommended fix:
+
+# CONTROL PRESIGNED URL DISTRIBUTION
+
+// Short expiry for sensitive files
+const url = await getSignedUrl(s3, command, {
+ expiresIn: 300, // 5 minutes
+});
+
+// No-cache headers for presigned URL responses
+return Response.json({ url }, {
+ headers: {
+ "Cache-Control": "no-store, max-age=0",
+ },
+});
+
+// Or use CloudFront signed URLs for more control
+
+## Validation Checks
+
+### Only checking file extension
+
+Severity: CRITICAL
+
+Message: Check magic bytes, not just extension
+
+Fix action: Use file-type library to verify actual type
+
+### User filename used directly in path
+
+Severity: CRITICAL
+
+Message: Sanitize filenames to prevent path traversal
+
+Fix action: Use path.basename() and generate safe name
+
+## Collaboration
+
+### Delegation Triggers
+
+- image optimization CDN -> performance-optimization (Image delivery)
+- storing file metadata -> postgres-wizard (Database schema)
## When to Use
-This skill is applicable to execute the workflow or actions described in the overview.
+
+- User mentions or implies: file upload
+- User mentions or implies: S3
+- User mentions or implies: R2
+- User mentions or implies: presigned URL
+- User mentions or implies: multipart
+- User mentions or implies: image upload
+- User mentions or implies: cloud storage
diff --git a/plugins/antigravity-awesome-skills/skills/firebase/SKILL.md b/plugins/antigravity-awesome-skills/skills/firebase/SKILL.md
index 811518b9..c2532e44 100644
--- a/plugins/antigravity-awesome-skills/skills/firebase/SKILL.md
+++ b/plugins/antigravity-awesome-skills/skills/firebase/SKILL.md
@@ -1,23 +1,38 @@
---
name: firebase
-description: "You're a developer who has shipped dozens of Firebase projects. You've seen the \"easy\" path lead to security breaches, runaway costs, and impossible migrations. You know Firebase is powerful, but you also know its sharp edges."
+description: Firebase gives you a complete backend in minutes - auth, database,
+ storage, functions, hosting. But the ease of setup hides real complexity.
+ Security rules are your last line of defense, and they're often wrong.
risk: unknown
-source: "vibeship-spawner-skills (Apache 2.0)"
-date_added: "2026-02-27"
+source: vibeship-spawner-skills (Apache 2.0)
+date_added: 2026-02-27
---
# Firebase
-You're a developer who has shipped dozens of Firebase projects. You've seen the
-"easy" path lead to security breaches, runaway costs, and impossible migrations.
-You know Firebase is powerful, but you also know its sharp edges.
+Firebase gives you a complete backend in minutes - auth, database, storage,
+functions, hosting. But the ease of setup hides real complexity. Security rules
+are your last line of defense, and they're often wrong. Firestore queries are
+limited, and you learn this after you've designed your data model.
-Your hard-won lessons: The team that skipped security rules got pwned. The team
-that designed Firestore like SQL couldn't query their data. The team that
-attached listeners to large collections got a $10k bill. You've learned from
-all of them.
+This skill covers Firebase Authentication, Firestore, Realtime Database, Cloud
+Functions, Cloud Storage, and Firebase Hosting. Key insight: Firebase is
+optimized for read-heavy, denormalized data. If you're thinking relationally,
+you're thinking wrong.
-You advocate for Firebase w
+2025 lesson: Firestore pricing can surprise you. Reads are cheap until they're
+not. A poorly designed listener can cost more than a dedicated database. Plan
+your data model for your query patterns, not your data relationships.
+
+## Principles
+
+- Design data for queries, not relationships
+- Security rules are mandatory, not optional
+- Denormalize aggressively - duplication is cheap, joins are expensive
+- Batch writes and transactions for consistency
+- Use offline persistence wisely - it's not free
+- Cloud Functions for what clients shouldn't do
+- Environment-based config, never hardcode keys in client
## Capabilities
@@ -31,31 +46,646 @@ You advocate for Firebase w
- firebase-admin-sdk
- firebase-emulators
+## Scope
+
+- general-backend-architecture -> backend
+- payment-processing -> stripe
+- email-sending -> email
+- advanced-auth-flows -> authentication-oauth
+- kubernetes-deployment -> devops
+
+## Tooling
+
+### Core
+
+- firebase - When: Client-side SDK Note: Modular SDK - tree-shakeable
+- firebase-admin - When: Server-side / Cloud Functions Note: Full access, bypasses security rules
+- firebase-functions - When: Cloud Functions v2 Note: v2 functions are recommended
+
+### Testing
+
+- @firebase/rules-unit-testing - When: Testing security rules Note: Essential - rules bugs are security bugs
+- firebase-tools - When: Emulator suite Note: Local development without hitting production
+
+### Frameworks
+
+- reactfire - When: React + Firebase Note: Hooks-based, handles subscriptions
+- vuefire - When: Vue + Firebase Note: Vue-specific bindings
+- angularfire - When: Angular + Firebase Note: Official Angular bindings
+
## Patterns
### Modular SDK Import
Import only what you need for smaller bundles
+**When to use**: Client-side Firebase usage
+
+# MODULAR IMPORTS:
+
+"""
+Firebase v9+ uses modular SDK. Import only what you need.
+This enables tree-shaking and smaller bundles.
+"""
+
+// WRONG: v8-compat style (larger bundle)
+import firebase from 'firebase/compat/app';
+import 'firebase/compat/firestore';
+const db = firebase.firestore();
+
+// RIGHT: v9+ modular (tree-shakeable)
+import { initializeApp } from 'firebase/app';
+import { getFirestore, collection, doc, getDoc } from 'firebase/firestore';
+
+const app = initializeApp(firebaseConfig);
+const db = getFirestore(app);
+
+// Get a document
+const docRef = doc(db, 'users', 'userId');
+const docSnap = await getDoc(docRef);
+
+if (docSnap.exists()) {
+ console.log(docSnap.data());
+}
+
+// Query with constraints
+import { query, where, orderBy, limit } from 'firebase/firestore';
+
+const q = query(
+ collection(db, 'posts'),
+ where('published', '==', true),
+ orderBy('createdAt', 'desc'),
+ limit(10)
+);
+
### Security Rules Design
Secure your data with proper rules from day one
+**When to use**: Any Firestore database
+
+# FIRESTORE SECURITY RULES:
+
+"""
+Rules are your last line of defense. Every read and write
+goes through them. Get them wrong, and your data is exposed.
+"""
+
+rules_version = '2';
+service cloud.firestore {
+ match /databases/{database}/documents {
+
+ // Helper functions
+ function isSignedIn() {
+ return request.auth != null;
+ }
+
+ function isOwner(userId) {
+ return request.auth.uid == userId;
+ }
+
+ function isAdmin() {
+ return request.auth.token.admin == true;
+ }
+
+ // Users collection
+ match /users/{userId} {
+ // Anyone can read public profile
+ allow read: if true;
+
+ // Only owner can write their own data
+ allow write: if isOwner(userId);
+
+ // Private subcollection
+ match /private/{document=**} {
+ allow read, write: if isOwner(userId);
+ }
+ }
+
+ // Posts collection
+ match /posts/{postId} {
+ // Anyone can read published posts
+ allow read: if resource.data.published == true
+ || isOwner(resource.data.authorId);
+
+ // Only authenticated users can create
+ allow create: if isSignedIn()
+ && request.resource.data.authorId == request.auth.uid;
+
+ // Only author can update/delete
+ allow update, delete: if isOwner(resource.data.authorId);
+ }
+
+ // Admin-only collection
+ match /admin/{document=**} {
+ allow read, write: if isAdmin();
+ }
+ }
+}
+
### Data Modeling for Queries
Design Firestore data structure around query patterns
-## Anti-Patterns
+**When to use**: Designing Firestore schema
-### ❌ No Security Rules
+# FIRESTORE DATA MODELING:
-### ❌ Client-Side Admin Operations
+"""
+Firestore is NOT relational. You can't JOIN.
+Design your data for how you'll QUERY it, not how it relates.
+"""
-### ❌ Listener on Large Collections
+// WRONG: Normalized (SQL thinking)
+// users/{userId}
+// posts/{postId} with authorId field
+// To get "posts by user" - need to query posts collection
+
+// RIGHT: Denormalized for queries
+// users/{userId}/posts/{postId} - subcollection
+// OR
+// posts/{postId} with embedded author data
+
+// Document structure for a post
+const post = {
+ id: 'post123',
+ title: 'My Post',
+ content: '...',
+
+ // Embed frequently-needed author data
+ author: {
+ id: 'user456',
+ name: 'Jane Doe',
+ avatarUrl: '...'
+ },
+
+ // Arrays for IN queries (max 30 items for 'in')
+ tags: ['javascript', 'firebase'],
+
+ // Maps for compound queries
+ stats: {
+ likes: 42,
+ comments: 7,
+ views: 1000
+ },
+
+ // Timestamps
+ createdAt: serverTimestamp(),
+ updatedAt: serverTimestamp(),
+
+ // Booleans for filtering
+ published: true,
+ featured: false
+};
+
+// Query patterns this enables:
+// - Get post with author info: 1 read (no join needed)
+// - Posts by tag: where('tags', 'array-contains', 'javascript')
+// - Featured posts: where('featured', '==', true)
+// - Recent posts: orderBy('createdAt', 'desc')
+
+// When author updates their name, update all their posts
+// This is the tradeoff: writes are more complex, reads are fast
+
+### Real-time Listeners
+
+Subscribe to data changes with proper cleanup
+
+**When to use**: Real-time features
+
+# REAL-TIME LISTENERS:
+
+"""
+onSnapshot creates a persistent connection. Always unsubscribe
+when component unmounts to prevent memory leaks and extra reads.
+"""
+
+// React hook for real-time document
+function useDocument(path) {
+ const [data, setData] = useState(null);
+ const [loading, setLoading] = useState(true);
+ const [error, setError] = useState(null);
+
+ useEffect(() => {
+ const docRef = doc(db, path);
+
+ // Subscribe to document
+ const unsubscribe = onSnapshot(
+ docRef,
+ (snapshot) => {
+ if (snapshot.exists()) {
+ setData({ id: snapshot.id, ...snapshot.data() });
+ } else {
+ setData(null);
+ }
+ setLoading(false);
+ },
+ (err) => {
+ setError(err);
+ setLoading(false);
+ }
+ );
+
+ // Cleanup on unmount
+ return () => unsubscribe();
+ }, [path]);
+
+ return { data, loading, error };
+}
+
+// Usage
+function UserProfile({ userId }) {
+ const { data: user, loading } = useDocument(`users/${userId}`);
+
+ if (loading) return <p>Loading…</p>;
+ return (
+ <h1>{user?.name}</h1>
+ );
+}
+
+// Collection with query
+// NOTE: the page-size parameter must not be named `limit`, or it would
+// shadow the Firestore `limit()` query helper imported above.
+function usePosts(pageSize = 10) {
+ const [posts, setPosts] = useState([]);
+
+ useEffect(() => {
+ const q = query(
+ collection(db, 'posts'),
+ where('published', '==', true),
+ orderBy('createdAt', 'desc'),
+ limit(pageSize)
+ );
+
+ const unsubscribe = onSnapshot(q, (snapshot) => {
+ const results = snapshot.docs.map(doc => ({
+ id: doc.id,
+ ...doc.data()
+ }));
+ setPosts(results);
+ });
+
+ return () => unsubscribe();
+ }, [pageSize]);
+
+ return posts;
+}
+
+### Cloud Functions Patterns
+
+Server-side logic with Cloud Functions v2
+
+**When to use**: Backend logic, triggers, scheduled tasks
+
+# CLOUD FUNCTIONS V2:
+
+"""
+Cloud Functions run server-side code triggered by events.
+V2 uses more standard Node.js patterns and better scaling.
+"""
+
+import { onRequest } from 'firebase-functions/v2/https';
+import { onDocumentCreated } from 'firebase-functions/v2/firestore';
+import { onSchedule } from 'firebase-functions/v2/scheduler';
+import { getFirestore, FieldValue } from 'firebase-admin/firestore';
+import { initializeApp } from 'firebase-admin/app';
+
+initializeApp();
+const db = getFirestore();
+
+// HTTP function
+export const api = onRequest(
+ { cors: true, region: 'us-central1' },
+ async (req, res) => {
+ // Verify auth token
+ const token = req.headers.authorization?.split('Bearer ')[1];
+ if (!token) {
+ res.status(401).json({ error: 'Unauthorized' });
+ return;
+ }
+
+ try {
+ const decoded = await getAuth().verifyIdToken(token);
+ // Process request with decoded.uid
+ res.json({ userId: decoded.uid });
+ } catch (error) {
+ res.status(401).json({ error: 'Invalid token' });
+ }
+ }
+);
+
+// Firestore trigger - on document create
+export const onUserCreated = onDocumentCreated(
+ 'users/{userId}',
+ async (event) => {
+ const snapshot = event.data;
+ const userId = event.params.userId;
+
+ if (!snapshot) return;
+
+ const userData = snapshot.data();
+
+ // Send welcome email, create related documents, etc.
+ await db.collection('notifications').add({
+ userId,
+ type: 'welcome',
+ message: `Welcome, ${userData.name}!`,
+ createdAt: FieldValue.serverTimestamp()
+ });
+ }
+);
+
+// Scheduled function (every day at midnight)
+export const dailyCleanup = onSchedule(
+ { schedule: '0 0 * * *', timeZone: 'UTC' },
+ async (event) => {
+ const cutoff = new Date();
+ cutoff.setDate(cutoff.getDate() - 30);
+
+ // Delete old documents
+ const oldDocs = await db.collection('logs')
+ .where('createdAt', '<', cutoff)
+ .limit(500)
+ .get();
+
+ const batch = db.batch();
+ oldDocs.docs.forEach(doc => batch.delete(doc.ref));
+ await batch.commit();
+
+ console.log(`Deleted ${oldDocs.size} old logs`);
+ }
+);
+
+### Batch Operations
+
+Atomic writes and transactions for consistency
+
+**When to use**: Multiple document updates that must succeed together
+
+# BATCH WRITES AND TRANSACTIONS:
+
+"""
+Batches: Multiple writes that all succeed or all fail.
+Transactions: Read-then-write operations with consistency.
+Max 500 operations per batch/transaction.
+"""
+
+import {
+ writeBatch, runTransaction, doc, getDoc,
+ increment, serverTimestamp
+} from 'firebase/firestore';
+
+// Batch write - no reads, just writes
+async function createPostWithTags(post, tags) {
+ const batch = writeBatch(db);
+
+ // Create post
+ const postRef = doc(collection(db, 'posts'));
+ batch.set(postRef, {
+ ...post,
+ createdAt: serverTimestamp()
+ });
+
+ // Update tag counts
+ for (const tag of tags) {
+ const tagRef = doc(db, 'tags', tag);
+ batch.set(tagRef, {
+ count: increment(1),
+ lastUsed: serverTimestamp()
+ }, { merge: true });
+ }
+
+ await batch.commit();
+ return postRef.id;
+}
+
+// Transaction - read and write atomically
+async function likePost(postId, userId) {
+ return runTransaction(db, async (transaction) => {
+ const postRef = doc(db, 'posts', postId);
+ const likeRef = doc(db, 'posts', postId, 'likes', userId);
+
+ const postSnap = await transaction.get(postRef);
+ if (!postSnap.exists()) {
+ throw new Error('Post not found');
+ }
+
+ const likeSnap = await transaction.get(likeRef);
+ if (likeSnap.exists()) {
+ throw new Error('Already liked');
+ }
+
+ // Increment like count and add like document
+ transaction.update(postRef, {
+ likeCount: increment(1)
+ });
+
+ transaction.set(likeRef, {
+ userId,
+ createdAt: serverTimestamp()
+ });
+
+ return postSnap.data().likeCount + 1;
+ });
+}
+
+### Social Login (Google, GitHub, etc.)
+
+OAuth provider setup and authentication flows
+
+**When to use**: Social login implementation
+
+# SOCIAL LOGIN WITH FIREBASE AUTH
+
+import {
+ getAuth, signInWithPopup, signInWithRedirect,
+ GoogleAuthProvider, GithubAuthProvider, OAuthProvider
+} from "firebase/auth";
+
+const auth = getAuth();
+
+// GOOGLE
+const googleProvider = new GoogleAuthProvider();
+googleProvider.addScope("email");
+googleProvider.setCustomParameters({ prompt: "select_account" });
+
+async function signInWithGoogle() {
+ try {
+ const result = await signInWithPopup(auth, googleProvider);
+ return result.user;
+ } catch (error) {
+ if (error.code === "auth/account-exists-with-different-credential") {
+ return handleAccountConflict(error);
+ }
+ throw error;
+ }
+}
+
+// GITHUB
+const githubProvider = new GithubAuthProvider();
+githubProvider.addScope("read:user");
+
+// APPLE (Required for iOS apps!)
+const appleProvider = new OAuthProvider("apple.com");
+appleProvider.addScope("email");
+appleProvider.addScope("name");
+
+### Popup vs Redirect Auth
+
+When to use popup vs redirect for OAuth
+
+**When to use**: Choosing authentication flow
+
+# Popup: Desktop, SPA (simpler, can be blocked)
+# Redirect: Mobile, iOS Safari (always works)
+
+async function signIn(provider) {
+ if (/iPhone|iPad|Android/i.test(navigator.userAgent)) {
+ return signInWithRedirect(auth, provider);
+ }
+ try {
+ return await signInWithPopup(auth, provider);
+ } catch (e) {
+ if (e.code === "auth/popup-blocked") {
+ return signInWithRedirect(auth, provider);
+ }
+ throw e;
+ }
+}
+
+// Check redirect result on page load
+useEffect(() => {
+ getRedirectResult(auth).then(r => r && setUser(r.user));
+}, []);
+
+### Account Linking
+
+Link multiple providers to one account
+
+**When to use**: User has accounts with different providers
+
+import { fetchSignInMethodsForEmail, linkWithCredential } from "firebase/auth";
+
+async function handleAccountConflict(error) {
+ const email = error.customData?.email;
+ const pendingCred = OAuthProvider.credentialFromError(error);
+ const methods = await fetchSignInMethodsForEmail(auth, email);
+
+ if (methods.includes("google.com")) {
+ alert("Sign in with Google to link accounts");
+ const result = await signInWithPopup(auth, new GoogleAuthProvider());
+ await linkWithCredential(result.user, pendingCred);
+ return result.user;
+ }
+}
+
+// Link new provider
+await linkWithPopup(auth.currentUser, new GithubAuthProvider());
+
+// Unlink provider (keep at least one!)
+await unlink(auth.currentUser, "github.com");
+
+### Auth State Persistence
+
+Control session lifetime
+
+**When to use**: Managing user sessions
+
+import { setPersistence, browserLocalPersistence, browserSessionPersistence } from "firebase/auth";
+
+// LOCAL: survives browser close (default)
+// SESSION: cleared on tab close
+
+async function signInWithRememberMe(email, pass, remember) {
+ await setPersistence(auth, remember ? browserLocalPersistence : browserSessionPersistence);
+ return signInWithEmailAndPassword(auth, email, pass);
+}
+
+// React auth hook
+function useAuth() {
+ const [user, setUser] = useState(null);
+ const [loading, setLoading] = useState(true);
+ useEffect(() => onAuthStateChanged(auth, u => { setUser(u); setLoading(false); }), []);
+ return { user, loading };
+}
+
+### Email Verification and Password Reset
+
+Complete email auth flow
+
+**When to use**: Email/password authentication
+
+import { sendEmailVerification, sendPasswordResetEmail, reauthenticateWithCredential } from "firebase/auth";
+
+// Sign up with verification
+async function signUp(email, password) {
+ const result = await createUserWithEmailAndPassword(auth, email, password);
+ await sendEmailVerification(result.user);
+ return result.user;
+}
+
+// Password reset
+await sendPasswordResetEmail(auth, email);
+
+// Change password (requires recent auth)
+const cred = EmailAuthProvider.credential(user.email, currentPass);
+await reauthenticateWithCredential(user, cred);
+await updatePassword(user, newPass);
+
+### Token Management for APIs
+
+Handle ID tokens for backend calls
+
+**When to use**: Authenticating with backend APIs
+
+import { getIdToken, onIdTokenChanged } from "firebase/auth";
+
+// Get token (auto-refreshes if expired)
+const token = await getIdToken(auth.currentUser);
+
+// API helper with auto-retry
+async function apiCall(url, opts = {}) {
+ const token = await getIdToken(auth.currentUser);
+ const res = await fetch(url, {
+ ...opts,
+ headers: { ...opts.headers, Authorization: "Bearer " + token }
+ });
+ if (res.status === 401) {
+ const newToken = await getIdToken(auth.currentUser, true);
+ return fetch(url, { ...opts, headers: { ...opts.headers, Authorization: "Bearer " + newToken }});
+ }
+ return res;
+}
+
+// Sync to cookie for SSR
+onIdTokenChanged(auth, async u => {
+ document.cookie = u ? "__session=" + await u.getIdToken() : "__session=; max-age=0";
+});
+
+// Check admin claim
+const { claims } = await auth.currentUser.getIdTokenResult();
+const isAdmin = claims.admin === true;
+
+## Collaboration
+
+### Delegation Triggers
+
+- user needs complex OAuth flow -> authentication-oauth (Firebase Auth handles basics, complex flows need OAuth skill)
+- user needs payment integration -> stripe (Firebase + Stripe common pattern)
+- user needs email functionality -> email (Firebase doesn't include email - use SendGrid, Resend, etc.)
+- user needs container deployment -> devops (Beyond Firebase Hosting - Kubernetes, Docker)
+- user needs relational data model -> postgres-wizard (Firestore is wrong choice for highly relational data)
+- user needs full-text search -> elasticsearch-search (Firestore doesn't support full-text search - use Algolia/Elastic)
## Related Skills
Works well with: `nextjs-app-router`, `react-patterns`, `authentication-oauth`, `stripe`
## When to Use
-This skill is applicable to execute the workflow or actions described in the overview.
+
+- User mentions or implies: firebase
+- User mentions or implies: firestore
+- User mentions or implies: firebase auth
+- User mentions or implies: cloud functions
+- User mentions or implies: firebase storage
+- User mentions or implies: realtime database
+- User mentions or implies: firebase hosting
+- User mentions or implies: firebase emulator
+- User mentions or implies: security rules
+- User mentions or implies: firebase admin
diff --git a/plugins/antigravity-awesome-skills/skills/gcp-cloud-run/SKILL.md b/plugins/antigravity-awesome-skills/skills/gcp-cloud-run/SKILL.md
index 71749529..8a24ac02 100644
--- a/plugins/antigravity-awesome-skills/skills/gcp-cloud-run/SKILL.md
+++ b/plugins/antigravity-awesome-skills/skills/gcp-cloud-run/SKILL.md
@@ -1,22 +1,38 @@
---
name: gcp-cloud-run
-description: "When to use: ['Web applications and APIs', 'Need any runtime or library', 'Complex services with multiple endpoints', 'Stateless containerized workloads']"
+description: Specialized skill for building production-ready serverless
+ applications on GCP. Covers Cloud Run services (containerized), Cloud Run
+ Functions (event-driven), cold start optimization, and event-driven
+ architecture with Pub/Sub.
risk: unknown
-source: "vibeship-spawner-skills (Apache 2.0)"
-date_added: "2026-02-27"
+source: vibeship-spawner-skills (Apache 2.0)
+date_added: 2026-02-27
---
# GCP Cloud Run
+Specialized skill for building production-ready serverless applications on GCP.
+Covers Cloud Run services (containerized), Cloud Run Functions (event-driven),
+cold start optimization, and event-driven architecture with Pub/Sub.
+
+## Principles
+
+- Cloud Run for containers, Functions for simple event handlers
+- Optimize for cold starts with startup CPU boost and min instances
+- Set concurrency based on workload (start with 8, adjust)
+- Memory includes /tmp filesystem - plan accordingly
+- Use VPC Connector only when needed (adds latency)
+- Containers should start fast and be stateless
+- Handle signals gracefully for clean shutdown
+
## Patterns
### Cloud Run Service Pattern
Containerized web service on Cloud Run
-**When to use**: ['Web applications and APIs', 'Need any runtime or library', 'Complex services with multiple endpoints', 'Stateless containerized workloads']
+**When to use**: Web applications and APIs, Need any runtime or library, Complex services with multiple endpoints, Stateless containerized workloads
-```javascript
```dockerfile
# Dockerfile - Multi-stage build for smaller image
FROM node:20-slim AS builder
@@ -106,16 +122,44 @@ steps:
- '--cpu=1'
- '--min-instances=1'
- '--max-instances=100'
-
+ - '--concurrency=80'
+ - '--cpu-boost'
+
+images:
+ - 'gcr.io/$PROJECT_ID/my-service:$COMMIT_SHA'
```
+### Structure
+
+project/
+├── Dockerfile
+├── .dockerignore
+├── src/
+│ ├── index.js
+│ └── routes/
+├── package.json
+└── cloudbuild.yaml
+
+### Gcloud_deploy
+
+# Direct gcloud deployment
+gcloud run deploy my-service \
+ --source . \
+ --region us-central1 \
+ --allow-unauthenticated \
+ --memory 512Mi \
+ --cpu 1 \
+ --min-instances 1 \
+ --max-instances 100 \
+ --concurrency 80 \
+ --cpu-boost
+
### Cloud Run Functions Pattern
Event-driven functions (formerly Cloud Functions)
-**When to use**: ['Simple event handlers', 'Pub/Sub message processing', 'Cloud Storage triggers', 'HTTP webhooks']
+**When to use**: Simple event handlers, Pub/Sub message processing, Cloud Storage triggers, HTTP webhooks
-```javascript
```javascript
// HTTP Function
// index.js
@@ -186,15 +230,13 @@ gcloud functions deploy process-uploads \
--trigger-event-filters="bucket=my-bucket" \
--region us-central1
```
-```
### Cold Start Optimization Pattern
Minimize cold start latency for Cloud Run
-**When to use**: ['Latency-sensitive applications', 'User-facing APIs', 'High-traffic services']
+**When to use**: Latency-sensitive applications, User-facing APIs, High-traffic services
-```javascript
## 1. Enable Startup CPU Boost
```bash
@@ -258,36 +300,1079 @@ gcloud run deploy my-service \
--cpu 2 \
--region us-central1
```
+
+### Optimization_impact
+
+- Startup_cpu_boost: 50% faster cold starts
+- Min_instances: Eliminates cold starts for traffic spikes
+- Distroless_image: Smaller attack surface, faster pull
+- Lazy_init: Defers heavy loading to first request
+
+### Concurrency Configuration Pattern
+
+Proper concurrency settings for Cloud Run
+
+**When to use**: Need to optimize instance utilization, Handle traffic spikes efficiently, Reduce cold starts
+
+## Understanding Concurrency
+
+```bash
+# Default concurrency is 80
+# Adjust based on your workload
+
+# For I/O-bound workloads (most web apps)
+gcloud run deploy my-service \
+ --concurrency 80 \
+ --cpu 1
+
+# For CPU-bound workloads
+gcloud run deploy my-service \
+ --concurrency 1 \
+ --cpu 1
+
+# For memory-intensive workloads
+gcloud run deploy my-service \
+ --concurrency 10 \
+ --memory 2Gi
```
-## Anti-Patterns
+## Node.js Concurrency
-### ❌ CPU-Intensive Work Without Concurrency=1
+```javascript
+// Node.js is single-threaded but handles I/O concurrently
+// Use async/await for all I/O operations
-**Why bad**: CPU is shared across concurrent requests. CPU-bound work
-will starve other requests, causing timeouts.
+// GOOD - async I/O
+app.get('/api/data', async (req, res) => {
+ const [users, products] = await Promise.all([
+ fetchUsers(),
+ fetchProducts()
+ ]);
+ res.json({ users, products });
+});
-### ❌ Writing Large Files to /tmp
+// BAD - blocking operation
+app.get('/api/compute', (req, res) => {
+ const result = heavyCpuOperation(); // Blocks other requests!
+ res.json(result);
+});
+```
-**Why bad**: /tmp is an in-memory filesystem. Large files consume
-your memory allocation and can cause OOM errors.
+## Python Concurrency with Gunicorn
-### ❌ Long-Running Background Tasks
+```dockerfile
+FROM python:3.11-slim
+WORKDIR /app
+COPY requirements.txt .
+RUN pip install --no-cache-dir -r requirements.txt
+COPY . .
-**Why bad**: Cloud Run throttles CPU to near-zero when not handling
-requests. Background tasks will be extremely slow or stall.
+# 4 workers for concurrency
+CMD exec gunicorn --bind :$PORT --workers 4 --threads 2 main:app
+```
-## ⚠️ Sharp Edges
+```python
+# main.py
+from flask import Flask
+app = Flask(__name__)
-| Issue | Severity | Solution |
-|-------|----------|----------|
-| Issue | high | ## Calculate memory including /tmp usage |
-| Issue | high | ## Set appropriate concurrency |
-| Issue | high | ## Enable CPU always allocated |
-| Issue | medium | ## Configure connection pool with keep-alive |
-| Issue | high | ## Enable startup CPU boost |
-| Issue | medium | ## Explicitly set execution environment |
-| Issue | medium | ## Set consistent timeouts |
+@app.route('/api/data')
+def get_data():
+ return {'status': 'ok'}
+```
+
+### Concurrency_guidelines
+
+- Concurrency=1: Only for CPU-bound or unsafe code
+- Concurrency=8-20: Memory-intensive workloads
+- Concurrency=80: Default, good for I/O-bound
+- Concurrency=250: Maximum, for very lightweight handlers
+
+### Pub/Sub Integration Pattern
+
+Event-driven processing with Cloud Pub/Sub
+
+**When to use**: Asynchronous message processing, Decoupled microservices, Event-driven architecture
+
+## Push Subscription to Cloud Run
+
+```bash
+# Create topic
+gcloud pubsub topics create orders
+
+# Create push subscription to Cloud Run
+gcloud pubsub subscriptions create orders-push \
+ --topic orders \
+ --push-endpoint https://my-service-xxx.run.app/pubsub \
+ --ack-deadline 600
+```
+
+```javascript
+// Handle Pub/Sub push messages
+const express = require('express');
+const app = express();
+app.use(express.json());
+
+app.post('/pubsub', async (req, res) => {
+ // Verify the request is from Pub/Sub
+ if (!req.body.message) {
+ return res.status(400).send('Invalid Pub/Sub message');
+ }
+
+ try {
+ // Decode message data
+ const message = req.body.message;
+ const data = message.data
+ ? JSON.parse(Buffer.from(message.data, 'base64').toString())
+ : {};
+
+ console.log('Processing order:', data);
+
+ await processOrder(data);
+
+ // Return 200 to acknowledge
+ res.status(200).send('OK');
+ } catch (error) {
+ console.error('Processing failed:', error);
+ // Return 500 to trigger retry
+ res.status(500).send('Processing failed');
+ }
+});
+```
+
+## Publishing Messages
+
+```javascript
+const { PubSub } = require('@google-cloud/pubsub');
+const pubsub = new PubSub();
+
+async function publishOrder(order) {
+ const topic = pubsub.topic('orders');
+ const messageBuffer = Buffer.from(JSON.stringify(order));
+
+ const messageId = await topic.publishMessage({
+ data: messageBuffer,
+ attributes: {
+ type: 'order_created',
+ priority: 'high'
+ }
+ });
+
+ console.log(`Published message ${messageId}`);
+ return messageId;
+}
+```
+
+## Dead Letter Queue
+
+```bash
+# Create DLQ topic
+gcloud pubsub topics create orders-dlq
+
+# Update subscription with DLQ
+gcloud pubsub subscriptions update orders-push \
+ --dead-letter-topic orders-dlq \
+ --max-delivery-attempts 5
+```
+
+### Cloud SQL Connection Pattern
+
+Connect Cloud Run to Cloud SQL securely
+
+**When to use**: Need relational database, Migrating existing applications, Complex queries and transactions
+
+```bash
+# Deploy with Cloud SQL connection
+gcloud run deploy my-service \
+ --add-cloudsql-instances PROJECT:REGION:INSTANCE \
+ --set-env-vars INSTANCE_CONNECTION_NAME="PROJECT:REGION:INSTANCE" \
+ --set-env-vars DB_NAME="mydb" \
+ --set-env-vars DB_USER="myuser"
+```
+
+```javascript
+// Using Unix socket connection
+const { Pool } = require('pg');
+
+const pool = new Pool({
+ user: process.env.DB_USER,
+ password: process.env.DB_PASS,
+ database: process.env.DB_NAME,
+ // Cloud SQL connector uses Unix socket
+ host: `/cloudsql/${process.env.INSTANCE_CONNECTION_NAME}`,
+ max: 5, // Connection pool size
+ idleTimeoutMillis: 30000,
+ connectionTimeoutMillis: 10000,
+});
+
+app.get('/api/users', async (req, res) => {
+ const client = await pool.connect();
+ try {
+ const result = await client.query('SELECT * FROM users LIMIT 100');
+ res.json(result.rows);
+ } finally {
+ client.release();
+ }
+});
+```
+
+```python
+# Python with SQLAlchemy
+import os
+from sqlalchemy import create_engine
+
+def get_engine():
+ instance_connection_name = os.environ["INSTANCE_CONNECTION_NAME"]
+ db_user = os.environ["DB_USER"]
+ db_pass = os.environ["DB_PASS"]
+ db_name = os.environ["DB_NAME"]
+
+ engine = create_engine(
+ f"postgresql+pg8000://{db_user}:{db_pass}@/{db_name}",
+ connect_args={
+ "unix_sock": f"/cloudsql/{instance_connection_name}/.s.PGSQL.5432"
+ },
+ pool_size=5,
+ max_overflow=2,
+ pool_timeout=30,
+ pool_recycle=1800,
+ )
+ return engine
+```
+
+### Best_practices
+
+- Use connection pooling (max 5-10 per instance)
+- Set appropriate idle timeouts
+- Handle connection errors gracefully
+- Consider Cloud SQL Proxy for local development
+
+### Secret Manager Integration
+
+Securely manage secrets in Cloud Run
+
+**When to use**: API keys, database passwords, Service account keys, Any sensitive configuration
+
+```bash
+# Create secret
+echo -n "my-secret-value" | gcloud secrets create my-secret --data-file=-
+
+# Mount as environment variable
+gcloud run deploy my-service \
+ --update-secrets=API_KEY=my-secret:latest
+
+# Mount as file volume
+gcloud run deploy my-service \
+ --update-secrets=/secrets/api-key=my-secret:latest
+```
+
+```javascript
+// Access mounted as environment variable
+const apiKey = process.env.API_KEY;
+
+// Access mounted as file
+const fs = require('fs');
+const apiKey = fs.readFileSync('/secrets/api-key', 'utf8');
+
+// Access via Secret Manager API (when not mounted)
+const { SecretManagerServiceClient } = require('@google-cloud/secret-manager');
+const client = new SecretManagerServiceClient();
+
+async function getSecret(name) {
+ const [version] = await client.accessSecretVersion({
+ name: `projects/${projectId}/secrets/${name}/versions/latest`
+ });
+ return version.payload.data.toString();
+}
+```
+
+## Sharp Edges
+
+### /tmp Filesystem Counts Against Memory
+
+Severity: HIGH
+
+Situation: Writing files to /tmp directory in Cloud Run
+
+Symptoms:
+Container killed with OOM error.
+Memory usage spikes unexpectedly.
+File operations cause container restarts.
+"Container memory limit exceeded" in logs.
+
+Why this breaks:
+Cloud Run uses an in-memory filesystem for /tmp. Any files written
+to /tmp consume memory from your container's allocation.
+
+Common scenarios:
+- Downloading files temporarily
+- Creating temp processing files
+- Libraries caching to /tmp
+- Large log buffers
+
+A 512MB container that downloads a 200MB file to /tmp only has
+~300MB left for the application.
+
+Recommended fix:
+
+## Calculate memory including /tmp usage
+
+```yaml
+# cloudbuild.yaml
+steps:
+ - name: 'gcr.io/cloud-builders/gcloud'
+ args:
+ - 'run'
+ - 'deploy'
+ - 'my-service'
+ - '--memory=1Gi' # Include /tmp overhead
+ - '--image=gcr.io/$PROJECT_ID/my-service'
+```
+
+## Stream instead of buffering
+
+```python
+# BAD - buffers entire file in /tmp
+def process_large_file(bucket_name, blob_name):
+ blob = bucket.blob(blob_name)
+ blob.download_to_filename('/tmp/large_file')
+ with open('/tmp/large_file', 'rb') as f:
+ process(f.read())
+
+# GOOD - stream processing
+def process_large_file(bucket_name, blob_name):
+ blob = bucket.blob(blob_name)
+ with blob.open('rb') as f:
+ for chunk in iter(lambda: f.read(8192), b''):
+ process_chunk(chunk)
+```
+
+## Use Cloud Storage for large files
+
+```python
+from google.cloud import storage
+
+def process_with_gcs(bucket_name, input_blob, output_blob):
+ client = storage.Client()
+ bucket = client.bucket(bucket_name)
+
+ # Process directly to/from GCS
+ input_blob = bucket.blob(input_blob)
+ output_blob = bucket.blob(output_blob)
+
+ with input_blob.open('rb') as reader:
+ with output_blob.open('wb') as writer:
+ for chunk in iter(lambda: reader.read(65536), b''):
+ processed = transform(chunk)
+ writer.write(processed)
+```
+
+## Monitor memory usage
+
+```python
+import psutil
+import logging
+
+def log_memory():
+ memory = psutil.virtual_memory()
+ logging.info(f"Memory: {memory.percent}% used, "
+ f"{memory.available / 1024 / 1024:.0f}MB available")
+```
+
+### Concurrency=1 Causes Scaling Bottlenecks
+
+Severity: HIGH
+
+Situation: Setting concurrency to 1 for request isolation
+
+Symptoms:
+Auto-scaling creates many container instances.
+High latency during traffic spikes.
+Increased cold starts.
+Higher costs from more instances.
+
+Why this breaks:
+Setting concurrency to 1 means each container handles only one
+request at a time. During traffic spikes:
+
+- 100 concurrent requests = 100 container instances
+- Each instance has cold start overhead
+- More instances = higher costs
+- Scaling takes time, requests queue up
+
+This should only be used when:
+- Processing is truly single-threaded
+- Memory-heavy per-request processing
+- Using thread-unsafe libraries
+
+Recommended fix:
+
+## Set appropriate concurrency
+
+```bash
+# For I/O-bound workloads (most web apps)
+gcloud run deploy my-service \
+ --concurrency=80 \
+ --max-instances=100
+
+# For CPU-bound workloads
+gcloud run deploy my-service \
+ --concurrency=4 \
+ --cpu=2
+
+# Only use 1 when absolutely necessary
+gcloud run deploy my-service \
+ --concurrency=1 \
+ --max-instances=1000 # Be prepared for many instances
+```
+
+## Node.js - use async properly
+
+```javascript
+// With high concurrency, ensure async operations
+const express = require('express');
+const app = express();
+
+app.get('/api/data', async (req, res) => {
+ // All I/O should be async
+ const data = await fetchFromDatabase();
+ const enriched = await enrichData(data);
+ res.json(enriched);
+});
+
+// Concurrency 80+ is safe for async I/O workloads
+```
+
+## Python - use async framework
+
+```python
+from fastapi import FastAPI
+import asyncio
+import httpx
+
+app = FastAPI()
+
+@app.get("/api/data")
+async def get_data():
+ # Async I/O allows high concurrency
+ async with httpx.AsyncClient() as client:
+ response = await client.get("https://api.example.com/data")
+ return response.json()
+
+# Concurrency 80+ safe with async framework
+```
+
+## Calculate concurrency
+
+```
+concurrency = memory_limit / per_request_memory
+
+Example:
+- 512MB container
+- 20MB per request overhead
+- Safe concurrency: ~25
+```
+
+### CPU Throttled When Not Handling Requests
+
+Severity: HIGH
+
+Situation: Running background tasks or processing between requests
+
+Symptoms:
+Background tasks run extremely slowly.
+Scheduled work doesn't complete.
+Metrics collection fails.
+Connection keep-alive breaks.
+
+Why this breaks:
+By default, Cloud Run throttles CPU to near-zero when not actively
+handling a request. This is "CPU only during requests" mode.
+
+Affected operations:
+- Background threads
+- Connection pool maintenance
+- Metrics/telemetry emission
+- Scheduled tasks within container
+- Cleanup operations after response
+
+Recommended fix:
+
+## Enable CPU always allocated
+
+```bash
+# CPU allocated even outside requests
+gcloud run deploy my-service \
+ --cpu-throttling=false \
+ --min-instances=1
+
+# Note: This increases costs but enables background work
+```
+
+## Use startup CPU boost for initialization
+
+```bash
+# Boost CPU during cold start only
+gcloud run deploy my-service \
+ --cpu-boost \
+ --cpu-throttling=true # Default, throttle after request
+```
+
+## Move background work to Cloud Tasks
+
+```python
+from google.cloud import tasks_v2
+import json
+
+def create_background_task(payload):
+ client = tasks_v2.CloudTasksClient()
+ parent = client.queue_path(
+ "my-project", "us-central1", "my-queue"
+ )
+
+ task = {
+ "http_request": {
+ "http_method": tasks_v2.HttpMethod.POST,
+ "url": "https://my-service.run.app/process",
+ "body": json.dumps(payload).encode(),
+ "headers": {"Content-Type": "application/json"}
+ }
+ }
+
+ client.create_task(parent=parent, task=task)
+
+# Handle response immediately, background via Cloud Tasks
+@app.post("/api/order")
+async def create_order(order: Order):
+ order_id = await save_order(order)
+
+ # Queue background processing
+ create_background_task({"order_id": order_id})
+
+ return {"order_id": order_id, "status": "processing"}
+```
+
+## Use Pub/Sub for async processing
+
+```yaml
+# Move heavy processing to separate service
+steps:
+ # Main service - responds quickly
+ - name: 'gcr.io/cloud-builders/gcloud'
+ args: ['run', 'deploy', 'api-service',
+ '--cpu-throttling=true']
+
+ # Worker service - processes messages
+ - name: 'gcr.io/cloud-builders/gcloud'
+ args: ['run', 'deploy', 'worker-service',
+ '--cpu-throttling=false',
+ '--min-instances=1']
+```
+
+### VPC Connector 10-Minute Idle Timeout
+
+Severity: MEDIUM
+
+Situation: Cloud Run service connecting to VPC resources
+
+Symptoms:
+Connection errors after period of inactivity.
+"Connection reset" or "Connection refused" errors.
+Sporadic failures to VPC resources.
+Database connections drop unexpectedly.
+
+Why this breaks:
+Cloud Run's VPC connector has a 10-minute idle timeout on connections.
+If a connection is idle for 10 minutes, it's silently closed.
+
+Affects:
+- Database connection pools
+- Redis connections
+- Internal API connections
+- Any persistent VPC connection
+
+Recommended fix:
+
+## Configure connection pool with keep-alive
+
+```python
+# SQLAlchemy with connection recycling
+from sqlalchemy import create_engine
+
+engine = create_engine(
+ DATABASE_URL,
+ pool_size=5,
+ max_overflow=2,
+ pool_recycle=300, # Recycle connections every 5 minutes
+ pool_pre_ping=True # Validate connection before use
+)
+```
+
+## TCP keep-alive for custom connections
+
+```python
+import socket
+
+sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
+sock.setsockopt(socket.SOL_SOCKET, socket.SO_KEEPALIVE, 1)
+sock.setsockopt(socket.IPPROTO_TCP, socket.TCP_KEEPIDLE, 60)
+sock.setsockopt(socket.IPPROTO_TCP, socket.TCP_KEEPINTVL, 60)
+sock.setsockopt(socket.IPPROTO_TCP, socket.TCP_KEEPCNT, 5)
+```
+
+## Redis with connection validation
+
+```python
+import redis
+
+pool = redis.ConnectionPool(
+ host=REDIS_HOST,
+ port=6379,
+ socket_keepalive=True,
+ socket_keepalive_options={
+ socket.TCP_KEEPIDLE: 60,
+ socket.TCP_KEEPINTVL: 60,
+ socket.TCP_KEEPCNT: 5
+ },
+ health_check_interval=30
+)
+client = redis.Redis(connection_pool=pool)
+```
+
+## Use Cloud SQL Proxy sidecar
+
+```yaml
+# Use Cloud SQL connector which handles reconnection
+# requirements.txt
+cloud-sql-python-connector[pg8000]
+```
+
+```python
+from google.cloud.sql.connector import Connector
+import sqlalchemy
+
+connector = Connector()
+
+def getconn():
+ return connector.connect(
+ "project:region:instance",
+ "pg8000",
+ user="user",
+ password="password",
+ db="database"
+ )
+
+engine = sqlalchemy.create_engine(
+ "postgresql+pg8000://",
+ creator=getconn
+)
+```
+
+### Container Startup Timeout (4 minutes max)
+
+Severity: HIGH
+
+Situation: Deploying containers with slow initialization
+
+Symptoms:
+Deployment fails with "Container failed to start".
+Service never becomes healthy.
+"Revision failed to become ready" errors.
+Works locally but fails on Cloud Run.
+
+Why this breaks:
+Cloud Run expects your container to start listening on PORT within
+4 minutes (240 seconds). If it doesn't, the instance is killed.
+
+Common causes:
+- Heavy framework initialization (ML models, etc.)
+- Waiting for external dependencies at startup
+- Large dependency loading
+- Database migrations on startup
+
+Recommended fix:
+
+## Enable startup CPU boost
+
+```bash
+# --cpu-boost gives extra CPU during container startup
+gcloud run deploy my-service \
+ --cpu-boost
+```
+
+## Lazy initialization
+
+```python
+from functools import lru_cache
+from fastapi import FastAPI
+
+app = FastAPI()
+
+# Don't load at import time
+model = None
+
+@lru_cache()
+def get_model():
+ global model
+ if model is None:
+ # Load on first request, not at startup
+ model = load_heavy_model()
+ return model
+
+@app.get("/predict")
+async def predict(data: dict):
+ model = get_model() # Loads on first call only
+ return model.predict(data)
+
+# Startup is fast - model loads on first request
+```
+
+## Start listening immediately
+
+```python
+import asyncio
+from fastapi import FastAPI
+import uvicorn
+
+app = FastAPI()
+
+# Global state for async initialization
+initialized = asyncio.Event()
+
+@app.on_event("startup")
+async def startup():
+ # Start background initialization
+ asyncio.create_task(async_init())
+
+async def async_init():
+ # Heavy initialization happens after server starts
+ await load_models()
+ await warm_up_connections()
+ initialized.set()
+
+@app.get("/ready")
+async def ready():
+ if not initialized.is_set():
+ raise HTTPException(503, "Still initializing")
+ return {"status": "ready"}
+
+@app.get("/health")
+async def health():
+ # Always respond - health check passes
+ return {"status": "healthy"}
+```
+
+## Use multi-stage builds
+
+```dockerfile
+# Build stage - slow
+FROM python:3.11 as builder
+WORKDIR /app
+COPY requirements.txt .
+RUN pip wheel --no-cache-dir --wheel-dir /wheels -r requirements.txt
+
+# Runtime stage - fast startup
+FROM python:3.11-slim
+WORKDIR /app
+COPY --from=builder /wheels /wheels
+RUN pip install --no-cache /wheels/* && rm -rf /wheels
+COPY . .
+CMD ["uvicorn", "main:app", "--host", "0.0.0.0", "--port", "8080"]
+```
+
+## Run migrations separately
+
+```yaml
+# Don't migrate on startup - use Cloud Build
+steps:
+ # Run migrations first
+ - name: 'gcr.io/cloud-builders/gcloud'
+ entrypoint: 'bash'
+ args:
+ - '-c'
+ - |
+ gcloud run jobs execute migrate-job --wait
+
+ # Then deploy
+ - name: 'gcr.io/cloud-builders/gcloud'
+ args: ['run', 'deploy', 'my-service', ...]
+```
+
+### Second Generation Execution Environment Differences
+
+Severity: MEDIUM
+
+Situation: Migrating to or using Cloud Run second-gen execution environment
+
+Symptoms:
+Network behavior changes.
+Different syscall support.
+File system behavior differences.
+Container behaves differently than in first-gen.
+
+Why this breaks:
+Cloud Run's second-generation execution environment runs on a full
+Linux kernel instead of the first-gen gVisor sandbox, so it differs:
+
+- More Linux syscalls supported
+- Full /proc and /sys access
+- Different network stack
+- No automatic HTTPS redirect
+- Different tmp filesystem behavior
+
+Recommended fix:
+
+## Explicitly set execution environment
+
+```bash
+# First generation (legacy)
+gcloud run deploy my-service \
+ --execution-environment=gen1
+
+# Second generation (recommended for most)
+gcloud run deploy my-service \
+ --execution-environment=gen2
+```
+
+## Handle network differences
+
+```python
+# Second-gen doesn't auto-redirect HTTP to HTTPS
+from fastapi import FastAPI, Request
+from fastapi.responses import RedirectResponse
+
+app = FastAPI()
+
+@app.middleware("http")
+async def redirect_https(request: Request, call_next):
+ # Check X-Forwarded-Proto header
+ if request.headers.get("X-Forwarded-Proto") == "http":
+ url = request.url.replace(scheme="https")
+ return RedirectResponse(url, status_code=301)
+ return await call_next(request)
+```
+
+## GPU access (second-gen only)
+
+```bash
+# GPUs only available in second-gen
+gcloud run deploy ml-service \
+ --execution-environment=gen2 \
+ --gpu=1 \
+ --gpu-type=nvidia-l4
+```
+
+## Check execution environment
+
+```python
+import os
+
+def get_execution_environment():
+ # First-gen runs in the gVisor sandbox; second-gen is a full Linux kernel
+ try:
+ with open('/proc/version', 'r') as f:
+ version = f.read()
+ if 'gVisor' in version:
+ return 'gen1'
+ return 'gen2'
+ except OSError:
+ return 'gen1'
+```
+
+### Request Timeout Configuration Mismatch
+
+Severity: MEDIUM
+
+Situation: Long-running requests or background processing
+
+Symptoms:
+Requests terminated before completion.
+504 Gateway Timeout errors.
+Processing stops unexpectedly.
+Inconsistent timeout behavior.
+
+Why this breaks:
+Cloud Run has multiple timeout configurations that must align:
+- Request timeout (default 300s, max 3600s for HTTP, 60m for gRPC)
+- Client timeout
+- Downstream service timeouts
+- Load balancer timeout (for external access)
+
+Recommended fix:
+
+## Set consistent timeouts
+
+```bash
+# Increase request timeout (max 3600s for HTTP)
+gcloud run deploy my-service \
+ --timeout=900 # 15 minutes
+```
+
+## Handle long-running with webhooks
+
+```python
+from fastapi import FastAPI, BackgroundTasks
+import httpx
+
+app = FastAPI()
+
+@app.post("/process")
+async def process(data: dict, background_tasks: BackgroundTasks):
+ task_id = create_task_id()
+
+ # Start background processing
+ background_tasks.add_task(
+ long_running_process,
+ task_id,
+ data,
+ data.get("callback_url")
+ )
+
+ # Return immediately
+ return {"task_id": task_id, "status": "processing"}
+
+async def long_running_process(task_id, data, callback_url):
+ result = await heavy_computation(data)
+
+ # Callback when done
+ if callback_url:
+ async with httpx.AsyncClient() as client:
+ await client.post(callback_url, json={
+ "task_id": task_id,
+ "result": result
+ })
+```
+
+## Use Cloud Tasks for reliable long-running
+
+```python
+from google.cloud import tasks_v2
+
+def create_long_running_task(data):
+ client = tasks_v2.CloudTasksClient()
+ parent = client.queue_path(PROJECT, REGION, "long-tasks")
+
+ task = {
+ "http_request": {
+ "http_method": tasks_v2.HttpMethod.POST,
+ "url": "https://worker.run.app/process",
+ "body": json.dumps(data).encode(),
+ "headers": {"Content-Type": "application/json"}
+ },
+ "dispatch_deadline": {"seconds": 1800} # 30 min
+ }
+
+ return client.create_task(parent=parent, task=task)
+```
+
+## Streaming for long responses
+
+```python
+from fastapi import FastAPI
+from fastapi.responses import StreamingResponse
+
+@app.get("/large-report")
+async def large_report():
+ async def generate():
+ for chunk in process_large_data():
+ yield chunk
+
+ return StreamingResponse(generate(), media_type="text/plain")
+```
+
+## Validation Checks
+
+### Hardcoded GCP Credentials
+
+Severity: ERROR
+
+GCP credentials must never be hardcoded in source code
+
+Message: Hardcoded GCP service account credentials. Use Secret Manager or Workload Identity.
+
+### GCP API Key in Source Code
+
+Severity: ERROR
+
+API keys should use Secret Manager
+
+Message: Hardcoded GCP API key. Use Secret Manager.
+
+### Credentials JSON File in Repository
+
+Severity: ERROR
+
+Service account JSON files should not be in source control
+
+Message: Credentials file detected. Add to .gitignore and use Secret Manager.
+
+### Running as Root User
+
+Severity: WARNING
+
+Containers should not run as root for security
+
+Message: Dockerfile runs as root. Add USER directive for security.
+
+### Missing Health Check in Dockerfile
+
+Severity: INFO
+
+Cloud Run uses HTTP health checks, Dockerfile HEALTHCHECK is optional
+
+Message: No HEALTHCHECK in Dockerfile. Cloud Run uses its own health checks.
+
+### Hardcoded Port in Application
+
+Severity: WARNING
+
+Port should come from PORT environment variable
+
+Message: Hardcoded port. Use PORT environment variable for Cloud Run.
+
+### Large File Writes to /tmp
+
+Severity: WARNING
+
+/tmp uses container memory, large writes can cause OOM
+
+Message: /tmp writes consume memory. Consider Cloud Storage for large files.
+
+### Synchronous File Operations
+
+Severity: WARNING
+
+Sync file ops block the event loop in async apps
+
+Message: Synchronous file operations. Use async versions for better concurrency.
+
+### Global Mutable State
+
+Severity: WARNING
+
+Global state issues with concurrent requests
+
+Message: Global mutable state may cause issues with concurrent requests.
+
+### Thread-Unsafe Singleton Pattern
+
+Severity: WARNING
+
+Singletons need thread safety for concurrency > 1
+
+Message: Singleton pattern - ensure thread safety if using concurrency > 1.
+
+## Collaboration
+
+### Delegation Triggers
+
+- user needs AWS serverless -> aws-serverless (Lambda, API Gateway, SAM)
+- user needs Azure containers -> azure-functions (Azure Container Apps, Functions)
+- user needs database design -> postgres-wizard (Cloud SQL design, AlloyDB)
+- user needs authentication -> auth-specialist (Firebase Auth, Identity Platform)
+- user needs AI integration -> llm-architect (Vertex AI, Cloud Run + LLM)
+- user needs workflow orchestration -> workflow-automation (Cloud Workflows, Eventarc)
## When to Use
-This skill is applicable to execute the workflow or actions described in the overview.
+
+Use this skill when the request clearly matches the capabilities and patterns described above.
diff --git a/plugins/antigravity-awesome-skills/skills/graphql/SKILL.md b/plugins/antigravity-awesome-skills/skills/graphql/SKILL.md
index 52c15622..08aa2b36 100644
--- a/plugins/antigravity-awesome-skills/skills/graphql/SKILL.md
+++ b/plugins/antigravity-awesome-skills/skills/graphql/SKILL.md
@@ -1,22 +1,39 @@
---
name: graphql
-description: "You're a developer who has built GraphQL APIs at scale. You've seen the N+1 query problem bring down production servers. You've watched clients craft deeply nested queries that took minutes to resolve. You know that GraphQL's power is also its danger."
+description: GraphQL gives clients exactly the data they need - no more, no
+ less. One endpoint, typed schema, introspection. But the flexibility that
+ makes it powerful also makes it dangerous. Without proper controls, clients
+ can craft queries that bring down your server.
risk: safe
-source: "vibeship-spawner-skills (Apache 2.0)"
-date_added: "2026-02-27"
+source: vibeship-spawner-skills (Apache 2.0)
+date_added: 2026-02-27
---
# GraphQL
-You're a developer who has built GraphQL APIs at scale. You've seen the
-N+1 query problem bring down production servers. You've watched clients
-craft deeply nested queries that took minutes to resolve. You know that
-GraphQL's power is also its danger.
+GraphQL gives clients exactly the data they need - no more, no less. One
+endpoint, typed schema, introspection. But the flexibility that makes it
+powerful also makes it dangerous. Without proper controls, clients can
+craft queries that bring down your server.
-Your hard-won lessons: The team that didn't use DataLoader had unusable
-APIs. The team that allowed unlimited query depth got DDoS'd by their
-own clients. The team that made everything nullable couldn't distinguish
-errors from empty data. You've l
+This skill covers schema design, resolvers, DataLoader for N+1 prevention,
+federation for microservices, and client integration with Apollo/urql.
+Key insight: GraphQL is a contract. The schema is the API documentation.
+Design it carefully.
+
+2025 lesson: GraphQL isn't always the answer. For simple CRUD, REST is
+simpler. For high-performance public APIs, REST with caching wins. Use
+GraphQL when you have complex data relationships and diverse client needs.
+
+## Principles
+
+- Schema-first design - the schema is the contract
+- Prevent N+1 queries with DataLoader
+- Limit query depth and complexity
+- Use fragments for reusable selections
+- Mutations should be specific, not generic update operations
+- Errors are data - use union types for expected failures
+- Nullability is meaningful - design it intentionally
## Capabilities
@@ -30,44 +47,1026 @@ errors from empty data. You've l
- apollo-client
- urql
+## Scope
+
+- database-queries -> postgres-wizard
+- authentication -> authentication-oauth
+- rest-api-design -> backend
+- websocket-infrastructure -> backend
+
+## Tooling
+
+### Server
+
+- @apollo/server - When: Apollo Server v4 Note: Most popular GraphQL server
+- graphql-yoga - When: Lightweight alternative Note: Good for serverless
+- mercurius - When: Fastify integration Note: Fast, uses JIT
+
+### Client
+
+- @apollo/client - When: Full-featured client Note: Caching, state management
+- urql - When: Lightweight alternative Note: Smaller, simpler
+- graphql-request - When: Simple requests Note: Minimal, no caching
+
+### Tools
+
+- graphql-codegen - When: Type generation Note: Essential for TypeScript
+- dataloader - When: N+1 prevention Note: Batches and caches
+
## Patterns
### Schema Design
Type-safe schema with proper nullability
+**When to use**: Designing any GraphQL API
+
+# SCHEMA DESIGN:
+
+"""
+The schema is your API contract. Design nullability
+intentionally - non-null fields must always resolve.
+"""
+
+type Query {
+ # Non-null - will always return user or throw
+ user(id: ID!): User!
+
+ # Nullable - returns null if not found
+ userByEmail(email: String!): User
+
+ # Non-null list with non-null items
+ users(limit: Int = 10, offset: Int = 0): [User!]!
+
+ # Search with pagination
+ searchUsers(
+ query: String!
+ first: Int
+ after: String
+ ): UserConnection!
+}
+
+type Mutation {
+ # Input types for complex mutations
+ createUser(input: CreateUserInput!): CreateUserPayload!
+ updateUser(id: ID!, input: UpdateUserInput!): UpdateUserPayload!
+ deleteUser(id: ID!): DeleteUserPayload!
+}
+
+type Subscription {
+ userCreated: User!
+ messageReceived(roomId: ID!): Message!
+}
+
+# Input types
+input CreateUserInput {
+ email: String!
+ name: String!
+ role: Role = USER
+}
+
+input UpdateUserInput {
+ email: String
+ name: String
+ role: Role
+}
+
+# Payload types (for errors as data)
+type CreateUserPayload {
+ user: User
+ errors: [Error!]!
+}
+
+union UpdateUserPayload = UpdateUserSuccess | NotFoundError | ValidationError
+
+type UpdateUserSuccess {
+ user: User!
+}
+
+# Enums
+enum Role {
+ USER
+ ADMIN
+ MODERATOR
+}
+
+# Types with relationships
+type User {
+ id: ID!
+ email: String!
+ name: String!
+ role: Role!
+ posts(limit: Int = 10): [Post!]!
+ createdAt: DateTime!
+}
+
+type Post {
+ id: ID!
+ title: String!
+ content: String!
+ author: User!
+ comments: [Comment!]!
+ published: Boolean!
+}
+
+# Pagination (Relay-style)
+type UserConnection {
+ edges: [UserEdge!]!
+ pageInfo: PageInfo!
+ totalCount: Int!
+}
+
+type UserEdge {
+ node: User!
+ cursor: String!
+}
+
+type PageInfo {
+ hasNextPage: Boolean!
+ hasPreviousPage: Boolean!
+ startCursor: String
+ endCursor: String
+}
+
### DataLoader for N+1 Prevention
Batch and cache database queries
+**When to use**: Resolving relationships
+
+# DATALOADER:
+
+"""
+Without DataLoader, fetching 10 posts with authors
+makes 11 queries (1 for posts + 10 for each author).
+DataLoader batches into 2 queries.
+"""
+
+import DataLoader from 'dataloader';
+
+// Create loaders per request
+function createLoaders(db) {
+ return {
+ userLoader: new DataLoader(async (ids) => {
+ // Single query for all users
+ const users = await db.user.findMany({
+ where: { id: { in: ids } }
+ });
+
+ // Return in same order as ids
+ const userMap = new Map(users.map(u => [u.id, u]));
+ return ids.map(id => userMap.get(id) || null);
+ }),
+
+ postsByAuthorLoader: new DataLoader(async (authorIds) => {
+ const posts = await db.post.findMany({
+ where: { authorId: { in: authorIds } }
+ });
+
+ // Group by author
+ const postsByAuthor = new Map();
+ posts.forEach(post => {
+ const existing = postsByAuthor.get(post.authorId) || [];
+ postsByAuthor.set(post.authorId, [...existing, post]);
+ });
+
+ return authorIds.map(id => postsByAuthor.get(id) || []);
+ })
+ };
+}
+
+// Attach to context
+const server = new ApolloServer({
+ typeDefs,
+ resolvers,
+});
+
+app.use('/graphql', expressMiddleware(server, {
+ context: async ({ req }) => ({
+ db,
+ loaders: createLoaders(db),
+ user: req.user
+ })
+}));
+
+// Use in resolvers
+const resolvers = {
+ Post: {
+ author: (post, _, { loaders }) => {
+ return loaders.userLoader.load(post.authorId);
+ }
+ },
+ User: {
+ posts: (user, _, { loaders }) => {
+ return loaders.postsByAuthorLoader.load(user.id);
+ }
+ }
+};
+
### Apollo Client Caching
Normalized cache with type policies
-## Anti-Patterns
+**When to use**: Client-side data management
-### ❌ No DataLoader
+# APOLLO CLIENT CACHING:
-### ❌ No Query Depth Limiting
+"""
+Apollo Client normalizes responses into a flat cache.
+Configure type policies for custom cache behavior.
+"""
-### ❌ Authorization in Schema
+import { ApolloClient, InMemoryCache } from '@apollo/client';
-## ⚠️ Sharp Edges
+const cache = new InMemoryCache({
+ typePolicies: {
+ Query: {
+ fields: {
+ // Paginated field
+ users: {
+ keyArgs: ['query'], // Cache separately per query
+ merge(existing = { edges: [] }, incoming, { args }) {
+ // Append for infinite scroll
+ if (args?.after) {
+ return {
+ ...incoming,
+ edges: [...existing.edges, ...incoming.edges]
+ };
+ }
+ return incoming;
+ }
+ }
+ }
+ },
+ User: {
+ keyFields: ['id'], // How to identify users
+ fields: {
+ fullName: {
+ read(_, { readField }) {
+ // Computed field
+ return `${readField('firstName')} ${readField('lastName')}`;
+ }
+ }
+ }
+ }
+ }
+});
-| Issue | Severity | Solution |
-|-------|----------|----------|
-| Each resolver makes separate database queries | critical | # USE DATALOADER |
-| Deeply nested queries can DoS your server | critical | # LIMIT QUERY DEPTH AND COMPLEXITY |
-| Introspection enabled in production exposes your schema | high | # DISABLE INTROSPECTION IN PRODUCTION |
-| Authorization only in schema directives, not resolvers | high | # AUTHORIZE IN RESOLVERS |
-| Authorization on queries but not on fields | high | # FIELD-LEVEL AUTHORIZATION |
-| Non-null field failure nullifies entire parent | medium | # DESIGN NULLABILITY INTENTIONALLY |
-| Expensive queries treated same as cheap ones | medium | # QUERY COST ANALYSIS |
-| Subscriptions not properly cleaned up | medium | # PROPER SUBSCRIPTION CLEANUP |
+const client = new ApolloClient({
+ uri: '/graphql',
+ cache,
+ defaultOptions: {
+ watchQuery: {
+ fetchPolicy: 'cache-and-network'
+ }
+ }
+});
+
+// Queries with hooks
+import { useQuery, useMutation } from '@apollo/client';
+
+const GET_USER = gql`
+ query GetUser($id: ID!) {
+ user(id: $id) {
+ id
+ name
+ email
+ }
+ }
+`;
+
+function UserProfile({ userId }) {
+ const { data, loading, error } = useQuery(GET_USER, {
+ variables: { id: userId }
+ });
+
+ if (loading) return ;
+ if (error) return ;
+
+ return