diff --git a/docs/integrations/jetski-cortex.md b/docs/integrations/jetski-cortex.md new file mode 100644 index 00000000..270d542c --- /dev/null +++ b/docs/integrations/jetski-cortex.md @@ -0,0 +1,238 @@ +--- +title: Jetski/Cortex + Gemini Integration Guide +description: "Come usare antigravity-awesome-skills con Jetski/Cortex evitando l’overflow di contesto con 1.200+ skill." +--- + +# Jetski/Cortex + Gemini: integrazione sicura con 1.200+ skill + +Questa guida mostra come integrare il repository `antigravity-awesome-skills` con un agente basato su **Jetski/Cortex + Gemini** (o framework simili) **senza superare il context window** del modello. + +L’errore tipico visto in Jetski/Cortex è: + +> `TrajectoryChatConverter: could not convert a single message before hitting truncation` + +Il problema non è nelle skill, ma **nel modo in cui vengono caricate**. + +--- + +## 1. Antipattern da evitare + +Non bisogna mai: + +- leggere **tutte** le directory `skills/*/SKILL.md` all’avvio; +- concatenare il contenuto di tutte le `SKILL.md` in un singolo system prompt; +- reiniettare l’intera libreria per **ogni** richiesta. + +Con oltre 1.200 skill, questo approccio riempie il context window prima ancora di aggiungere i messaggi dell’utente, causando l’errore di truncation. + +--- + +## 2. Pattern raccomandato + +Principi chiave: + +- **Manifest leggero**: usare `data/skills_index.json` per sapere *quali* skill esistono, senza caricare i testi completi. +- **Lazy loading**: leggere `SKILL.md` **solo** per le skill effettivamente invocate in una conversazione (es. quando compare `@skill-id`). +- **Limiti espliciti**: imporre un massimo di skill/tokens caricati per turno, con fallback chiari. + +Il flusso consigliato è: + +1. **Bootstrap**: all’avvio dell’agente leggere `data/skills_index.json` e costruire una mappa `id -> meta`. +2. **Parsing dei messaggi**: prima di chiamare il modello, estrarre tutti i riferimenti `@skill-id` dai messaggi utente/sistema. +3. 
**Risoluzione**: mappare gli id trovati in oggetti `SkillMeta` usando la mappa di bootstrap. +4. **Lazy load**: leggere i file `SKILL.md` solo per questi id (fino a un massimo configurabile). +5. **Prompt building**: costruire i system messages del modello includendo solo le definizioni delle skill selezionate. + +--- + +## 3. Struttura di `skills_index.json` + +Il file `data/skills_index.json` è un array di oggetti, ad esempio: + +```json +{ + "id": "brainstorming", + "path": "skills/brainstorming", + "category": "planning", + "name": "brainstorming", + "description": "Use before any creative or constructive work.", + "risk": "safe", + "source": "official", + "date_added": "2026-02-27" +} +``` + +Campi chiave: + +- **`id`**: identificatore usato nelle menzioni `@id` (es. `@brainstorming`). +- **`path`**: directory che contiene la `SKILL.md` (es. `skills/brainstorming/`). + +Per ottenere il percorso alla definizione della skill: + +- `fullPath = path.join(SKILLS_ROOT, meta.path, "SKILL.md")`. + +> Nota: `SKILLS_ROOT` è la directory radice dove avete installato il repository (es. `~/.agent/skills`). + +--- + +## 4. Pseudo‑codice di integrazione (TypeScript) + +> Esempio completo in: [`examples/jetski-gemini-loader/`](../../examples/jetski-gemini-loader/). + +### 4.1. Tipi di base + +```ts +type SkillMeta = { + id: string; + path: string; + name: string; + description?: string; + category?: string; + risk?: string; +}; +``` + +### 4.2. Bootstrap: caricare il manifest + +```ts +function loadSkillIndex(indexPath: string): Map { + const raw = fs.readFileSync(indexPath, "utf8"); + const arr = JSON.parse(raw) as SkillMeta[]; + const map = new Map(); + for (const meta of arr) { + map.set(meta.id, meta); + } + return map; +} +``` + +### 4.3. 
Parsing dei messaggi per trovare `@skill-id` + +```ts +const SKILL_ID_REGEX = /@([a-zA-Z0-9-_./]+)/g; + +function resolveSkillsFromMessages( + messages: { role: string; content: string }[], + index: Map, + maxSkills: number +): SkillMeta[] { + const found = new Set(); + + for (const msg of messages) { + let match: RegExpExecArray | null; + while ((match = SKILL_ID_REGEX.exec(msg.content)) !== null) { + const id = match[1]; + if (index.has(id)) { + found.add(id); + } + } + } + + const metas: SkillMeta[] = []; + for (const id of found) { + const meta = index.get(id); + if (meta) metas.push(meta); + if (metas.length >= maxSkills) break; + } + + return metas; +} +``` + +### 4.4. Lazy loading dei file `SKILL.md` + +```ts +async function loadSkillBodies( + skillsRoot: string, + metas: SkillMeta[] +): Promise { + const bodies: string[] = []; + + for (const meta of metas) { + const fullPath = path.join(skillsRoot, meta.path, "SKILL.md"); + const text = await fs.promises.readFile(fullPath, "utf8"); + bodies.push(text); + } + + return bodies; +} +``` + +### 4.5. Costruzione del prompt Jetski/Cortex + +Pseudocodice per la fase di pre‑processing, prima del `TrajectoryChatConverter`: + +```ts +async function buildModelMessages( + baseSystemMessages: { role: "system"; content: string }[], + trajectory: { role: "user" | "assistant" | "system"; content: string }[], + skillIndex: Map, + skillsRoot: string, + maxSkillsPerTurn: number +): Promise<{ role: string; content: string }[]> { + const selectedMetas = resolveSkillsFromMessages( + trajectory, + skillIndex, + maxSkillsPerTurn + ); + + const skillBodies = await loadSkillBodies(skillsRoot, selectedMetas); + + const skillMessages = skillBodies.map((body) => ({ + role: "system" as const, + content: body, + })); + + return [...baseSystemMessages, ...skillMessages, ...trajectory]; +} +``` + +> Suggerimento: aggiungete una stima dei token per troncare o riassumere i `SKILL.md` se il context window si avvicina al limite. + +--- + +## 5. 
Gestione degli overflow di contesto + +Per evitare errori difficili da capire per l’utente, impostate: + +- una **soglia di sicurezza** (es. 70–80% del context window); +- un **limite massimo di skill per turno** (es. 5–10). + +Strategie quando si supera la soglia: + +- ridurre il numero di skill incluse (es. in base a recenza o priorità); oppure +- restituire un errore chiaro all’utente, ad esempio: + +> "Sono state richieste troppe skill in un singolo turno. Riduci il numero di `@skill-id` nel messaggio o dividili in più passaggi." + +--- + +## 6. Scenari di test raccomandati + +- **Scenario 1 – Messaggio semplice ("hi")** + - Nessun `@skill-id` → nessuna `SKILL.md` caricata → il prompt rimane piccolo → nessun errore. +- **Scenario 2 – Poche skill** + - Messaggio con 1–2 `@skill-id` → solo le relative `SKILL.md` vengono caricate → nessun overflow. +- **Scenario 3 – Molte skill** + - Messaggio con molte `@skill-id` → si attiva il limite `maxSkillsPerTurn` o il controllo di token → nessun overflow silenzioso. + +--- + +## 7. Sottoinsiemi di skill e bundle + +Per ulteriore controllo: + +- spostate le skill non necessarie in `skills/.disabled/` per escluderle in certi ambienti; +- usate i **bundle** descritti in [`docs/users/bundles.md`](../users/bundles.md) per caricare solo gruppi tematici. + +--- + +## 8. Riepilogo + +- Non concatenate mai tutte le `SKILL.md` in un singolo prompt. +- Usate `data/skills_index.json` come manifest leggero. +- Caricate le skill **on‑demand** in base a `@skill-id`. +- Impostate limiti chiari (max skill per turno, soglia di token). + +Seguendo questo pattern, Jetski/Cortex + Gemini può usare l’intera libreria di `antigravity-awesome-skills` in modo sicuro, scalabile e compatibile con il context window dei modelli moderni. 
+ diff --git a/docs/users/faq.md b/docs/users/faq.md index 7b8bc8f8..8ebeb4e9 100644 --- a/docs/users/faq.md +++ b/docs/users/faq.md @@ -47,6 +47,20 @@ Start from: - ✅ Free for commercial use - ✅ You can modify them +### How do these skills avoid overflowing the model context? + +Some host tools (for example custom agents built on Jetski/Cortex + Gemini) might be tempted to **concatenate every `SKILL.md` file into a single system prompt**. +This is **not** how this repository is designed to be used, and it will almost certainly overflow the model’s context window with 1,200+ skills. + +Instead, hosts should: + +- use `data/skills_index.json` as a **lightweight manifest** for discovery; and +- load individual `SKILL.md` files **only when a skill is invoked** (e.g. via `@skill-id` in the conversation). + +For a concrete example (including pseudo‑code) see: + +- [`docs/integrations/jetski-cortex.md`](../integrations/jetski-cortex.md) + ### Do skills work offline? The skill files themselves are stored locally on your computer, but your AI assistant needs an internet connection to function. diff --git a/docs/users/usage.md b/docs/users/usage.md index 4089e72f..d5cb6a5b 100644 --- a/docs/users/usage.md +++ b/docs/users/usage.md @@ -314,6 +314,19 @@ Usually no, but if your AI doesn't recognize a skill: 2. Check the installation path matches your tool 3. Try the explicit path: `npx antigravity-awesome-skills --claude` (or `--cursor`, `--gemini`, etc.) +### "Can I load all skills into the model at once?" + +No. Even though you have 1,200+ skills installed locally, you should **not** concatenate every `SKILL.md` into a single system prompt or context block. + +The intended pattern is: + +- use `data/skills_index.json` (the manifest) to discover which skills exist; and +- only load the `SKILL.md` files for the specific `@skill-id` values you actually use in a conversation. + +If you are building your own host/agent (e.g. 
Jetski/Cortex + Gemini), see: + +- [`docs/integrations/jetski-cortex.md`](../integrations/jetski-cortex.md) + ### "Can I create my own skills?" Yes! Use the `@skill-creator` skill: diff --git a/examples/jetski-gemini-loader/README.md b/examples/jetski-gemini-loader/README.md new file mode 100644 index 00000000..9ffdd137 --- /dev/null +++ b/examples/jetski-gemini-loader/README.md @@ -0,0 +1,91 @@ +# Jetski + Gemini Lazy Skill Loader (Example) + +This example shows one way to integrate **antigravity-awesome-skills** with a Jetski/Cortex‑style agent using **lazy loading** based on `@skill-id` mentions, instead of concatenating every `SKILL.md` into the prompt. + +> This is **not** a production‑ready library – it is a minimal reference you can adapt to your own host/agent implementation. + +--- + +## What this example demonstrates + +- How to: + - load the global manifest `data/skills_index.json` once at startup; + - scan conversation messages for `@skill-id` patterns; + - resolve those ids to entries in the manifest; + - read only the corresponding `SKILL.md` files from disk (lazy loading); + - build a prompt array with: + - your base system messages; + - one system message per selected skill; + - the rest of the trajectory. +- How to enforce a **maximum number of skills per turn** via `maxSkillsPerTurn`. + +This pattern avoids context overflow when you have 1,200+ skills installed. + +--- + +## Files + +- `loader.ts` + - Implements: + - `loadSkillIndex(indexPath)`; + - `resolveSkillsFromMessages(messages, index, maxSkills)`; + - `loadSkillBodies(skillsRoot, metas)`; + - `buildModelMessages({...})`. 
+- See also the integration guide: + - [`docs/integrations/jetski-cortex.md`](../../docs/integrations/jetski-cortex.md) + +--- + +## Basic usage (pseudo‑code) + +```ts +import path from "path"; +import { + loadSkillIndex, + buildModelMessages, + Message, +} from "./loader"; + +const REPO_ROOT = "/path/to/antigravity-awesome-skills"; +const SKILLS_ROOT = REPO_ROOT; +const INDEX_PATH = path.join(REPO_ROOT, "data", "skills_index.json"); + +// 1. Bootstrap once at agent startup +const skillIndex = loadSkillIndex(INDEX_PATH); + +// 2. Before calling the model, build messages with lazy‑loaded skills +async function runTurn(trajectory: Message[]) { + const baseSystemMessages: Message[] = [ + { + role: "system", + content: "You are a helpful coding agent.", + }, + ]; + + const modelMessages = await buildModelMessages({ + baseSystemMessages, + trajectory, + skillIndex, + skillsRoot: SKILLS_ROOT, + maxSkillsPerTurn: 8, + }); + + // 3. Pass `modelMessages` to your Jetski/Cortex + Gemini client + // e.g. trajectoryChatConverter.convert(modelMessages) +} +``` + +Adapt the paths and model call to your environment. + +--- + +## Important notes + +- **Do not** iterate through `skills/*/SKILL.md` and load everything at once. +- This example: + - assumes skills live under the same repo root as `data/skills_index.json`; + - uses Node.js `fs`/`path` APIs and TypeScript types for clarity. +- In a real host: + - wire `buildModelMessages` into the point where you currently assemble the prompt before `TrajectoryChatConverter`; + - add token‑counting / truncation logic if you want a stricter safety budget. 
+
diff --git a/examples/jetski-gemini-loader/loader.ts b/examples/jetski-gemini-loader/loader.ts
new file mode 100644
index 00000000..1ed8970c
--- /dev/null
+++ b/examples/jetski-gemini-loader/loader.ts
@@ -0,0 +1,134 @@
+import fs from "fs";
+import path from "path";
+
+/** Metadata entry for one skill, as stored in data/skills_index.json. */
+export type SkillMeta = {
+  id: string;
+  path: string;
+  name: string;
+  description?: string;
+  category?: string;
+  risk?: string;
+};
+
+/** One chat message in the trajectory sent to the model. */
+export type Message = {
+  role: "system" | "user" | "assistant";
+  content: string;
+};
+
+// Matches `@skill-id` mentions; the capture group is the bare id.
+const SKILL_ID_REGEX = /@([a-zA-Z0-9-_./]+)/g;
+
+/**
+ * Load the lightweight manifest once at startup and index it by skill id.
+ *
+ * @param indexPath Absolute path to data/skills_index.json.
+ * @returns Map from skill id to its metadata entry.
+ * @throws If the file cannot be read or is not valid JSON.
+ */
+export function loadSkillIndex(indexPath: string): Map<string, SkillMeta> {
+  const raw = fs.readFileSync(indexPath, "utf8");
+  const entries = JSON.parse(raw) as SkillMeta[];
+  const byId = new Map<string, SkillMeta>();
+
+  for (const meta of entries) {
+    byId.set(meta.id, meta);
+  }
+
+  return byId;
+}
+
+/**
+ * Scan messages for `@skill-id` mentions and resolve them against the index.
+ *
+ * At most `maxSkills` entries are returned; a non-positive `maxSkills`
+ * yields an empty array.
+ */
+export function resolveSkillsFromMessages(
+  messages: Message[],
+  index: Map<string, SkillMeta>,
+  maxSkills: number
+): SkillMeta[] {
+  const found = new Set<string>();
+
+  for (const msg of messages) {
+    // matchAll avoids depending on the shared regex's mutable lastIndex state.
+    for (const match of msg.content.matchAll(SKILL_ID_REGEX)) {
+      const id = match[1];
+      if (index.has(id)) {
+        found.add(id);
+      }
+    }
+  }
+
+  const metas: SkillMeta[] = [];
+  for (const id of found) {
+    // Enforce the cap BEFORE pushing so maxSkills = 0 loads nothing.
+    if (metas.length >= maxSkills) {
+      break;
+    }
+    const meta = index.get(id);
+    if (meta) {
+      metas.push(meta);
+    }
+  }
+
+  return metas;
+}
+
+/**
+ * Lazily read the SKILL.md bodies for the selected skills.
+ *
+ * Files are read in parallel; the result order matches `metas`.
+ */
+export async function loadSkillBodies(
+  skillsRoot: string,
+  metas: SkillMeta[]
+): Promise<string[]> {
+  return Promise.all(
+    metas.map((meta) =>
+      fs.promises.readFile(path.join(skillsRoot, meta.path, "SKILL.md"), "utf8")
+    )
+  );
+}
+
+/**
+ * Build the final message array for the model: base system messages,
+ * one system message per lazily loaded skill, then the trajectory.
+ */
+export async function buildModelMessages(options: {
+  baseSystemMessages: Message[];
+  trajectory: Message[];
+  skillIndex: Map<string, SkillMeta>;
+  skillsRoot: string;
+  maxSkillsPerTurn?: number;
+}): Promise<Message[]> {
+  const {
+    baseSystemMessages,
+    trajectory,
+    skillIndex,
+    skillsRoot,
+    maxSkillsPerTurn = 8,
+  } = options;
+
+  const selectedMetas = resolveSkillsFromMessages(
+    trajectory,
+    skillIndex,
+    maxSkillsPerTurn
+  );
+
+  // Fast path: no skills mentioned, keep the prompt as small as possible.
+  if (selectedMetas.length === 0) {
+    return [...baseSystemMessages, ...trajectory];
+  }
+
+  const skillBodies = await loadSkillBodies(skillsRoot, selectedMetas);
+
+  const skillMessages: Message[] = skillBodies.map((body) => ({
+    role: "system",
+    content: body,
+  }));
+
+  return [...baseSystemMessages, ...skillMessages, ...trajectory];
+}