refactor: reorganize repo docs and tooling layout

Consolidate the repository into clearer apps, tools, and layered docs areas so contributors can navigate and maintain it more reliably. Align validation, metadata sync, and CI around the same canonical workflow to reduce drift across local checks and GitHub Actions.
This commit is contained in:
sck_0
2026-03-06 15:01:38 +01:00
parent 5d17564608
commit 45844de534
3384 changed files with 13894 additions and 586586 deletions

View File

@@ -1,259 +0,0 @@
#!/usr/bin/env python3
"""
Auto-categorize skills based on their names and descriptions.
Removes "uncategorized" by intelligently assigning categories.
Usage:
python auto_categorize_skills.py
python auto_categorize_skills.py --dry-run (shows what would change)
"""
import os
import re
import json
import sys
import argparse
# Ensure UTF-8 output for Windows compatibility
if sys.platform == 'win32':
    import io
    # Re-wrap the std streams: the report below prints emoji, which would
    # raise UnicodeEncodeError on Windows' default legacy console codec.
    sys.stdout = io.TextIOWrapper(sys.stdout.buffer, encoding='utf-8')
    sys.stderr = io.TextIOWrapper(sys.stderr.buffer, encoding='utf-8')
# Category keywords mapping.
# Each skill's "<name> <description>" text is scored against these lists in
# categorize_skill(): a whole-word match scores 2, a bare substring match 1.
# NOTE: keep every list free of duplicates -- a repeated keyword would be
# counted twice and silently inflate that category's score (previously
# 'embedding' in ai-ml and 'automation' in automation were duplicated).
CATEGORY_KEYWORDS = {
    'web-development': [
        'react', 'vue', 'angular', 'svelte', 'nextjs', 'gatsby', 'remix',
        'html', 'css', 'javascript', 'typescript', 'frontend', 'web', 'tailwind',
        'bootstrap', 'sass', 'less', 'webpack', 'vite', 'rollup', 'parcel',
        'rest api', 'graphql', 'http', 'fetch', 'axios', 'cors',
        'responsive', 'seo', 'accessibility', 'a11y', 'pwa', 'progressive',
        'dom', 'jsx', 'tsx', 'component', 'router', 'routing'
    ],
    'backend': [
        'nodejs', 'node.js', 'express', 'fastapi', 'django', 'flask',
        'spring', 'java', 'python', 'golang', 'rust', 'c#', 'csharp',
        'dotnet', '.net', 'laravel', 'php', 'ruby', 'rails',
        'server', 'backend', 'api', 'rest', 'graphql', 'database',
        'sql', 'mongodb', 'postgres', 'mysql', 'redis', 'cache',
        'authentication', 'auth', 'jwt', 'oauth', 'session',
        'middleware', 'routing', 'controller', 'model'
    ],
    'database': [
        'database', 'sql', 'postgres', 'postgresql', 'mysql', 'mariadb',
        'mongodb', 'nosql', 'firestore', 'dynamodb', 'cassandra',
        'elasticsearch', 'redis', 'memcached', 'graphql', 'prisma',
        'orm', 'query', 'migration', 'schema', 'index'
    ],
    'ai-ml': [
        'ai', 'artificial intelligence', 'machine learning', 'ml',
        'deep learning', 'neural', 'tensorflow', 'pytorch', 'scikit',
        'nlp', 'computer vision', 'cv', 'llm', 'gpt', 'bert',
        'classification', 'regression', 'clustering', 'transformer',
        'embedding', 'vector', 'training', 'model'
    ],
    'devops': [
        'devops', 'docker', 'kubernetes', 'k8s', 'ci/cd', 'git',
        'github', 'gitlab', 'jenkins', 'gitlab-ci', 'github actions',
        'aws', 'azure', 'gcp', 'terraform', 'ansible', 'vagrant',
        'deploy', 'deployment', 'container', 'orchestration',
        'monitoring', 'logging', 'prometheus', 'grafana'
    ],
    'cloud': [
        'aws', 'amazon', 'azure', 'gcp', 'google cloud', 'cloud',
        'ec2', 's3', 'lambda', 'cloudformation', 'terraform',
        'serverless', 'functions', 'storage', 'cdn', 'distributed'
    ],
    'security': [
        'security', 'encryption', 'cryptography', 'ssl', 'tls',
        'hashing', 'bcrypt', 'jwt', 'oauth', 'authentication',
        'authorization', 'firewall', 'penetration', 'audit',
        'vulnerability', 'privacy', 'gdpr', 'compliance'
    ],
    'testing': [
        'test', 'testing', 'jest', 'mocha', 'jasmine', 'pytest',
        'unittest', 'cypress', 'selenium', 'puppeteer', 'e2e',
        'unit test', 'integration', 'coverage', 'ci/cd'
    ],
    'mobile': [
        'mobile', 'android', 'ios', 'react native', 'flutter',
        'swift', 'kotlin', 'objective-c', 'app', 'native',
        'cross-platform', 'expo', 'cordova', 'xamarin'
    ],
    'game-development': [
        'game', 'unity', 'unreal', 'godot', 'canvas', 'webgl',
        'threejs', 'babylon', 'phaser', 'sprite', 'physics',
        'collision', '2d', '3d', 'shader', 'rendering'
    ],
    'data-science': [
        'data', 'analytics', 'science', 'pandas', 'numpy', 'scipy',
        'jupyter', 'notebook', 'visualization', 'matplotlib', 'plotly',
        'statistics', 'correlation', 'regression', 'clustering'
    ],
    'automation': [
        'automation', 'scripting', 'selenium', 'puppeteer', 'robot',
        'workflow', 'scheduled', 'trigger', 'integration'
    ],
    'content': [
        'markdown', 'documentation', 'content', 'blog', 'writing',
        'seo', 'meta', 'schema', 'og', 'twitter', 'description'
    ]
}
def categorize_skill(skill_name, description):
    """
    Pick the best-matching category for a skill.

    The name and description are scored against every keyword list in
    CATEGORY_KEYWORDS: a whole-word match is worth 2 points, a plain
    substring match 1 point.  Returns the highest-scoring category name,
    or None when nothing matches at all.
    """
    haystack = f"{skill_name} {description}".lower()

    def _keyword_score(keyword):
        # Whole-word hits are weighted above incidental substring hits.
        if re.search(r'\b' + re.escape(keyword) + r'\b', haystack):
            return 2
        return 1 if keyword in haystack else 0

    scores = {}
    for category, keywords in CATEGORY_KEYWORDS.items():
        total = sum(_keyword_score(keyword) for keyword in keywords)
        if total > 0:
            scores[category] = total
    # max() keeps the first category seen on ties (dict insertion order).
    return max(scores, key=scores.get) if scores else None
import yaml
def auto_categorize(skills_dir, dry_run=False):
    """Auto-categorize skills and update SKILL.md files.

    Walks skills_dir for SKILL.md files, assigns a category to every skill
    currently marked 'uncategorized' (or missing a category), rewrites the
    YAML frontmatter in place (unless dry_run), prints a summary report,
    and returns the number of skills categorized.
    """
    # Per-skill records for the report; 'action' is SKIP / UPDATE / FAILED.
    skills = []
    categorized_count = 0
    already_categorized = 0
    failed_count = 0
    for root, dirs, files in os.walk(skills_dir):
        # Prune hidden directories (.git, .disabled, ...) from the walk.
        dirs[:] = [d for d in dirs if not d.startswith('.')]
        if "SKILL.md" in files:
            skill_path = os.path.join(root, "SKILL.md")
            skill_id = os.path.basename(root)
            try:
                with open(skill_path, 'r', encoding='utf-8') as f:
                    content = f.read()
                # Extract frontmatter and body
                fm_match = re.search(r'^---\s*\n(.*?)\n---', content, re.DOTALL)
                if not fm_match:
                    continue
                fm_text = fm_match.group(1)
                body = content[fm_match.end():]
                try:
                    metadata = yaml.safe_load(fm_text) or {}
                except yaml.YAMLError as e:
                    print(f"⚠️ {skill_id}: YAML error - {e}")
                    continue
                skill_name = metadata.get('name', skill_id)
                description = metadata.get('description', '')
                current_category = metadata.get('category', 'uncategorized')
                # Skip if already has a meaningful category
                if current_category and current_category != 'uncategorized':
                    already_categorized += 1
                    skills.append({
                        'id': skill_id,
                        'name': skill_name,
                        'current': current_category,
                        'action': 'SKIP'
                    })
                    continue
                # Try to auto-categorize
                new_category = categorize_skill(skill_name, description)
                if new_category:
                    skills.append({
                        'id': skill_id,
                        'name': skill_name,
                        'current': current_category,
                        'new': new_category,
                        'action': 'UPDATE'
                    })
                    if not dry_run:
                        # Rewrite the frontmatter with the new category;
                        # width=1000 keeps long values on a single line.
                        metadata['category'] = new_category
                        new_fm = yaml.dump(metadata, sort_keys=False, allow_unicode=True, width=1000).strip()
                        new_content = f"---\n{new_fm}\n---" + body
                        with open(skill_path, 'w', encoding='utf-8') as f:
                            f.write(new_content)
                    # Counted in dry-run mode too, so the report matches.
                    categorized_count += 1
                else:
                    skills.append({
                        'id': skill_id,
                        'name': skill_name,
                        'current': current_category,
                        'action': 'FAILED'
                    })
                    failed_count += 1
            except Exception as e:
                print(f"❌ Error processing {skill_id}: {str(e)}")
    # Print report
    print("\n" + "="*70)
    print("AUTO-CATEGORIZATION REPORT")
    print("="*70)
    print(f"\n📊 Summary:")
    print(f" ✅ Categorized: {categorized_count}")
    print(f" ⏭️ Already categorized: {already_categorized}")
    print(f" ❌ Failed to categorize: {failed_count}")
    print(f" 📈 Total processed: {len(skills)}")
    if categorized_count > 0:
        print(f"\n📋 Sample changes:")
        # NOTE: only the first 10 records are scanned, so fewer than 10
        # samples may print even when more updates happened.
        for skill in skills[:10]:
            if skill['action'] == 'UPDATE':
                print(f"{skill['id']}")
                print(f" {skill['current']} → {skill['new']}")
    if dry_run:
        print(f"\n🔍 DRY RUN MODE - No changes made")
    else:
        print(f"\n💾 Changes saved to SKILL.md files")
    return categorized_count
def main():
    """CLI entry point: parse arguments and run the categorizer."""
    parser = argparse.ArgumentParser(
        description="Auto-categorize skills based on content",
        formatter_class=argparse.RawDescriptionHelpFormatter,
        epilog="""
Examples:
python auto_categorize_skills.py --dry-run
python auto_categorize_skills.py
"""
    )
    parser.add_argument(
        '--dry-run',
        action='store_true',
        help='Show what would be changed without making changes',
    )
    options = parser.parse_args()
    # skills/ lives at the repo root, one level above this script.
    repo_root = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
    auto_categorize(os.path.join(repo_root, "skills"), dry_run=options.dry_run)


if __name__ == "__main__":
    main()

View File

@@ -1,679 +0,0 @@
const fs = require("fs");
const path = require("path");
const {
listSkillIdsRecursive,
readSkill,
tokenize,
unique,
} = require("../lib/skill-utils");
// Repository root (this script lives in <root>/scripts or similar).
const ROOT = path.resolve(__dirname, "..");
// All skills live under <root>/skills, one directory per skill.
const SKILLS_DIR = path.join(ROOT, "skills");
// Generic English/filler words excluded when deriving trigger tokens from a
// skill's name and description (see buildTriggers).  Kept deduplicated: a
// Set silently ignores repeats ("ensure" was listed twice), so duplicates
// are pure noise for maintainers.
const STOPWORDS = new Set([
  "a", "an", "and", "are", "as", "at", "be", "but", "by", "for", "from",
  "has", "have", "in", "into", "is", "it", "its", "of", "on", "or", "our",
  "out", "over", "that", "the", "their", "they", "this", "to", "use",
  "when", "with", "you", "your", "will", "can", "if", "not", "only",
  "also", "more", "best", "practice", "practices", "expert", "specialist",
  "focused", "focus", "master", "modern", "advanced", "comprehensive",
  "production", "production-ready", "ready", "build", "create", "deliver",
  "design", "implement", "implementation", "strategy", "strategies",
  "patterns", "pattern", "workflow", "workflows", "guide", "template",
  "templates", "tool", "tools", "project", "projects", "support",
  "manage", "management", "system", "systems", "services", "service",
  "across", "end", "end-to-end", "using", "based", "ensure", "help",
  "needs", "need", "focuses", "handles", "builds", "make",
]);
// Role/boilerplate tokens stripped from a skill id when falling back to
// id-derived tags (see deriveTags).  Deduplicated: Set membership makes
// repeats harmless at runtime ("management", "development" and "workflow"
// were each listed twice), but they obscure the real vocabulary.
const TAG_STOPWORDS = new Set([
  "pro", "expert", "patterns", "pattern", "workflow", "workflows",
  "templates", "template", "toolkit", "tools", "tool", "project",
  "projects", "guide", "management", "engineer", "architect", "developer",
  "specialist", "assistant", "analysis", "review", "reviewer",
  "automation", "orchestration", "scaffold", "scaffolding",
  "implementation", "strategy", "context", "feature", "features",
  "smart", "system", "systems", "design", "development", "test",
  "testing",
]);
/**
 * Ordered category rules: the first rule with any keyword present in a
 * skill's tag/name/description tokens wins (see detectCategory), so more
 * specific categories are listed before broader ones.
 */
const CATEGORY_RULES = [
  {
    name: "security",
    keywords: [
      "security", "sast", "compliance", "privacy", "threat",
      "vulnerability", "owasp", "pci", "gdpr", "secrets", "risk",
      "malware", "forensics", "attack", "incident", "auth", "mtls",
      "zero", "trust",
    ],
  },
  {
    name: "infrastructure",
    keywords: [
      "kubernetes", "k8s", "helm", "terraform", "cloud", "network",
      "devops", "gitops", "prometheus", "grafana", "observability",
      "monitoring", "logging", "tracing", "deployment", "istio",
      "linkerd", "service", "mesh", "slo", "sre", "oncall", "incident",
      "pipeline", "cicd", "ci", "cd", "kafka",
    ],
  },
  {
    name: "data-ai",
    keywords: [
      "data", "database", "db", "sql", "postgres", "mysql", "analytics",
      "etl", "warehouse", "dbt", "ml", "ai", "llm", "rag", "vector",
      "embedding", "spark", "airflow", "cdc", "pipeline",
    ],
  },
  {
    name: "development",
    keywords: [
      "python", "javascript", "typescript", "java", "golang", "go",
      "rust", "csharp", "dotnet", "php", "ruby", "node", "react",
      "frontend", "backend", "mobile", "ios", "android", "flutter",
      "fastapi", "django", "nextjs", "vue", "api",
    ],
  },
  {
    name: "architecture",
    keywords: [
      "architecture", "c4", "microservices", "event", "cqrs", "saga",
      "domain", "ddd", "patterns", "decision", "adr",
    ],
  },
  {
    name: "testing",
    keywords: ["testing", "tdd", "unit", "e2e", "qa", "test"],
  },
  {
    name: "business",
    keywords: [
      "business", "market", "sales", "finance", "startup", "legal",
      "hr", "product", "customer", "seo", "marketing", "kpi",
      "contract", "employment",
    ],
  },
  {
    name: "workflow",
    keywords: [
      "workflow", "orchestration", "conductor", "automation", "process",
      "collaboration",
    ],
  },
];
/**
 * Bundle definitions: a skill joins a bundle when any of the bundle's
 * keywords appears in its tag/name/description tokens (see buildBundles).
 */
const BUNDLE_RULES = {
  "core-dev": {
    description:
      "Core development skills across languages, frameworks, and backend/frontend fundamentals.",
    keywords: [
      "python", "javascript", "typescript", "go", "golang", "rust",
      "java", "node", "frontend", "backend", "react", "fastapi",
      "django", "nextjs", "api", "mobile", "ios", "android", "flutter",
      "php", "ruby",
    ],
  },
  "security-core": {
    description: "Security, privacy, and compliance essentials.",
    keywords: [
      "security", "sast", "compliance", "threat", "risk", "privacy",
      "secrets", "owasp", "gdpr", "pci", "vulnerability", "auth",
    ],
  },
  "k8s-core": {
    description: "Kubernetes and service mesh essentials.",
    keywords: [
      "kubernetes", "k8s", "helm", "istio", "linkerd", "service", "mesh",
    ],
  },
  "data-core": {
    description: "Data engineering and analytics foundations.",
    keywords: [
      "data", "database", "sql", "dbt", "airflow", "spark", "analytics",
      "etl", "warehouse", "postgres", "mysql", "kafka",
    ],
  },
  "ops-core": {
    description: "Operations, observability, and delivery pipelines.",
    keywords: [
      "observability", "monitoring", "logging", "tracing", "prometheus",
      "grafana", "devops", "gitops", "deployment", "cicd", "pipeline",
      "slo", "sre", "incident",
    ],
  },
};
// Hand-picked "common" skills surfaced by default; filtered against the
// skills actually present before being emitted (see buildBundles).
const CURATED_COMMON = [
  "bash-pro", "python-pro", "javascript-pro", "typescript-pro",
  "golang-pro", "rust-pro", "java-pro", "frontend-developer",
  "backend-architect", "nodejs-backend-patterns", "fastapi-pro",
  "api-design-principles", "sql-pro", "database-architect",
  "kubernetes-architect", "terraform-specialist",
  "observability-engineer", "security-auditor", "sast-configuration",
  "gitops-workflow",
];
// Lower-case the tokens, drop duplicates, and strip empty strings.
function normalizeTokens(tokens) {
  const lowered = tokens.map((token) => token.toLowerCase());
  return unique(lowered).filter((token) => Boolean(token));
}
/**
 * Derive a skill's tag list: prefer explicit frontmatter tags; otherwise
 * fall back to the id's hyphen-separated tokens minus TAG_STOPWORDS.
 */
function deriveTags(skill) {
  const declared = Array.isArray(skill.tags) ? skill.tags : [];
  let tags = declared.map((t) => t.toLowerCase()).filter(Boolean);
  if (tags.length === 0) {
    tags = [];
    for (const piece of skill.id.split("-")) {
      const token = piece.toLowerCase();
      if (token && !TAG_STOPWORDS.has(token)) tags.push(token);
    }
  }
  return normalizeTokens(tags);
}
/**
 * Map a skill to the first CATEGORY_RULES entry whose keywords intersect
 * the skill's tag/name/description tokens; "general" when none match.
 */
function detectCategory(skill, tags) {
  const tokens = new Set(
    normalizeTokens([
      ...tags,
      ...tokenize(skill.name),
      ...tokenize(skill.description),
    ]),
  );
  const matched = CATEGORY_RULES.find((rule) =>
    rule.keywords.some((keyword) => tokens.has(keyword)),
  );
  return matched ? matched.name : "general";
}
// Build up to 12 trigger words for a skill: its tags first, then
// meaningful (length >= 2, non-stopword) tokens from name + description.
function buildTriggers(skill, tags) {
  const text = `${skill.name} ${skill.description}`;
  const meaningful = [];
  for (const token of tokenize(text)) {
    if (token.length >= 2 && !STOPWORDS.has(token)) meaningful.push(token);
  }
  return unique(tags.concat(meaningful)).slice(0, 12);
}
/** Common typo aliases (e.g. em dash — instead of hyphen -) for skill lookup. */
const TYPO_ALIASES = {
  // Em-dash variant seen in the wild; maps to the canonical hyphenated id.
  "shopify—development": "shopify-development",
};
/**
 * Build a lookup of alias -> canonical skill id.
 *
 * Two alias sources, in priority order:
 *  1. the skill's display name (lower-cased) when it differs from its id;
 *  2. for long ids (28+ chars with 4+ hyphen tokens), a shortened
 *     first-two-plus-last-token form of the deduplicated id tokens.
 * Known typo aliases are appended last and never override earlier entries.
 * An alias is only registered when it collides with neither an existing id
 * nor a previously assigned alias.
 */
function buildAliases(skills) {
  const existingIds = new Set(skills.map((skill) => skill.id));
  const aliases = {};
  const used = new Set();
  for (const skill of skills) {
    // Source 1: the human-readable name, when distinct from the id.
    if (skill.name && skill.name !== skill.id) {
      const alias = skill.name.toLowerCase();
      if (!existingIds.has(alias) && !used.has(alias)) {
        aliases[alias] = skill.id;
        used.add(alias);
      }
    }
    const tokens = skill.id.split("-").filter(Boolean);
    // Source 2 only applies to unwieldy ids worth shortening.
    if (skill.id.length < 28 || tokens.length < 4) continue;
    // Drop repeated tokens while preserving first-seen order.
    const deduped = [];
    const tokenSeen = new Set();
    for (const token of tokens) {
      if (tokenSeen.has(token)) continue;
      tokenSeen.add(token);
      deduped.push(token);
    }
    // Keep the two leading tokens plus the final one; shorter deduped
    // lists are used as-is.
    const aliasTokens =
      deduped.length > 3
        ? [deduped[0], deduped[1], deduped[deduped.length - 1]]
        : deduped;
    const alias = unique(aliasTokens).join("-");
    if (!alias || alias === skill.id) continue;
    if (existingIds.has(alias) || used.has(alias)) continue;
    aliases[alias] = skill.id;
    used.add(alias);
  }
  // Hard-coded typo fixes: added only when the target skill exists and
  // the alias slot is still free.
  for (const [typo, canonicalId] of Object.entries(TYPO_ALIASES)) {
    if (existingIds.has(canonicalId) && !aliases[typo]) {
      aliases[typo] = canonicalId;
    }
  }
  return aliases;
}
/**
 * Assign skills to bundles by keyword overlap.
 *
 * Returns { bundles, common }: `bundles` maps each BUNDLE_RULES name to
 * its description plus a sorted list of matching skill ids; `common` is
 * the CURATED_COMMON list filtered down to skills that actually exist.
 */
function buildBundles(skills) {
  const bundles = {};
  // Pre-compute each skill's token set once so the membership tests in
  // the rule loop below are O(1) per keyword.
  const skillTokens = new Map();
  for (const skill of skills) {
    const tokens = normalizeTokens([
      ...skill.tags,
      ...tokenize(skill.name),
      ...tokenize(skill.description),
    ]);
    skillTokens.set(skill.id, new Set(tokens));
  }
  for (const [bundleName, rule] of Object.entries(BUNDLE_RULES)) {
    const bundleSkills = [];
    const keywords = rule.keywords.map((keyword) => keyword.toLowerCase());
    for (const skill of skills) {
      const tokenSet = skillTokens.get(skill.id) || new Set();
      // A single keyword hit is enough for bundle membership.
      if (keywords.some((keyword) => tokenSet.has(keyword))) {
        bundleSkills.push(skill.id);
      }
    }
    bundles[bundleName] = {
      description: rule.description,
      skills: bundleSkills.sort(),
    };
  }
  // Only advertise curated skills that are present in this checkout.
  const common = CURATED_COMMON.filter((skillId) => skillTokens.has(skillId));
  return { bundles, common };
}
function truncate(value, limit) {
if (!value || value.length <= limit) return value || "";
return `${value.slice(0, limit - 3)}...`;
}
/**
 * Render the catalog as a human-readable Markdown document: a header with
 * generation metadata, then one table of skills per category (categories
 * sorted alphabetically, rows in the catalog's existing sort order).
 */
function renderCatalogMarkdown(catalog) {
  const lines = [];
  lines.push("# Skill Catalog");
  lines.push("");
  lines.push(`Generated at: ${catalog.generatedAt}`);
  lines.push("");
  lines.push(`Total skills: ${catalog.total}`);
  lines.push("");
  const categories = Array.from(
    new Set(catalog.skills.map((skill) => skill.category)),
  ).sort();
  for (const category of categories) {
    const grouped = catalog.skills.filter(
      (skill) => skill.category === category,
    );
    lines.push(`## ${category} (${grouped.length})`);
    lines.push("");
    lines.push("| Skill | Description | Tags | Triggers |");
    lines.push("| --- | --- | --- | --- |");
    for (const skill of grouped) {
      // Keep descriptions short and escape pipes so table cells stay intact.
      const description = truncate(skill.description, 160).replace(
        /\|/g,
        "\\|",
      );
      const tags = skill.tags.join(", ");
      const triggers = skill.triggers.join(", ");
      lines.push(
        `| \`${skill.id}\` | ${description} | ${tags} | ${triggers} |`,
      );
    }
    lines.push("");
  }
  return lines.join("\n");
}
/**
 * Build the full skill catalog and write all derived artifacts:
 * data/catalog.json, CATALOG.md, data/bundles.json, and data/aliases.json.
 * Returns the in-memory catalog object.
 */
function buildCatalog() {
  const skillRelPaths = listSkillIdsRecursive(SKILLS_DIR);
  const skills = skillRelPaths.map((relPath) => readSkill(SKILLS_DIR, relPath));
  const catalogSkills = [];
  for (const skill of skills) {
    const tags = deriveTags(skill);
    const category = detectCategory(skill, tags);
    const triggers = buildTriggers(skill, tags);
    catalogSkills.push({
      id: skill.id,
      name: skill.name,
      description: skill.description,
      category,
      tags,
      triggers,
      // Normalize separators for deterministic cross-platform output.
      path: path.relative(ROOT, skill.path).split(path.sep).join("/"),
    });
  }
  const catalog = {
    // Honor SOURCE_DATE_EPOCH (seconds) for reproducible builds; otherwise
    // use a fixed timestamp so regeneration never churns diffs.
    generatedAt: process.env.SOURCE_DATE_EPOCH
      ? new Date(process.env.SOURCE_DATE_EPOCH * 1000).toISOString()
      : "2026-02-08T00:00:00.000Z",
    total: catalogSkills.length,
    // Sort by id for stable, diff-friendly output.
    skills: catalogSkills.sort((a, b) =>
      a.id < b.id ? -1 : a.id > b.id ? 1 : 0,
    ),
  };
  const aliases = buildAliases(catalog.skills);
  const bundleData = buildBundles(catalog.skills);
  const catalogPath = path.join(ROOT, "data", "catalog.json");
  const catalogMarkdownPath = path.join(ROOT, "CATALOG.md");
  const bundlesPath = path.join(ROOT, "data", "bundles.json");
  const aliasesPath = path.join(ROOT, "data", "aliases.json");
  fs.writeFileSync(catalogPath, JSON.stringify(catalog, null, 2));
  fs.writeFileSync(catalogMarkdownPath, renderCatalogMarkdown(catalog));
  fs.writeFileSync(
    bundlesPath,
    JSON.stringify(
      { generatedAt: catalog.generatedAt, ...bundleData },
      null,
      2,
    ),
  );
  fs.writeFileSync(
    aliasesPath,
    JSON.stringify({ generatedAt: catalog.generatedAt, aliases }, null, 2),
  );
  return catalog;
}
// CLI entry point: regenerate the catalog when run directly.
if (require.main === module) {
  const catalog = buildCatalog();
  console.log(`Generated catalog for ${catalog.total} skills.`);
}
// Exported for programmatic reuse (e.g. validation or CI scripts).
module.exports = {
  buildCatalog,
};

View File

@@ -1,71 +0,0 @@
#!/usr/bin/env node
'use strict';
const fs = require('node:fs');
const path = require('node:path');
// CLI: node scripts/copy-file.js <source> <destination>
// Both paths are resolved relative to the project root (one level above
// this scripts/ directory); validation happens below.
const args = process.argv.slice(2);
if (args.length !== 2) {
  console.error('Usage: node scripts/copy-file.js <source> <destination>');
  process.exit(1);
}
const [sourceInput, destinationInput] = args;
const projectRoot = path.resolve(__dirname, '..');
const sourcePath = path.resolve(projectRoot, sourceInput);
const destinationPath = path.resolve(projectRoot, destinationInput);
const destinationDir = path.dirname(destinationPath);
// Print `message` to stderr and terminate the process with exit code 1.
function fail(message) {
  console.error(message);
  process.exit(1);
}
// True when targetPath is the project root itself or contained within it
// (the relative path neither escapes via ".." nor lands on another drive).
function isInsideProjectRoot(targetPath) {
  const rel = path.relative(projectRoot, targetPath);
  if (rel === '') return true;
  const escapes = rel.startsWith('..') || path.isAbsolute(rel);
  return !escapes;
}
// Refuse paths that escape the repository (e.g. via ".." or drive changes).
if (!isInsideProjectRoot(sourcePath) || !isInsideProjectRoot(destinationPath)) {
  fail('Source and destination must resolve inside the project root.');
}
if (sourcePath === destinationPath) {
  fail('Source and destination must be different files.');
}
if (!fs.existsSync(sourcePath)) {
  fail(`Source file not found: ${sourceInput}`);
}
let sourceStats;
try {
  sourceStats = fs.statSync(sourcePath);
} catch (error) {
  fail(`Unable to read source file "${sourceInput}": ${error.message}`);
}
// Only regular files are copied -- no directories or special files.
if (!sourceStats.isFile()) {
  fail(`Source is not a file: ${sourceInput}`);
}
// The destination's parent directory must already exist; this script does
// not create directories.
let destinationDirStats;
try {
  destinationDirStats = fs.statSync(destinationDir);
} catch {
  fail(`Destination directory not found: ${path.relative(projectRoot, destinationDir)}`);
}
if (!destinationDirStats.isDirectory()) {
  fail(`Destination parent is not a directory: ${path.relative(projectRoot, destinationDir)}`);
}
try {
  // Overwrites the destination if it already exists (copyFileSync default).
  fs.copyFileSync(sourcePath, destinationPath);
} catch (error) {
  fail(`Copy failed (${sourceInput} -> ${destinationInput}): ${error.message}`);
}
console.log(`Copied ${sourceInput} -> ${destinationInput}`);

View File

@@ -1,52 +0,0 @@
import os
import re
def fix_dangling_links(skills_dir):
    """Rewrite Markdown links whose local target does not exist.

    Walks every .md file under skills_dir; any relative link pointing at a
    missing file is replaced by its plain link text.  External links
    (http/https/mailto), absolute paths, and pure-anchor links are left
    untouched.  Files are edited in place.
    """
    print(f"Scanning for dangling links in {skills_dir}...")
    # Matches inline Markdown links: [text](href)
    pattern = re.compile(r'\[([^\]]*)\]\(([^)]+)\)')
    fixed_count = 0
    for root, dirs, files in os.walk(skills_dir):
        # Skip hidden directories
        dirs[:] = [d for d in dirs if not d.startswith('.')]
        for file in files:
            if not file.endswith('.md'): continue
            file_path = os.path.join(root, file)
            try:
                with open(file_path, 'r', encoding='utf-8') as f:
                    content = f.read()
            except Exception:
                # Unreadable file (permissions/encoding) -- best-effort skip.
                continue
            def replacer(match):
                nonlocal fixed_count
                text = match.group(1)
                href = match.group(2)
                # Drop any #fragment before resolving the target on disk.
                href_clean = href.split('#')[0].strip()
                # Ignore empty links, web URLs, emails, etc.
                if not href_clean or href_clean.startswith(('http://', 'https://', 'mailto:', '<', '>')):
                    return match.group(0)
                if os.path.isabs(href_clean):
                    return match.group(0)
                # Targets are resolved relative to the file's own directory.
                target_path = os.path.normpath(os.path.join(root, href_clean))
                if not os.path.exists(target_path):
                    # Dangling link detected. Replace markdown link with just its text.
                    print(f"Fixing dangling link in {os.path.relpath(file_path, skills_dir)}: {href}")
                    fixed_count += 1
                    return text
                return match.group(0)
            new_content = pattern.sub(replacer, content)
            if new_content != content:
                with open(file_path, 'w', encoding='utf-8') as f:
                    f.write(new_content)
    print(f"Total dangling links fixed: {fixed_count}")
if __name__ == '__main__':
    # skills/ lives one level above this script's directory.
    base_dir = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
    fix_dangling_links(os.path.join(base_dir, 'skills'))

View File

@@ -1,50 +0,0 @@
import os
import re
import yaml
def fix_skills(skills_dir):
    """Normalize SKILL.md frontmatter in place.

    For every SKILL.md under skills_dir:
      1. force the `name` field to match the skill's folder name;
      2. clamp `description` to 200 characters (197 chars + "...").
    Files are rewritten only when something actually changed.
    """
    for root, dirs, files in os.walk(skills_dir):
        # Ignore hidden directories (.git, .disabled, ...).
        dirs[:] = [d for d in dirs if not d.startswith('.')]
        if "SKILL.md" not in files:
            continue
        skill_path = os.path.join(root, "SKILL.md")
        try:
            with open(skill_path, 'r', encoding='utf-8') as f:
                content = f.read()
        except OSError as e:
            # Previously an unreadable file aborted the whole run; now we
            # report it and keep fixing the remaining skills.
            print(f"⚠️ {skill_path}: read error - {e}")
            continue
        fm_match = re.search(r'^---\s*\n(.*?)\n---', content, re.DOTALL)
        if not fm_match:
            # No YAML frontmatter -- nothing to normalize.
            continue
        fm_text = fm_match.group(1)
        body = content[fm_match.end():]
        folder_name = os.path.basename(root)
        try:
            metadata = yaml.safe_load(fm_text) or {}
        except yaml.YAMLError as e:
            print(f"⚠️ {skill_path}: YAML error - {e}")
            continue
        changed = False
        # 1. Fix Name: the folder name is the canonical skill id.
        if metadata.get('name') != folder_name:
            metadata['name'] = folder_name
            changed = True
        # 2. Fix Description length (hard cap of 200 characters).
        desc = metadata.get('description', '')
        if isinstance(desc, str) and len(desc) > 200:
            metadata['description'] = desc[:197] + "..."
            changed = True
        if changed:
            # width=1000 keeps long descriptions on one line instead of
            # being folded by the YAML emitter.
            new_fm = yaml.dump(metadata, sort_keys=False, allow_unicode=True, width=1000).strip()
            new_content = f"---\n{new_fm}\n---" + body
            with open(skill_path, 'w', encoding='utf-8') as f:
                f.write(new_content)
            print(f"Fixed {skill_path}")
if __name__ == "__main__":
    # skills/ lives one level above this script's directory.
    base_dir = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
    skills_path = os.path.join(base_dir, "skills")
    fix_skills(skills_path)

View File

@@ -1,45 +0,0 @@
import os
import re
import yaml
def fix_yaml_quotes(skills_dir):
    """Round-trip every SKILL.md frontmatter through PyYAML.

    safe_load + dump normalizes quoting; key order is preserved
    (sort_keys=False) and files are rewritten only when the normalized
    frontmatter text differs from the original.
    """
    print(f"Normalizing YAML frontmatter in {skills_dir}...")
    fixed_count = 0
    for root, dirs, files in os.walk(skills_dir):
        # Skip hidden directories (.git, .disabled, ...).
        dirs[:] = [d for d in dirs if not d.startswith('.')]
        if 'SKILL.md' in files:
            file_path = os.path.join(root, 'SKILL.md')
            try:
                with open(file_path, 'r', encoding='utf-8') as f:
                    content = f.read()
            except Exception:
                # Unreadable file -- best-effort skip.
                continue
            fm_match = re.search(r'^---\s*\n(.*?)\n---', content, re.DOTALL)
            if not fm_match:
                continue
            fm_text = fm_match.group(1)
            body = content[fm_match.end():]
            try:
                # safe_load and then dump will normalize quoting automatically
                metadata = yaml.safe_load(fm_text) or {}
                new_fm = yaml.dump(metadata, sort_keys=False, allow_unicode=True, width=1000).strip()
                # Check if it actually changed something significant (beyond just style)
                # but normalization is good anyway. We'll just compare the fm_text.
                if new_fm.strip() != fm_text.strip():
                    new_content = f"---\n{new_fm}\n---" + body
                    with open(file_path, 'w', encoding='utf-8') as f:
                        f.write(new_content)
                    fixed_count += 1
            except yaml.YAMLError as e:
                print(f"⚠️ {file_path}: YAML error - {e}")
    print(f"Total files normalized: {fixed_count}")
if __name__ == '__main__':
    # skills/ lives one level above this script's directory.
    base_dir = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
    fix_yaml_quotes(os.path.join(base_dir, 'skills'))

View File

@@ -1,53 +0,0 @@
#!/usr/bin/env python3
"""
Update all skill dates from 2025 to 2026.
Fixes the year mismatch issue.
"""
import os
import re
import sys
# Ensure UTF-8 output for Windows compatibility
if sys.platform == 'win32':
    import io
    # Re-wrap the std streams so Unicode output cannot crash on Windows'
    # default legacy console codec.
    sys.stdout = io.TextIOWrapper(sys.stdout.buffer, encoding='utf-8')
    sys.stderr = io.TextIOWrapper(sys.stderr.buffer, encoding='utf-8')
# Matches the date_added frontmatter field with an optional quote style,
# e.g. `date_added: "2025-..."`, `date_added: '2025-...'`, `date_added: 2025-...`.
_DATE_ADDED_2025 = re.compile(r'(date_added:\s*["\']?)2025-')


def update_dates(skills_dir):
    """Rewrite every SKILL.md so `date_added` years move from 2025 to 2026.

    Walks skills_dir (skipping hidden directories), updates files in place,
    and returns the number of skills whose file was rewritten.  Unlike the
    previous exact-substring check (`date_added: "2025-`), the regex also
    covers unquoted and single-quoted date values.
    """
    updated_count = 0
    for root, dirs, files in os.walk(skills_dir):
        dirs[:] = [d for d in dirs if not d.startswith('.')]
        if "SKILL.md" not in files:
            continue
        skill_path = os.path.join(root, "SKILL.md")
        skill_id = os.path.basename(root)
        try:
            with open(skill_path, 'r', encoding='utf-8') as f:
                content = f.read()
            new_content, n_subs = _DATE_ADDED_2025.subn(r'\g<1>2026-', content)
            if n_subs:
                with open(skill_path, 'w', encoding='utf-8') as f:
                    f.write(new_content)
                print(f"OK {skill_id}")
                updated_count += 1
        except Exception as e:
            # Keep going: one broken skill must not abort the whole sweep.
            print(f"Error updating {skill_id}: {str(e)}")
    print(f"\nUpdated {updated_count} skills to 2026")
    return updated_count
if __name__ == "__main__":
    # skills/ lives one level above this script's directory.
    base_dir = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
    skills_path = os.path.join(base_dir, "skills")
    print("Updating all dates from 2025 to 2026...\n")
    update_dates(skills_path)
    print("\nDone! Run: python scripts/generate_index.py")

View File

@@ -1,132 +0,0 @@
import os
import json
import re
import sys
import yaml
# Ensure UTF-8 output for Windows compatibility
if sys.platform == 'win32':
    import io
    # Re-wrap the std streams: this script prints emoji, which would raise
    # UnicodeEncodeError on Windows' default legacy console codec.
    sys.stdout = io.TextIOWrapper(sys.stdout.buffer, encoding='utf-8')
    sys.stderr = io.TextIOWrapper(sys.stderr.buffer, encoding='utf-8')
def parse_frontmatter(content):
    """
    Parses YAML frontmatter, sanitizing unquoted values containing @.
    Handles single values and comma-separated lists by quoting the entire line.
    Returns {} when there is no frontmatter or the YAML cannot be parsed.
    """
    fm_match = re.search(r'^---\s*\n(.*?)\n---', content, re.DOTALL)
    if fm_match is None:
        return {}

    def _sanitize(line):
        # Quote `key: value` lines whose value embeds '@' and is not
        # already quoted; everything else passes through untouched.
        kv = re.match(r'^(\s*[\w-]+):\s*(.*)$', line)
        if not kv:
            return line
        key, raw_val = kv.groups()
        value = raw_val.strip()
        if '@' not in value or value.startswith(('"', "'")):
            return line
        # Escape any existing double quotes within the value string.
        escaped = value.replace('"', '\\"')
        return f'{key}: "{escaped}"'

    sanitized_yaml = '\n'.join(
        _sanitize(line) for line in fm_match.group(1).splitlines()
    )
    try:
        return yaml.safe_load(sanitized_yaml) or {}
    except yaml.YAMLError as e:
        print(f"⚠️ YAML parsing error: {e}")
        return {}
def generate_index(skills_dir, output_file):
    """Build the skills index JSON: one record per SKILL.md under skills_dir.

    Each record carries id, path, category, name, description, risk, source,
    and date_added.  Frontmatter values override defaults derived from the
    folder layout; a missing description falls back to the first paragraph
    of the document body.  Returns the list of records written.
    """
    print(f"🏗️ Generating index from: {skills_dir}")
    skills = []
    for root, dirs, files in os.walk(skills_dir):
        # Skip .disabled or hidden directories
        dirs[:] = [d for d in dirs if not d.startswith('.')]
        if "SKILL.md" in files:
            skill_path = os.path.join(root, "SKILL.md")
            dir_name = os.path.basename(root)
            parent_dir = os.path.basename(os.path.dirname(root))
            # Default values
            rel_path = os.path.relpath(root, os.path.dirname(skills_dir))
            # Force forward slashes for cross-platform JSON compatibility
            skill_info = {
                "id": dir_name,
                "path": rel_path.replace(os.sep, '/'),
                "category": parent_dir if parent_dir != "skills" else None,  # Will be overridden by frontmatter if present
                "name": dir_name.replace("-", " ").title(),
                "description": "",
                "risk": "unknown",
                "source": "unknown",
                "date_added": None
            }
            try:
                with open(skill_path, 'r', encoding='utf-8') as f:
                    content = f.read()
            except Exception as e:
                print(f"⚠️ Error reading {skill_path}: {e}")
                continue
            # Parse Metadata
            metadata = parse_frontmatter(content)
            # Merge Metadata (frontmatter takes priority)
            if "name" in metadata: skill_info["name"] = metadata["name"]
            if "description" in metadata: skill_info["description"] = metadata["description"]
            if "risk" in metadata: skill_info["risk"] = metadata["risk"]
            if "source" in metadata: skill_info["source"] = metadata["source"]
            if "date_added" in metadata: skill_info["date_added"] = metadata["date_added"]
            # Category: prefer frontmatter, then folder structure, then default
            if "category" in metadata:
                skill_info["category"] = metadata["category"]
            elif skill_info["category"] is None:
                skill_info["category"] = "uncategorized"
            # Fallback for description if missing in frontmatter (legacy support)
            if not skill_info["description"]:
                body = content
                fm_match = re.search(r'^---\s*\n(.*?)\n---', content, re.DOTALL)
                if fm_match:
                    body = content[fm_match.end():].strip()
                # Simple extraction of first non-header paragraph
                lines = body.split('\n')
                desc_lines = []
                for line in lines:
                    # Skip leading headers/blank lines; once the paragraph
                    # has started, the next header/blank line ends it.
                    if line.startswith('#') or not line.strip():
                        if desc_lines: break
                        continue
                    desc_lines.append(line.strip())
                if desc_lines:
                    # Cap the fallback description at 250 characters.
                    skill_info["description"] = " ".join(desc_lines)[:250].strip()
            skills.append(skill_info)
    # Sort validation: by name
    skills.sort(key=lambda x: (x["name"].lower(), x["id"].lower()))
    # newline='\n' keeps the generated JSON byte-identical across platforms.
    with open(output_file, 'w', encoding='utf-8', newline='\n') as f:
        json.dump(skills, f, indent=2)
    print(f"✅ Generated rich index with {len(skills)} skills at: {output_file}")
    return skills
if __name__ == "__main__":
    # skills/ and skills_index.json live at the repo root, one level above
    # this script's directory.
    base_dir = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
    skills_path = os.path.join(base_dir, "skills")
    output_path = os.path.join(base_dir, "skills_index.json")
    generate_index(skills_path, output_path)

View File

@@ -1,126 +0,0 @@
#!/usr/bin/env python3
"""
Generate a report of skills with their date_added metadata in JSON format.
Usage:
python generate_skills_report.py [--output report.json] [--sort date|name]
"""
import os
import re
import json
import sys
import argparse
from datetime import datetime
from pathlib import Path
def get_project_root():
    """Return the project root (two directory levels above this script)."""
    here = os.path.abspath(__file__)
    return os.path.dirname(os.path.dirname(here))
import yaml
def parse_frontmatter(content):
    """Parse frontmatter from SKILL.md content using PyYAML.

    Returns the parsed mapping ({} for empty frontmatter), or None when no
    frontmatter block is present or the YAML is malformed.
    """
    match = re.search(r'^---\s*\n(.*?)\n---', content, re.DOTALL)
    if match is None:
        return None
    try:
        parsed = yaml.safe_load(match.group(1))
    except yaml.YAMLError:
        return None
    return parsed or {}
def generate_skills_report(output_file=None, sort_by='date'):
    """Generate a report of all skills with their metadata.

    Walks skills/<name>/SKILL.md, collects frontmatter fields, and builds a
    JSON report including coverage stats for the date_added field.

    Args:
        output_file: Path to write the JSON report to; prints to stdout when None.
        sort_by: 'date' (newest first, names A-Z within a date) or 'name'.

    Returns:
        The report dict, or None when writing to output_file failed.
    """
    skills_dir = os.path.join(get_project_root(), 'skills')
    skills_data = []
    for root, dirs, files in os.walk(skills_dir):
        # Skip hidden/disabled directories
        dirs[:] = [d for d in dirs if not d.startswith('.')]
        if "SKILL.md" in files:
            skill_name = os.path.basename(root)
            skill_path = os.path.join(root, "SKILL.md")
            try:
                with open(skill_path, 'r', encoding='utf-8') as f:
                    content = f.read()
                metadata = parse_frontmatter(content)
                if metadata is None:
                    continue
                skill_info = {
                    'id': metadata.get('id', skill_name),
                    'name': metadata.get('name', skill_name),
                    'description': metadata.get('description', ''),
                    'date_added': metadata.get('date_added', None),
                    'source': metadata.get('source', 'unknown'),
                    'risk': metadata.get('risk', 'unknown'),
                    # Category fallback: first segment of a dashed id, else 'other'.
                    'category': metadata.get('category', metadata.get('id', '').split('-')[0] if '-' in metadata.get('id', '') else 'other'),
                }
                skills_data.append(skill_info)
            except Exception as e:
                print(f"⚠️ Error reading {skill_path}: {str(e)}", file=sys.stderr)
    # Sort data
    if sort_by == 'date':
        # FIX: a single sort with reverse=True also reversed the name order.
        # Two stable passes give newest-first dates with names ascending
        # within the same date (missing dates sink to the bottom).
        skills_data.sort(key=lambda x: x['name'])
        skills_data.sort(key=lambda x: x['date_added'] or '0000-00-00', reverse=True)
    elif sort_by == 'name':
        skills_data.sort(key=lambda x: x['name'])
    # Prepare report
    report = {
        'generated_at': datetime.now().isoformat(),
        'total_skills': len(skills_data),
        'skills_with_dates': sum(1 for s in skills_data if s['date_added']),
        'skills_without_dates': sum(1 for s in skills_data if not s['date_added']),
        'coverage_percentage': round(
            sum(1 for s in skills_data if s['date_added']) / len(skills_data) * 100 if skills_data else 0,
            1
        ),
        'sorted_by': sort_by,
        'skills': skills_data
    }
    # Output: write to file when requested, otherwise print to stdout.
    if output_file:
        try:
            with open(output_file, 'w', encoding='utf-8') as f:
                json.dump(report, f, indent=2, ensure_ascii=False)
            print(f"✅ Report saved to: {output_file}")
        except Exception as e:
            print(f"❌ Error saving report: {str(e)}")
            return None
    else:
        # Print to stdout
        print(json.dumps(report, indent=2, ensure_ascii=False))
    return report
def main():
    """CLI entry point: parse arguments and emit the skills report."""
    parser = argparse.ArgumentParser(
        description="Generate a skills report with date_added metadata",
        formatter_class=argparse.RawDescriptionHelpFormatter,
        epilog="""
Examples:
    python generate_skills_report.py
    python generate_skills_report.py --output skills_report.json
    python generate_skills_report.py --sort name --output sorted_skills.json
    """
    )
    parser.add_argument('--output', '-o', help='Output file (JSON). If not specified, prints to stdout')
    parser.add_argument('--sort', choices=['date', 'name'], default='date', help='Sort order (default: date)')
    args = parser.parse_args()
    generate_skills_report(output_file=args.output, sort_by=args.sort)

if __name__ == '__main__':
    main()

View File

@@ -1,300 +0,0 @@
#!/usr/bin/env python3
"""
Manage skill date_added metadata.
Usage:
python manage_skill_dates.py list # List all skills with their dates
python manage_skill_dates.py add-missing [--date YYYY-MM-DD] # Add dates to skills without them
python manage_skill_dates.py add-all [--date YYYY-MM-DD] # Add/update dates for all skills
python manage_skill_dates.py update <skill-id> YYYY-MM-DD # Update a specific skill's date
"""
import os
import re
import sys
import argparse
from datetime import datetime
from pathlib import Path
# Ensure UTF-8 output for Windows compatibility
if sys.platform == 'win32':
import io
sys.stdout = io.TextIOWrapper(sys.stdout.buffer, encoding='utf-8')
sys.stderr = io.TextIOWrapper(sys.stderr.buffer, encoding='utf-8')
def get_project_root():
    """Return the absolute project root path (parent of this script's directory)."""
    here = os.path.abspath(__file__)
    return os.path.dirname(os.path.dirname(here))
import yaml
def parse_frontmatter(content):
    """Parse the YAML frontmatter of SKILL.md content.

    Returns (metadata, content): metadata is the parsed mapping ({} for an
    empty block), or None when the frontmatter is absent or invalid; the
    original content is always passed back unchanged.
    """
    match = re.search(r'^---\s*\n(.*?)\n---', content, re.DOTALL)
    if match is None:
        return None, content
    try:
        parsed = yaml.safe_load(match.group(1))
    except yaml.YAMLError as e:
        print(f"⚠️ YAML parsing error: {e}")
        return None, content
    return parsed or {}, content
def reconstruct_frontmatter(metadata):
    """Serialize *metadata* back into a ``---``-delimited YAML frontmatter block.

    Well-known keys are emitted first in a fixed order so rewrites produce
    stable diffs; any remaining keys follow in their original order.
    """
    priority_keys = ['id', 'name', 'description', 'category', 'risk', 'source', 'tags', 'date_added']
    ordered = {key: metadata[key] for key in priority_keys if key in metadata}
    # Append everything not already placed, preserving insertion order.
    for key, value in metadata.items():
        ordered.setdefault(key, value)
    fm_text = yaml.dump(ordered, sort_keys=False, allow_unicode=True, width=1000).strip()
    return f"---\n{fm_text}\n---"
def update_skill_frontmatter(skill_path, metadata):
    """Merge *metadata* into the frontmatter of the SKILL.md at *skill_path*.

    Rewrites the file in place, preserving the body after the closing `---`.
    Returns True on success, False when the frontmatter could not be parsed
    or located, or when the file could not be rewritten.
    """
    try:
        with open(skill_path, 'r', encoding='utf-8') as f:
            content = f.read()
        # NOTE(review): parse_frontmatter's second element is the full original
        # content, not the body — the body is re-derived below from offsets.
        old_metadata, body_content = parse_frontmatter(content)
        if old_metadata is None:
            print(f"❌ {skill_path}: Could not parse frontmatter")
            return False
        # Merge metadata (incoming keys win over existing ones)
        old_metadata.update(metadata)
        # Reconstruct content
        new_frontmatter = reconstruct_frontmatter(old_metadata)
        # Find where the frontmatter ends in the original content.
        # Searching from offset 3 skips the opening '---' (assumed at offset 0).
        fm_end = content.find('---', 3)  # Skip first ---
        if fm_end == -1:
            print(f"❌ {skill_path}: Could not locate frontmatter boundary")
            return False
        # Body starts right after the closing '---' (keeps its leading newline).
        body_start = fm_end + 3
        body = content[body_start:]
        new_content = new_frontmatter + body
        with open(skill_path, 'w', encoding='utf-8') as f:
            f.write(new_content)
        return True
    except Exception as e:
        print(f"❌ Error updating {skill_path}: {str(e)}")
        return False
def list_skills():
    """Print a report of all skills grouped by presence of date_added.

    Scans skills/<name>/SKILL.md, lists dated skills newest-first and
    undated skills alphabetically, then prints a coverage percentage.
    """
    skills_dir = os.path.join(get_project_root(), 'skills')
    skills_with_dates = []
    skills_without_dates = []
    for root, dirs, files in os.walk(skills_dir):
        # Skip hidden/disabled directories
        dirs[:] = [d for d in dirs if not d.startswith('.')]
        if "SKILL.md" in files:
            skill_name = os.path.basename(root)
            skill_path = os.path.join(root, "SKILL.md")
            try:
                with open(skill_path, 'r', encoding='utf-8') as f:
                    content = f.read()
                metadata, _ = parse_frontmatter(content)
                if metadata is None:
                    continue
                # A literal 'N/A' value is treated the same as a missing key.
                date_added = metadata.get('date_added', 'N/A')
                if date_added == 'N/A':
                    skills_without_dates.append(skill_name)
                else:
                    skills_with_dates.append((skill_name, date_added))
            except Exception as e:
                print(f"⚠️ Error reading {skill_path}: {str(e)}", file=sys.stderr)
    # Sort by date (newest first; ISO dates compare correctly as strings)
    skills_with_dates.sort(key=lambda x: x[1], reverse=True)
    print(f"\n📅 Skills with Date Added ({len(skills_with_dates)}):")
    print("=" * 60)
    if skills_with_dates:
        for skill_name, date in skills_with_dates:
            print(f" {date}{skill_name}")
    else:
        print(" (none)")
    print(f"\n⏳ Skills without Date Added ({len(skills_without_dates)}):")
    print("=" * 60)
    if skills_without_dates:
        for skill_name in sorted(skills_without_dates):
            print(f" {skill_name}")
    else:
        print(" (none)")
    total = len(skills_with_dates) + len(skills_without_dates)
    percentage = (len(skills_with_dates) / total * 100) if total > 0 else 0
    print(f"\n📊 Coverage: {len(skills_with_dates)}/{total} ({percentage:.1f}%)")
def add_missing_dates(date_str=None):
    """Add date_added to skills that don't have it.

    Args:
        date_str: ISO date (YYYY-MM-DD) to stamp; defaults to today.

    Returns:
        True when processing ran (individual failures are reported inline);
        False when the supplied date is malformed or not a real calendar date.
    """
    if date_str is None:
        date_str = datetime.now().strftime('%Y-%m-%d')
    # Validate the shape AND that it is a real calendar date — the regex
    # alone accepted impossible values such as 2024-13-99.
    if not re.match(r'^\d{4}-\d{2}-\d{2}$', date_str):
        print(f"❌ Invalid date format: {date_str}. Use YYYY-MM-DD.")
        return False
    try:
        datetime.strptime(date_str, '%Y-%m-%d')
    except ValueError:
        print(f"❌ Invalid date format: {date_str}. Use YYYY-MM-DD.")
        return False
    skills_dir = os.path.join(get_project_root(), 'skills')
    updated_count = 0
    skipped_count = 0
    for root, dirs, files in os.walk(skills_dir):
        # Skip hidden/disabled directories (e.g. skills/.disabled)
        dirs[:] = [d for d in dirs if not d.startswith('.')]
        if "SKILL.md" in files:
            skill_name = os.path.basename(root)
            skill_path = os.path.join(root, "SKILL.md")
            try:
                with open(skill_path, 'r', encoding='utf-8') as f:
                    content = f.read()
                metadata, _ = parse_frontmatter(content)
                if metadata is None:
                    print(f"⚠️ {skill_name}: Could not parse frontmatter, skipping")
                    continue
                if 'date_added' not in metadata:
                    if update_skill_frontmatter(skill_path, {'date_added': date_str}):
                        print(f"{skill_name}: Added date_added: {date_str}")
                        updated_count += 1
                    else:
                        print(f"{skill_name}: Failed to update")
                else:
                    skipped_count += 1
            except Exception as e:
                print(f"❌ Error processing {skill_name}: {str(e)}")
    print(f"\n✨ Updated {updated_count} skills, skipped {skipped_count} that already had dates")
    return True
def add_all_dates(date_str=None):
    """Add/update date_added for ALL skills (overwrites existing values).

    Args:
        date_str: ISO date (YYYY-MM-DD) to stamp; defaults to today.

    Returns:
        True when processing ran; False when the supplied date is malformed
        or not a real calendar date.
    """
    if date_str is None:
        date_str = datetime.now().strftime('%Y-%m-%d')
    # Validate the shape AND that it is a real calendar date — the regex
    # alone accepted impossible values such as 2024-13-99.
    if not re.match(r'^\d{4}-\d{2}-\d{2}$', date_str):
        print(f"❌ Invalid date format: {date_str}. Use YYYY-MM-DD.")
        return False
    try:
        datetime.strptime(date_str, '%Y-%m-%d')
    except ValueError:
        print(f"❌ Invalid date format: {date_str}. Use YYYY-MM-DD.")
        return False
    skills_dir = os.path.join(get_project_root(), 'skills')
    updated_count = 0
    for root, dirs, files in os.walk(skills_dir):
        # Skip hidden/disabled directories (e.g. skills/.disabled)
        dirs[:] = [d for d in dirs if not d.startswith('.')]
        if "SKILL.md" in files:
            skill_name = os.path.basename(root)
            skill_path = os.path.join(root, "SKILL.md")
            try:
                if update_skill_frontmatter(skill_path, {'date_added': date_str}):
                    print(f"{skill_name}: Set date_added: {date_str}")
                    updated_count += 1
                else:
                    print(f"{skill_name}: Failed to update")
            except Exception as e:
                print(f"❌ Error processing {skill_name}: {str(e)}")
    print(f"\n✨ Updated {updated_count} skills")
    return True
def update_skill_date(skill_name, date_str):
    """Set date_added for one specific skill.

    Args:
        skill_name: Directory name under skills/.
        date_str: ISO date (YYYY-MM-DD) to set.

    Returns:
        True on success; False when the date is invalid, the skill does not
        exist, or the frontmatter update failed.
    """
    # Validate the shape AND that it is a real calendar date — the regex
    # alone accepted impossible values such as 2024-02-30.
    if not re.match(r'^\d{4}-\d{2}-\d{2}$', date_str):
        print(f"❌ Invalid date format: {date_str}. Use YYYY-MM-DD.")
        return False
    try:
        datetime.strptime(date_str, '%Y-%m-%d')
    except ValueError:
        print(f"❌ Invalid date format: {date_str}. Use YYYY-MM-DD.")
        return False
    skills_dir = os.path.join(get_project_root(), 'skills')
    skill_path = os.path.join(skills_dir, skill_name, 'SKILL.md')
    if not os.path.exists(skill_path):
        print(f"❌ Skill not found: {skill_name}")
        return False
    if update_skill_frontmatter(skill_path, {'date_added': date_str}):
        print(f"{skill_name}: Updated date_added to {date_str}")
        return True
    else:
        print(f"{skill_name}: Failed to update")
        return False
def main():
    """CLI dispatcher for the date_added management sub-commands."""
    parser = argparse.ArgumentParser(
        description="Manage skill date_added metadata",
        formatter_class=argparse.RawDescriptionHelpFormatter,
        epilog="""
Examples:
    python manage_skill_dates.py list
    python manage_skill_dates.py add-missing
    python manage_skill_dates.py add-missing --date 2024-01-15
    python manage_skill_dates.py add-all --date 2025-01-01
    python manage_skill_dates.py update my-skill-name 2024-06-01
    """
    )
    subparsers = parser.add_subparsers(dest='command', help='Command to execute')
    # list: no extra arguments
    subparsers.add_parser('list', help='List all skills with their date_added values')
    # add-missing: optional --date
    sub = subparsers.add_parser('add-missing', help='Add date_added to skills without it')
    sub.add_argument('--date', help='Date to use (YYYY-MM-DD), defaults to today')
    # add-all: optional --date
    sub = subparsers.add_parser('add-all', help='Add/update date_added for all skills')
    sub.add_argument('--date', help='Date to use (YYYY-MM-DD), defaults to today')
    # update: positional skill name + date
    sub = subparsers.add_parser('update', help='Update a specific skill date')
    sub.add_argument('skill_name', help='Name of the skill')
    sub.add_argument('date', help='Date to set (YYYY-MM-DD)')
    args = parser.parse_args()
    if args.command == 'list':
        list_skills()
    elif args.command == 'add-missing':
        add_missing_dates(args.date)
    elif args.command == 'add-all':
        add_all_dates(args.date)
    elif args.command == 'update':
        update_skill_date(args.skill_name, args.date)
    else:
        # No sub-command given: show usage instead of failing.
        parser.print_help()

if __name__ == '__main__':
    main()

View File

@@ -1,155 +0,0 @@
const fs = require('fs');
const path = require('path');
const yaml = require('yaml');
const { listSkillIds, parseFrontmatter } = require('../lib/skill-utils');
const ROOT = path.resolve(__dirname, '..');
const SKILLS_DIR = path.join(ROOT, 'skills');
// Frontmatter keys allowed to stay at the top level of SKILL.md; any other
// key is folded into the `metadata` map by normalizeSkill().
const ALLOWED_FIELDS = new Set([
  'name',
  'description',
  'risk',
  'source',
  'license',
  'compatibility',
  'metadata',
  'allowed-tools',
  'date_added',
  'category',
  'id',
]);
function isPlainObject(value) {
return value && typeof value === 'object' && !Array.isArray(value);
}
// Flatten an arbitrary frontmatter value into one display string: scalars are
// stringified, arrays of scalars joined with ", ", and nested values fall
// back to JSON.
function coerceToString(value) {
  if (value === null || value === undefined) return '';
  if (typeof value === 'string') return value.trim();
  if (typeof value === 'number' || typeof value === 'boolean') return String(value);
  if (Array.isArray(value)) {
    const allScalar = value.every(item => ['string', 'number', 'boolean'].includes(typeof item));
    if (!allScalar) return JSON.stringify(value);
    return value.map(item => String(item).trim()).filter(Boolean).join(', ');
  }
  if (isPlainObject(value)) return JSON.stringify(value);
  return String(value).trim();
}
// Merge a value into metadata[key]: set it when absent, comma-append when new,
// and skip when the stringified value is already present.
function appendMetadata(metadata, key, value) {
  const incoming = coerceToString(value);
  if (!incoming) return;
  const existing = metadata[key];
  if (!existing) {
    metadata[key] = incoming;
  } else if (!existing.includes(incoming)) {
    metadata[key] = `${existing}, ${incoming}`;
  }
}
function collectAllowedTools(value, toolSet) {
if (!value) return;
if (typeof value === 'string') {
value
.split(/[\s,]+/)
.map(token => token.trim())
.filter(Boolean)
.forEach(token => toolSet.add(token));
return;
}
if (Array.isArray(value)) {
value
.map(token => String(token).trim())
.filter(Boolean)
.forEach(token => toolSet.add(token));
}
}
// Normalize one skill's SKILL.md frontmatter in place:
//  - fold unknown top-level keys into `metadata`
//  - merge tools/tool_access into a single `allowed-tools` string
//  - rewrite keys in a canonical order
// Returns true when the file was rewritten, false when nothing changed or
// the file has no frontmatter.
function normalizeSkill(skillId) {
  const skillPath = path.join(SKILLS_DIR, skillId, 'SKILL.md');
  const content = fs.readFileSync(skillPath, 'utf8');
  const { data, body, hasFrontmatter } = parseFrontmatter(content);
  if (!hasFrontmatter) return false;
  let modified = false;
  const updated = { ...data };
  // Preserve a malformed (non-object) metadata value instead of dropping it.
  const metadata = isPlainObject(updated.metadata) ? { ...updated.metadata } : {};
  if (updated.metadata !== undefined && !isPlainObject(updated.metadata)) {
    appendMetadata(metadata, 'legacy_metadata', updated.metadata);
    modified = true;
  }
  // Gather tool names from all three legacy spellings before deleting them.
  const allowedTools = new Set();
  collectAllowedTools(updated['allowed-tools'], allowedTools);
  collectAllowedTools(updated.tools, allowedTools);
  collectAllowedTools(updated.tool_access, allowedTools);
  if (updated.tools !== undefined) {
    delete updated.tools;
    modified = true;
  }
  if (updated.tool_access !== undefined) {
    delete updated.tool_access;
    modified = true;
  }
  // Move every non-whitelisted key into the metadata map.
  for (const key of Object.keys(updated)) {
    if (ALLOWED_FIELDS.has(key)) continue;
    if (key === 'tags') {
      appendMetadata(metadata, 'tags', updated[key]);
    } else {
      appendMetadata(metadata, key, updated[key]);
    }
    delete updated[key];
    modified = true;
  }
  // allowed-tools is stored as a single space-separated string.
  if (allowedTools.size) {
    updated['allowed-tools'] = Array.from(allowedTools).join(' ');
    modified = true;
  } else if (updated['allowed-tools'] !== undefined) {
    delete updated['allowed-tools'];
    modified = true;
  }
  // Keep metadata only when it actually has entries.
  if (Object.keys(metadata).length) {
    updated.metadata = metadata;
    modified = true;
  } else if (updated.metadata !== undefined) {
    delete updated.metadata;
    modified = true;
  }
  if (!modified) return false;
  // Re-emit keys in canonical order so diffs stay stable across runs.
  const ordered = {};
  const order = ['id', 'name', 'description', 'category', 'risk', 'source', 'license', 'compatibility', 'date_added', 'allowed-tools', 'metadata'];
  for (const key of order) {
    if (updated[key] !== undefined) {
      ordered[key] = updated[key];
    }
  }
  const fm = yaml.stringify(ordered).trimEnd();
  // Only add a separating newline when the body does not already start with one.
  const bodyPrefix = body.length && (body.startsWith('\n') || body.startsWith('\r\n')) ? '' : '\n';
  const next = `---\n${fm}\n---${bodyPrefix}${body}`;
  fs.writeFileSync(skillPath, next);
  return true;
}
// Normalize every skill's frontmatter and report how many files changed.
function run() {
  const skillIds = listSkillIds(SKILLS_DIR);
  let updatedCount = 0;
  for (const skillId of skillIds) {
    if (normalizeSkill(skillId)) updatedCount += 1;
  }
  console.log(`Normalized frontmatter for ${updatedCount} skills.`);
}
// Run directly (`node scripts/...`) or be imported by other tooling.
if (require.main === module) {
  run();
}
module.exports = { run };

View File

@@ -1,66 +0,0 @@
#!/bin/bash
# Release Cycle Automation Script
# Enforces protocols from .github/MAINTENANCE.md
# `set -e` aborts on the first failing command so a broken release cannot
# slip past validation.
set -e
# ANSI colors for step output.
GREEN='\033[0;32m'
RED='\033[0;31m'
YELLOW='\033[1;33m'
NC='\033[0m'
echo -e "${YELLOW}🤖 Initiating Antigravity Release Protocol...${NC}"
# 1. Validation Chain: validate skills, regenerate the index, sync README.
echo -e "\n${YELLOW}Step 1: Running Validation Chain...${NC}"
echo "Running validate_skills.py..."
python3 scripts/validate_skills.py
echo "Running generate_index.py..."
python3 scripts/generate_index.py
echo "Running update_readme.py..."
python3 scripts/update_readme.py
# 2. Catalog (required for CI)
echo -e "\n${YELLOW}Step 2: Build catalog...${NC}"
npm run catalog
# 3. Stats Consistency Check — README's advertised count must match the index.
echo -e "\n${YELLOW}Step 3: Verifying Stats Consistency...${NC}"
JSON_COUNT=$(python3 -c "import json; print(len(json.load(open('skills_index.json'))))")
echo "Skills in Registry (JSON): $JSON_COUNT"
# Check README Intro
README_CONTENT=$(cat README.md)
if [[ "$README_CONTENT" != *"$JSON_COUNT high-performance"* ]]; then
    echo -e "${RED}❌ ERROR: README.md intro consistency failure!${NC}"
    echo "Expected: '$JSON_COUNT high-performance'"
    echo "Found mismatch. Please grep for 'high-performance' in README.md and fix it."
    exit 1
fi
echo -e "${GREEN}✅ Stats Consistent.${NC}"
# 4. Version check (package.json is source of truth for npm)
echo -e "\n${YELLOW}Step 4: Version check${NC}"
PKG_VERSION=$(node -p "require('./package.json').version")
echo "package.json version: $PKG_VERSION"
echo "Ensure this version is bumped before 'npm publish' (npm forbids republishing the same version)."
# 5. Contributor Check — interactive: a human must confirm attribution.
echo -e "\n${YELLOW}Step 5: Contributor Check${NC}"
echo "Recent commits by author (check against README 'Repo Contributors'):"
git shortlog -sn --since="1 month ago" --all --no-merges | head -n 10
echo -e "${YELLOW}⚠️ MANUAL VERIFICATION REQUIRED:${NC}"
echo "1. Are all PR authors above listed in 'Repo Contributors'?"
echo "2. Are all External Sources listed in 'Credits & Sources'?"
read -p "Type 'yes' to confirm you have verified contributors: " CONFIRM_CONTRIB
if [ "$CONFIRM_CONTRIB" != "yes" ]; then
    echo -e "${RED}❌ Verification failed. Aborting.${NC}"
    exit 1
fi
echo -e "\n${GREEN}✅ Release Cycle Checks Passed. You may now commit and push.${NC}"
echo -e "${YELLOW}After tagging a release: run \`npm publish\` from repo root (or use GitHub Release + NPM_TOKEN for CI).${NC}"
exit 0

View File

@@ -1,90 +0,0 @@
#!/usr/bin/env node
'use strict';
const { spawn, spawnSync } = require('node:child_process');
// Everything after the script path is forwarded verbatim to the interpreter.
const args = process.argv.slice(2);
if (args.length === 0) {
  console.error('Usage: node scripts/run-python.js <script.py> [args...]');
  process.exit(1);
}
function uniqueCandidates(candidates) {
const seen = new Set();
const unique = [];
for (const candidate of candidates) {
const key = candidate.join('\u0000');
if (!seen.has(key)) {
seen.add(key);
unique.push(candidate);
}
}
return unique;
}
// Build the ordered list of interpreter invocations to probe.
// ANTIGRAVITY_PYTHON / npm_config_python take precedence so CI and local
// setups can pin an interpreter without editing scripts.
function getPythonCandidates() {
  const pinned =
    process.env.ANTIGRAVITY_PYTHON || process.env.npm_config_python;
  const candidates = [];
  if (pinned) candidates.push([pinned]);
  // Keep this ordered list easy to update if project requirements change.
  candidates.push(['python3'], ['python'], ['py', '-3']);
  return uniqueCandidates(candidates);
}
function canRun(candidate) {
const [command, ...baseArgs] = candidate;
const probe = spawnSync(
command,
[...baseArgs, '-c', 'import sys; raise SystemExit(0 if sys.version_info[0] == 3 else 1)'],
{
stdio: 'ignore',
shell: false,
},
);
return probe.error == null && probe.status === 0;
}
// Pick the first interpreter that passes the probe; fail fast otherwise.
const pythonCandidates = getPythonCandidates();
const selected = pythonCandidates.find(canRun);
if (!selected) {
  console.error(
    'Unable to find a Python 3 interpreter. Tried: python3, python, py -3',
  );
  process.exit(1);
}
const [command, ...baseArgs] = selected;
// Inherit stdio so interactive Python scripts keep working unchanged.
const child = spawn(command, [...baseArgs, ...args], {
  stdio: 'inherit',
  shell: false,
});
child.on('error', (error) => {
  console.error(`Failed to start Python interpreter "${command}": ${error.message}`);
  process.exit(1);
});
child.on('exit', (code, signal) => {
  if (signal) {
    // Re-raise the child's signal on ourselves so callers see an accurate status.
    try {
      process.kill(process.pid, signal);
    } catch {
      process.exit(1);
    }
    return;
  }
  // Mirror the child's exit code (treat a missing code as failure).
  process.exit(code ?? 1);
});

View File

@@ -1,56 +0,0 @@
import fs from 'fs';
import path from 'path';
import { fileURLToPath } from 'url';
const __filename = fileURLToPath(import.meta.url);
const __dirname = path.dirname(__filename);
const ROOT_DIR = path.resolve(__dirname, '..');
const WEB_APP_PUBLIC = path.join(ROOT_DIR, 'web-app', 'public');
// Ensure public dir exists
if (!fs.existsSync(WEB_APP_PUBLIC)) {
  fs.mkdirSync(WEB_APP_PUBLIC, { recursive: true });
}
// 1. Copy skills_index.json (published to the web app as skills.json)
const sourceIndex = path.join(ROOT_DIR, 'skills_index.json');
const destIndex = path.join(WEB_APP_PUBLIC, 'skills.json');
console.log(`Copying ${sourceIndex} -> ${destIndex}...`);
fs.copyFileSync(sourceIndex, destIndex);
// 2. Copy skills directory content
// Note: Symlinking is better, but Windows often requires admin for symlinks.
// We will try to copy for reliability in this environment.
const sourceSkills = path.join(ROOT_DIR, 'skills');
const destSkills = path.join(WEB_APP_PUBLIC, 'skills');
console.log(`Copying skills directory...`);
// Recursive directory copy. Follows symlinks (statSync resolves them) so the
// destination receives real file content, never links.
function copyFolderSync(from, to) {
  if (!fs.existsSync(to)) fs.mkdirSync(to, { recursive: true });
  for (const entry of fs.readdirSync(from)) {
    const sourcePath = path.join(from, entry);
    const targetPath = path.join(to, entry);
    const info = fs.statSync(sourcePath); // statSync follows symlinks
    if (info.isDirectory()) {
      copyFolderSync(sourcePath, targetPath);
    } else if (info.isFile()) {
      fs.copyFileSync(sourcePath, targetPath);
    }
    // Other entry types (sockets, FIFOs) are intentionally skipped.
  }
}
// Check if destination exists and remove it to ensure fresh copy
// (stale skill directories would otherwise survive renames/deletes).
if (fs.existsSync(destSkills)) {
  fs.rmSync(destSkills, { recursive: true, force: true });
}
copyFolderSync(sourceSkills, destSkills);
console.log('✅ Web app assets setup complete!');

View File

@@ -1,119 +0,0 @@
#!/usr/bin/env python3
"""
Skills Manager - Easily enable/disable skills locally
Usage:
python3 scripts/skills_manager.py list # List active skills
python3 scripts/skills_manager.py disabled # List disabled skills
python3 scripts/skills_manager.py enable SKILL # Enable a skill
python3 scripts/skills_manager.py disable SKILL # Disable a skill
"""
import sys
import os
from pathlib import Path
SKILLS_DIR = Path(__file__).parent.parent / "skills"
DISABLED_DIR = SKILLS_DIR / ".disabled"
def list_active():
    """Print active skills and symlinked entries, then a summary count.

    Symlinked directories satisfy is_dir(), so the original listing counted
    them both as plain skills and as symlinks, double-counting the total;
    they are now excluded from the plain-skill list and reported only in
    the symlink section.
    """
    print("🟢 Active Skills:\n")
    entries = list(SKILLS_DIR.iterdir())
    skills = sorted(d.name for d in entries
                    if d.is_dir() and not d.is_symlink() and not d.name.startswith('.'))
    symlinks = sorted(s.name for s in entries if s.is_symlink())
    for skill in skills:
        print(f"{skill}")
    if symlinks:
        print("\n📎 Symlinks:")
        for link in symlinks:
            target = os.readlink(SKILLS_DIR / link)
            print(f"{link}{target}")
    print(f"\n✅ Total: {len(skills)} skills + {len(symlinks)} symlinks")
def list_disabled():
    """Print every skill currently parked under skills/.disabled/."""
    if not DISABLED_DIR.exists():
        print("❌ No disabled skills directory found")
        return
    print("⚪ Disabled Skills:\n")
    disabled = sorted(entry.name for entry in DISABLED_DIR.iterdir() if entry.is_dir())
    for skill in disabled:
        print(f"{skill}")
    print(f"\n📊 Total: {len(disabled)} disabled skills")
def enable_skill(skill_name):
    """Move a skill out of skills/.disabled/ back into the active set.

    Returns True on success, False when the skill is missing from
    .disabled/ or already active.
    """
    disabled_path = DISABLED_DIR / skill_name
    active_path = SKILLS_DIR / skill_name
    if not disabled_path.exists():
        print(f"❌ Skill '{skill_name}' not found in .disabled/")
        return False
    if active_path.exists():
        print(f"⚠️ Skill '{skill_name}' is already active")
        return False
    disabled_path.rename(active_path)
    print(f"✅ Enabled: {skill_name}")
    return True
def disable_skill(skill_name):
    """Move an active skill into skills/.disabled/ so tooling ignores it.

    Refuses to touch dot-directories and symlinks. Returns True on
    success, False otherwise.
    """
    active_path = SKILLS_DIR / skill_name
    parked_path = DISABLED_DIR / skill_name
    if not active_path.exists():
        print(f"❌ Skill '{skill_name}' not found")
        return False
    if active_path.name.startswith('.'):
        print(f"⚠️ Cannot disable system directory: {skill_name}")
        return False
    if active_path.is_symlink():
        print(f"⚠️ Cannot disable symlink: {skill_name}")
        print(f" (Remove the symlink manually if needed)")
        return False
    DISABLED_DIR.mkdir(exist_ok=True)
    active_path.rename(parked_path)
    print(f"✅ Disabled: {skill_name}")
    return True
def main():
    """Parse sys.argv and run the matching skills-manager command."""
    argv = sys.argv
    if len(argv) < 2:
        print(__doc__)
        sys.exit(1)
    command = argv[1].lower()
    if command == "list":
        list_active()
        return
    if command == "disabled":
        list_disabled()
        return
    if command == "enable":
        if len(argv) < 3:
            print("❌ Usage: skills_manager.py enable SKILL_NAME")
            sys.exit(1)
        enable_skill(argv[2])
        return
    if command == "disable":
        if len(argv) < 3:
            print("❌ Usage: skills_manager.py disable SKILL_NAME")
            sys.exit(1)
        disable_skill(argv[2])
        return
    print(f"❌ Unknown command: {command}")
    print(__doc__)
    sys.exit(1)

if __name__ == "__main__":
    main()

View File

@@ -1,424 +0,0 @@
#!/usr/bin/env python3
"""
Sync Microsoft Skills Repository - v4 (Flat Structure)
Reads each SKILL.md frontmatter 'name' field and uses it as a flat directory
name under skills/ to comply with the repository's indexing conventions.
"""
import re
import shutil
import subprocess
import tempfile
import json
from pathlib import Path
MS_REPO = "https://github.com/microsoft/skills.git"
REPO_ROOT = Path(__file__).parent.parent
TARGET_DIR = REPO_ROOT / "skills"
DOCS_DIR = REPO_ROOT / "docs"
ATTRIBUTION_FILE = DOCS_DIR / "microsoft-skills-attribution.json"
def clone_repo(temp_dir: Path):
    """Shallow-clone the Microsoft skills repository into *temp_dir*.

    Raises CalledProcessError when git fails (check=True).
    """
    print("🔄 Cloning Microsoft Skills repository...")
    cmd = ["git", "clone", "--depth", "1", MS_REPO, str(temp_dir)]
    subprocess.run(cmd, check=True)
def cleanup_previous_sync():
    """Remove skill directories from a previous sync using the attribution manifest.

    Only directories recorded in the manifest as Microsoft-owned are deleted,
    so hand-written skills are never touched. Returns the number of
    directories removed (0 when there is no manifest or it is unreadable).
    """
    if not ATTRIBUTION_FILE.exists():
        print(" No previous attribution file found — skipping cleanup.")
        return 0
    try:
        with open(ATTRIBUTION_FILE) as f:
            attribution = json.load(f)
    except (json.JSONDecodeError, OSError) as e:
        # Best-effort: a corrupt manifest means we cannot safely delete anything.
        print(f" ⚠️ Could not read attribution file: {e}")
        return 0
    previous_skills = attribution.get("skills", [])
    removed_count = 0
    for skill in previous_skills:
        flat_name = skill.get("flat_name", "")
        if not flat_name:
            continue
        skill_dir = TARGET_DIR / flat_name
        if skill_dir.exists() and skill_dir.is_dir():
            shutil.rmtree(skill_dir)
            removed_count += 1
    print(
        f" 🗑️ Removed {removed_count} previously synced skill directories.")
    return removed_count
import yaml
def extract_skill_name(skill_md_path: Path) -> str | None:
    """Return the frontmatter 'name' from a SKILL.md, or None when the file
    is unreadable, has no frontmatter, or the YAML is invalid."""
    try:
        content = skill_md_path.read_text(encoding="utf-8")
    except Exception:
        return None
    match = re.search(r"^---\s*\n(.*?)\n---", content, re.DOTALL)
    if match is None:
        return None
    try:
        frontmatter = yaml.safe_load(match.group(1)) or {}
        return frontmatter.get('name')
    except Exception:
        return None
def generate_fallback_name(relative_path: Path) -> str:
    """
    Derive a flat directory name when the frontmatter 'name' is missing,
    e.g. 'dotnet/compute/botservice' -> 'ms-dotnet-compute-botservice'.
    """
    segments = [segment for segment in relative_path.parts if segment]
    return "ms-" + "-".join(segments)
def find_skills_in_directory(source_dir: Path):
    """
    Walk the Microsoft repo's skills/ directory (which uses symlinks)
    and resolve each to its actual SKILL.md content.
    Returns list of dicts: {relative_path, skill_md, source_dir}.
    """
    skills_source = source_dir / "skills"
    results = []
    if not skills_source.exists():
        return results
    for item in skills_source.rglob("*"):
        if not item.is_dir():
            continue
        skill_md = None
        actual_dir = None
        if item.is_symlink():
            # Symlinked entries point at the canonical skill dir; a broken
            # link raises on resolve() and is skipped.
            try:
                resolved = item.resolve()
                if (resolved / "SKILL.md").exists():
                    skill_md = resolved / "SKILL.md"
                    actual_dir = resolved
            except Exception:
                continue
        elif (item / "SKILL.md").exists():
            skill_md = item / "SKILL.md"
            actual_dir = item
        if skill_md is None:
            continue
        # relative_path keeps the position under skills/ (used for fallback
        # naming and attribution), even when content came from a link target.
        try:
            relative_path = item.relative_to(skills_source)
        except ValueError:
            continue
        results.append({
            "relative_path": relative_path,
            "skill_md": skill_md,
            "source_dir": actual_dir,
        })
    return results
def find_plugin_skills(source_dir: Path, already_synced_names: set):
    """Collect plugin skills under .github/plugins/ whose directory name is
    not in *already_synced_names*.

    Returns entries shaped like find_skills_in_directory():
    {relative_path, skill_md, source_dir}.
    """
    plugins_root = source_dir / ".github" / "plugins"
    if not plugins_root.exists():
        return []
    entries = []
    for skill_md in plugins_root.rglob("SKILL.md"):
        plugin_dir = skill_md.parent
        if plugin_dir.name in already_synced_names:
            continue
        entries.append({
            "relative_path": Path("plugins") / plugin_dir.name,
            "skill_md": skill_md,
            "source_dir": plugin_dir,
        })
    return entries
def find_github_skills(source_dir: Path, already_synced_names: set):
    """Collect skills under .github/skills/ that were not already reached
    via the skills/ symlink tree (names in *already_synced_names*)."""
    github_root = source_dir / ".github" / "skills"
    if not github_root.exists():
        return []
    entries = []
    for candidate in github_root.iterdir():
        if not candidate.is_dir():
            continue
        manifest = candidate / "SKILL.md"
        if not manifest.exists() or candidate.name in already_synced_names:
            continue
        entries.append({
            "relative_path": Path(".github/skills") / candidate.name,
            "skill_md": manifest,
            "source_dir": candidate,
        })
    return entries
def sync_skills_flat(source_dir: Path, target_dir: Path):
    """
    Sync all Microsoft skills into a flat structure under skills/.
    Uses frontmatter 'name' as directory name, with collision detection.
    Protects existing non-Microsoft skills from being overwritten.

    Three sources are processed in order: the skills/ symlink tree, then
    .github/plugins/, then .github/skills/ (deduplicated against earlier
    passes). Returns (synced_count, skill_metadata) where skill_metadata
    feeds the attribution manifest used by the next run's cleanup.
    """
    # Load previous attribution to know which dirs are Microsoft-owned
    previously_synced_names = set()
    if ATTRIBUTION_FILE.exists():
        try:
            with open(ATTRIBUTION_FILE) as f:
                prev = json.load(f)
            previously_synced_names = {
                s["flat_name"] for s in prev.get("skills", []) if s.get("flat_name")
            }
        except (json.JSONDecodeError, OSError):
            pass
    all_skill_entries = find_skills_in_directory(source_dir)
    print(f" 📂 Found {len(all_skill_entries)} skills in skills/ directory")
    synced_count = 0
    skill_metadata = []
    # name -> original relative_path (for collision logging)
    used_names: dict[str, str] = {}
    for entry in all_skill_entries:
        skill_name = extract_skill_name(entry["skill_md"])
        if not skill_name:
            skill_name = generate_fallback_name(entry["relative_path"])
            print(
                f" ⚠️ No frontmatter name for {entry['relative_path']}, using fallback: {skill_name}")
        # Internal collision detection (two Microsoft skills with same name)
        if skill_name in used_names:
            original = used_names[skill_name]
            print(
                f" ⚠️ Name collision '{skill_name}': {entry['relative_path']} vs {original}")
            # Disambiguate with the language segment (first path component).
            lang = entry["relative_path"].parts[0] if entry["relative_path"].parts else "unknown"
            skill_name = f"{skill_name}-{lang}"
            print(f" Resolved to: {skill_name}")
        # Protect existing non-Microsoft skills from being overwritten
        target_skill_dir = target_dir / skill_name
        if target_skill_dir.exists() and skill_name not in previously_synced_names:
            original_name = skill_name
            skill_name = f"{skill_name}-ms"
            print(
                f" ⚠️ '{original_name}' exists as a non-Microsoft skill, using: {skill_name}")
        used_names[skill_name] = str(entry["relative_path"])
        # Create flat target directory
        target_skill_dir = target_dir / skill_name
        target_skill_dir.mkdir(parents=True, exist_ok=True)
        # Copy SKILL.md
        shutil.copy2(entry["skill_md"], target_skill_dir / "SKILL.md")
        # Copy other files from the skill directory
        for file_item in entry["source_dir"].iterdir():
            if file_item.name != "SKILL.md" and file_item.is_file():
                shutil.copy2(file_item, target_skill_dir / file_item.name)
        skill_metadata.append({
            "flat_name": skill_name,
            "original_path": str(entry["relative_path"]),
            "source": "microsoft/skills",
        })
        synced_count += 1
        print(f"{entry['relative_path']} → skills/{skill_name}/")
    # Collect all source directory names already synced (for dedup)
    synced_names = set(used_names.keys())
    already_synced_dir_names = {
        e["source_dir"].name for e in all_skill_entries}
    # Sync plugin skills from .github/plugins/
    plugin_entries = find_plugin_skills(source_dir, already_synced_dir_names)
    if plugin_entries:
        print(f"\n 📦 Found {len(plugin_entries)} additional plugin skills")
    for entry in plugin_entries:
        skill_name = extract_skill_name(entry["skill_md"])
        if not skill_name:
            skill_name = entry["source_dir"].name
        if skill_name in synced_names:
            skill_name = f"{skill_name}-plugin"
        # Protect existing non-Microsoft skills
        target_skill_dir = target_dir / skill_name
        if target_skill_dir.exists() and skill_name not in previously_synced_names:
            original_name = skill_name
            skill_name = f"{skill_name}-ms"
            target_skill_dir = target_dir / skill_name
            print(
                f" ⚠️ '{original_name}' exists as a non-Microsoft skill, using: {skill_name}")
        synced_names.add(skill_name)
        already_synced_dir_names.add(entry["source_dir"].name)
        target_skill_dir.mkdir(parents=True, exist_ok=True)
        shutil.copy2(entry["skill_md"], target_skill_dir / "SKILL.md")
        for file_item in entry["source_dir"].iterdir():
            if file_item.name != "SKILL.md" and file_item.is_file():
                shutil.copy2(file_item, target_skill_dir / file_item.name)
        skill_metadata.append({
            "flat_name": skill_name,
            "original_path": str(entry["relative_path"]),
            "source": "microsoft/skills (plugin)",
        })
        synced_count += 1
        print(f"{entry['relative_path']} → skills/{skill_name}/")
    # Sync skills in .github/skills/ not reachable via the skills/ symlink tree
    github_skill_entries = find_github_skills(
        source_dir, already_synced_dir_names)
    if github_skill_entries:
        # NOTE(review): the '<20>' below is a mojibake'd emoji inherited from
        # the original source — left as-is (runtime string); fix separately.
        print(
            f"\n <20> Found {len(github_skill_entries)} skills in .github/skills/ not linked from skills/")
    for entry in github_skill_entries:
        skill_name = extract_skill_name(entry["skill_md"])
        if not skill_name:
            skill_name = entry["source_dir"].name
        if skill_name in synced_names:
            skill_name = f"{skill_name}-github"
        # Protect existing non-Microsoft skills
        target_skill_dir = target_dir / skill_name
        if target_skill_dir.exists() and skill_name not in previously_synced_names:
            original_name = skill_name
            skill_name = f"{skill_name}-ms"
            target_skill_dir = target_dir / skill_name
            print(
                f" ⚠️ '{original_name}' exists as a non-Microsoft skill, using: {skill_name}")
        synced_names.add(skill_name)
        target_skill_dir.mkdir(parents=True, exist_ok=True)
        shutil.copy2(entry["skill_md"], target_skill_dir / "SKILL.md")
        for file_item in entry["source_dir"].iterdir():
            if file_item.name != "SKILL.md" and file_item.is_file():
                shutil.copy2(file_item, target_skill_dir / file_item.name)
        skill_metadata.append({
            "flat_name": skill_name,
            "original_path": str(entry["relative_path"]),
            "source": "microsoft/skills (.github/skills)",
        })
        synced_count += 1
        print(f"{entry['relative_path']} → skills/{skill_name}/")
    return synced_count, skill_metadata
def save_attribution(metadata: list):
    """Write attribution metadata for the synced Microsoft skills to docs/.

    Args:
        metadata: List of per-skill dicts (flat_name, original_path, source)
            produced by the sync step.
    """
    DOCS_DIR.mkdir(parents=True, exist_ok=True)
    attribution = {
        "source": "microsoft/skills",
        "repository": "https://github.com/microsoft/skills",
        "license": "MIT",
        "synced_skills": len(metadata),
        "structure": "flat (frontmatter name as directory name)",
        "skills": metadata,
    }
    # Pin the encoding explicitly: the platform default (e.g. cp1252 on
    # Windows) differs from the UTF-8 used everywhere else in this repo.
    with open(DOCS_DIR / "microsoft-skills-attribution.json", "w", encoding="utf-8") as f:
        json.dump(attribution, f, indent=2)
def copy_license(source_dir: Path):
    """Mirror the upstream Microsoft LICENSE file into docs/, if present."""
    DOCS_DIR.mkdir(parents=True, exist_ok=True)
    license_file = source_dir / "LICENSE"
    if license_file.exists():
        shutil.copy2(license_file, DOCS_DIR / "LICENSE-MICROSOFT")
def main():
    """Entry point: clone microsoft/skills and flatten it into skills/<name>/.

    Returns:
        0 on success, 1 on any failure (traceback is printed first).
    """
    print("🚀 Microsoft Skills Sync Script v4 (Flat Structure)")
    print("=" * 55)
    with tempfile.TemporaryDirectory() as scratch:
        scratch_path = Path(scratch)
        try:
            clone_repo(scratch_path)
            TARGET_DIR.mkdir(parents=True, exist_ok=True)
            print("\n🧹 Cleaning up previous sync...")
            cleanup_previous_sync()
            print("\n🔗 Resolving symlinks and flattening into skills/<name>/...")
            count, metadata = sync_skills_flat(scratch_path, TARGET_DIR)
            print("\n📄 Saving attribution...")
            save_attribution(metadata)
            copy_license(scratch_path)
            print(f"\n✨ Success! Synced {count} Microsoft skills (flat structure)")
            print(f"📁 Location: {TARGET_DIR}/")
            # Summarize top-level source folders ("languages"), skipping the
            # plugins tree, which is not a language bucket.
            languages = {
                skill["original_path"].split("/")[0]
                for skill in metadata
                if skill["original_path"].split("/")[0] != "plugins"
            }
            print(f"\n📊 Organization:")
            print(f"   Total skills: {count}")
            print(f"   Languages: {', '.join(sorted(languages))}")
            print("\n📋 Next steps:")
            print("1. Run: npm run build")
            print("2. Commit changes and create PR")
        except Exception as e:
            print(f"\n❌ Error: {e}")
            import traceback
            traceback.print_exc()
            return 1
    return 0
if __name__ == "__main__":
    # Use SystemExit instead of the site-provided exit() builtin, which is
    # meant for interactive use and is absent when Python runs with -S.
    raise SystemExit(main())

View File

@@ -1,114 +0,0 @@
#!/bin/bash
# sync_recommended_skills.sh
# Syncs only the 35 recommended skills from GitHub repo to local central library
set -e
# Paths
GITHUB_REPO="/Users/nicco/Antigravity Projects/antigravity-awesome-skills/skills"
LOCAL_LIBRARY="/Users/nicco/.gemini/antigravity/scratch/.agent/skills"
BACKUP_DIR="/Users/nicco/.gemini/antigravity/scratch/.agent/skills_backup_$(date +%Y%m%d_%H%M%S)"
# 35 Recommended Skills
RECOMMENDED_SKILLS=(
    # Tier S - Core Development (13)
    "systematic-debugging"
    "test-driven-development"
    "writing-skills"
    "doc-coauthoring"
    "planning-with-files"
    "concise-planning"
    "software-architecture"
    "senior-architect"
    "senior-fullstack"
    "verification-before-completion"
    "git-pushing"
    "address-github-comments"
    "javascript-mastery"
    # Tier A - Your Projects (12)
    "docx-official"
    "pdf-official"
    "pptx-official"
    "xlsx-official"
    "react-best-practices"
    "web-design-guidelines"
    "frontend-dev-guidelines"
    "webapp-testing"
    "playwright-skill"
    "mcp-builder"
    "notebooklm"
    "ui-ux-pro-max"
    # Marketing & SEO (1)
    "content-creator"
    # Corporate (4)
    "brand-guidelines-anthropic"
    "brand-guidelines-community"
    "internal-comms-anthropic"
    "internal-comms-community"
    # Planning & Documentation (1)
    "writing-plans"
    # AI & Automation (5)
    "workflow-automation"
    "llm-app-patterns"
    "autonomous-agent-patterns"
    "prompt-library"
    "github-workflow-automation"
)
echo "🔄 Sync Recommended Skills"
echo "========================="
echo ""
echo "📍 Source: $GITHUB_REPO"
echo "📍 Target: $LOCAL_LIBRARY"
echo "📊 Skills to sync: ${#RECOMMENDED_SKILLS[@]}"
echo ""
# Create backup
echo "📦 Creating backup at: $BACKUP_DIR"
cp -r "$LOCAL_LIBRARY" "$BACKUP_DIR"
echo "✅ Backup created"
echo ""
# Clear local library (keep README.md if exists)
echo "🗑️  Clearing local library..."
cd "$LOCAL_LIBRARY"
for item in */; do
    rm -rf "$item"
done
echo "✅ Local library cleared"
echo ""
# Copy recommended skills
echo "📋 Copying recommended skills..."
SUCCESS_COUNT=0
MISSING_COUNT=0
for skill in "${RECOMMENDED_SKILLS[@]}"; do
    if [ -d "$GITHUB_REPO/$skill" ]; then
        cp -r "$GITHUB_REPO/$skill" "$LOCAL_LIBRARY/"
        echo "  ✅ $skill"
        # BUGFIX: ((VAR++)) evaluates to the *pre-increment* value, so the
        # first increment from 0 has exit status 1 and kills the script
        # under `set -e`. Plain assignment avoids the arithmetic exit code.
        SUCCESS_COUNT=$((SUCCESS_COUNT + 1))
    else
        echo "  ⚠️  $skill (not found in repo)"
        MISSING_COUNT=$((MISSING_COUNT + 1))
    fi
done
echo ""
echo "📊 Summary"
echo "=========="
echo "✅ Copied: $SUCCESS_COUNT skills"
echo "⚠️  Missing: $MISSING_COUNT skills"
echo "📦 Backup: $BACKUP_DIR"
echo ""
# Verify
FINAL_COUNT=$(find "$LOCAL_LIBRARY" -maxdepth 1 -type d ! -name "." | wc -l | tr -d ' ')
echo "🎯 Final count in local library: $FINAL_COUNT skills"
echo ""
echo "Done! Your local library now has only the recommended skills."

View File

@@ -0,0 +1,78 @@
#!/usr/bin/env python3
import argparse
import json
import os
import re
import sys
from update_readme import configure_utf8_output, find_repo_root, load_metadata, update_readme
ABOUT_DESCRIPTION_RE = re.compile(r'"description"\s*:\s*"([^"]*)"')
def update_package_description(base_dir: str, metadata: dict, dry_run: bool) -> bool:
    """Rewrite the first "description" field of package.json from metadata.

    Returns:
        True if the file content would change (or did change), else False.
    """
    package_path = os.path.join(base_dir, "package.json")
    with open(package_path, "r", encoding="utf-8") as file:
        original = file.read()
    description = (
        f"{metadata['total_skills_label']} agentic skills for Claude Code, Gemini CLI, "
        "Cursor, Antigravity & more. Installer CLI."
    )
    # Regex substitution (not json round-trip) preserves the file's formatting.
    rewritten = ABOUT_DESCRIPTION_RE.sub(
        f'"description": "{description}"', original, count=1
    )
    if rewritten == original:
        return False
    if dry_run:
        print(f"[dry-run] Would update package description in {package_path}")
        return True
    with open(package_path, "w", encoding="utf-8", newline="\n") as file:
        file.write(rewritten)
    print(f"✅ Updated package description in {package_path}")
    return True
def print_manual_github_about(metadata: dict) -> None:
    """Print copy/paste values for the GitHub repo "About" section."""
    about = (
        f"{metadata['total_skills_label']} curated SKILL.md files for Claude Code, "
        "Cursor, Gemini CLI, Codex, Copilot, and Antigravity."
    )
    print("\nManual GitHub repo settings update:")
    print(f"- About description: {about}")
    print("- Suggested topics: claude-code, cursor, gemini-cli, codex-cli, github-copilot, antigravity")
def parse_args() -> argparse.Namespace:
parser = argparse.ArgumentParser(
description="Synchronize repository metadata across README and package.json."
)
parser.add_argument("--dry-run", action="store_true", help="Preview updates without writing files.")
return parser.parse_args()
def main() -> int:
    """Sync metadata into README.md / package.json and report manual steps."""
    args = parse_args()
    base_dir = find_repo_root(os.path.dirname(__file__))
    metadata = load_metadata(base_dir)
    print("Repository metadata")
    print(json.dumps(metadata, indent=2))
    readme_metadata = update_readme(dry_run=args.dry_run)
    package_changed = update_package_description(base_dir, metadata, args.dry_run)
    print_manual_github_about(readme_metadata)
    if args.dry_run and not package_changed:
        print("\n[dry-run] No package.json description changes required.")
    return 0
if __name__ == "__main__":
    # Force UTF-8 console output (Windows) before any emoji-laden prints.
    configure_utf8_output()
    sys.exit(main())

View File

@@ -1,160 +0,0 @@
#!/usr/bin/env python3
"""
Inspect Microsoft Skills Repository Structure
Shows the repository layout, skill locations, and what flat names would be generated.
"""
import re
import io
import shutil
import subprocess
import sys
import tempfile
import traceback
import uuid
from pathlib import Path
MS_REPO = "https://github.com/microsoft/skills.git"
def create_clone_target(prefix: str) -> Path:
    """Return a writable, non-existent path suitable as a git clone target.

    Prefers the repo-local .tmp/tests directory and falls back to the system
    temp dir; the last OSError is re-raised if no candidate root is writable.
    """
    repo_tmp_root = Path(__file__).resolve().parents[2] / ".tmp" / "tests"
    last_error: OSError | None = None
    for root in (repo_tmp_root, Path(tempfile.gettempdir())):
        try:
            root.mkdir(parents=True, exist_ok=True)
            # Exclusive-create probe proves the root is actually writable.
            probe = root / f".{prefix}write-probe-{uuid.uuid4().hex}.tmp"
            with probe.open("xb"):
                pass
            probe.unlink()
        except OSError as exc:
            last_error = exc
            continue
        return root / f"{prefix}{uuid.uuid4().hex}"
    if last_error is not None:
        raise last_error
    raise OSError("Unable to determine clone destination")
def configure_utf8_output() -> None:
    """Best-effort UTF-8 stdout/stderr on Windows without dropping diagnostics."""
    for stream_name in ("stdout", "stderr"):
        stream = getattr(sys, stream_name)
        try:
            stream.reconfigure(encoding="utf-8", errors="backslashreplace")
        except Exception:
            # Stream lacks reconfigure() (or it failed); rewrap its raw
            # binary buffer instead, when one exists.
            raw = getattr(stream, "buffer", None)
            if raw is not None:
                setattr(
                    sys,
                    stream_name,
                    io.TextIOWrapper(raw, encoding="utf-8", errors="backslashreplace"),
                )
def extract_skill_name(skill_md_path: Path) -> str | None:
"""Extract the 'name' field from SKILL.md YAML frontmatter."""
try:
content = skill_md_path.read_text(encoding="utf-8")
except Exception:
return None
fm_match = re.search(r"^---\s*\n(.*?)\n---", content, re.DOTALL)
if not fm_match:
return None
for line in fm_match.group(1).splitlines():
match = re.match(r"^name:\s*(.+)$", line)
if match:
value = match.group(1).strip().strip("\"'")
if value:
return value
return None
def inspect_repo():
    """Inspect the Microsoft skills repository structure.

    Clones microsoft/skills shallowly into a temp location, lists every
    SKILL.md, prints the frontmatter-name -> flat-directory mapping, and
    reports any name collisions. The clone is removed on exit.
    """
    print("🔍 Inspecting Microsoft Skills Repository Structure")
    print("=" * 60)
    repo_path: Path | None = None
    try:
        repo_path = create_clone_target(prefix="ms-skills-")
        print("\n1⃣ Cloning repository...")
        try:
            # Shallow clone: only the latest snapshot is needed for inspection.
            subprocess.run(
                ["git", "clone", "--depth", "1", MS_REPO, str(repo_path)],
                check=True,
                capture_output=True,
                text=True,
            )
        except subprocess.CalledProcessError as exc:
            print("\n❌ git clone failed.", file=sys.stderr)
            if exc.stderr:
                print(exc.stderr.strip(), file=sys.stderr)
            raise
        # Find all SKILL.md files
        all_skill_mds = list(repo_path.rglob("SKILL.md"))
        print(f"\n2⃣ Total SKILL.md files found: {len(all_skill_mds)}")
        # Show flat name mapping
        print(f"\n3⃣ Flat Name Mapping (frontmatter 'name' → directory name):")
        print("-" * 60)
        names_seen: dict[str, list[str]] = {}
        for skill_md in sorted(all_skill_mds, key=lambda p: str(p)):
            try:
                rel = skill_md.parent.relative_to(repo_path)
            except ValueError:
                # Defensive: path outside the clone root; keep it absolute.
                rel = skill_md.parent
            name = extract_skill_name(skill_md)
            # Skills without a frontmatter name get an "ms-<path>" fallback
            # derived from the path components below the top-level folder.
            display_name = name if name else f"(no name → ms-{'-'.join(rel.parts[1:])})"
            print(f"   {rel}{display_name}")
            effective_name = name if name else f"ms-{'-'.join(rel.parts[1:])}"
            if effective_name not in names_seen:
                names_seen[effective_name] = []
            names_seen[effective_name].append(str(rel))
        # Collision check
        collisions = {n: paths for n, paths in names_seen.items()
                      if len(paths) > 1}
        if collisions:
            print(f"\n4⃣ ⚠️ Name Collisions Detected ({len(collisions)}):")
            for name, paths in collisions.items():
                print(f"   '{name}':")
                for p in paths:
                    print(f"      - {p}")
        else:
            print(
                f"\n4⃣ ✅ No name collisions — all {len(names_seen)} names are unique!")
        print("\n✨ Inspection complete!")
    finally:
        # Always remove the temporary clone, even when inspection fails.
        if repo_path is not None:
            shutil.rmtree(repo_path, ignore_errors=True)
if __name__ == "__main__":
    # Force UTF-8 console output (Windows) before any emoji-laden prints.
    configure_utf8_output()
    try:
        inspect_repo()
    except subprocess.CalledProcessError as exc:
        # git clone failure: propagate the child's exit code (fallback 1).
        sys.exit(exc.returncode or 1)
    except Exception as e:
        print(f"\n❌ Error: {e}", file=sys.stderr)
        traceback.print_exc(file=sys.stderr)
        sys.exit(1)

View File

@@ -1,76 +0,0 @@
#!/usr/bin/env node
const { spawnSync } = require("child_process");
const NETWORK_TEST_ENV = "ENABLE_NETWORK_TESTS";
const ENABLED_VALUES = new Set(["1", "true", "yes", "on"]);
const LOCAL_TEST_COMMANDS = [
["scripts/tests/validate_skills_headings.test.js"],
["scripts/run-python.js", "scripts/tests/test_validate_skills_headings.py"],
];
const NETWORK_TEST_COMMANDS = [
["scripts/run-python.js", "scripts/tests/inspect_microsoft_repo.py"],
["scripts/run-python.js", "scripts/tests/test_comprehensive_coverage.py"],
];
function isNetworkTestsEnabled() {
  // Unset/empty env var means disabled; otherwise accept the common
  // truthy spellings declared in ENABLED_VALUES (case-insensitive).
  const raw = process.env[NETWORK_TEST_ENV];
  return raw ? ENABLED_VALUES.has(String(raw).trim().toLowerCase()) : false;
}
function runNodeCommand(args) {
  // Run a child Node script synchronously with inherited stdio so its
  // output streams straight to the console.
  const result = spawnSync(process.execPath, args, { stdio: "inherit" });
  if (result.error) {
    // Spawn itself failed (e.g. ENOENT); surface it to the caller.
    throw result.error;
  }
  if (result.signal) {
    // Child was killed by a signal: re-raise the same signal on this
    // process so the shell observes the conventional 128+N status.
    process.kill(process.pid, result.signal);
  }
  if (typeof result.status !== "number") {
    // Neither status nor signal: treat the indeterminate result as failure.
    process.exit(1);
  }
  if (result.status !== 0) {
    // Propagate the child's non-zero exit code verbatim.
    process.exit(result.status);
  }
}
function runCommandSet(commands) {
  // Execute each test script in order; runNodeCommand terminates the
  // process on the first failure, so later commands never run after one.
  commands.forEach((commandArgs) => runNodeCommand(commandArgs));
}
function main() {
  // CLI modes: --local / --network run exactly one suite; no flag runs
  // local tests and then network tests only when explicitly enabled.
  const mode = process.argv[2];
  switch (mode) {
    case "--local":
      runCommandSet(LOCAL_TEST_COMMANDS);
      return;
    case "--network":
      runCommandSet(NETWORK_TEST_COMMANDS);
      return;
    default:
      break;
  }
  runCommandSet(LOCAL_TEST_COMMANDS);
  if (!isNetworkTestsEnabled()) {
    console.log(
      `[tests] Skipping network integration tests. Set ${NETWORK_TEST_ENV}=1 to enable.`,
    );
    return;
  }
  console.log(`[tests] ${NETWORK_TEST_ENV} enabled; running network integration tests.`);
  runCommandSet(NETWORK_TEST_COMMANDS);
}
main();

View File

@@ -1,253 +0,0 @@
#!/usr/bin/env python3
"""
Test Script: Verify Microsoft Skills Sync Coverage and Flat Name Uniqueness
Ensures all skills are captured and no directory name collisions exist.
"""
import re
import io
import shutil
import subprocess
import sys
import tempfile
import traceback
import uuid
from pathlib import Path
from collections import defaultdict
MS_REPO = "https://github.com/microsoft/skills.git"
def create_clone_target(prefix: str) -> Path:
    """Return a writable, non-existent path for a git clone destination.

    Tries the repo-local ``.tmp/tests`` root first, then the system temp
    directory; re-raises the most recent OSError when neither is usable.
    """
    candidate_roots = (
        Path(__file__).resolve().parents[2] / ".tmp" / "tests",
        Path(tempfile.gettempdir()),
    )
    last_error: OSError | None = None
    for root in candidate_roots:
        try:
            root.mkdir(parents=True, exist_ok=True)
            # Prove the root is writable via an exclusive-create probe file.
            probe_file = root / f".{prefix}write-probe-{uuid.uuid4().hex}.tmp"
            with probe_file.open("xb"):
                pass
            probe_file.unlink()
            return root / f"{prefix}{uuid.uuid4().hex}"
        except OSError as exc:
            last_error = exc
    if last_error is not None:
        raise last_error
    raise OSError("Unable to determine clone destination")
def configure_utf8_output() -> None:
    """Best-effort UTF-8 stdout/stderr on Windows without dropping diagnostics."""
    for stream_name in ("stdout", "stderr"):
        stream = getattr(sys, stream_name)
        try:
            stream.reconfigure(encoding="utf-8", errors="backslashreplace")
            continue  # reconfigured in place; nothing left to do
        except Exception:
            pass
        # Fall back to wrapping the underlying binary buffer, if any.
        binary = getattr(stream, "buffer", None)
        if binary is None:
            continue
        wrapper = io.TextIOWrapper(binary, encoding="utf-8", errors="backslashreplace")
        setattr(sys, stream_name, wrapper)
def extract_skill_name(skill_md_path: Path) -> str | None:
"""Extract the 'name' field from SKILL.md YAML frontmatter."""
try:
content = skill_md_path.read_text(encoding="utf-8")
except Exception:
return None
fm_match = re.search(r"^---\s*\n(.*?)\n---", content, re.DOTALL)
if not fm_match:
return None
for line in fm_match.group(1).splitlines():
match = re.match(r"^name:\s*(.+)$", line)
if match:
value = match.group(1).strip().strip("\"'")
if value:
return value
return None
def analyze_skill_locations():
    """
    Comprehensive analysis of all skill locations in Microsoft repo.
    Verifies flat name uniqueness and coverage.

    Clones microsoft/skills shallowly, categorizes every SKILL.md by its
    location, checks that frontmatter names map to unique, valid directory
    names, and prints a summary.

    Returns:
        dict with keys total, unique, collisions, missing_names,
        invalid_names, and the overall boolean ``passed``.
    """
    print("🔬 Comprehensive Skill Coverage & Uniqueness Analysis")
    print("=" * 60)
    repo_path: Path | None = None
    try:
        repo_path = create_clone_target(prefix="ms-skills-")
        print("\n1⃣ Cloning repository...")
        try:
            subprocess.run(
                ["git", "clone", "--depth", "1", MS_REPO, str(repo_path)],
                check=True,
                capture_output=True,
                text=True,
            )
        except subprocess.CalledProcessError as exc:
            print("\n❌ git clone failed.", file=sys.stderr)
            if exc.stderr:
                print(exc.stderr.strip(), file=sys.stderr)
            raise
        # Find ALL SKILL.md files
        all_skill_files = list(repo_path.rglob("SKILL.md"))
        print(f"\n2⃣ Total SKILL.md files found: {len(all_skill_files)}")
        # Categorize by location. Order matters: the .github/ checks must
        # run before the generic "/skills/" match below.
        location_types = defaultdict(list)
        for skill_file in all_skill_files:
            path_str = skill_file.as_posix()
            if ".github/skills/" in path_str:
                location_types["github_skills"].append(skill_file)
            elif ".github/plugins/" in path_str:
                location_types["github_plugins"].append(skill_file)
            elif "/skills/" in path_str:
                location_types["skills_dir"].append(skill_file)
            else:
                location_types["other"].append(skill_file)
        print("\n3⃣ Skills by Location Type:")
        for loc_type, files in sorted(location_types.items()):
            print(f"   📍 {loc_type}: {len(files)} skills")
        # Flat name uniqueness check
        print("\n4⃣ Flat Name Uniqueness Check:")
        print("-" * 60)
        name_map: dict[str, list[str]] = {}
        missing_names = []
        for skill_file in all_skill_files:
            try:
                rel = skill_file.parent.relative_to(repo_path)
            except ValueError:
                rel = skill_file.parent
            name = extract_skill_name(skill_file)
            if not name:
                missing_names.append(str(rel))
                # Fallback: derive "ms-<...>" from the path, dropping the
                # structural folders that carry no skill identity.
                parts = [p for p in rel.parts if p not in (
                    ".github", "skills", "plugins")]
                name = "ms-" + "-".join(parts) if parts else str(rel)
            if name not in name_map:
                name_map[name] = []
            name_map[name].append(str(rel))
        # Report results
        collisions = {n: paths for n, paths in name_map.items()
                      if len(paths) > 1}
        unique_names = {n: paths for n,
                        paths in name_map.items() if len(paths) == 1}
        print(f"\n   ✅ Unique names: {len(unique_names)}")
        if missing_names:
            print(
                f"\n   ⚠️ Skills missing frontmatter 'name' ({len(missing_names)}):")
            for path in missing_names[:5]:
                print(f"      - {path}")
            if len(missing_names) > 5:
                print(f"      ... and {len(missing_names) - 5} more")
        if collisions:
            print(f"\n   ❌ Name collisions ({len(collisions)}):")
            for name, paths in collisions.items():
                print(f"      '{name}':")
                for p in paths:
                    print(f"         - {p}")
        else:
            print(f"\n   ✅ No collisions detected!")
        # Validate all names are valid directory names
        print("\n5⃣ Directory Name Validation:")
        invalid_names = []
        for name in name_map:
            if not re.match(r"^[a-zA-Z0-9][a-zA-Z0-9._-]*$", name):
                invalid_names.append(name)
        if invalid_names:
            print(f"   ❌ Invalid directory names ({len(invalid_names)}):")
            for name in invalid_names[:5]:
                print(f"      - '{name}'")
        else:
            print(f"   ✅ All {len(name_map)} names are valid directory names!")
        # Summary (removed an unused local that recomputed the name total)
        print("\n6⃣ Summary:")
        print("-" * 60)
        total = len(all_skill_files)
        print(f"   Total SKILL.md files: {total}")
        print(f"   Unique flat names: {len(unique_names)}")
        print(f"   Collisions: {len(collisions)}")
        print(f"   Missing names: {len(missing_names)}")
        is_pass = len(collisions) == 0 and len(invalid_names) == 0
        if is_pass:
            print(f"\n   ✅ ALL CHECKS PASSED")
        else:
            print(f"\n   ⚠️ SOME CHECKS NEED ATTENTION")
        print("\n✨ Analysis complete!")
        return {
            "total": total,
            "unique": len(unique_names),
            "collisions": len(collisions),
            "missing_names": len(missing_names),
            "invalid_names": len(invalid_names),
            "passed": is_pass,
        }
    finally:
        # Always remove the temporary clone, even when analysis fails.
        if repo_path is not None:
            shutil.rmtree(repo_path, ignore_errors=True)
if __name__ == "__main__":
    # Force UTF-8 console output (Windows) before any emoji-laden prints.
    configure_utf8_output()
    try:
        results = analyze_skill_locations()
        print("\n" + "=" * 60)
        print("FINAL VERDICT")
        print("=" * 60)
        if results["passed"]:
            print("\n✅ V4 FLAT STRUCTURE IS VALID")
            print("   All names are unique and valid directory names!")
            sys.exit(0)
        else:
            print("\n⚠️ V4 FLAT STRUCTURE NEEDS FIXES")
            if results["collisions"] > 0:
                print(f"   {results['collisions']} name collisions to resolve")
            if results["invalid_names"] > 0:
                print(f"   {results['invalid_names']} invalid directory names")
            sys.exit(1)
    except subprocess.CalledProcessError as exc:
        # git clone failure: propagate the child's exit code (fallback 1).
        sys.exit(exc.returncode or 1)
    except Exception as e:
        print(f"\n❌ Error: {e}", file=sys.stderr)
        traceback.print_exc(file=sys.stderr)
        sys.exit(1)

View File

@@ -1,18 +0,0 @@
import os
import sys
sys.path.append(os.path.dirname(os.path.dirname(__file__)))
from validate_skills import has_when_to_use_section
# Representative (heading, expected-match) pairs for the detector.
SAMPLES = [
    ("## When to Use", True),
    ("## Use this skill when", True),
    ("## When to Use This Skill", True),
    ("## Overview", False),
]
# Smoke-test has_when_to_use_section against each sample heading.
for heading, expected in SAMPLES:
    assert has_when_to_use_section(f"\n{heading}\n- item\n") is expected, heading
print("ok")

View File

@@ -1,48 +0,0 @@
const assert = require("assert");
const { hasUseSection } = require("../validate-skills");
// Heading-detection cases: [heading text, whether it counts as a "use" section].
const samples = [
  ["## When to Use", true],
  ["## Use this skill when", true],
  ["## When to Use This Skill", true],
  ["## Overview", false],
];
for (const [heading, expected] of samples) {
  const content = `\n${heading}\n- item\n`;
  assert.strictEqual(hasUseSection(content), expected, heading);
}
// Regression test for YAML validity in frontmatter (Issue #79)
// Logs skills with parse errors as warnings; does not fail (many legacy skills have multiline frontmatter).
const fs = require("fs");
const path = require("path");
const { listSkillIds, parseFrontmatter } = require("../../lib/skill-utils");
const SKILLS_DIR = path.join(__dirname, "../../skills");
const skillIds = listSkillIds(SKILLS_DIR);
console.log(`Checking YAML validity for ${skillIds.length} skills...`);
let warnCount = 0;
for (const skillId of skillIds) {
  const skillPath = path.join(SKILLS_DIR, skillId, "SKILL.md");
  const content = fs.readFileSync(skillPath, "utf8");
  const { errors, hasFrontmatter } = parseFrontmatter(content);
  if (!hasFrontmatter) {
    // Missing frontmatter entirely: warn and move on to the next skill.
    console.warn(`[WARN] No frontmatter in ${skillId}`);
    warnCount++;
    continue;
  }
  if (errors.length > 0) {
    console.warn(`[WARN] YAML parse errors in ${skillId}: ${errors.join(", ")}`);
    warnCount++;
  }
}
// Warnings are informational only; the script always reports "ok".
if (warnCount > 0) {
  console.log(`ok (${warnCount} skills with frontmatter warnings; run validate_skills.py for schema checks)`);
} else {
  console.log("ok");
}

View File

@@ -1,9 +1,16 @@
#!/usr/bin/env python3
import argparse
import io
import json
import os
import re
import sys
import urllib.error
import urllib.request
from datetime import datetime, timezone
GITHUB_REPO = "sickn33/antigravity-awesome-skills"
SYNC_COMMENT_RE = re.compile(r"<!-- registry-sync: .*? -->")
def configure_utf8_output() -> None:
@@ -28,63 +35,227 @@ def configure_utf8_output() -> None:
)
def update_readme():
base_dir = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
def find_repo_root(start_path: str) -> str:
    """Walk upward from start_path to the directory holding both
    package.json and README.md.

    Raises:
        FileNotFoundError: if the filesystem root is reached first.
    """
    current = os.path.abspath(start_path)
    while True:
        has_package = os.path.isfile(os.path.join(current, "package.json"))
        has_readme = os.path.isfile(os.path.join(current, "README.md"))
        if has_package and has_readme:
            return current
        parent = os.path.dirname(current)
        if parent == current:  # reached the filesystem root
            raise FileNotFoundError("Could not locate repository root from script path.")
        current = parent
def format_skill_count(total_skills: int) -> str:
    """Render a skill total as a thousands-grouped label, e.g. 1234 -> '1,234+'."""
    return "{:,}+".format(total_skills)
def format_star_badge_count(stars: int) -> str:
    """URL-encoded star badge label: >=1000 rounds to 'N%2C000%2B', else 'N%2B'."""
    if stars < 1000:
        return f"{stars}%2B"
    thousands = int(round(stars / 1000.0))
    return f"{thousands}%2C000%2B"
def format_star_milestone(stars: int) -> str:
    """Human-readable milestone label: >=1000 rounds to 'N,000+', else 'N+'."""
    if stars < 1000:
        return f"{stars}+"
    return f"{int(round(stars / 1000.0))},000+"
def format_star_celebration(stars: int) -> str:
    """Celebration label: >=1000 rounds to 'Nk'; below 1000 the raw count."""
    return f"{int(round(stars / 1000.0))}k" if stars >= 1000 else str(stars)
def fetch_star_count(repo: str) -> int | None:
    """Query the GitHub API for a repository's stargazer count.

    Returns:
        The star count, or None on any network/timeout/decode failure so
        the caller can fall back to a cached value.
    """
    request = urllib.request.Request(
        f"https://api.github.com/repos/{repo}",
        headers={
            "Accept": "application/vnd.github+json",
            "User-Agent": "antigravity-awesome-skills-readme-sync",
        },
    )
    try:
        with urllib.request.urlopen(request, timeout=10) as response:
            payload = json.load(response)
    except (urllib.error.URLError, TimeoutError, json.JSONDecodeError):
        return None
    stars = payload.get("stargazers_count")
    return int(stars) if isinstance(stars, int) else None
def load_metadata(base_dir: str, repo: str = GITHUB_REPO) -> dict:
readme_path = os.path.join(base_dir, "README.md")
package_path = os.path.join(base_dir, "package.json")
index_path = os.path.join(base_dir, "skills_index.json")
print(f"📖 Reading skills index from: {index_path}")
with open(index_path, "r", encoding="utf-8") as f:
skills = json.load(f)
with open(index_path, "r", encoding="utf-8") as file:
skills = json.load(file)
total_skills = len(skills)
print(f"🔢 Total skills found: {total_skills}")
with open(package_path, "r", encoding="utf-8") as file:
package = json.load(file)
print(f"📝 Updating README at: {readme_path}")
with open(readme_path, "r", encoding="utf-8") as f:
content = f.read()
with open(readme_path, "r", encoding="utf-8") as file:
current_readme = file.read()
# 1. Update Title Count
content = re.sub(
r"(# 🌌 Antigravity Awesome Skills: )\d+(\+ Agentic Skills)",
rf"\g<1>{total_skills}\g<2>",
content,
current_star_match = re.search(r"%20([\d%2C\+]+)%20Stars", current_readme)
current_stars = None
if current_star_match:
compact = current_star_match.group(1).replace("%2C", "").replace("%2B", "")
compact = compact.rstrip("+")
if compact.isdigit():
current_stars = int(compact)
live_stars = fetch_star_count(repo)
total_stars = live_stars if live_stars is not None else current_stars or 0
return {
"repo": repo,
"version": str(package.get("version", "0.0.0")),
"total_skills": len(skills),
"total_skills_label": format_skill_count(len(skills)),
"stars": total_stars,
"star_badge_count": format_star_badge_count(total_stars),
"star_milestone": format_star_milestone(total_stars),
"star_celebration": format_star_celebration(total_stars),
"updated_at": datetime.now(timezone.utc).replace(microsecond=0).isoformat(),
"used_live_star_count": live_stars is not None,
}
def apply_metadata(content: str, metadata: dict) -> str:
total_skills = metadata["total_skills"]
total_skills_label = metadata["total_skills_label"]
version = metadata["version"]
star_badge_count = metadata["star_badge_count"]
star_milestone = metadata["star_milestone"]
star_celebration = metadata["star_celebration"]
sync_comment = (
f"<!-- registry-sync: version={version}; skills={total_skills}; "
f"stars={metadata['stars']}; updated_at={metadata['updated_at']} -->"
)
# 2. Update Blockquote Count
content = re.sub(
r"(Collection of )\d+(\+ Universal)",
rf"\g<1>{total_skills}\g<2>",
r"^# 🌌 Antigravity Awesome Skills: .*?$",
(
f"# 🌌 Antigravity Awesome Skills: {total_skills_label} "
"Agentic Skills for Claude Code, Gemini CLI, Cursor, Copilot & More"
),
content,
count=1,
flags=re.MULTILINE,
)
# 3. Update Intro Text Count
content = re.sub(
r"(library of \*\*)\d+( high-performance agentic skills\*\*)",
rf"\g<1>{total_skills}\g<2>",
r"^> \*\*The Ultimate Collection of .*?\*\*$",
(
f"> **The Ultimate Collection of {total_skills_label} Universal Agentic "
"Skills for AI Coding Assistants — Claude Code, Gemini CLI, Codex CLI, "
"Antigravity IDE, GitHub Copilot, Cursor, OpenCode, AdaL**"
),
content,
count=1,
flags=re.MULTILINE,
)
# 4. Update Browse section header
content = re.sub(
r"## Browse \d+\+ Skills",
f"## Browse {total_skills}+ Skills",
r"https://img\.shields\.io/badge/⭐%20[\d%2C\+]+%20Stars-gold\?style=for-the-badge",
f"https://img.shields.io/badge/⭐%20{star_badge_count}%20Stars-gold?style=for-the-badge",
content,
count=1,
)
# 5. Update TOC link for Browse (anchor matches header-derived slug)
content = re.sub(
r"\[📚 Browse \d+\+ Skills\]\(#browse-\d+-skills\)",
f"[📚 Browse {total_skills}+ Skills](#browse-{total_skills}-skills)",
r"^\*\*Antigravity Awesome Skills\*\* is a curated, battle-tested library of \*\*.*?\*\* designed",
(
f"**Antigravity Awesome Skills** is a curated, battle-tested library of "
f"**{total_skills_label} high-performance agentic skills** designed"
),
content,
count=1,
flags=re.MULTILINE,
)
content = re.sub(
r"\[📚 Browse \d[\d,]*\+ Skills\]\(#browse-[^)]+\)",
f"[📚 Browse {total_skills_label} Skills](#browse-{total_skills}-skills)",
content,
count=1,
)
content = re.sub(
r"\*\*Welcome to the V[\d.]+ .*? Stars Celebration Release!\*\*",
f"**Welcome to the V{version} {star_celebration} Stars Celebration Release!**",
content,
count=1,
)
content = re.sub(
r"> \*\*🌟 .*? GitHub Stars Milestone!\*\*",
f"> **🌟 {star_milestone} GitHub Stars Milestone!**",
content,
count=1,
)
content = re.sub(
r"\*\*Antigravity Awesome Skills\*\* \(Release [\d.]+\) is a massive upgrade to your AI's capabilities, now featuring \*\*.*?\*\* skills",
(
f"**Antigravity Awesome Skills** (Release {version}) is a massive upgrade "
f"to your AI's capabilities, now featuring **{total_skills_label} skills**"
),
content,
count=1,
)
content = re.sub(
r"## Browse \d[\d,]*\+ Skills",
f"## Browse {total_skills_label} Skills",
content,
count=1,
)
content = re.sub(
r"<!-- registry-sync: .*? -->\n?",
"",
content,
count=1,
)
return f"{sync_comment}\n{content.lstrip()}"
with open(readme_path, "w", encoding="utf-8", newline="\n") as f:
f.write(content)
def update_readme(dry_run: bool = False) -> dict:
    """Recompute repository metadata and splice it into README.md.

    Args:
        dry_run: When True, compute and report but leave README.md untouched.

    Returns:
        The metadata dict used for the update.
    """
    base_dir = find_repo_root(os.path.dirname(__file__))
    readme_path = os.path.join(base_dir, "README.md")
    metadata = load_metadata(base_dir)
    print(f"📖 Reading README from: {readme_path}")
    print(f"🔢 Total skills found: {metadata['total_skills']}")
    print(f"🏷️ Version found: {metadata['version']}")
    if metadata["used_live_star_count"]:
        print(f"⭐ Live GitHub stars found: {metadata['stars']}")
    else:
        print(f"⭐ Using existing README star count: {metadata['stars']}")
    with open(readme_path, "r", encoding="utf-8") as file:
        original = file.read()
    rewritten = apply_metadata(original, metadata)
    if dry_run:
        print("🧪 Dry run enabled; README.md not written.")
        return metadata
    # newline="\n" keeps LF endings regardless of platform.
    with open(readme_path, "w", encoding="utf-8", newline="\n") as file:
        file.write(rewritten)
    print("✅ README.md updated successfully.")
    return metadata
def parse_args() -> argparse.Namespace:
parser = argparse.ArgumentParser(description="Sync generated metadata into README.md.")
parser.add_argument("--dry-run", action="store_true", help="Compute metadata without writing files.")
return parser.parse_args()
if __name__ == "__main__":
configure_utf8_output()
update_readme()
args = parse_args()
update_readme(dry_run=args.dry_run)

View File

@@ -1,358 +0,0 @@
/**
* Legacy / alternative validator. For CI and PR checks, use scripts/validate_skills.py.
* Run: npm run validate (or npm run validate:strict)
*/
const fs = require("fs");
const path = require("path");
const { listSkillIds, parseFrontmatter } = require("../lib/skill-utils");
const ROOT = path.resolve(__dirname, "..");
const SKILLS_DIR = path.join(ROOT, "skills");
const BASELINE_PATH = path.join(ROOT, "validation-baseline.json");
const errors = [];
const warnings = [];
const missingUseSection = [];
const missingDoNotUseSection = [];
const missingInstructionsSection = [];
const longFiles = [];
const unknownFieldSkills = [];
const isStrict =
process.argv.includes("--strict") ||
process.env.STRICT === "1" ||
process.env.STRICT === "true";
const writeBaseline =
process.argv.includes("--write-baseline") ||
process.env.WRITE_BASELINE === "1" ||
process.env.WRITE_BASELINE === "true";
const NAME_PATTERN = /^[a-z0-9]+(?:-[a-z0-9]+)*$/;
const MAX_NAME_LENGTH = 64;
const MAX_DESCRIPTION_LENGTH = 1024;
const MAX_COMPATIBILITY_LENGTH = 500;
const MAX_SKILL_LINES = 500;
const ALLOWED_FIELDS = new Set([
"name",
"description",
"risk",
"source",
"license",
"compatibility",
"metadata",
"allowed-tools",
"package",
"date_added",
]);
const USE_SECTION_PATTERNS = [
/^##\s+When\s+to\s+Use/im,
/^##\s+Use\s+this\s+skill\s+when/im,
/^##\s+When\s+to\s+Use\s+This\s+Skill/im,
];
function hasUseSection(content) {
return USE_SECTION_PATTERNS.some((pattern) => pattern.test(content));
}
function isPlainObject(value) {
return value && typeof value === "object" && !Array.isArray(value);
}
function validateStringField(
fieldName,
value,
{ min = 1, max = Infinity } = {},
) {
if (typeof value !== "string") {
return `${fieldName} must be a string.`;
}
const trimmed = value.trim();
if (!trimmed) {
return `${fieldName} cannot be empty.`;
}
if (trimmed.length < min) {
return `${fieldName} must be at least ${min} characters.`;
}
if (trimmed.length > max) {
return `${fieldName} must be <= ${max} characters.`;
}
return null;
}
function addError(message) {
errors.push(message);
}
function addWarning(message) {
warnings.push(message);
}
function loadBaseline() {
if (!fs.existsSync(BASELINE_PATH)) {
return {
useSection: [],
doNotUseSection: [],
instructionsSection: [],
longFile: [],
};
}
try {
const parsed = JSON.parse(fs.readFileSync(BASELINE_PATH, "utf8"));
return {
useSection: Array.isArray(parsed.useSection) ? parsed.useSection : [],
doNotUseSection: Array.isArray(parsed.doNotUseSection)
? parsed.doNotUseSection
: [],
instructionsSection: Array.isArray(parsed.instructionsSection)
? parsed.instructionsSection
: [],
longFile: Array.isArray(parsed.longFile) ? parsed.longFile : [],
};
} catch (err) {
addWarning(
"Failed to parse validation-baseline.json; strict mode may fail.",
);
return {
useSection: [],
doNotUseSection: [],
instructionsSection: [],
longFile: [],
};
}
}
// In strict mode, report skills failing the `label` check that are NOT
// grandfathered in the baseline set. No-op outside strict mode.
function addStrictSectionErrors(label, missing, baselineSet) {
  if (!isStrict) {
    return;
  }
  const regressions = [];
  for (const skillId of missing) {
    if (!baselineSet.has(skillId)) {
      regressions.push(skillId);
    }
  }
  if (regressions.length === 0) {
    return;
  }
  const examples = regressions.slice(0, 5).join(", ");
  addError(
    `Missing "${label}" section (strict): ${regressions.length} skills (examples: ${examples})`,
  );
}
// Top-level validation pass: walk every skill folder, validate its SKILL.md
// frontmatter and content sections, apply baseline-aware strict gating,
// optionally rewrite the baseline snapshot, then print warnings/errors and
// exit(1) when any errors were recorded. Findings accumulate in the
// module-level arrays (errors, warnings, missing*Section, longFiles,
// unknownFieldSkills) declared above this function.
function run() {
  const skillIds = listSkillIds(SKILLS_DIR);
  const baseline = loadBaseline();
  // Skills already known to fail each check; strict mode only errors on
  // regressions that are NOT in these baseline sets.
  const baselineUse = new Set(baseline.useSection || []);
  const baselineDoNotUse = new Set(baseline.doNotUseSection || []);
  const baselineInstructions = new Set(baseline.instructionsSection || []);
  const baselineLongFile = new Set(baseline.longFile || []);
  for (const skillId of skillIds) {
    const skillPath = path.join(SKILLS_DIR, skillId, "SKILL.md");
    if (!fs.existsSync(skillPath)) {
      addError(`Missing SKILL.md: ${skillId}`);
      continue;
    }
    const content = fs.readFileSync(skillPath, "utf8");
    const {
      data,
      errors: fmErrors,
      hasFrontmatter,
    } = parseFrontmatter(content);
    // Count both LF and CRLF line endings.
    const lineCount = content.split(/\r?\n/).length;
    if (!hasFrontmatter) {
      addError(`Missing frontmatter: ${skillId}`);
    }
    if (fmErrors && fmErrors.length) {
      fmErrors.forEach((error) =>
        addError(`Frontmatter parse error (${skillId}): ${error}`),
      );
    }
    if (!NAME_PATTERN.test(skillId)) {
      addError(`Folder name must match ${NAME_PATTERN}: ${skillId}`);
    }
    // name: optional, but when present must be a valid slug AND equal the
    // folder name so the skill resolves unambiguously.
    if (data.name !== undefined) {
      const nameError = validateStringField("name", data.name, {
        min: 1,
        max: MAX_NAME_LENGTH,
      });
      if (nameError) {
        addError(`${nameError} (${skillId})`);
      } else {
        const nameValue = String(data.name).trim();
        if (!NAME_PATTERN.test(nameValue)) {
          addError(`name must match ${NAME_PATTERN}: ${skillId}`);
        }
        if (nameValue !== skillId) {
          addError(`name must match folder name: ${skillId} -> ${nameValue}`);
        }
      }
    }
    // description: the only strictly required frontmatter field.
    const descError =
      data.description === undefined
        ? "description is required."
        : validateStringField("description", data.description, {
            min: 1,
            max: MAX_DESCRIPTION_LENGTH,
          });
    if (descError) {
      addError(`${descError} (${skillId})`);
    }
    if (data.license !== undefined) {
      const licenseError = validateStringField("license", data.license, {
        min: 1,
        max: 128,
      });
      if (licenseError) {
        addError(`${licenseError} (${skillId})`);
      }
    }
    if (data.compatibility !== undefined) {
      const compatibilityError = validateStringField(
        "compatibility",
        data.compatibility,
        { min: 1, max: MAX_COMPATIBILITY_LENGTH },
      );
      if (compatibilityError) {
        addError(`${compatibilityError} (${skillId})`);
      }
    }
    // allowed-tools: stored as a single space-delimited string, not a list.
    if (data["allowed-tools"] !== undefined) {
      if (typeof data["allowed-tools"] !== "string") {
        addError(
          `allowed-tools must be a space-delimited string. (${skillId})`,
        );
      } else if (!data["allowed-tools"].trim()) {
        addError(`allowed-tools cannot be empty. (${skillId})`);
      }
    }
    // metadata: a free-form map, but every value must be a string.
    if (data.metadata !== undefined) {
      if (!isPlainObject(data.metadata)) {
        addError(`metadata must be a string map/object. (${skillId})`);
      } else {
        for (const [key, value] of Object.entries(data.metadata)) {
          if (typeof value !== "string") {
            addError(`metadata.${key} must be a string. (${skillId})`);
          }
        }
      }
    }
    // Reject any frontmatter key not in the ALLOWED_FIELDS schema.
    if (data && Object.keys(data).length) {
      const unknownFields = Object.keys(data).filter(
        (key) => !ALLOWED_FIELDS.has(key),
      );
      if (unknownFields.length) {
        unknownFieldSkills.push(skillId);
        addError(
          `Unknown frontmatter fields (${skillId}): ${unknownFields.join(", ")}`,
        );
      }
    }
    // Content-quality checks: collected per skill, reported in aggregate
    // after the loop.
    if (lineCount > MAX_SKILL_LINES) {
      longFiles.push(skillId);
    }
    if (!hasUseSection(content)) {
      missingUseSection.push(skillId);
    }
    if (!content.includes("## Do not use")) {
      missingDoNotUseSection.push(skillId);
    }
    if (!content.includes("## Instructions")) {
      missingInstructionsSection.push(skillId);
    }
  }
  // Aggregate content findings as warnings (non-fatal in standard mode).
  if (missingUseSection.length) {
    addWarning(
      `Missing "Use this skill when" section: ${missingUseSection.length} skills (examples: ${missingUseSection.slice(0, 5).join(", ")})`,
    );
  }
  if (missingDoNotUseSection.length) {
    addWarning(
      `Missing "Do not use" section: ${missingDoNotUseSection.length} skills (examples: ${missingDoNotUseSection.slice(0, 5).join(", ")})`,
    );
  }
  if (missingInstructionsSection.length) {
    addWarning(
      `Missing "Instructions" section: ${missingInstructionsSection.length} skills (examples: ${missingInstructionsSection.slice(0, 5).join(", ")})`,
    );
  }
  if (longFiles.length) {
    addWarning(
      `SKILL.md over ${MAX_SKILL_LINES} lines: ${longFiles.length} skills (examples: ${longFiles.slice(0, 5).join(", ")})`,
    );
  }
  if (unknownFieldSkills.length) {
    addWarning(
      `Unknown frontmatter fields detected: ${unknownFieldSkills.length} skills (examples: ${unknownFieldSkills.slice(0, 5).join(", ")})`,
    );
  }
  // Strict mode: escalate to errors, but only for skills not grandfathered
  // in the baseline.
  addStrictSectionErrors("Use this skill when", missingUseSection, baselineUse);
  addStrictSectionErrors(
    "Do not use",
    missingDoNotUseSection,
    baselineDoNotUse,
  );
  addStrictSectionErrors(
    "Instructions",
    missingInstructionsSection,
    baselineInstructions,
  );
  addStrictSectionErrors(
    `SKILL.md line count <= ${MAX_SKILL_LINES}`,
    longFiles,
    baselineLongFile,
  );
  // When baseline writing was requested (writeBaseline flag, presumably set
  // from the CLI — see argument parsing above this chunk), snapshot the
  // current failures as the new baseline.
  if (writeBaseline) {
    const baselineData = {
      generatedAt: new Date().toISOString(),
      useSection: [...missingUseSection].sort(),
      doNotUseSection: [...missingDoNotUseSection].sort(),
      instructionsSection: [...missingInstructionsSection].sort(),
      longFile: [...longFiles].sort(),
    };
    fs.writeFileSync(BASELINE_PATH, JSON.stringify(baselineData, null, 2));
    console.log(`Baseline written to ${BASELINE_PATH}`);
  }
  if (warnings.length) {
    console.warn("Warnings:");
    for (const warning of warnings) {
      console.warn(`- ${warning}`);
    }
  }
  // Errors are fatal: print them all, then exit non-zero.
  if (errors.length) {
    console.error("\nErrors:");
    for (const error of errors) {
      console.error(`- ${error}`);
    }
    process.exit(1);
  }
  console.log(`Validation passed for ${skillIds.length} skills.`);
}
// Execute only when invoked directly (e.g. `node <script>`), not when
// required as a module; tests import hasUseSection/run via module.exports.
if (require.main === module) {
  run();
}
module.exports = {
  hasUseSection,
  run,
};

View File

@@ -1,88 +0,0 @@
#!/usr/bin/env python3
"""
Validate cross-references in data/workflows.json and data/bundles.json.
- Every recommendedSkills slug in workflows must exist under skills/ (with SKILL.md).
- Every relatedBundles id in workflows must exist in bundles.json.
- Every skill slug in each bundle's skills list must exist under skills/.
Exits with 1 if any reference is broken.
"""
import json
import os
import re
import sys
def collect_skill_ids(skills_dir):
    """Return the set of skill ids (paths relative to *skills_dir*) containing a SKILL.md.

    Ids always use forward slashes so they compare equal to the "/"-separated
    slugs used in workflows.json, bundles.json, and BUNDLES.md on every
    platform (os.path.relpath yields backslashes on Windows, which previously
    made every nested skill look missing there). Hidden (dot-prefixed)
    directories are skipped, matching listSkillIdsRecursive behavior.
    """
    ids = set()
    for root, dirs, files in os.walk(skills_dir):
        # Prune hidden dirs in place so os.walk does not descend into them.
        dirs[:] = [name for name in dirs if not name.startswith(".")]
        if "SKILL.md" in files:
            rel = os.path.relpath(root, skills_dir)
            # Normalize platform separators; slugs are always "/"-separated.
            ids.add(rel.replace(os.sep, "/"))
    return ids
def main():
    """Check that workflows, bundles, and BUNDLES.md only reference things that exist.

    Exits 1 (after printing every broken reference) if any recommended skill,
    related bundle, bundle skill, or BUNDLES.md skill link is dangling;
    exits 0 otherwise.
    """
    repo_root = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
    skills_dir = os.path.join(repo_root, "skills")
    data_dir = os.path.join(repo_root, "data")
    workflows_path = os.path.join(data_dir, "workflows.json")
    bundles_path = os.path.join(data_dir, "bundles.json")
    # Both data files are mandatory; bail out on the first one missing.
    for required_path in (workflows_path, bundles_path):
        if not os.path.exists(required_path):
            print(f"Missing {required_path}")
            sys.exit(1)

    def load_json(path):
        with open(path, "r", encoding="utf-8") as handle:
            return json.load(handle)

    known_skills = collect_skill_ids(skills_dir)
    workflows_data = load_json(workflows_path)
    bundles_data = load_json(bundles_path)
    known_bundles = set(bundles_data.get("bundles", {}).keys())
    problems = []
    # Workflows: every recommended skill and related bundle must exist.
    for workflow in workflows_data.get("workflows", []):
        workflow_id = workflow.get("id", "?")
        for step in workflow.get("steps", []):
            for slug in step.get("recommendedSkills", []):
                if slug not in known_skills:
                    problems.append(
                        f"workflows.json workflow '{workflow_id}' recommends missing skill: {slug}"
                    )
        for bundle_id in workflow.get("relatedBundles", []):
            if bundle_id not in known_bundles:
                problems.append(
                    f"workflows.json workflow '{workflow_id}' references missing bundle: {bundle_id}"
                )
    # Bundles: every listed skill must exist.
    for bundle_id, bundle in bundles_data.get("bundles", {}).items():
        for slug in bundle.get("skills", []):
            if slug not in known_skills:
                problems.append(
                    f"bundles.json bundle '{bundle_id}' lists missing skill: {slug}"
                )
    # BUNDLES.md is optional; when present, links like [text](../skills/slug/)
    # must point at existing skill directories.
    bundles_md_path = os.path.join(repo_root, "docs", "BUNDLES.md")
    if os.path.exists(bundles_md_path):
        with open(bundles_md_path, "r", encoding="utf-8") as handle:
            markdown = handle.read()
        for match in re.finditer(r"\]\(\.\./skills/([^)]+)/\)", markdown):
            slug = match.group(1).rstrip("/")
            if slug not in known_skills:
                problems.append(f"docs/BUNDLES.md links to missing skill: {slug}")
    if problems:
        for problem in problems:
            print(problem)
        print(f"\nTotal broken references: {len(problems)}")
        sys.exit(1)
    print("All workflow, bundle, and BUNDLES.md references are valid.")
    sys.exit(0)
# Run the reference check only when executed directly (not on import).
if __name__ == "__main__":
    main()

View File

@@ -1,205 +0,0 @@
import os
import re
import argparse
import sys
import io
def configure_utf8_output() -> None:
    """Best-effort switch of stdout/stderr to UTF-8 on Windows.

    Prefers TextIOWrapper.reconfigure; falls back to re-wrapping the raw
    buffer when reconfigure is unavailable or fails. Uses backslashreplace
    so un-encodable characters are escaped instead of dropped. No-op on
    non-Windows platforms.
    """
    if sys.platform != "win32":
        return
    for name in ("stdout", "stderr"):
        current = getattr(sys, name)
        try:
            current.reconfigure(encoding="utf-8", errors="backslashreplace")
        except Exception:
            raw = getattr(current, "buffer", None)
            if raw is not None:
                wrapped = io.TextIOWrapper(
                    raw, encoding="utf-8", errors="backslashreplace"
                )
                setattr(sys, name, wrapped)
# Accepted level-2 "when to use" headings, anchored to line starts,
# case-insensitive. NOTE(review): the third pattern is subsumed by the
# first; kept for parity with the original list.
WHEN_TO_USE_PATTERNS = [
    re.compile(source, re.MULTILINE | re.IGNORECASE)
    for source in (
        r"^##\s+When\s+to\s+Use",
        r"^##\s+Use\s+this\s+skill\s+when",
        r"^##\s+When\s+to\s+Use\s+This\s+Skill",
    )
]
def has_when_to_use_section(content):
    """Return True if *content* contains any recognized 'When to Use' heading."""
    for pattern in WHEN_TO_USE_PATTERNS:
        if pattern.search(content):
            return True
    return False
import yaml
def parse_frontmatter(content, rel_path=None):
    """Parse leading YAML frontmatter using PyYAML.

    Returns ``(metadata, errors)`` where *metadata* is a dict of frontmatter
    key/values (or None when the frontmatter is missing, unparseable, or not
    a mapping) and *errors* is a list of human-readable messages.

    *rel_path* is accepted for interface compatibility with callers but is
    not currently used in messages.
    """
    # Without re.MULTILINE, ^ only matches at position 0, so this requires
    # the frontmatter fence at the very start of the file.
    fm_match = re.search(r'^---\s*\n(.*?)\n---', content, re.DOTALL)
    if not fm_match:
        return None, ["Missing or malformed YAML frontmatter"]
    fm_text = fm_match.group(1)
    try:
        metadata = yaml.safe_load(fm_text) or {}
    except yaml.YAMLError as e:
        return None, [f"YAML Syntax Error: {e}"]
    # safe_load can legally return a scalar or list (e.g. a bare string
    # between the fences); downstream code indexes metadata like a dict and
    # would crash or misbehave, so reject non-mapping frontmatter here.
    if not isinstance(metadata, dict):
        return None, [
            f"Frontmatter must be a YAML mapping, got {type(metadata).__name__}"
        ]
    fm_errors = []
    # Identification of the specific regression issue for better reporting.
    if "description" in metadata:
        desc = metadata["description"]
        if not desc or (isinstance(desc, str) and not desc.strip()):
            fm_errors.append("description field is empty or whitespace only.")
        elif desc == "|":
            fm_errors.append("description contains only the YAML block indicator '|', likely due to a parsing regression.")
    return metadata, fm_errors
def validate_skills(skills_dir: str, strict_mode: bool = False) -> bool:
    """Validate every SKILL.md found under *skills_dir*.

    Checks frontmatter schema (name/description/risk/source/date_added),
    required content sections, the security disclaimer for offensive skills,
    and local markdown links. Prints a summary report and returns True when
    no critical errors were found; in strict mode, warnings also fail.
    """
    configure_utf8_output()
    print(f"🔍 Validating skills in: {skills_dir}")
    print(f"⚙️ Mode: {'STRICT (CI)' if strict_mode else 'Standard (Dev)'}")
    errors = []
    warnings = []
    skill_count = 0
    # Pre-compiled regex / shared constants for the loop below.
    security_disclaimer_pattern = re.compile(r"AUTHORIZED USE ONLY", re.IGNORECASE)
    valid_risk_levels = ["none", "safe", "critical", "offensive", "unknown"]
    date_pattern = re.compile(r'^\d{4}-\d{2}-\d{2}$') # YYYY-MM-DD format
    for root, dirs, files in os.walk(skills_dir):
        # Skip .disabled or hidden directories (pruned in place so os.walk
        # does not descend into them).
        dirs[:] = [d for d in dirs if not d.startswith('.')]
        if "SKILL.md" in files:
            skill_count += 1
            skill_path = os.path.join(root, "SKILL.md")
            rel_path = os.path.relpath(skill_path, skills_dir)
            try:
                with open(skill_path, 'r', encoding='utf-8') as f:
                    content = f.read()
            except Exception as e:
                errors.append(f"{rel_path}: Unreadable file - {str(e)}")
                continue
            # 1. Frontmatter Check
            metadata, fm_errors = parse_frontmatter(content, rel_path)
            if not metadata:
                errors.append(f"{rel_path}: Missing or malformed YAML frontmatter")
                continue # Cannot proceed without metadata
            if fm_errors:
                for fe in fm_errors:
                    errors.append(f"{rel_path}: YAML Structure Error - {fe}")
            # 2. Metadata Schema Checks
            if "name" not in metadata:
                errors.append(f"{rel_path}: Missing 'name' in frontmatter")
            elif metadata["name"] != os.path.basename(root):
                errors.append(f"{rel_path}: Name '{metadata['name']}' does not match folder name '{os.path.basename(root)}'")
            if "description" not in metadata or metadata["description"] is None:
                errors.append(f"{rel_path}: Missing 'description' in frontmatter")
            else:
                # agentskills-ref checks for short descriptions
                desc = metadata["description"]
                if not isinstance(desc, str):
                    errors.append(f"{rel_path}: 'description' must be a string, got {type(desc).__name__}")
                elif len(desc) > 300: # increased limit for multi-line support
                    errors.append(f"{rel_path}: Description is oversized ({len(desc)} chars). Must be concise.")
            # Risk Validation (Quality Bar): warning in dev, error in strict.
            if "risk" not in metadata:
                msg = f"⚠️ {rel_path}: Missing 'risk' label (defaulting to 'unknown')"
                if strict_mode: errors.append(msg.replace("⚠️", ""))
                else: warnings.append(msg)
            elif metadata["risk"] not in valid_risk_levels:
                errors.append(f"{rel_path}: Invalid risk level '{metadata['risk']}'. Must be one of {valid_risk_levels}")
            # Source Validation: attribution is expected but not fatal in dev.
            if "source" not in metadata:
                msg = f"⚠️ {rel_path}: Missing 'source' attribution"
                if strict_mode: errors.append(msg.replace("⚠️", ""))
                else: warnings.append(msg)
            # Date Added Validation (optional field): format-checked when
            # present; its absence only warns in strict mode.
            if "date_added" in metadata:
                if not date_pattern.match(metadata["date_added"]):
                    errors.append(f"{rel_path}: Invalid 'date_added' format. Must be YYYY-MM-DD (e.g., '2024-01-15'), got '{metadata['date_added']}'")
            else:
                msg = f" {rel_path}: Missing 'date_added' field (optional, but recommended)"
                if strict_mode: warnings.append(msg)
                # In normal mode, we just silently skip this
            # 3. Content Checks (Triggers)
            if not has_when_to_use_section(content):
                msg = f"⚠️ {rel_path}: Missing '## When to Use' section"
                if strict_mode: errors.append(msg.replace("⚠️", ""))
                else: warnings.append(msg)
            # 4. Security Guardrails: offensive skills must carry the
            # disclaimer text, always fatal.
            if metadata.get("risk") == "offensive":
                if not security_disclaimer_pattern.search(content):
                    errors.append(f"🚨 {rel_path}: OFFENSIVE SKILL MISSING SECURITY DISCLAIMER! (Must contain 'AUTHORIZED USE ONLY')")
            # 5. Dangling Links Validation
            # Look for markdown links: [text](href)
            links = re.findall(r'\[[^\]]*\]\(([^)]+)\)', content)
            for link in links:
                # Drop any fragment (#anchor) before resolving.
                link_clean = link.split('#')[0].strip()
                # Skip empty anchors, external links, and edge cases
                if not link_clean or link_clean.startswith(('http://', 'https://', 'mailto:', '<', '>')):
                    continue
                if os.path.isabs(link_clean):
                    continue
                # Check if file exists relative to this skill file
                target_path = os.path.normpath(os.path.join(root, link_clean))
                if not os.path.exists(target_path):
                    errors.append(f"{rel_path}: Dangling link detected. Path '{link_clean}' (from '...({link})') does not exist locally.")
    # Reporting: warnings first, then errors; errors always fail, warnings
    # fail only in strict mode.
    print(f"\n📊 Checked {skill_count} skills.")
    if warnings:
        print(f"\n⚠️ Found {len(warnings)} Warnings:")
        for w in warnings:
            print(w)
    if errors:
        print(f"\n❌ Found {len(errors)} Critical Errors:")
        for e in errors:
            print(e)
        return False
    if strict_mode and warnings:
        print("\n❌ STRICT MODE: Failed due to warnings.")
        return False
    print("\n✨ All skills passed validation!")
    return True
# CLI entry point: validate all skills under <repo>/skills; --strict makes
# warnings fatal (intended for CI). Exits 1 on any validation failure.
if __name__ == "__main__":
    parser = argparse.ArgumentParser(description="Validate Antigravity Skills")
    parser.add_argument("--strict", action="store_true", help="Fail on warnings (for CI)")
    args = parser.parse_args()
    base_dir = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
    skills_path = os.path.join(base_dir, "skills")
    success = validate_skills(skills_path, strict_mode=args.strict)
    if not success:
        sys.exit(1)