Merge pull request #150 from zinzied/main - Enhanced User Experience

Features integrated:
- Stars Feature: Community-driven skill discovery with upvotes
- Auto-Update: Automatic skill updates via START_APP.bat (Git + PowerShell fallback)
- Interactive Prompt Builder: Context-aware prompt construction
- Date Tracking: Added date_added field to all skills
- Auto-Categorization: Smart category assignment based on keywords
- Enhanced UI: Risk level badges, date display, category stats

Conflicts resolved:
- START_APP.bat: Merged enhanced auto-update logic
- README.md: Kept v6.4.1 with new feature documentation
- Home.jsx: Combined fuzzy search + pagination + stars
- SkillDetail.jsx: Merged syntax highlighting + stars + date badges

All 950+ skills updated with date tracking and proper categorization.

Made-with: Cursor
This commit is contained in:
sck_0
2026-02-27 09:14:48 +01:00
1153 changed files with 278012 additions and 3038 deletions

View File

@@ -0,0 +1,275 @@
#!/usr/bin/env python3
"""
Auto-categorize skills based on their names and descriptions.
Removes "uncategorized" by intelligently assigning categories.
Usage:
python auto_categorize_skills.py
python auto_categorize_skills.py --dry-run (shows what would change)
"""
import os
import re
import json
import sys
import argparse
# Ensure UTF-8 output for Windows compatibility.
# The Windows console often defaults to a legacy code page (e.g. cp1252)
# that cannot encode the emoji used in the report output below, so re-wrap
# stdout/stderr around their raw buffers with an explicit UTF-8 encoding.
if sys.platform == 'win32':
    import io
    sys.stdout = io.TextIOWrapper(sys.stdout.buffer, encoding='utf-8')
    sys.stderr = io.TextIOWrapper(sys.stderr.buffer, encoding='utf-8')
# Category keywords mapping.
# Each category maps to the keywords that vote for it in categorize_skill():
# a whole-word match scores 2 and a plain substring match scores 1, and the
# category with the highest total wins.  A keyword may appear under several
# categories (e.g. 'graphql'), but must be unique WITHIN a list — a duplicate
# would silently double-count that keyword's score.
CATEGORY_KEYWORDS = {
    'web-development': [
        'react', 'vue', 'angular', 'svelte', 'nextjs', 'gatsby', 'remix',
        'html', 'css', 'javascript', 'typescript', 'frontend', 'web', 'tailwind',
        'bootstrap', 'sass', 'less', 'webpack', 'vite', 'rollup', 'parcel',
        'rest api', 'graphql', 'http', 'fetch', 'axios', 'cors',
        'responsive', 'seo', 'accessibility', 'a11y', 'pwa', 'progressive',
        'dom', 'jsx', 'tsx', 'component', 'router', 'routing'
    ],
    'backend': [
        'nodejs', 'node.js', 'express', 'fastapi', 'django', 'flask',
        'spring', 'java', 'python', 'golang', 'rust', 'c#', 'csharp',
        'dotnet', '.net', 'laravel', 'php', 'ruby', 'rails',
        'server', 'backend', 'api', 'rest', 'graphql', 'database',
        'sql', 'mongodb', 'postgres', 'mysql', 'redis', 'cache',
        'authentication', 'auth', 'jwt', 'oauth', 'session',
        'middleware', 'routing', 'controller', 'model'
    ],
    'database': [
        'database', 'sql', 'postgres', 'postgresql', 'mysql', 'mariadb',
        'mongodb', 'nosql', 'firestore', 'dynamodb', 'cassandra',
        'elasticsearch', 'redis', 'memcached', 'graphql', 'prisma',
        'orm', 'query', 'migration', 'schema', 'index'
    ],
    'ai-ml': [
        'ai', 'artificial intelligence', 'machine learning', 'ml',
        'deep learning', 'neural', 'tensorflow', 'pytorch', 'scikit',
        'nlp', 'computer vision', 'cv', 'llm', 'gpt', 'bert',
        'classification', 'regression', 'clustering', 'transformer',
        # Fix: 'embedding' was listed twice, double-counting its score.
        'embedding', 'vector', 'training', 'model'
    ],
    'devops': [
        'devops', 'docker', 'kubernetes', 'k8s', 'ci/cd', 'git',
        'github', 'gitlab', 'jenkins', 'gitlab-ci', 'github actions',
        'aws', 'azure', 'gcp', 'terraform', 'ansible', 'vagrant',
        'deploy', 'deployment', 'container', 'orchestration',
        'monitoring', 'logging', 'prometheus', 'grafana'
    ],
    'cloud': [
        'aws', 'amazon', 'azure', 'gcp', 'google cloud', 'cloud',
        'ec2', 's3', 'lambda', 'cloudformation', 'terraform',
        'serverless', 'functions', 'storage', 'cdn', 'distributed'
    ],
    'security': [
        'security', 'encryption', 'cryptography', 'ssl', 'tls',
        'hashing', 'bcrypt', 'jwt', 'oauth', 'authentication',
        'authorization', 'firewall', 'penetration', 'audit',
        'vulnerability', 'privacy', 'gdpr', 'compliance'
    ],
    'testing': [
        'test', 'testing', 'jest', 'mocha', 'jasmine', 'pytest',
        'unittest', 'cypress', 'selenium', 'puppeteer', 'e2e',
        'unit test', 'integration', 'coverage', 'ci/cd'
    ],
    'mobile': [
        'mobile', 'android', 'ios', 'react native', 'flutter',
        'swift', 'kotlin', 'objective-c', 'app', 'native',
        'cross-platform', 'expo', 'cordova', 'xamarin'
    ],
    'game-development': [
        'game', 'unity', 'unreal', 'godot', 'canvas', 'webgl',
        'threejs', 'babylon', 'phaser', 'sprite', 'physics',
        'collision', '2d', '3d', 'shader', 'rendering'
    ],
    'data-science': [
        'data', 'analytics', 'science', 'pandas', 'numpy', 'scipy',
        'jupyter', 'notebook', 'visualization', 'matplotlib', 'plotly',
        'statistics', 'correlation', 'regression', 'clustering'
    ],
    'automation': [
        # Fix: 'automation' was listed twice, double-counting its score.
        'automation', 'scripting', 'selenium', 'puppeteer', 'robot',
        'workflow', 'scheduled', 'trigger', 'integration'
    ],
    'content': [
        'markdown', 'documentation', 'content', 'blog', 'writing',
        'seo', 'meta', 'schema', 'og', 'twitter', 'description'
    ]
}
def categorize_skill(skill_name, description):
    """
    Pick the best-fitting category for a skill from its name and description.

    Every keyword in CATEGORY_KEYWORDS votes for its category: a whole-word
    match (respecting word boundaries) is worth 2 points, a bare substring
    match is worth 1.  The highest-scoring category wins; ties go to the
    category listed first in CATEGORY_KEYWORDS.  Returns None when no
    keyword matches at all.
    """
    haystack = f"{skill_name} {description}".lower()

    best_category = None
    best_score = 0
    for category, keywords in CATEGORY_KEYWORDS.items():
        total = 0
        for kw in keywords:
            # Prefer exact phrase matches with word boundaries.
            if re.search(r'\b' + re.escape(kw) + r'\b', haystack):
                total += 2
            elif kw in haystack:
                total += 1
        # Strictly greater keeps the first category on ties, which matches
        # max() over an insertion-ordered dict of positive scores.
        if total > best_score:
            best_score = total
            best_category = category
    return best_category
def auto_categorize(skills_dir, dry_run=False):
    """Auto-categorize skills and update their SKILL.md files in place.

    Walks *skills_dir* for SKILL.md files, parses each file's frontmatter
    with a naive ``key: value`` scanner, and assigns a category (via
    categorize_skill) to every skill that is missing one or marked
    'uncategorized'.  Unless *dry_run* is True, matched files are rewritten
    in place.  Prints a summary report and returns the number of skills
    that were categorized.
    """
    skills = []
    categorized_count = 0
    already_categorized = 0
    failed_count = 0
    for root, dirs, files in os.walk(skills_dir):
        # Prune hidden directories in place so os.walk never descends into them.
        dirs[:] = [d for d in dirs if not d.startswith('.')]
        if "SKILL.md" in files:
            skill_path = os.path.join(root, "SKILL.md")
            skill_id = os.path.basename(root)
            try:
                with open(skill_path, 'r', encoding='utf-8') as f:
                    content = f.read()
                # Extract name and description from frontmatter
                fm_match = re.search(r'^---\s*\n(.*?)\n---', content, re.DOTALL)
                if not fm_match:
                    # No frontmatter at all: nothing we can categorize.
                    continue
                fm_text = fm_match.group(1)
                metadata = {}
                # Naive parser: one 'key: value' per line, comment lines
                # ignored, surrounding quotes stripped from values.
                for line in fm_text.split('\n'):
                    if ':' in line and not line.strip().startswith('#'):
                        key, val = line.split(':', 1)
                        metadata[key.strip()] = val.strip().strip('"').strip("'")
                skill_name = metadata.get('name', skill_id)
                description = metadata.get('description', '')
                current_category = metadata.get('category', 'uncategorized')
                # Skip if already has a meaningful category
                if current_category and current_category != 'uncategorized':
                    already_categorized += 1
                    skills.append({
                        'id': skill_id,
                        'name': skill_name,
                        'current': current_category,
                        'action': 'SKIP'
                    })
                    continue
                # Try to auto-categorize
                new_category = categorize_skill(skill_name, description)
                if new_category:
                    skills.append({
                        'id': skill_id,
                        'name': skill_name,
                        'current': current_category,
                        'new': new_category,
                        'action': 'UPDATE'
                    })
                    if not dry_run:
                        # Update the SKILL.md file - add or replace category
                        fm_start = content.find('---')
                        fm_end = content.find('---', fm_start + 3)
                        if fm_start >= 0 and fm_end > fm_start:
                            frontmatter = content[fm_start:fm_end+3]
                            body = content[fm_end+3:]
                            # Check if category exists in frontmatter
                            if 'category:' in frontmatter:
                                # Replace existing category.
                                # NOTE(review): r'\w+' cannot capture a
                                # hyphenated value; this is only safe here
                                # because the existing value reaching this
                                # branch is 'uncategorized' — confirm.
                                new_frontmatter = re.sub(
                                    r'category:\s*\w+',
                                    f'category: {new_category}',
                                    frontmatter
                                )
                            else:
                                # Add category before the closing ---
                                new_frontmatter = frontmatter.replace(
                                    '\n---',
                                    f'\ncategory: {new_category}\n---'
                                )
                            new_content = new_frontmatter + body
                            with open(skill_path, 'w', encoding='utf-8') as f:
                                f.write(new_content)
                    categorized_count += 1
                else:
                    # No keyword matched; record the failure for the report.
                    skills.append({
                        'id': skill_id,
                        'name': skill_name,
                        'current': current_category,
                        'action': 'FAILED'
                    })
                    failed_count += 1
            except Exception as e:
                print(f"❌ Error processing {skill_id}: {str(e)}")
    # Print report
    print("\n" + "="*70)
    print("AUTO-CATEGORIZATION REPORT")
    print("="*70)
    print(f"\n📊 Summary:")
    print(f"   ✅ Categorized: {categorized_count}")
    print(f"   ⏭️ Already categorized: {already_categorized}")
    print(f"   ❌ Failed to categorize: {failed_count}")
    print(f"   📈 Total processed: {len(skills)}")
    if categorized_count > 0:
        print(f"\n📋 Sample changes:")
        # Preview: only the first ten entries are scanned for UPDATEs.
        for skill in skills[:10]:
            if skill['action'] == 'UPDATE':
                print(f"{skill['id']}")
                print(f"   {skill['current']} → {skill['new']}")
    if dry_run:
        print(f"\n🔍 DRY RUN MODE - No changes made")
    else:
        print(f"\n💾 Changes saved to SKILL.md files")
    return categorized_count
def main():
    """Command-line entry point: parse flags and run the categorizer."""
    arg_parser = argparse.ArgumentParser(
        description="Auto-categorize skills based on content",
        formatter_class=argparse.RawDescriptionHelpFormatter,
        epilog="""
Examples:
  python auto_categorize_skills.py --dry-run
  python auto_categorize_skills.py
"""
    )
    arg_parser.add_argument(
        '--dry-run',
        action='store_true',
        help='Show what would be changed without making changes'
    )
    options = arg_parser.parse_args()
    # The scripts/ directory sits one level below the project root.
    project_root = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
    auto_categorize(os.path.join(project_root, "skills"), dry_run=options.dry_run)
# Run only when executed as a script, not when imported.
if __name__ == "__main__":
    main()

View File

@@ -0,0 +1,53 @@
#!/usr/bin/env python3
"""
Update all skill dates from 2025 to 2026.
Fixes the year mismatch issue.
"""
import os
import re
import sys
# Ensure UTF-8 output for Windows compatibility.
# Re-wrap stdout/stderr around their raw buffers so console output does not
# fail on characters outside the legacy Windows code page.
if sys.platform == 'win32':
    import io
    sys.stdout = io.TextIOWrapper(sys.stdout.buffer, encoding='utf-8')
    sys.stderr = io.TextIOWrapper(sys.stderr.buffer, encoding='utf-8')
def update_dates(skills_dir):
    """Bump the date_added year 2025 -> 2026 in every SKILL.md under *skills_dir*.

    Only files containing a quoted ``date_added: "2025-...`` value are
    rewritten; hidden directories are skipped.  Prints one line per updated
    skill plus a summary, and returns the number of files changed.
    """
    old_marker = 'date_added: "2025-'
    new_marker = 'date_added: "2026-'
    touched = 0
    for folder, subdirs, filenames in os.walk(skills_dir):
        # Prune hidden directories in place so os.walk skips them entirely.
        subdirs[:] = [name for name in subdirs if not name.startswith('.')]
        if "SKILL.md" not in filenames:
            continue
        md_path = os.path.join(folder, "SKILL.md")
        skill_id = os.path.basename(folder)
        try:
            with open(md_path, 'r', encoding='utf-8') as handle:
                text = handle.read()
            if old_marker not in text:
                continue
            with open(md_path, 'w', encoding='utf-8') as handle:
                handle.write(text.replace(old_marker, new_marker))
            print(f"OK {skill_id}")
            touched += 1
        except Exception as e:
            print(f"Error updating {skill_id}: {str(e)}")
    print(f"\nUpdated {touched} skills to 2026")
    return touched
# Script entry point: locate <project-root>/skills relative to this file
# (scripts/ sits one level below the root) and run the year migration.
if __name__ == "__main__":
    base_dir = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
    skills_path = os.path.join(base_dir, "skills")
    print("Updating all dates from 2025 to 2026...\n")
    update_dates(skills_path)
    print("\nDone! Run: python scripts/generate_index.py")

View File

@@ -1,9 +1,16 @@
import os
import json
import re
import sys
import yaml
# Ensure UTF-8 output for Windows compatibility.
# Re-wrap stdout/stderr around their raw buffers so console output does not
# fail on characters outside the legacy Windows code page.
if sys.platform == 'win32':
    import io
    sys.stdout = io.TextIOWrapper(sys.stdout.buffer, encoding='utf-8')
    sys.stderr = io.TextIOWrapper(sys.stderr.buffer, encoding='utf-8')
def parse_frontmatter(content):
"""
Parses YAML frontmatter, sanitizing unquoted values containing @.
@@ -55,11 +62,12 @@ def generate_index(skills_dir, output_file):
skill_info = {
"id": dir_name,
"path": os.path.relpath(root, os.path.dirname(skills_dir)),
"category": parent_dir if parent_dir != "skills" else "uncategorized",
"category": parent_dir if parent_dir != "skills" else None, # Will be overridden by frontmatter if present
"name": dir_name.replace("-", " ").title(),
"description": "",
"risk": "unknown",
"source": "unknown"
"source": "unknown",
"date_added": None
}
try:
@@ -72,11 +80,18 @@ def generate_index(skills_dir, output_file):
# Parse Metadata
metadata = parse_frontmatter(content)
# Merge Metadata
# Merge Metadata (frontmatter takes priority)
if "name" in metadata: skill_info["name"] = metadata["name"]
if "description" in metadata: skill_info["description"] = metadata["description"]
if "risk" in metadata: skill_info["risk"] = metadata["risk"]
if "source" in metadata: skill_info["source"] = metadata["source"]
if "date_added" in metadata: skill_info["date_added"] = metadata["date_added"]
# Category: prefer frontmatter, then folder structure, then default
if "category" in metadata:
skill_info["category"] = metadata["category"]
elif skill_info["category"] is None:
skill_info["category"] = "uncategorized"
# Fallback for description if missing in frontmatter (legacy support)
if not skill_info["description"]:

View File

@@ -0,0 +1,127 @@
#!/usr/bin/env python3
"""
Generate a report of skills with their date_added metadata in JSON format.
Usage:
python generate_skills_report.py [--output report.json] [--sort date|name]
"""
import os
import re
import json
import sys
import argparse
from datetime import datetime
from pathlib import Path
def get_project_root():
    """Return the absolute path of the repository root (parent of scripts/)."""
    here = os.path.abspath(__file__)
    return os.path.dirname(os.path.dirname(here))
def parse_frontmatter(content):
    """Extract the leading ``--- ... ---`` frontmatter as a flat dict.

    Returns None when *content* has no frontmatter block at its start.
    Values are taken verbatim from ``key: value`` lines with surrounding
    quotes stripped; lines starting with '#' are ignored.
    """
    match = re.search(r'^---\s*\n(.*?)\n---', content, re.DOTALL)
    if match is None:
        return None
    fields = {}
    for raw_line in match.group(1).split('\n'):
        if ':' not in raw_line or raw_line.strip().startswith('#'):
            continue
        key, _, value = raw_line.partition(':')
        fields[key.strip()] = value.strip().strip('"').strip("'")
    return fields
def generate_skills_report(output_file=None, sort_by='date'):
    """Generate a report of all skills with their metadata.

    Walks <project-root>/skills for SKILL.md files, collects their
    frontmatter fields into a list of dicts, and emits a JSON report with
    coverage statistics for the date_added field.

    Args:
        output_file: Path to write the JSON report to; prints the report to
            stdout when None.
        sort_by: 'date' (newest first, then name) or 'name' (alphabetical).

    Returns:
        The report dict, or None when writing *output_file* failed.
    """
    skills_dir = os.path.join(get_project_root(), 'skills')
    skills_data = []
    for root, dirs, files in os.walk(skills_dir):
        # Skip hidden/disabled directories
        dirs[:] = [d for d in dirs if not d.startswith('.')]
        if "SKILL.md" in files:
            skill_name = os.path.basename(root)
            skill_path = os.path.join(root, "SKILL.md")
            try:
                with open(skill_path, 'r', encoding='utf-8') as f:
                    content = f.read()
                metadata = parse_frontmatter(content)
                if metadata is None:
                    # No parseable frontmatter: exclude from the report.
                    continue
                skill_info = {
                    'id': metadata.get('id', skill_name),
                    'name': metadata.get('name', skill_name),
                    'description': metadata.get('description', ''),
                    'date_added': metadata.get('date_added', None),
                    'source': metadata.get('source', 'unknown'),
                    'risk': metadata.get('risk', 'unknown'),
                    # Fallback: first hyphen-separated id segment as a rough
                    # category (e.g. 'web-scraper' -> 'web'), else 'other'.
                    'category': metadata.get('category', metadata.get('id', '').split('-')[0] if '-' in metadata.get('id', '') else 'other'),
                }
                skills_data.append(skill_info)
            except Exception as e:
                print(f"⚠️ Error reading {skill_path}: {str(e)}", file=sys.stderr)
    # Sort data
    if sort_by == 'date':
        # Sort by date_added (newest first), then by name.
        # '0000-00-00' substitutes for missing dates so undated skills
        # sink to the end after the reverse sort.
        skills_data.sort(key=lambda x: (x['date_added'] or '0000-00-00', x['name']), reverse=True)
    elif sort_by == 'name':
        skills_data.sort(key=lambda x: x['name'])
    # Prepare report
    report = {
        'generated_at': datetime.now().isoformat(),
        'total_skills': len(skills_data),
        'skills_with_dates': sum(1 for s in skills_data if s['date_added']),
        'skills_without_dates': sum(1 for s in skills_data if not s['date_added']),
        'coverage_percentage': round(
            sum(1 for s in skills_data if s['date_added']) / len(skills_data) * 100 if skills_data else 0,
            1
        ),
        'sorted_by': sort_by,
        'skills': skills_data
    }
    # Output
    if output_file:
        try:
            with open(output_file, 'w', encoding='utf-8') as f:
                json.dump(report, f, indent=2, ensure_ascii=False)
            print(f"✅ Report saved to: {output_file}")
        except Exception as e:
            print(f"❌ Error saving report: {str(e)}")
            return None
    else:
        # Print to stdout
        print(json.dumps(report, indent=2, ensure_ascii=False))
    return report
def main():
    """Parse CLI options and produce the skills report."""
    cli = argparse.ArgumentParser(
        description="Generate a skills report with date_added metadata",
        formatter_class=argparse.RawDescriptionHelpFormatter,
        epilog="""
Examples:
  python generate_skills_report.py
  python generate_skills_report.py --output skills_report.json
  python generate_skills_report.py --sort name --output sorted_skills.json
"""
    )
    cli.add_argument(
        '--output', '-o',
        help='Output file (JSON). If not specified, prints to stdout'
    )
    cli.add_argument(
        '--sort',
        choices=['date', 'name'],
        default='date',
        help='Sort order (default: date)'
    )
    parsed = cli.parse_args()
    generate_skills_report(output_file=parsed.output, sort_by=parsed.sort)
# Run only when executed as a script, not when imported.
if __name__ == '__main__':
    main()

View File

@@ -0,0 +1,306 @@
#!/usr/bin/env python3
"""
Manage skill date_added metadata.
Usage:
python manage_skill_dates.py list # List all skills with their dates
python manage_skill_dates.py add-missing [--date YYYY-MM-DD] # Add dates to skills without them
python manage_skill_dates.py add-all [--date YYYY-MM-DD] # Add/update dates for all skills
python manage_skill_dates.py update <skill-id> YYYY-MM-DD # Update a specific skill's date
"""
import os
import re
import sys
import argparse
from datetime import datetime
from pathlib import Path
# Ensure UTF-8 output for Windows compatibility.
# Re-wrap stdout/stderr around their raw buffers so console output does not
# fail on characters outside the legacy Windows code page.
if sys.platform == 'win32':
    import io
    sys.stdout = io.TextIOWrapper(sys.stdout.buffer, encoding='utf-8')
    sys.stderr = io.TextIOWrapper(sys.stderr.buffer, encoding='utf-8')
def get_project_root():
    """Return the repository root (the parent of this script's directory)."""
    script_dir = os.path.dirname(os.path.abspath(__file__))
    return os.path.dirname(script_dir)
def parse_frontmatter(content):
    """Parse SKILL.md frontmatter into a flat dict.

    Returns ``(metadata, content)``: *metadata* is None when no leading
    ``--- ... ---`` block is found, and *content* is always the original
    text, returned unchanged as the second element.
    """
    match = re.search(r'^---\s*\n(.*?)\n---', content, re.DOTALL)
    if match is None:
        return None, content
    fields = {}
    for raw_line in match.group(1).split('\n'):
        if ':' not in raw_line or raw_line.strip().startswith('#'):
            continue
        key, _, value = raw_line.partition(':')
        fields[key.strip()] = value.strip().strip('"').strip("'")
    return fields, content
def reconstruct_frontmatter(metadata):
    """Serialize a metadata dict back into a ``--- ... ---`` frontmatter block.

    Known keys are emitted in a canonical order (id, name, description,
    category, risk, source, tags), any remaining keys follow in their
    original dict order, and date_added is always written last, quoted.
    Values containing a space or ':', '#', '"' are double-quoted.
    """
    lines = ["---"]
    # Order: id, name, description, category, risk, source, tags, date_added
    priority_keys = ['id', 'name', 'description', 'category', 'risk', 'source', 'tags']

    def emit(key, val):
        # Serialize one key. Lists (e.g. tags) use their Python repr,
        # matching this script's historical output format.
        if isinstance(val, list):
            lines.append(f'{key}: {val}')
        elif ' ' in str(val) or any(c in str(val) for c in ':#"'):
            # NOTE(review): a value that itself contains '"' produces broken
            # quoting here; no such value is written by this script today.
            lines.append(f'{key}: "{val}"')
        else:
            lines.append(f'{key}: {val}')

    for key in priority_keys:
        if key in metadata:
            emit(key, metadata[key])
    # Bug fix: previously any key outside priority_keys (e.g. 'version',
    # 'allowed-tools') was silently DROPPED whenever a skill was rewritten.
    # Preserve them so updating date_added never loses metadata.
    for key, val in metadata.items():
        if key not in priority_keys and key != 'date_added':
            emit(key, val)
    # Add date_added at the end, always quoted.
    if 'date_added' in metadata:
        lines.append(f'date_added: "{metadata["date_added"]}"')
    lines.append("---")
    return '\n'.join(lines)
def update_skill_frontmatter(skill_path, metadata):
    """Merge *metadata* into a skill's frontmatter and rewrite the file.

    Reads the SKILL.md at *skill_path*, overlays *metadata* onto the parsed
    frontmatter (new keys win), rebuilds the frontmatter block via
    reconstruct_frontmatter, and writes the file back with the original
    body appended unchanged.

    Returns True on success, False when the frontmatter could not be
    parsed/located or any I/O error occurred.
    """
    try:
        with open(skill_path, 'r', encoding='utf-8') as f:
            content = f.read()
        old_metadata, body_content = parse_frontmatter(content)
        if old_metadata is None:
            print(f"{skill_path}: Could not parse frontmatter")
            return False
        # Merge metadata (keys in *metadata* override existing values).
        old_metadata.update(metadata)
        # Reconstruct content
        new_frontmatter = reconstruct_frontmatter(old_metadata)
        # Find where the frontmatter ends in the original content.
        # NOTE(review): assumes the closing delimiter is the next '---'
        # after position 3; a literal '---' inside the frontmatter would
        # truncate the file — confirm no skill contains one.
        fm_end = content.find('---', 3)  # Skip first ---
        if fm_end == -1:
            print(f"{skill_path}: Could not locate frontmatter boundary")
            return False
        body_start = fm_end + 3
        body = content[body_start:]
        new_content = new_frontmatter + body
        with open(skill_path, 'w', encoding='utf-8') as f:
            f.write(new_content)
        return True
    except Exception as e:
        print(f"❌ Error updating {skill_path}: {str(e)}")
        return False
def list_skills():
    """Print every skill grouped by whether it has a date_added value.

    Dated skills are listed newest-first; undated skills alphabetically.
    Ends with a coverage summary line.  Purely informational — no files
    are modified.
    """
    skills_dir = os.path.join(get_project_root(), 'skills')
    skills_with_dates = []
    skills_without_dates = []
    for root, dirs, files in os.walk(skills_dir):
        # Skip hidden/disabled directories
        dirs[:] = [d for d in dirs if not d.startswith('.')]
        if "SKILL.md" in files:
            skill_name = os.path.basename(root)
            skill_path = os.path.join(root, "SKILL.md")
            try:
                with open(skill_path, 'r', encoding='utf-8') as f:
                    content = f.read()
                metadata, _ = parse_frontmatter(content)
                if metadata is None:
                    # Unparseable frontmatter: excluded from both lists.
                    continue
                date_added = metadata.get('date_added', 'N/A')
                if date_added == 'N/A':
                    skills_without_dates.append(skill_name)
                else:
                    skills_with_dates.append((skill_name, date_added))
            except Exception as e:
                print(f"⚠️ Error reading {skill_path}: {str(e)}", file=sys.stderr)
    # Sort by date (newest first; ISO dates sort correctly as strings).
    skills_with_dates.sort(key=lambda x: x[1], reverse=True)
    print(f"\n📅 Skills with Date Added ({len(skills_with_dates)}):")
    print("=" * 60)
    if skills_with_dates:
        for skill_name, date in skills_with_dates:
            print(f"  {date}{skill_name}")
    else:
        print("  (none)")
    print(f"\n⏳ Skills without Date Added ({len(skills_without_dates)}):")
    print("=" * 60)
    if skills_without_dates:
        for skill_name in sorted(skills_without_dates):
            print(f"  {skill_name}")
    else:
        print("  (none)")
    total = len(skills_with_dates) + len(skills_without_dates)
    # Guard against division by zero when no skills exist at all.
    percentage = (len(skills_with_dates) / total * 100) if total > 0 else 0
    print(f"\n📊 Coverage: {len(skills_with_dates)}/{total} ({percentage:.1f}%)")
def add_missing_dates(date_str=None):
    """Add date_added to skills that don't have it.

    Args:
        date_str: Date to stamp (YYYY-MM-DD); defaults to today when None.

    Returns:
        False when *date_str* is malformed, True otherwise.  Skills that
        already carry a date_added are counted but left untouched.
    """
    if date_str is None:
        date_str = datetime.now().strftime('%Y-%m-%d')
    # Validate date format
    if not re.match(r'^\d{4}-\d{2}-\d{2}$', date_str):
        print(f"❌ Invalid date format: {date_str}. Use YYYY-MM-DD.")
        return False
    skills_dir = os.path.join(get_project_root(), 'skills')
    updated_count = 0
    skipped_count = 0
    for root, dirs, files in os.walk(skills_dir):
        # Skip hidden/disabled directories.
        dirs[:] = [d for d in dirs if not d.startswith('.')]
        if "SKILL.md" in files:
            skill_name = os.path.basename(root)
            skill_path = os.path.join(root, "SKILL.md")
            try:
                with open(skill_path, 'r', encoding='utf-8') as f:
                    content = f.read()
                metadata, _ = parse_frontmatter(content)
                if metadata is None:
                    print(f"⚠️ {skill_name}: Could not parse frontmatter, skipping")
                    continue
                if 'date_added' not in metadata:
                    if update_skill_frontmatter(skill_path, {'date_added': date_str}):
                        print(f"{skill_name}: Added date_added: {date_str}")
                        updated_count += 1
                    else:
                        print(f"{skill_name}: Failed to update")
                else:
                    # Already dated: count it, but do not modify the file.
                    skipped_count += 1
            except Exception as e:
                print(f"❌ Error processing {skill_name}: {str(e)}")
    print(f"\n✨ Updated {updated_count} skills, skipped {skipped_count} that already had dates")
    return True
def add_all_dates(date_str=None):
    """Stamp every skill's date_added with *date_str* (today when omitted).

    Unlike add_missing_dates, this overwrites existing values as well.
    Returns False on a malformed date, True otherwise.
    """
    if date_str is None:
        date_str = datetime.now().strftime('%Y-%m-%d')
    # Reject anything that is not a YYYY-MM-DD string.
    if re.match(r'^\d{4}-\d{2}-\d{2}$', date_str) is None:
        print(f"❌ Invalid date format: {date_str}. Use YYYY-MM-DD.")
        return False
    skills_dir = os.path.join(get_project_root(), 'skills')
    updated_count = 0
    for folder, subdirs, filenames in os.walk(skills_dir):
        # Prune hidden/disabled directories in place.
        subdirs[:] = [name for name in subdirs if not name.startswith('.')]
        if "SKILL.md" not in filenames:
            continue
        skill_name = os.path.basename(folder)
        md_path = os.path.join(folder, "SKILL.md")
        try:
            if update_skill_frontmatter(md_path, {'date_added': date_str}):
                print(f"{skill_name}: Set date_added: {date_str}")
                updated_count += 1
            else:
                print(f"{skill_name}: Failed to update")
        except Exception as e:
            print(f"❌ Error processing {skill_name}: {str(e)}")
    print(f"\n✨ Updated {updated_count} skills")
    return True
def update_skill_date(skill_name, date_str):
    """Set date_added for a single skill identified by its folder name.

    Returns True on success; False on a malformed date, a missing skill
    folder, or a failed frontmatter rewrite.
    """
    # Reject anything that is not a YYYY-MM-DD string.
    if re.match(r'^\d{4}-\d{2}-\d{2}$', date_str) is None:
        print(f"❌ Invalid date format: {date_str}. Use YYYY-MM-DD.")
        return False
    md_path = os.path.join(get_project_root(), 'skills', skill_name, 'SKILL.md')
    if not os.path.exists(md_path):
        print(f"❌ Skill not found: {skill_name}")
        return False
    if not update_skill_frontmatter(md_path, {'date_added': date_str}):
        print(f"{skill_name}: Failed to update")
        return False
    print(f"{skill_name}: Updated date_added to {date_str}")
    return True
def main():
    """CLI entry point: dispatch to the list/add-missing/add-all/update commands."""
    parser = argparse.ArgumentParser(
        description="Manage skill date_added metadata",
        formatter_class=argparse.RawDescriptionHelpFormatter,
        epilog="""
Examples:
  python manage_skill_dates.py list
  python manage_skill_dates.py add-missing
  python manage_skill_dates.py add-missing --date 2024-01-15
  python manage_skill_dates.py add-all --date 2025-01-01
  python manage_skill_dates.py update my-skill-name 2024-06-01
"""
    )
    subparsers = parser.add_subparsers(dest='command', help='Command to execute')
    # 'list' takes no extra arguments.
    subparsers.add_parser('list', help='List all skills with their date_added values')
    missing_cmd = subparsers.add_parser('add-missing', help='Add date_added to skills without it')
    missing_cmd.add_argument('--date', help='Date to use (YYYY-MM-DD), defaults to today')
    all_cmd = subparsers.add_parser('add-all', help='Add/update date_added for all skills')
    all_cmd.add_argument('--date', help='Date to use (YYYY-MM-DD), defaults to today')
    update_cmd = subparsers.add_parser('update', help='Update a specific skill date')
    update_cmd.add_argument('skill_name', help='Name of the skill')
    update_cmd.add_argument('date', help='Date to set (YYYY-MM-DD)')
    args = parser.parse_args()
    # No sub-command given: show usage instead of doing nothing silently.
    if not args.command:
        parser.print_help()
    elif args.command == 'list':
        list_skills()
    elif args.command == 'add-missing':
        add_missing_dates(args.date)
    elif args.command == 'add-all':
        add_all_dates(args.date)
    elif args.command == 'update':
        update_skill_date(args.skill_name, args.date)
# Run only when executed as a script, not when imported.
if __name__ == '__main__':
    main()

View File

@@ -41,6 +41,7 @@ const ALLOWED_FIELDS = new Set([
"metadata",
"allowed-tools",
"package",
"date_added",
]);
const USE_SECTION_PATTERNS = [

View File

@@ -56,6 +56,7 @@ def validate_skills(skills_dir, strict_mode=False):
security_disclaimer_pattern = re.compile(r"AUTHORIZED USE ONLY", re.IGNORECASE)
valid_risk_levels = ["none", "safe", "critical", "offensive", "unknown"]
date_pattern = re.compile(r'^\d{4}-\d{2}-\d{2}$') # YYYY-MM-DD format
for root, dirs, files in os.walk(skills_dir):
# Skip .disabled or hidden directories
@@ -110,6 +111,15 @@ def validate_skills(skills_dir, strict_mode=False):
if strict_mode: errors.append(msg.replace("⚠️", ""))
else: warnings.append(msg)
# Date Added Validation (optional field)
if "date_added" in metadata:
if not date_pattern.match(metadata["date_added"]):
errors.append(f"{rel_path}: Invalid 'date_added' format. Must be YYYY-MM-DD (e.g., '2024-01-15'), got '{metadata['date_added']}'")
else:
msg = f" {rel_path}: Missing 'date_added' field (optional, but recommended)"
if strict_mode: warnings.append(msg)
# In normal mode, we just silently skip this
# 3. Content Checks (Triggers)
if not has_when_to_use_section(content):
msg = f"⚠️ {rel_path}: Missing '## When to Use' section"