feat(multi-llm): Phase 1 - Foundation adaptor architecture
Implement base adaptor pattern for multi-LLM support (Issue #179) **Architecture:** - Created adaptors/ package with base SkillAdaptor class - Implemented factory pattern with get_adaptor() registry - Refactored Claude-specific code into ClaudeAdaptor **Changes:** - New: src/skill_seekers/cli/adaptors/base.py (SkillAdaptor + SkillMetadata) - New: src/skill_seekers/cli/adaptors/__init__.py (registry + factory) - New: src/skill_seekers/cli/adaptors/claude.py (refactored upload + enhance logic) - Modified: package_skill.py (added --target flag, uses adaptor.package()) - Modified: upload_skill.py (added --target flag, uses adaptor.upload()) - Modified: enhance_skill.py (added --target flag, uses adaptor.enhance()) **Tests:** - New: tests/test_adaptors/test_base.py (10 tests passing) - All existing tests still pass (backward compatible) **Backward Compatibility:** - Default --target=claude maintains existing behavior - All CLI tools work exactly as before without --target flag - No breaking changes **Next:** Phase 2 - Implement Gemini, OpenAI, Markdown adaptors
This commit is contained in:
124
src/skill_seekers/cli/adaptors/__init__.py
Normal file
124
src/skill_seekers/cli/adaptors/__init__.py
Normal file
@@ -0,0 +1,124 @@
|
|||||||
|
#!/usr/bin/env python3
"""
Multi-LLM Adaptor Registry

Provides factory function to get platform-specific adaptors for skill generation.
Supports Claude AI, Google Gemini, OpenAI ChatGPT, and generic Markdown export.
"""

from typing import Dict, Type

from .base import SkillAdaptor, SkillMetadata

# Adaptor imports are optional: a platform whose module is not yet
# implemented simply fails to import and its name is left as None.
try:
    from .claude import ClaudeAdaptor
except ImportError:
    ClaudeAdaptor = None

try:
    from .gemini import GeminiAdaptor
except ImportError:
    GeminiAdaptor = None

try:
    from .openai import OpenAIAdaptor
except ImportError:
    OpenAIAdaptor = None

try:
    from .markdown import MarkdownAdaptor
except ImportError:
    MarkdownAdaptor = None

# Registry of available adaptors: platform identifier -> adaptor class.
# Only platforms whose import above succeeded are registered; insertion
# order matches the declaration order (claude, gemini, openai, markdown).
ADAPTORS: Dict[str, Type[SkillAdaptor]] = {
    platform: adaptor_cls
    for platform, adaptor_cls in (
        ('claude', ClaudeAdaptor),
        ('gemini', GeminiAdaptor),
        ('openai', OpenAIAdaptor),
        ('markdown', MarkdownAdaptor),
    )
    if adaptor_cls is not None
}
|
||||||
|
|
||||||
|
|
||||||
|
def get_adaptor(platform: str, config: dict = None) -> SkillAdaptor:
    """
    Factory function to get platform-specific adaptor instance.

    Args:
        platform: Platform identifier ('claude', 'gemini', 'openai', 'markdown')
        config: Optional platform-specific configuration

    Returns:
        SkillAdaptor instance for the specified platform

    Raises:
        ValueError: If platform is not supported or not yet implemented

    Examples:
        >>> adaptor = get_adaptor('claude')
        >>> adaptor = get_adaptor('gemini', {'api_version': 'v1beta'})
    """
    if platform not in ADAPTORS:
        # Distinguish "registry is empty" from "unknown platform" so the
        # error message is actionable; only join the key list when needed.
        if not ADAPTORS:
            raise ValueError(
                "No adaptors are currently implemented. "
                f"Platform '{platform}' is not available."
            )
        available = ', '.join(ADAPTORS.keys())
        raise ValueError(
            f"Platform '{platform}' is not supported or not yet implemented. "
            f"Available platforms: {available}"
        )

    adaptor_class = ADAPTORS[platform]
    return adaptor_class(config)
|
||||||
|
|
||||||
|
|
||||||
|
def list_platforms() -> list[str]:
    """
    List all supported platforms.

    Returns:
        List of platform identifiers, in registration order

    Examples:
        >>> list_platforms()
        ['claude', 'gemini', 'openai', 'markdown']
    """
    # Dicts preserve insertion order, so this mirrors registration order.
    return [name for name in ADAPTORS]
|
||||||
|
|
||||||
|
|
||||||
|
def is_platform_available(platform: str) -> bool:
    """
    Check if a platform adaptor is available.

    Args:
        platform: Platform identifier to check

    Returns:
        True if an adaptor is registered for the platform

    Examples:
        >>> is_platform_available('claude')
        True
        >>> is_platform_available('unknown')
        False
    """
    return platform in ADAPTORS
|
||||||
|
|
||||||
|
|
||||||
|
# Export public interface
|
||||||
|
# Public interface of the adaptors package (consumed by `from adaptors import *`).
__all__ = [
    'SkillAdaptor',
    'SkillMetadata',
    'get_adaptor',
    'list_platforms',
    'is_platform_available',
    'ADAPTORS',
]
|
||||||
220
src/skill_seekers/cli/adaptors/base.py
Normal file
220
src/skill_seekers/cli/adaptors/base.py
Normal file
@@ -0,0 +1,220 @@
|
|||||||
|
#!/usr/bin/env python3
|
||||||
|
"""
|
||||||
|
Base Adaptor for Multi-LLM Support
|
||||||
|
|
||||||
|
Defines the abstract interface that all platform-specific adaptors must implement.
|
||||||
|
This enables Skill Seekers to generate skills for multiple LLM platforms (Claude, Gemini, ChatGPT).
|
||||||
|
"""
|
||||||
|
|
||||||
|
from abc import ABC, abstractmethod
|
||||||
|
from pathlib import Path
|
||||||
|
from typing import Dict, Any, Optional
|
||||||
|
from dataclasses import dataclass, field
|
||||||
|
|
||||||
|
|
||||||
|
@dataclass
class SkillMetadata:
    """Universal skill metadata used across all platforms.

    Platform adaptors translate these fields into each platform's native
    representation (e.g. YAML frontmatter for Claude).
    """

    name: str                                      # skill identifier
    description: str                               # human-readable summary
    version: str = "1.0.0"                         # semantic version string
    author: Optional[str] = None                   # optional attribution
    tags: list[str] = field(default_factory=list)  # free-form labels
|
||||||
|
|
||||||
|
|
||||||
|
class SkillAdaptor(ABC):
    """
    Abstract base class for platform-specific skill adaptors.

    Each platform (Claude, Gemini, OpenAI) implements this interface to handle:
    - Platform-specific SKILL.md formatting
    - Platform-specific package structure (ZIP, tar.gz, etc.)
    - Platform-specific upload endpoints and authentication
    - Optional AI enhancement capabilities
    """

    # Platform identifiers — concrete subclasses override these.
    PLATFORM: str = "unknown"        # e.g. "claude", "gemini", "openai"
    PLATFORM_NAME: str = "Unknown"   # e.g. "Claude AI (Anthropic)"
    DEFAULT_API_ENDPOINT: Optional[str] = None

    def __init__(self, config: Optional[Dict[str, Any]] = None):
        """
        Initialize adaptor with optional configuration.

        Args:
            config: Platform-specific configuration options (None becomes {})
        """
        self.config = config or {}

    @abstractmethod
    def format_skill_md(self, skill_dir: Path, metadata: SkillMetadata) -> str:
        """
        Format SKILL.md content with platform-specific frontmatter/structure.

        Different platforms require different formats:
        - Claude: YAML frontmatter + markdown
        - Gemini: Plain markdown (no frontmatter)
        - OpenAI: Assistant instructions format

        Args:
            skill_dir: Path to skill directory containing references/
            metadata: Skill metadata (name, description, version, etc.)

        Returns:
            Formatted SKILL.md content as string
        """
        pass

    @abstractmethod
    def package(self, skill_dir: Path, output_path: Path) -> Path:
        """
        Package skill for the platform (ZIP, tar.gz, etc.).

        Different platforms require different package formats:
        - Claude: .zip with SKILL.md, references/, scripts/, assets/
        - Gemini: .tar.gz with system_instructions.md, references/
        - OpenAI: .zip with assistant_instructions.txt, vector_store_files/

        Args:
            skill_dir: Path to skill directory to package
            output_path: Path for output package (file or directory)

        Returns:
            Path to created package file
        """
        pass

    @abstractmethod
    def upload(self, package_path: Path, api_key: str, **kwargs) -> Dict[str, Any]:
        """
        Upload packaged skill to the platform.

        Returns a standardized response dictionary for all platforms.

        Args:
            package_path: Path to packaged skill file
            api_key: Platform API key
            **kwargs: Additional platform-specific arguments

        Returns:
            Dictionary with keys:
            - success (bool): Whether upload succeeded
            - skill_id (str|None): Platform-specific skill/assistant ID
            - url (str|None): URL to view/manage skill
            - message (str): Success or error message
        """
        pass

    def validate_api_key(self, api_key: str) -> bool:
        """
        Validate API key format for this platform.

        Default implementation only requires a non-blank key; subclasses
        override this with platform-specific prefix/format checks.

        Args:
            api_key: API key to validate

        Returns:
            True if key format is valid
        """
        return bool(api_key and api_key.strip())

    def get_env_var_name(self) -> str:
        """
        Get expected environment variable name for the API key.

        Returns:
            Environment variable name (e.g., "ANTHROPIC_API_KEY", "GOOGLE_API_KEY")
        """
        return f"{self.PLATFORM.upper()}_API_KEY"

    def supports_enhancement(self) -> bool:
        """
        Whether this platform supports AI-powered SKILL.md enhancement.

        Returns:
            True if platform can enhance skills (default: False)
        """
        return False

    def enhance(self, skill_dir: Path, api_key: str) -> bool:
        """
        Optionally enhance SKILL.md using the platform's AI.

        Only called when supports_enhancement() returns True; the base
        implementation is a no-op that reports failure.

        Args:
            skill_dir: Path to skill directory
            api_key: Platform API key

        Returns:
            True if enhancement succeeded
        """
        return False

    def _read_existing_content(self, skill_dir: Path) -> str:
        """
        Read existing SKILL.md content with any YAML frontmatter removed.

        Args:
            skill_dir: Path to skill directory

        Returns:
            SKILL.md body text, or "" when the file does not exist
        """
        skill_md = skill_dir / "SKILL.md"
        if not skill_md.exists():
            return ""

        text = skill_md.read_text(encoding='utf-8')
        if not text.startswith('---'):
            return text

        # '---<frontmatter>---<body>' splits into 3 pieces; the body is
        # everything after the second delimiter. Fewer pieces means the
        # frontmatter was never closed, so return the text unchanged.
        pieces = text.split('---', 2)
        return pieces[2].strip() if len(pieces) >= 3 else text

    def _extract_quick_reference(self, skill_dir: Path) -> str:
        """
        Extract a short quick-reference snippet from references/index.md.

        Args:
            skill_dir: Path to skill directory

        Returns:
            Up to 500 characters of index.md (suffixed with "..." when
            truncated), or a generic pointer when index.md is absent
        """
        index_md = skill_dir / "references" / "index.md"
        if not index_md.exists():
            return "See references/ directory for documentation."

        text = index_md.read_text(encoding='utf-8')
        if len(text) > 500:
            return text[:500] + "..."
        return text

    def _generate_toc(self, skill_dir: Path) -> str:
        """
        Generate a markdown table of contents from references/*.md.

        Args:
            skill_dir: Path to skill directory

        Returns:
            Markdown bullet list linking each reference file, one per line
            (index.md excluded); "" when references/ does not exist
        """
        refs_dir = skill_dir / "references"
        if not refs_dir.exists():
            return ""

        # Titles come from the file stem: underscores -> spaces, Title Case.
        entries = [
            f"- [{md.stem.replace('_', ' ').title()}](references/{md.name})"
            for md in sorted(refs_dir.glob("*.md"))
            if md.name != "index.md"
        ]
        return "\n".join(entries)
|
||||||
501
src/skill_seekers/cli/adaptors/claude.py
Normal file
501
src/skill_seekers/cli/adaptors/claude.py
Normal file
@@ -0,0 +1,501 @@
|
|||||||
|
#!/usr/bin/env python3
|
||||||
|
"""
|
||||||
|
Claude AI Adaptor
|
||||||
|
|
||||||
|
Implements platform-specific handling for Claude AI (Anthropic) skills.
|
||||||
|
Refactored from upload_skill.py and enhance_skill.py.
|
||||||
|
"""
|
||||||
|
|
||||||
|
import os
import zipfile
from pathlib import Path
from typing import Any, Dict, Optional

from .base import SkillAdaptor, SkillMetadata
|
||||||
|
|
||||||
|
|
||||||
|
class ClaudeAdaptor(SkillAdaptor):
    """
    Claude AI platform adaptor.

    Handles:
    - YAML frontmatter format for SKILL.md
    - ZIP packaging with standard Claude skill structure
    - Upload to Anthropic Skills API
    - AI enhancement using Claude API
    """

    PLATFORM = "claude"
    PLATFORM_NAME = "Claude AI (Anthropic)"
    DEFAULT_API_ENDPOINT = "https://api.anthropic.com/v1/skills"

    def format_skill_md(self, skill_dir: Path, metadata: SkillMetadata) -> str:
        """
        Format SKILL.md with Claude's YAML frontmatter.

        Args:
            skill_dir: Path to skill directory
            metadata: Skill metadata

        Returns:
            Formatted SKILL.md content with YAML frontmatter
        """
        # Reuse existing body text when it looks substantive (>100 chars);
        # otherwise generate a default skeleton from the reference files.
        existing_content = self._read_existing_content(skill_dir)

        if existing_content and len(existing_content) > 100:
            content_body = existing_content
        else:
            content_body = f"""# {metadata.name.title()} Documentation Skill

{metadata.description}

## When to use this skill

Use this skill when the user asks about {metadata.name} documentation, including API references, tutorials, examples, and best practices.

## What's included

This skill contains comprehensive documentation organized into categorized reference files.

{self._generate_toc(skill_dir)}

## Quick Reference

{self._extract_quick_reference(skill_dir)}

## Navigation

See `references/index.md` for complete documentation structure.
"""

        # Claude requires YAML frontmatter with name/description/version.
        return f"""---
name: {metadata.name}
description: {metadata.description}
version: {metadata.version}
---

{content_body}
"""

    def package(self, skill_dir: Path, output_path: Path) -> Path:
        """
        Package skill into ZIP file for Claude.

        Creates standard Claude skill structure:
        - SKILL.md
        - references/*.md
        - scripts/ (optional)
        - assets/ (optional)

        Args:
            skill_dir: Path to skill directory
            output_path: Output path/filename for ZIP

        Returns:
            Path to created ZIP file
        """
        skill_dir = Path(skill_dir)

        # Determine output filename: directory target gets "<skill>.zip",
        # a bare name gets a ".zip" suffix appended.
        if output_path.is_dir() or str(output_path).endswith('/'):
            output_path = Path(output_path) / f"{skill_dir.name}.zip"
        elif not str(output_path).endswith('.zip'):
            output_path = Path(str(output_path) + '.zip')

        output_path = Path(output_path)
        output_path.parent.mkdir(parents=True, exist_ok=True)

        with zipfile.ZipFile(output_path, 'w', zipfile.ZIP_DEFLATED) as zf:
            # SKILL.md is required by Claude; the subdirectories are optional.
            skill_md = skill_dir / "SKILL.md"
            if skill_md.exists():
                zf.write(skill_md, "SKILL.md")

            for subdir in ("references", "scripts", "assets"):
                self._add_directory(zf, skill_dir, subdir)

        return output_path

    def _add_directory(self, zf: zipfile.ZipFile, skill_dir: Path, subdir: str) -> None:
        """Recursively add skill_dir/subdir to the archive, skipping hidden files."""
        directory = skill_dir / subdir
        if not directory.exists():
            return
        for file_path in directory.rglob("*"):
            if file_path.is_file() and not file_path.name.startswith('.'):
                # Archive names are relative to the skill root so the ZIP
                # unpacks with the standard Claude layout.
                zf.write(file_path, str(file_path.relative_to(skill_dir)))

    @staticmethod
    def _upload_failure(message: str) -> Dict[str, Any]:
        """Build the standardized failure response dictionary."""
        return {
            'success': False,
            'skill_id': None,
            'url': None,
            'message': message
        }

    @staticmethod
    def _extract_error_message(response, default: str) -> str:
        """Pull the API error message out of a JSON error body, if any."""
        try:
            return response.json().get('error', {}).get('message', 'Unknown error')
        except Exception:
            # Body was not JSON (or not the expected shape).
            return default

    def upload(self, package_path: Path, api_key: str, **kwargs) -> Dict[str, Any]:
        """
        Upload skill ZIP to Anthropic Skills API.

        Args:
            package_path: Path to skill ZIP file
            api_key: Anthropic API key
            **kwargs: Additional arguments (timeout, etc.)

        Returns:
            Standardized result dictionary with keys
            success / skill_id / url / message
        """
        # requests is an optional dependency; fail gracefully without it.
        try:
            import requests
        except ImportError:
            return self._upload_failure(
                'requests library not installed. Run: pip install requests')

        # Validate the package before touching the network.
        package_path = Path(package_path)
        if not package_path.exists():
            return self._upload_failure(f'File not found: {package_path}')
        if package_path.suffix != '.zip':
            return self._upload_failure(f'Not a ZIP file: {package_path}')

        api_url = self.DEFAULT_API_ENDPOINT
        headers = {
            "x-api-key": api_key,
            "anthropic-version": "2023-06-01",
            # Skills API is in beta and must be opted into explicitly.
            "anthropic-beta": "skills-2025-10-02"
        }
        timeout = kwargs.get('timeout', 60)

        try:
            with open(package_path, 'rb') as f:
                zip_data = f.read()

            files = {
                'files[]': (package_path.name, zip_data, 'application/zip')
            }

            response = requests.post(
                api_url,
                headers=headers,
                files=files,
                timeout=timeout
            )

            if response.status_code == 200:
                # Extract skill ID if the response body is JSON with one.
                try:
                    skill_id = response.json().get('id')
                except Exception:
                    skill_id = None

                return {
                    'success': True,
                    'skill_id': skill_id,
                    'url': 'https://claude.ai/skills',
                    'message': 'Skill uploaded successfully to Claude AI'
                }

            if response.status_code == 401:
                return self._upload_failure(
                    'Authentication failed. Check your ANTHROPIC_API_KEY')

            if response.status_code == 400:
                error_msg = self._extract_error_message(
                    response, 'Invalid skill format')
                return self._upload_failure(f'Invalid skill format: {error_msg}')

            error_msg = self._extract_error_message(
                response, f'HTTP {response.status_code}')
            return self._upload_failure(f'Upload failed: {error_msg}')

        except requests.exceptions.Timeout:
            return self._upload_failure(
                'Upload timed out. Try again or use manual upload')
        except requests.exceptions.ConnectionError:
            return self._upload_failure(
                'Connection error. Check your internet connection')
        except Exception as e:
            return self._upload_failure(f'Unexpected error: {str(e)}')

    def validate_api_key(self, api_key: str) -> bool:
        """
        Validate Anthropic API key format.

        Args:
            api_key: API key to validate

        Returns:
            True if key starts with 'sk-ant-'
        """
        return api_key.strip().startswith('sk-ant-')

    def get_env_var_name(self) -> str:
        """
        Get environment variable name for Anthropic API key.

        Returns:
            'ANTHROPIC_API_KEY'
        """
        return "ANTHROPIC_API_KEY"

    def supports_enhancement(self) -> bool:
        """
        Claude supports AI enhancement via Anthropic API.

        Returns:
            True
        """
        return True

    def enhance(self, skill_dir: Path, api_key: str) -> bool:
        """
        Enhance SKILL.md using Claude API.

        Reads reference files, sends them to Claude, and generates
        an improved SKILL.md with real examples and better organization.

        Args:
            skill_dir: Path to skill directory
            api_key: Anthropic API key

        Returns:
            True if enhancement succeeded
        """
        # anthropic is an optional dependency; fail gracefully without it.
        try:
            import anthropic
        except ImportError:
            print("❌ Error: anthropic package not installed")
            print("Install with: pip install anthropic")
            return False

        skill_dir = Path(skill_dir)
        references_dir = skill_dir / "references"
        skill_md_path = skill_dir / "SKILL.md"

        print("📖 Reading reference documentation...")
        references = self._read_reference_files(references_dir)

        if not references:
            print("❌ No reference files found to analyze")
            return False

        print(f" ✓ Read {len(references)} reference files")
        total_size = sum(len(c) for c in references.values())
        print(f" ✓ Total size: {total_size:,} characters\n")

        # Existing SKILL.md (if any) is passed to Claude as a starting point.
        current_skill_md = None
        if skill_md_path.exists():
            current_skill_md = skill_md_path.read_text(encoding='utf-8')
            print(f" ℹ Found existing SKILL.md ({len(current_skill_md)} chars)")
        else:
            print(" ℹ No existing SKILL.md, will create new one")

        prompt = self._build_enhancement_prompt(
            skill_dir.name,
            references,
            current_skill_md
        )

        print("\n🤖 Asking Claude to enhance SKILL.md...")
        print(f" Input: {len(prompt):,} characters")

        try:
            client = anthropic.Anthropic(api_key=api_key)

            message = client.messages.create(
                model="claude-sonnet-4-20250514",
                max_tokens=4096,
                # Low temperature: favor faithful extraction over creativity.
                temperature=0.3,
                messages=[{
                    "role": "user",
                    "content": prompt
                }]
            )

            enhanced_content = message.content[0].text
            print(f" ✓ Generated enhanced SKILL.md ({len(enhanced_content)} chars)\n")

            # Keep the previous SKILL.md so the enhancement is reversible.
            if skill_md_path.exists():
                backup_path = skill_md_path.with_suffix('.md.backup')
                skill_md_path.rename(backup_path)
                print(f" 💾 Backed up original to: {backup_path.name}")

            skill_md_path.write_text(enhanced_content, encoding='utf-8')
            print(" ✅ Saved enhanced SKILL.md")

            return True

        except Exception as e:
            print(f"❌ Error calling Claude API: {e}")
            return False

    def _read_reference_files(self, references_dir: Path, max_chars: int = 200000) -> Dict[str, str]:
        """
        Read reference markdown files from skill directory.

        Individual files are truncated to 30,000 characters and reading stops
        once max_chars total have been collected, to bound the prompt size.

        Args:
            references_dir: Path to references directory
            max_chars: Maximum total characters to read

        Returns:
            Dictionary mapping filename to content
        """
        if not references_dir.exists():
            return {}

        references: Dict[str, str] = {}
        total_chars = 0

        for ref_file in sorted(references_dir.glob("*.md")):
            if total_chars >= max_chars:
                break

            try:
                content = ref_file.read_text(encoding='utf-8')
                if len(content) > 30000:
                    content = content[:30000] + "\n\n...(truncated)"

                references[ref_file.name] = content
                total_chars += len(content)

            except Exception as e:
                # Best-effort: skip unreadable files but keep going.
                print(f" ⚠️ Could not read {ref_file.name}: {e}")

        return references

    def _build_enhancement_prompt(
        self,
        skill_name: str,
        references: Dict[str, str],
        current_skill_md: Optional[str] = None
    ) -> str:
        """
        Build Claude API prompt for enhancement.

        Args:
            skill_name: Name of the skill
            references: Dictionary of reference content
            current_skill_md: Existing SKILL.md content (optional)

        Returns:
            Enhancement prompt for Claude
        """
        prompt = f"""You are enhancing a Claude skill's SKILL.md file. This skill is about: {skill_name}

I've scraped documentation and organized it into reference files. Your job is to create an EXCELLENT SKILL.md that will help Claude use this documentation effectively.

CURRENT SKILL.MD:
{'```markdown' if current_skill_md else '(none - create from scratch)'}
{current_skill_md or 'No existing SKILL.md'}
{'```' if current_skill_md else ''}

REFERENCE DOCUMENTATION:
"""

        # Fix: label each reference block with its actual filename (the loop
        # variable was previously unused and every file was headed
        # "## (unknown)", so Claude could not tell reference files apart).
        for filename, content in references.items():
            prompt += f"\n\n## {filename}\n```markdown\n{content[:30000]}\n```\n"

        prompt += """

YOUR TASK:
Create an enhanced SKILL.md that includes:

1. **Clear "When to Use This Skill" section** - Be specific about trigger conditions
2. **Excellent Quick Reference section** - Extract 5-10 of the BEST, most practical code examples from the reference docs
   - Choose SHORT, clear examples that demonstrate common tasks
   - Include both simple and intermediate examples
   - Annotate examples with clear descriptions
   - Use proper language tags (cpp, python, javascript, json, etc.)
3. **Detailed Reference Files description** - Explain what's in each reference file
4. **Practical "Working with This Skill" section** - Give users clear guidance on how to navigate the skill
5. **Key Concepts section** (if applicable) - Explain core concepts
6. **Keep the frontmatter** (---\nname: ...\n---) intact

IMPORTANT:
- Extract REAL examples from the reference docs, don't make them up
- Prioritize SHORT, clear examples (5-20 lines max)
- Make it actionable and practical
- Don't be too verbose - be concise but useful
- Maintain the markdown structure for Claude skills
- Keep code examples properly formatted with language tags

OUTPUT:
Return ONLY the complete SKILL.md content, starting with the frontmatter (---).
"""

        return prompt
|
||||||
@@ -1,12 +1,18 @@
|
|||||||
#!/usr/bin/env python3
|
#!/usr/bin/env python3
|
||||||
"""
|
"""
|
||||||
SKILL.md Enhancement Script
|
SKILL.md Enhancement Script
|
||||||
Uses Claude API to improve SKILL.md by analyzing reference documentation.
|
Uses platform AI APIs to improve SKILL.md by analyzing reference documentation.
|
||||||
|
|
||||||
Usage:
|
Usage:
|
||||||
skill-seekers enhance output/steam-inventory/
|
# Claude (default)
|
||||||
skill-seekers enhance output/react/
|
skill-seekers enhance output/react/
|
||||||
skill-seekers enhance output/godot/ --api-key YOUR_API_KEY
|
skill-seekers enhance output/react/ --api-key sk-ant-...
|
||||||
|
|
||||||
|
# Gemini
|
||||||
|
skill-seekers enhance output/react/ --target gemini --api-key AIzaSy...
|
||||||
|
|
||||||
|
# OpenAI
|
||||||
|
skill-seekers enhance output/react/ --target openai --api-key sk-proj-...
|
||||||
"""
|
"""
|
||||||
|
|
||||||
import os
|
import os
|
||||||
@@ -195,18 +201,26 @@ Return ONLY the complete SKILL.md content, starting with the frontmatter (---).
|
|||||||
|
|
||||||
def main():
|
def main():
|
||||||
parser = argparse.ArgumentParser(
|
parser = argparse.ArgumentParser(
|
||||||
description='Enhance SKILL.md using Claude API',
|
description='Enhance SKILL.md using platform AI APIs',
|
||||||
formatter_class=argparse.RawDescriptionHelpFormatter,
|
formatter_class=argparse.RawDescriptionHelpFormatter,
|
||||||
epilog="""
|
epilog="""
|
||||||
Examples:
|
Examples:
|
||||||
# Using ANTHROPIC_API_KEY environment variable
|
# Claude (default)
|
||||||
export ANTHROPIC_API_KEY=sk-ant-...
|
export ANTHROPIC_API_KEY=sk-ant-...
|
||||||
skill-seekers enhance output/steam-inventory/
|
skill-seekers enhance output/react/
|
||||||
|
|
||||||
# Providing API key directly
|
# Gemini
|
||||||
|
export GOOGLE_API_KEY=AIzaSy...
|
||||||
|
skill-seekers enhance output/react/ --target gemini
|
||||||
|
|
||||||
|
# OpenAI
|
||||||
|
export OPENAI_API_KEY=sk-proj-...
|
||||||
|
skill-seekers enhance output/react/ --target openai
|
||||||
|
|
||||||
|
# With explicit API key
|
||||||
skill-seekers enhance output/react/ --api-key sk-ant-...
|
skill-seekers enhance output/react/ --api-key sk-ant-...
|
||||||
|
|
||||||
# Show what would be done (dry run)
|
# Dry run
|
||||||
skill-seekers enhance output/godot/ --dry-run
|
skill-seekers enhance output/godot/ --dry-run
|
||||||
"""
|
"""
|
||||||
)
|
)
|
||||||
@@ -214,7 +228,11 @@ Examples:
|
|||||||
parser.add_argument('skill_dir', type=str,
|
parser.add_argument('skill_dir', type=str,
|
||||||
help='Path to skill directory (e.g., output/steam-inventory/)')
|
help='Path to skill directory (e.g., output/steam-inventory/)')
|
||||||
parser.add_argument('--api-key', type=str,
|
parser.add_argument('--api-key', type=str,
|
||||||
help='Anthropic API key (or set ANTHROPIC_API_KEY env var)')
|
help='Platform API key (or set environment variable)')
|
||||||
|
parser.add_argument('--target',
|
||||||
|
choices=['claude', 'gemini', 'openai'],
|
||||||
|
default='claude',
|
||||||
|
help='Target LLM platform (default: claude)')
|
||||||
parser.add_argument('--dry-run', action='store_true',
|
parser.add_argument('--dry-run', action='store_true',
|
||||||
help='Show what would be done without calling API')
|
help='Show what would be done without calling API')
|
||||||
|
|
||||||
@@ -249,18 +267,57 @@ Examples:
|
|||||||
print(f" skill-seekers enhance {skill_dir}")
|
print(f" skill-seekers enhance {skill_dir}")
|
||||||
return
|
return
|
||||||
|
|
||||||
# Create enhancer and run
|
# Check if platform supports enhancement
|
||||||
try:
|
try:
|
||||||
enhancer = SkillEnhancer(skill_dir, api_key=args.api_key)
|
from skill_seekers.cli.adaptors import get_adaptor
|
||||||
success = enhancer.run()
|
|
||||||
|
adaptor = get_adaptor(args.target)
|
||||||
|
|
||||||
|
if not adaptor.supports_enhancement():
|
||||||
|
print(f"❌ Error: {adaptor.PLATFORM_NAME} does not support AI enhancement")
|
||||||
|
print(f"\nSupported platforms for enhancement:")
|
||||||
|
print(" - Claude AI (Anthropic)")
|
||||||
|
print(" - Google Gemini")
|
||||||
|
print(" - OpenAI ChatGPT")
|
||||||
|
sys.exit(1)
|
||||||
|
|
||||||
|
# Get API key
|
||||||
|
api_key = args.api_key
|
||||||
|
if not api_key:
|
||||||
|
api_key = os.environ.get(adaptor.get_env_var_name(), '').strip()
|
||||||
|
|
||||||
|
if not api_key:
|
||||||
|
print(f"❌ Error: {adaptor.get_env_var_name()} not set")
|
||||||
|
print(f"\nSet your API key for {adaptor.PLATFORM_NAME}:")
|
||||||
|
print(f" export {adaptor.get_env_var_name()}=...")
|
||||||
|
print("Or provide it directly:")
|
||||||
|
print(f" skill-seekers enhance {skill_dir} --target {args.target} --api-key ...")
|
||||||
|
sys.exit(1)
|
||||||
|
|
||||||
|
# Run enhancement using adaptor
|
||||||
|
print(f"\n{'='*60}")
|
||||||
|
print(f"ENHANCING SKILL: {skill_dir}")
|
||||||
|
print(f"Platform: {adaptor.PLATFORM_NAME}")
|
||||||
|
print(f"{'='*60}\n")
|
||||||
|
|
||||||
|
success = adaptor.enhance(Path(skill_dir), api_key)
|
||||||
|
|
||||||
|
if success:
|
||||||
|
print(f"\n✅ Enhancement complete!")
|
||||||
|
print(f"\nNext steps:")
|
||||||
|
print(f" 1. Review: {Path(skill_dir) / 'SKILL.md'}")
|
||||||
|
print(f" 2. If you don't like it, restore backup: {Path(skill_dir) / 'SKILL.md.backup'}")
|
||||||
|
print(f" 3. Package your skill:")
|
||||||
|
print(f" skill-seekers package {skill_dir}/ --target {args.target}")
|
||||||
|
|
||||||
sys.exit(0 if success else 1)
|
sys.exit(0 if success else 1)
|
||||||
|
|
||||||
|
except ImportError as e:
|
||||||
|
print(f"❌ Error: {e}")
|
||||||
|
print("\nAdaptor system not available. Reinstall skill-seekers.")
|
||||||
|
sys.exit(1)
|
||||||
except ValueError as e:
|
except ValueError as e:
|
||||||
print(f"❌ Error: {e}")
|
print(f"❌ Error: {e}")
|
||||||
print("\nSet your API key:")
|
|
||||||
print(" export ANTHROPIC_API_KEY=sk-ant-...")
|
|
||||||
print("Or provide it directly:")
|
|
||||||
print(f" skill-seekers enhance {skill_dir} --api-key sk-ant-...")
|
|
||||||
sys.exit(1)
|
sys.exit(1)
|
||||||
except Exception as e:
|
except Exception as e:
|
||||||
print(f"❌ Unexpected error: {e}")
|
print(f"❌ Unexpected error: {e}")
|
||||||
|
|||||||
@@ -36,17 +36,18 @@ except ImportError:
|
|||||||
from quality_checker import SkillQualityChecker, print_report
|
from quality_checker import SkillQualityChecker, print_report
|
||||||
|
|
||||||
|
|
||||||
def package_skill(skill_dir, open_folder_after=True, skip_quality_check=False):
|
def package_skill(skill_dir, open_folder_after=True, skip_quality_check=False, target='claude'):
|
||||||
"""
|
"""
|
||||||
Package a skill directory into a .zip file
|
Package a skill directory into platform-specific format
|
||||||
|
|
||||||
Args:
|
Args:
|
||||||
skill_dir: Path to skill directory
|
skill_dir: Path to skill directory
|
||||||
open_folder_after: Whether to open the output folder after packaging
|
open_folder_after: Whether to open the output folder after packaging
|
||||||
skip_quality_check: Skip quality checks before packaging
|
skip_quality_check: Skip quality checks before packaging
|
||||||
|
target: Target LLM platform ('claude', 'gemini', 'openai', 'markdown')
|
||||||
|
|
||||||
Returns:
|
Returns:
|
||||||
tuple: (success, zip_path) where success is bool and zip_path is Path or None
|
tuple: (success, package_path) where success is bool and package_path is Path or None
|
||||||
"""
|
"""
|
||||||
skill_path = Path(skill_dir)
|
skill_path = Path(skill_dir)
|
||||||
|
|
||||||
@@ -80,40 +81,43 @@ def package_skill(skill_dir, open_folder_after=True, skip_quality_check=False):
|
|||||||
print("=" * 60)
|
print("=" * 60)
|
||||||
print()
|
print()
|
||||||
|
|
||||||
# Create zip filename
|
# Get platform-specific adaptor
|
||||||
|
try:
|
||||||
|
from skill_seekers.cli.adaptors import get_adaptor
|
||||||
|
adaptor = get_adaptor(target)
|
||||||
|
except (ImportError, ValueError) as e:
|
||||||
|
print(f"❌ Error: {e}")
|
||||||
|
return False, None
|
||||||
|
|
||||||
|
# Create package using adaptor
|
||||||
skill_name = skill_path.name
|
skill_name = skill_path.name
|
||||||
zip_path = skill_path.parent / f"{skill_name}.zip"
|
output_dir = skill_path.parent
|
||||||
|
|
||||||
print(f"📦 Packaging skill: {skill_name}")
|
print(f"📦 Packaging skill: {skill_name}")
|
||||||
|
print(f" Target: {adaptor.PLATFORM_NAME}")
|
||||||
print(f" Source: {skill_path}")
|
print(f" Source: {skill_path}")
|
||||||
print(f" Output: {zip_path}")
|
|
||||||
|
|
||||||
# Create zip file
|
try:
|
||||||
with zipfile.ZipFile(zip_path, 'w', zipfile.ZIP_DEFLATED) as zf:
|
package_path = adaptor.package(skill_path, output_dir)
|
||||||
for root, dirs, files in os.walk(skill_path):
|
print(f" Output: {package_path}")
|
||||||
# Skip backup files
|
except Exception as e:
|
||||||
files = [f for f in files if not f.endswith('.backup')]
|
print(f"❌ Error creating package: {e}")
|
||||||
|
return False, None
|
||||||
|
|
||||||
for file in files:
|
# Get package size
|
||||||
file_path = Path(root) / file
|
package_size = package_path.stat().st_size
|
||||||
arcname = file_path.relative_to(skill_path)
|
print(f"\n✅ Package created: {package_path}")
|
||||||
zf.write(file_path, arcname)
|
print(f" Size: {package_size:,} bytes ({format_file_size(package_size)})")
|
||||||
print(f" + {arcname}")
|
|
||||||
|
|
||||||
# Get zip size
|
|
||||||
zip_size = zip_path.stat().st_size
|
|
||||||
print(f"\n✅ Package created: {zip_path}")
|
|
||||||
print(f" Size: {zip_size:,} bytes ({format_file_size(zip_size)})")
|
|
||||||
|
|
||||||
# Open folder in file browser
|
# Open folder in file browser
|
||||||
if open_folder_after:
|
if open_folder_after:
|
||||||
print(f"\n📂 Opening folder: {zip_path.parent}")
|
print(f"\n📂 Opening folder: {package_path.parent}")
|
||||||
open_folder(zip_path.parent)
|
open_folder(package_path.parent)
|
||||||
|
|
||||||
# Print upload instructions
|
# Print upload instructions
|
||||||
print_upload_instructions(zip_path)
|
print_upload_instructions(package_path)
|
||||||
|
|
||||||
return True, zip_path
|
return True, package_path
|
||||||
|
|
||||||
|
|
||||||
def main():
|
def main():
|
||||||
@@ -156,18 +160,26 @@ Examples:
|
|||||||
help='Skip quality checks before packaging'
|
help='Skip quality checks before packaging'
|
||||||
)
|
)
|
||||||
|
|
||||||
|
parser.add_argument(
|
||||||
|
'--target',
|
||||||
|
choices=['claude', 'gemini', 'openai', 'markdown'],
|
||||||
|
default='claude',
|
||||||
|
help='Target LLM platform (default: claude)'
|
||||||
|
)
|
||||||
|
|
||||||
parser.add_argument(
|
parser.add_argument(
|
||||||
'--upload',
|
'--upload',
|
||||||
action='store_true',
|
action='store_true',
|
||||||
help='Automatically upload to Claude after packaging (requires ANTHROPIC_API_KEY)'
|
help='Automatically upload after packaging (requires platform API key)'
|
||||||
)
|
)
|
||||||
|
|
||||||
args = parser.parse_args()
|
args = parser.parse_args()
|
||||||
|
|
||||||
success, zip_path = package_skill(
|
success, package_path = package_skill(
|
||||||
args.skill_dir,
|
args.skill_dir,
|
||||||
open_folder_after=not args.no_open,
|
open_folder_after=not args.no_open,
|
||||||
skip_quality_check=args.skip_quality_check
|
skip_quality_check=args.skip_quality_check,
|
||||||
|
target=args.target
|
||||||
)
|
)
|
||||||
|
|
||||||
if not success:
|
if not success:
|
||||||
@@ -175,42 +187,58 @@ Examples:
|
|||||||
|
|
||||||
# Auto-upload if requested
|
# Auto-upload if requested
|
||||||
if args.upload:
|
if args.upload:
|
||||||
# Check if API key is set BEFORE attempting upload
|
|
||||||
api_key = os.environ.get('ANTHROPIC_API_KEY', '').strip()
|
|
||||||
|
|
||||||
if not api_key:
|
|
||||||
# No API key - show helpful message but DON'T fail
|
|
||||||
print("\n" + "="*60)
|
|
||||||
print("💡 Automatic Upload")
|
|
||||||
print("="*60)
|
|
||||||
print()
|
|
||||||
print("To enable automatic upload:")
|
|
||||||
print(" 1. Get API key from https://console.anthropic.com/")
|
|
||||||
print(" 2. Set: export ANTHROPIC_API_KEY=sk-ant-...")
|
|
||||||
print(" 3. Run package_skill.py with --upload flag")
|
|
||||||
print()
|
|
||||||
print("For now, use manual upload (instructions above) ☝️")
|
|
||||||
print("="*60)
|
|
||||||
# Exit successfully - packaging worked!
|
|
||||||
sys.exit(0)
|
|
||||||
|
|
||||||
# API key exists - try upload
|
|
||||||
try:
|
try:
|
||||||
from upload_skill import upload_skill_api
|
from skill_seekers.cli.adaptors import get_adaptor
|
||||||
|
|
||||||
|
# Get adaptor for target platform
|
||||||
|
adaptor = get_adaptor(args.target)
|
||||||
|
|
||||||
|
# Get API key from environment
|
||||||
|
api_key = os.environ.get(adaptor.get_env_var_name(), '').strip()
|
||||||
|
|
||||||
|
if not api_key:
|
||||||
|
# No API key - show helpful message but DON'T fail
|
||||||
|
print("\n" + "="*60)
|
||||||
|
print("💡 Automatic Upload")
|
||||||
|
print("="*60)
|
||||||
|
print()
|
||||||
|
print(f"To enable automatic upload to {adaptor.PLATFORM_NAME}:")
|
||||||
|
print(f" 1. Get API key from the platform")
|
||||||
|
print(f" 2. Set: export {adaptor.get_env_var_name()}=...")
|
||||||
|
print(f" 3. Run package command with --upload flag")
|
||||||
|
print()
|
||||||
|
print("For now, use manual upload (instructions above) ☝️")
|
||||||
|
print("="*60)
|
||||||
|
# Exit successfully - packaging worked!
|
||||||
|
sys.exit(0)
|
||||||
|
|
||||||
|
# API key exists - try upload
|
||||||
print("\n" + "="*60)
|
print("\n" + "="*60)
|
||||||
upload_success, message = upload_skill_api(zip_path)
|
print(f"📤 Uploading to {adaptor.PLATFORM_NAME}...")
|
||||||
if not upload_success:
|
print("="*60)
|
||||||
print(f"❌ Upload failed: {message}")
|
|
||||||
|
result = adaptor.upload(package_path, api_key)
|
||||||
|
|
||||||
|
if result['success']:
|
||||||
|
print(f"\n✅ {result['message']}")
|
||||||
|
if result['url']:
|
||||||
|
print(f" View at: {result['url']}")
|
||||||
|
print("="*60)
|
||||||
|
sys.exit(0)
|
||||||
|
else:
|
||||||
|
print(f"\n❌ Upload failed: {result['message']}")
|
||||||
print()
|
print()
|
||||||
print("💡 Try manual upload instead (instructions above) ☝️")
|
print("💡 Try manual upload instead (instructions above) ☝️")
|
||||||
print("="*60)
|
print("="*60)
|
||||||
# Exit successfully - packaging worked even if upload failed
|
# Exit successfully - packaging worked even if upload failed
|
||||||
sys.exit(0)
|
sys.exit(0)
|
||||||
else:
|
|
||||||
print("="*60)
|
except ImportError as e:
|
||||||
sys.exit(0)
|
print(f"\n❌ Error: {e}")
|
||||||
except ImportError:
|
print("Install required dependencies for this platform")
|
||||||
print("\n❌ Error: upload_skill.py not found")
|
sys.exit(1)
|
||||||
|
except Exception as e:
|
||||||
|
print(f"\n❌ Upload error: {e}")
|
||||||
sys.exit(1)
|
sys.exit(1)
|
||||||
|
|
||||||
sys.exit(0)
|
sys.exit(0)
|
||||||
|
|||||||
@@ -1,15 +1,20 @@
|
|||||||
#!/usr/bin/env python3
|
#!/usr/bin/env python3
|
||||||
"""
|
"""
|
||||||
Automatic Skill Uploader
|
Automatic Skill Uploader
|
||||||
Uploads a skill .zip file to Claude using the Anthropic API
|
Uploads a skill package to LLM platforms (Claude, Gemini, OpenAI, etc.)
|
||||||
|
|
||||||
Usage:
|
Usage:
|
||||||
# Set API key (one-time)
|
# Claude (default)
|
||||||
export ANTHROPIC_API_KEY=sk-ant-...
|
export ANTHROPIC_API_KEY=sk-ant-...
|
||||||
|
skill-seekers upload output/react.zip
|
||||||
|
|
||||||
# Upload skill
|
# Gemini
|
||||||
python3 upload_skill.py output/react.zip
|
export GOOGLE_API_KEY=AIzaSy...
|
||||||
python3 upload_skill.py output/godot.zip
|
skill-seekers upload output/react-gemini.tar.gz --target gemini
|
||||||
|
|
||||||
|
# OpenAI
|
||||||
|
export OPENAI_API_KEY=sk-proj-...
|
||||||
|
skill-seekers upload output/react-openai.zip --target openai
|
||||||
"""
|
"""
|
||||||
|
|
||||||
import os
|
import os
|
||||||
@@ -21,108 +26,84 @@ from pathlib import Path
|
|||||||
# Import utilities
|
# Import utilities
|
||||||
try:
|
try:
|
||||||
from utils import (
|
from utils import (
|
||||||
get_api_key,
|
|
||||||
get_upload_url,
|
|
||||||
print_upload_instructions,
|
print_upload_instructions,
|
||||||
validate_zip_file
|
validate_zip_file
|
||||||
)
|
)
|
||||||
except ImportError:
|
except ImportError:
|
||||||
sys.path.insert(0, str(Path(__file__).parent))
|
sys.path.insert(0, str(Path(__file__).parent))
|
||||||
from utils import (
|
from utils import (
|
||||||
get_api_key,
|
|
||||||
get_upload_url,
|
|
||||||
print_upload_instructions,
|
print_upload_instructions,
|
||||||
validate_zip_file
|
validate_zip_file
|
||||||
)
|
)
|
||||||
|
|
||||||
|
|
||||||
def upload_skill_api(zip_path):
|
def upload_skill_api(package_path, target='claude', api_key=None):
|
||||||
"""
|
"""
|
||||||
Upload skill to Claude via Anthropic API
|
Upload skill package to LLM platform
|
||||||
|
|
||||||
Args:
|
Args:
|
||||||
zip_path: Path to skill .zip file
|
package_path: Path to skill package file
|
||||||
|
target: Target platform ('claude', 'gemini', 'openai')
|
||||||
|
api_key: Optional API key (otherwise read from environment)
|
||||||
|
|
||||||
Returns:
|
Returns:
|
||||||
tuple: (success, message)
|
tuple: (success, message)
|
||||||
"""
|
"""
|
||||||
# Check for requests library
|
|
||||||
try:
|
try:
|
||||||
import requests
|
from skill_seekers.cli.adaptors import get_adaptor
|
||||||
except ImportError:
|
except ImportError:
|
||||||
return False, "requests library not installed. Run: pip install requests"
|
return False, "Adaptor system not available. Reinstall skill-seekers."
|
||||||
|
|
||||||
# Validate zip file
|
# Get platform-specific adaptor
|
||||||
is_valid, error_msg = validate_zip_file(zip_path)
|
try:
|
||||||
if not is_valid:
|
adaptor = get_adaptor(target)
|
||||||
return False, error_msg
|
except ValueError as e:
|
||||||
|
return False, str(e)
|
||||||
|
|
||||||
# Get API key
|
# Get API key
|
||||||
api_key = get_api_key()
|
|
||||||
if not api_key:
|
if not api_key:
|
||||||
return False, "ANTHROPIC_API_KEY not set. Run: export ANTHROPIC_API_KEY=sk-ant-..."
|
api_key = os.environ.get(adaptor.get_env_var_name(), '').strip()
|
||||||
|
|
||||||
zip_path = Path(zip_path)
|
if not api_key:
|
||||||
skill_name = zip_path.stem
|
return False, f"{adaptor.get_env_var_name()} not set. Export your API key first."
|
||||||
|
|
||||||
|
# Validate API key format
|
||||||
|
if not adaptor.validate_api_key(api_key):
|
||||||
|
return False, f"Invalid API key format for {adaptor.PLATFORM_NAME}"
|
||||||
|
|
||||||
|
package_path = Path(package_path)
|
||||||
|
|
||||||
|
# Basic file validation
|
||||||
|
if not package_path.exists():
|
||||||
|
return False, f"File not found: {package_path}"
|
||||||
|
|
||||||
|
skill_name = package_path.stem
|
||||||
|
|
||||||
print(f"📤 Uploading skill: {skill_name}")
|
print(f"📤 Uploading skill: {skill_name}")
|
||||||
print(f" Source: {zip_path}")
|
print(f" Target: {adaptor.PLATFORM_NAME}")
|
||||||
print(f" Size: {zip_path.stat().st_size:,} bytes")
|
print(f" Source: {package_path}")
|
||||||
|
print(f" Size: {package_path.stat().st_size:,} bytes")
|
||||||
print()
|
print()
|
||||||
|
|
||||||
# Prepare API request
|
# Upload using adaptor
|
||||||
api_url = "https://api.anthropic.com/v1/skills"
|
print(f"⏳ Uploading to {adaptor.PLATFORM_NAME}...")
|
||||||
headers = {
|
|
||||||
"x-api-key": api_key,
|
|
||||||
"anthropic-version": "2023-06-01",
|
|
||||||
"anthropic-beta": "skills-2025-10-02"
|
|
||||||
}
|
|
||||||
|
|
||||||
try:
|
try:
|
||||||
# Read zip file
|
result = adaptor.upload(package_path, api_key)
|
||||||
with open(zip_path, 'rb') as f:
|
|
||||||
zip_data = f.read()
|
|
||||||
|
|
||||||
# Upload skill
|
if result['success']:
|
||||||
print("⏳ Uploading to Anthropic API...")
|
|
||||||
|
|
||||||
files = {
|
|
||||||
'files[]': (zip_path.name, zip_data, 'application/zip')
|
|
||||||
}
|
|
||||||
|
|
||||||
response = requests.post(
|
|
||||||
api_url,
|
|
||||||
headers=headers,
|
|
||||||
files=files,
|
|
||||||
timeout=60
|
|
||||||
)
|
|
||||||
|
|
||||||
# Check response
|
|
||||||
if response.status_code == 200:
|
|
||||||
print()
|
print()
|
||||||
print("✅ Skill uploaded successfully!")
|
print(f"✅ {result['message']}")
|
||||||
print()
|
print()
|
||||||
print("Your skill is now available in Claude at:")
|
if result['url']:
|
||||||
print(f" {get_upload_url()}")
|
print("Your skill is now available at:")
|
||||||
|
print(f" {result['url']}")
|
||||||
|
if result['skill_id']:
|
||||||
|
print(f" Skill ID: {result['skill_id']}")
|
||||||
print()
|
print()
|
||||||
return True, "Upload successful"
|
return True, "Upload successful"
|
||||||
|
|
||||||
elif response.status_code == 401:
|
|
||||||
return False, "Authentication failed. Check your ANTHROPIC_API_KEY"
|
|
||||||
|
|
||||||
elif response.status_code == 400:
|
|
||||||
error_msg = response.json().get('error', {}).get('message', 'Unknown error')
|
|
||||||
return False, f"Invalid skill format: {error_msg}"
|
|
||||||
|
|
||||||
else:
|
else:
|
||||||
error_msg = response.json().get('error', {}).get('message', 'Unknown error')
|
return False, result['message']
|
||||||
return False, f"Upload failed ({response.status_code}): {error_msg}"
|
|
||||||
|
|
||||||
except requests.exceptions.Timeout:
|
|
||||||
return False, "Upload timed out. Try again or use manual upload"
|
|
||||||
|
|
||||||
except requests.exceptions.ConnectionError:
|
|
||||||
return False, "Connection error. Check your internet connection"
|
|
||||||
|
|
||||||
except Exception as e:
|
except Exception as e:
|
||||||
return False, f"Unexpected error: {str(e)}"
|
return False, f"Unexpected error: {str(e)}"
|
||||||
@@ -130,36 +111,55 @@ def upload_skill_api(zip_path):
|
|||||||
|
|
||||||
def main():
|
def main():
|
||||||
parser = argparse.ArgumentParser(
|
parser = argparse.ArgumentParser(
|
||||||
description="Upload a skill .zip file to Claude via Anthropic API",
|
description="Upload a skill package to LLM platforms",
|
||||||
formatter_class=argparse.RawDescriptionHelpFormatter,
|
formatter_class=argparse.RawDescriptionHelpFormatter,
|
||||||
epilog="""
|
epilog="""
|
||||||
Setup:
|
Setup:
|
||||||
1. Get your Anthropic API key from https://console.anthropic.com/
|
Claude:
|
||||||
2. Set the API key:
|
export ANTHROPIC_API_KEY=sk-ant-...
|
||||||
export ANTHROPIC_API_KEY=sk-ant-...
|
|
||||||
|
Gemini:
|
||||||
|
export GOOGLE_API_KEY=AIzaSy...
|
||||||
|
|
||||||
|
OpenAI:
|
||||||
|
export OPENAI_API_KEY=sk-proj-...
|
||||||
|
|
||||||
Examples:
|
Examples:
|
||||||
# Upload skill
|
# Upload to Claude (default)
|
||||||
python3 upload_skill.py output/react.zip
|
skill-seekers upload output/react.zip
|
||||||
|
|
||||||
# Upload with explicit path
|
# Upload to Gemini
|
||||||
python3 upload_skill.py /path/to/skill.zip
|
skill-seekers upload output/react-gemini.tar.gz --target gemini
|
||||||
|
|
||||||
Requirements:
|
# Upload to OpenAI
|
||||||
- ANTHROPIC_API_KEY environment variable must be set
|
skill-seekers upload output/react-openai.zip --target openai
|
||||||
- requests library (pip install requests)
|
|
||||||
|
# Upload with explicit API key
|
||||||
|
skill-seekers upload output/react.zip --api-key sk-ant-...
|
||||||
"""
|
"""
|
||||||
)
|
)
|
||||||
|
|
||||||
parser.add_argument(
|
parser.add_argument(
|
||||||
'zip_file',
|
'package_file',
|
||||||
help='Path to skill .zip file (e.g., output/react.zip)'
|
help='Path to skill package file (e.g., output/react.zip)'
|
||||||
|
)
|
||||||
|
|
||||||
|
parser.add_argument(
|
||||||
|
'--target',
|
||||||
|
choices=['claude', 'gemini', 'openai'],
|
||||||
|
default='claude',
|
||||||
|
help='Target LLM platform (default: claude)'
|
||||||
|
)
|
||||||
|
|
||||||
|
parser.add_argument(
|
||||||
|
'--api-key',
|
||||||
|
help='Platform API key (or set environment variable)'
|
||||||
)
|
)
|
||||||
|
|
||||||
args = parser.parse_args()
|
args = parser.parse_args()
|
||||||
|
|
||||||
# Upload skill
|
# Upload skill
|
||||||
success, message = upload_skill_api(args.zip_file)
|
success, message = upload_skill_api(args.package_file, args.target, args.api_key)
|
||||||
|
|
||||||
if success:
|
if success:
|
||||||
sys.exit(0)
|
sys.exit(0)
|
||||||
@@ -167,7 +167,7 @@ Requirements:
|
|||||||
print(f"\n❌ Upload failed: {message}")
|
print(f"\n❌ Upload failed: {message}")
|
||||||
print()
|
print()
|
||||||
print("📝 Manual upload instructions:")
|
print("📝 Manual upload instructions:")
|
||||||
print_upload_instructions(args.zip_file)
|
print_upload_instructions(args.package_file)
|
||||||
sys.exit(1)
|
sys.exit(1)
|
||||||
|
|
||||||
|
|
||||||
|
|||||||
1
tests/test_adaptors/__init__.py
Normal file
1
tests/test_adaptors/__init__.py
Normal file
@@ -0,0 +1 @@
|
|||||||
|
# Adaptor tests package
|
||||||
122
tests/test_adaptors/test_base.py
Normal file
122
tests/test_adaptors/test_base.py
Normal file
@@ -0,0 +1,122 @@
|
|||||||
|
#!/usr/bin/env python3
|
||||||
|
"""
|
||||||
|
Tests for base adaptor and registry
|
||||||
|
"""
|
||||||
|
|
||||||
|
import unittest
|
||||||
|
from pathlib import Path
|
||||||
|
|
||||||
|
from skill_seekers.cli.adaptors import (
|
||||||
|
get_adaptor,
|
||||||
|
list_platforms,
|
||||||
|
is_platform_available,
|
||||||
|
SkillAdaptor,
|
||||||
|
SkillMetadata,
|
||||||
|
ADAPTORS
|
||||||
|
)
|
||||||
|
|
||||||
|
|
||||||
|
class TestSkillMetadata(unittest.TestCase):
|
||||||
|
"""Test SkillMetadata dataclass"""
|
||||||
|
|
||||||
|
def test_basic_metadata(self):
|
||||||
|
"""Test basic metadata creation"""
|
||||||
|
metadata = SkillMetadata(
|
||||||
|
name="test-skill",
|
||||||
|
description="Test skill description"
|
||||||
|
)
|
||||||
|
|
||||||
|
self.assertEqual(metadata.name, "test-skill")
|
||||||
|
self.assertEqual(metadata.description, "Test skill description")
|
||||||
|
self.assertEqual(metadata.version, "1.0.0") # default
|
||||||
|
self.assertIsNone(metadata.author) # default
|
||||||
|
self.assertEqual(metadata.tags, []) # default
|
||||||
|
|
||||||
|
def test_full_metadata(self):
|
||||||
|
"""Test metadata with all fields"""
|
||||||
|
metadata = SkillMetadata(
|
||||||
|
name="react",
|
||||||
|
description="React documentation",
|
||||||
|
version="2.0.0",
|
||||||
|
author="Test Author",
|
||||||
|
tags=["react", "javascript", "web"]
|
||||||
|
)
|
||||||
|
|
||||||
|
self.assertEqual(metadata.name, "react")
|
||||||
|
self.assertEqual(metadata.description, "React documentation")
|
||||||
|
self.assertEqual(metadata.version, "2.0.0")
|
||||||
|
self.assertEqual(metadata.author, "Test Author")
|
||||||
|
self.assertEqual(metadata.tags, ["react", "javascript", "web"])
|
||||||
|
|
||||||
|
|
||||||
|
class TestAdaptorRegistry(unittest.TestCase):
|
||||||
|
"""Test adaptor registry and factory"""
|
||||||
|
|
||||||
|
def test_list_platforms(self):
|
||||||
|
"""Test listing available platforms"""
|
||||||
|
platforms = list_platforms()
|
||||||
|
|
||||||
|
self.assertIsInstance(platforms, list)
|
||||||
|
# Claude should always be available
|
||||||
|
self.assertIn('claude', platforms)
|
||||||
|
|
||||||
|
def test_is_platform_available(self):
|
||||||
|
"""Test checking platform availability"""
|
||||||
|
# Claude should be available
|
||||||
|
self.assertTrue(is_platform_available('claude'))
|
||||||
|
|
||||||
|
# Unknown platform should not be available
|
||||||
|
self.assertFalse(is_platform_available('unknown_platform'))
|
||||||
|
|
||||||
|
def test_get_adaptor_claude(self):
|
||||||
|
"""Test getting Claude adaptor"""
|
||||||
|
adaptor = get_adaptor('claude')
|
||||||
|
|
||||||
|
self.assertIsInstance(adaptor, SkillAdaptor)
|
||||||
|
self.assertEqual(adaptor.PLATFORM, 'claude')
|
||||||
|
self.assertEqual(adaptor.PLATFORM_NAME, 'Claude AI (Anthropic)')
|
||||||
|
|
||||||
|
def test_get_adaptor_invalid(self):
|
||||||
|
"""Test getting invalid adaptor raises error"""
|
||||||
|
with self.assertRaises(ValueError) as ctx:
|
||||||
|
get_adaptor('invalid_platform')
|
||||||
|
|
||||||
|
error_msg = str(ctx.exception)
|
||||||
|
self.assertIn('invalid_platform', error_msg)
|
||||||
|
self.assertIn('not supported', error_msg)
|
||||||
|
|
||||||
|
def test_get_adaptor_with_config(self):
|
||||||
|
"""Test getting adaptor with custom config"""
|
||||||
|
config = {'custom_setting': 'value'}
|
||||||
|
adaptor = get_adaptor('claude', config)
|
||||||
|
|
||||||
|
self.assertEqual(adaptor.config, config)
|
||||||
|
|
||||||
|
|
||||||
|
class TestBaseAdaptorInterface(unittest.TestCase):
|
||||||
|
"""Test base adaptor interface methods"""
|
||||||
|
|
||||||
|
def setUp(self):
|
||||||
|
"""Set up test adaptor"""
|
||||||
|
self.adaptor = get_adaptor('claude')
|
||||||
|
|
||||||
|
def test_validate_api_key_default(self):
|
||||||
|
"""Test default API key validation"""
|
||||||
|
# Claude adaptor overrides this
|
||||||
|
self.assertTrue(self.adaptor.validate_api_key('sk-ant-test123'))
|
||||||
|
self.assertFalse(self.adaptor.validate_api_key('invalid'))
|
||||||
|
|
||||||
|
def test_get_env_var_name(self):
|
||||||
|
"""Test environment variable name"""
|
||||||
|
env_var = self.adaptor.get_env_var_name()
|
||||||
|
|
||||||
|
self.assertEqual(env_var, 'ANTHROPIC_API_KEY')
|
||||||
|
|
||||||
|
def test_supports_enhancement(self):
|
||||||
|
"""Test enhancement support check"""
|
||||||
|
# Claude supports enhancement
|
||||||
|
self.assertTrue(self.adaptor.supports_enhancement())
|
||||||
|
|
||||||
|
|
||||||
|
if __name__ == '__main__':
|
||||||
|
unittest.main()
|
||||||
Reference in New Issue
Block a user