feat: expand platform coverage with 8 new adaptors, 7 new CLI agents, and OpenCode skill tools
Phase 1 - OpenCode Integration: - Add OpenCodeAdaptor with directory-based packaging and dual-format YAML frontmatter - Kebab-case name validation matching OpenCode's regex spec Phase 2 - OpenAI-Compatible LLM Platforms: - Extract OpenAICompatibleAdaptor base class from MiniMax (shared format/package/upload/enhance) - Refactor MiniMax to ~20 lines of constants inheriting from base - Add 6 new LLM adaptors: Kimi, DeepSeek, Qwen, OpenRouter, Together AI, Fireworks AI - All use OpenAI-compatible API with platform-specific constants Phase 3 - CLI Agent Expansion: - Add 7 new install-agent paths: roo, cline, aider, bolt, kilo, continue, kimi-code - Total agents: 11 -> 18 Phase 4 - Advanced Features: - OpenCode skill splitter (auto-split large docs into focused sub-skills with router) - Bi-directional skill format converter (import/export between OpenCode and any platform) - GitHub Actions template for automated skill updates Totals: 12 --target platforms, 18 --agent paths, 2915 tests passing Co-Authored-By: Claude Opus 4.6 (1M context) <noreply@anthropic.com>
This commit is contained in:
51
tests/test_adaptors/test_deepseek_adaptor.py
Normal file
51
tests/test_adaptors/test_deepseek_adaptor.py
Normal file
@@ -0,0 +1,51 @@
|
||||
#!/usr/bin/env python3
|
||||
"""Tests for DeepSeek AI adaptor"""
|
||||
|
||||
import json
|
||||
import tempfile
|
||||
import unittest
|
||||
import zipfile
|
||||
from pathlib import Path
|
||||
|
||||
from skill_seekers.cli.adaptors import get_adaptor, is_platform_available
|
||||
|
||||
|
||||
class TestDeepSeekAdaptor(unittest.TestCase):
    """Tests for the DeepSeek AI adaptor registered under the "deepseek" key."""

    def setUp(self):
        # Fresh adaptor per test so no state leaks between test methods.
        self.adaptor = get_adaptor("deepseek")

    def test_platform_info(self):
        """Platform constants match the DeepSeek service definition."""
        self.assertEqual(self.adaptor.PLATFORM, "deepseek")
        self.assertEqual(self.adaptor.PLATFORM_NAME, "DeepSeek AI")
        self.assertIn("deepseek", self.adaptor.DEFAULT_API_ENDPOINT)
        self.assertEqual(self.adaptor.DEFAULT_MODEL, "deepseek-chat")

    def test_platform_available(self):
        """The adaptor registry reports the platform as available."""
        self.assertTrue(is_platform_available("deepseek"))

    def test_env_var_name(self):
        """The API key is read from the DEEPSEEK_API_KEY environment variable."""
        self.assertEqual(self.adaptor.get_env_var_name(), "DEEPSEEK_API_KEY")

    def test_supports_enhancement(self):
        """DeepSeek supports LLM-based skill enhancement."""
        self.assertTrue(self.adaptor.supports_enhancement())

    def test_package_metadata(self):
        """Packaging produces a zip whose metadata JSON carries the platform constants."""
        with tempfile.TemporaryDirectory() as temp_dir:
            skill_dir = Path(temp_dir) / "test-skill"
            skill_dir.mkdir()
            (skill_dir / "SKILL.md").write_text("Test")

            output_dir = Path(temp_dir) / "output"
            output_dir.mkdir()

            pkg = self.adaptor.package(skill_dir, output_dir)
            self.assertIn("deepseek", pkg.name)

            with zipfile.ZipFile(pkg) as zf:
                meta = json.loads(zf.read("deepseek_metadata.json"))
                self.assertEqual(meta["platform"], "deepseek")
                self.assertIn("deepseek", meta["api_base"])


if __name__ == "__main__":
    unittest.main()
51
tests/test_adaptors/test_fireworks_adaptor.py
Normal file
51
tests/test_adaptors/test_fireworks_adaptor.py
Normal file
@@ -0,0 +1,51 @@
|
||||
#!/usr/bin/env python3
|
||||
"""Tests for Fireworks AI adaptor"""
|
||||
|
||||
import json
|
||||
import tempfile
|
||||
import unittest
|
||||
import zipfile
|
||||
from pathlib import Path
|
||||
|
||||
from skill_seekers.cli.adaptors import get_adaptor, is_platform_available
|
||||
|
||||
|
||||
class TestFireworksAdaptor(unittest.TestCase):
    """Tests for the Fireworks AI adaptor registered under the "fireworks" key."""

    def setUp(self):
        # Fresh adaptor per test so no state leaks between test methods.
        self.adaptor = get_adaptor("fireworks")

    def test_platform_info(self):
        """Platform constants match the Fireworks AI service definition."""
        self.assertEqual(self.adaptor.PLATFORM, "fireworks")
        self.assertEqual(self.adaptor.PLATFORM_NAME, "Fireworks AI")
        self.assertIn("fireworks", self.adaptor.DEFAULT_API_ENDPOINT)
        # Default model is a Llama variant; only assert the family, not the exact tag.
        self.assertIn("llama", self.adaptor.DEFAULT_MODEL.lower())

    def test_platform_available(self):
        """The adaptor registry reports the platform as available."""
        self.assertTrue(is_platform_available("fireworks"))

    def test_env_var_name(self):
        """The API key is read from the FIREWORKS_API_KEY environment variable."""
        self.assertEqual(self.adaptor.get_env_var_name(), "FIREWORKS_API_KEY")

    def test_supports_enhancement(self):
        """Fireworks supports LLM-based skill enhancement."""
        self.assertTrue(self.adaptor.supports_enhancement())

    def test_package_metadata(self):
        """Packaging produces a zip whose metadata JSON carries the platform constants."""
        with tempfile.TemporaryDirectory() as temp_dir:
            skill_dir = Path(temp_dir) / "test-skill"
            skill_dir.mkdir()
            (skill_dir / "SKILL.md").write_text("Test")

            output_dir = Path(temp_dir) / "output"
            output_dir.mkdir()

            pkg = self.adaptor.package(skill_dir, output_dir)
            self.assertIn("fireworks", pkg.name)

            with zipfile.ZipFile(pkg) as zf:
                meta = json.loads(zf.read("fireworks_metadata.json"))
                self.assertEqual(meta["platform"], "fireworks")
                self.assertIn("fireworks", meta["api_base"])


if __name__ == "__main__":
    unittest.main()
51
tests/test_adaptors/test_kimi_adaptor.py
Normal file
51
tests/test_adaptors/test_kimi_adaptor.py
Normal file
@@ -0,0 +1,51 @@
|
||||
#!/usr/bin/env python3
|
||||
"""Tests for Kimi (Moonshot AI) adaptor"""
|
||||
|
||||
import json
|
||||
import tempfile
|
||||
import unittest
|
||||
import zipfile
|
||||
from pathlib import Path
|
||||
|
||||
from skill_seekers.cli.adaptors import get_adaptor, is_platform_available
|
||||
|
||||
|
||||
class TestKimiAdaptor(unittest.TestCase):
    """Tests for the Kimi (Moonshot AI) adaptor registered under the "kimi" key."""

    def setUp(self):
        # Fresh adaptor per test so no state leaks between test methods.
        self.adaptor = get_adaptor("kimi")

    def test_platform_info(self):
        """Platform constants match the Moonshot AI service definition."""
        self.assertEqual(self.adaptor.PLATFORM, "kimi")
        self.assertEqual(self.adaptor.PLATFORM_NAME, "Kimi (Moonshot AI)")
        self.assertIn("moonshot", self.adaptor.DEFAULT_API_ENDPOINT)
        self.assertEqual(self.adaptor.DEFAULT_MODEL, "moonshot-v1-128k")

    def test_platform_available(self):
        """The adaptor registry reports the platform as available."""
        self.assertTrue(is_platform_available("kimi"))

    def test_env_var_name(self):
        """The API key is read from the MOONSHOT_API_KEY environment variable."""
        self.assertEqual(self.adaptor.get_env_var_name(), "MOONSHOT_API_KEY")

    def test_supports_enhancement(self):
        """Kimi supports LLM-based skill enhancement."""
        self.assertTrue(self.adaptor.supports_enhancement())

    def test_package_metadata(self):
        """Packaging produces a zip whose metadata JSON carries the platform constants."""
        with tempfile.TemporaryDirectory() as temp_dir:
            skill_dir = Path(temp_dir) / "test-skill"
            skill_dir.mkdir()
            (skill_dir / "SKILL.md").write_text("Test")

            output_dir = Path(temp_dir) / "output"
            output_dir.mkdir()

            pkg = self.adaptor.package(skill_dir, output_dir)
            self.assertIn("kimi", pkg.name)

            with zipfile.ZipFile(pkg) as zf:
                meta = json.loads(zf.read("kimi_metadata.json"))
                self.assertEqual(meta["platform"], "kimi")
                # API base points at Moonshot's endpoint, not the "kimi" brand name.
                self.assertIn("moonshot", meta["api_base"])


if __name__ == "__main__":
    unittest.main()
224
tests/test_adaptors/test_openai_compatible_base.py
Normal file
224
tests/test_adaptors/test_openai_compatible_base.py
Normal file
@@ -0,0 +1,224 @@
|
||||
#!/usr/bin/env python3
|
||||
"""
|
||||
Tests for OpenAI-compatible base adaptor class.
|
||||
|
||||
Tests shared behavior across all OpenAI-compatible platforms.
|
||||
"""
|
||||
|
||||
import json
|
||||
import sys
|
||||
import tempfile
|
||||
import unittest
|
||||
import zipfile
|
||||
from pathlib import Path
|
||||
from unittest.mock import MagicMock, patch
|
||||
|
||||
from skill_seekers.cli.adaptors.openai_compatible import OpenAICompatibleAdaptor
|
||||
from skill_seekers.cli.adaptors.base import SkillMetadata
|
||||
|
||||
|
||||
class ConcreteTestAdaptor(OpenAICompatibleAdaptor):
    """Concrete subclass for testing the base class.

    Only platform constants are overridden; all behavior under test comes
    from OpenAICompatibleAdaptor itself.
    """

    PLATFORM = "testplatform"
    PLATFORM_NAME = "Test Platform"
    DEFAULT_API_ENDPOINT = "https://api.test.example.com/v1"
    DEFAULT_MODEL = "test-model-v1"
    ENV_VAR_NAME = "TEST_PLATFORM_API_KEY"
    PLATFORM_URL = "https://test.example.com/"
class TestOpenAICompatibleBase(unittest.TestCase):
    """Test shared OpenAI-compatible base behavior"""

    def setUp(self):
        # Exercise the base class through a minimal concrete subclass.
        self.adaptor = ConcreteTestAdaptor()

    def test_constants_used_in_env_var(self):
        """get_env_var_name() reflects the subclass's ENV_VAR_NAME constant."""
        self.assertEqual(self.adaptor.get_env_var_name(), "TEST_PLATFORM_API_KEY")

    def test_supports_enhancement(self):
        """All OpenAI-compatible adaptors support enhancement."""
        self.assertTrue(self.adaptor.supports_enhancement())

    def test_validate_api_key_valid(self):
        """A plausibly long key string validates."""
        self.assertTrue(self.adaptor.validate_api_key("sk-some-long-api-key-string"))

    def test_validate_api_key_invalid(self):
        """Empty, whitespace-only, and too-short keys are rejected."""
        self.assertFalse(self.adaptor.validate_api_key(""))
        self.assertFalse(self.adaptor.validate_api_key(" "))
        self.assertFalse(self.adaptor.validate_api_key("short"))

    def test_format_skill_md_no_frontmatter(self):
        """Formatted output is a plain system prompt without YAML frontmatter."""
        with tempfile.TemporaryDirectory() as temp_dir:
            skill_dir = Path(temp_dir)
            (skill_dir / "references").mkdir()
            (skill_dir / "references" / "test.md").write_text("# Test")

            metadata = SkillMetadata(name="test-skill", description="Test description")
            formatted = self.adaptor.format_skill_md(skill_dir, metadata)

            self.assertFalse(formatted.startswith("---"))
            self.assertIn("You are an expert assistant", formatted)
            self.assertIn("test-skill", formatted)

    def test_format_skill_md_with_existing_content(self):
        """Existing SKILL.md content still yields the system-prompt preamble."""
        with tempfile.TemporaryDirectory() as temp_dir:
            skill_dir = Path(temp_dir)
            existing = "# Existing\n\n" + "x" * 200
            (skill_dir / "SKILL.md").write_text(existing)

            metadata = SkillMetadata(name="test", description="Test")
            formatted = self.adaptor.format_skill_md(skill_dir, metadata)

            self.assertIn("You are an expert assistant", formatted)

    def test_package_creates_zip_with_platform_name(self):
        """package() produces a .zip whose filename embeds the platform key."""
        with tempfile.TemporaryDirectory() as temp_dir:
            skill_dir = Path(temp_dir) / "test-skill"
            skill_dir.mkdir()
            (skill_dir / "SKILL.md").write_text("Test instructions")
            (skill_dir / "references").mkdir()
            (skill_dir / "references" / "guide.md").write_text("# Guide")

            output_dir = Path(temp_dir) / "output"
            output_dir.mkdir()

            package_path = self.adaptor.package(skill_dir, output_dir)

            self.assertTrue(package_path.exists())
            self.assertTrue(str(package_path).endswith(".zip"))
            self.assertIn("testplatform", package_path.name)

    def test_package_metadata_uses_constants(self):
        """The packaged metadata JSON carries the subclass's platform constants."""
        with tempfile.TemporaryDirectory() as temp_dir:
            skill_dir = Path(temp_dir) / "test-skill"
            skill_dir.mkdir()
            (skill_dir / "SKILL.md").write_text("Test")
            (skill_dir / "references").mkdir()
            (skill_dir / "references" / "guide.md").write_text("# Guide")

            output_dir = Path(temp_dir) / "output"
            output_dir.mkdir()

            package_path = self.adaptor.package(skill_dir, output_dir)

            with zipfile.ZipFile(package_path, "r") as zf:
                metadata_content = zf.read("testplatform_metadata.json").decode("utf-8")
                metadata = json.loads(metadata_content)
                self.assertEqual(metadata["platform"], "testplatform")
                self.assertEqual(metadata["model"], "test-model-v1")
                self.assertEqual(metadata["api_base"], "https://api.test.example.com/v1")

    def test_package_zip_structure(self):
        """The zip contains system instructions, metadata, and knowledge files."""
        with tempfile.TemporaryDirectory() as temp_dir:
            skill_dir = Path(temp_dir) / "test-skill"
            skill_dir.mkdir()
            (skill_dir / "SKILL.md").write_text("Test")
            (skill_dir / "references").mkdir()
            (skill_dir / "references" / "test.md").write_text("# Test")

            output_dir = Path(temp_dir) / "output"
            output_dir.mkdir()

            package_path = self.adaptor.package(skill_dir, output_dir)

            with zipfile.ZipFile(package_path, "r") as zf:
                names = zf.namelist()
                self.assertIn("system_instructions.txt", names)
                self.assertIn("testplatform_metadata.json", names)
                self.assertTrue(any("knowledge_files" in n for n in names))

    def test_upload_missing_file(self):
        """Uploading a nonexistent path fails with a 'not found' message."""
        result = self.adaptor.upload(Path("/nonexistent/file.zip"), "test-key")
        self.assertFalse(result["success"])
        self.assertIn("not found", result["message"].lower())

    def test_upload_wrong_format(self):
        """Non-zip archives are rejected before any network activity."""
        with tempfile.NamedTemporaryFile(suffix=".tar.gz") as tmp:
            result = self.adaptor.upload(Path(tmp.name), "test-key")
            self.assertFalse(result["success"])
            self.assertIn("not a zip", result["message"].lower())

    def test_upload_missing_library(self):
        """A missing `openai` package produces a helpful failure message."""
        with tempfile.NamedTemporaryFile(suffix=".zip") as tmp:
            # Setting the module entry to None makes `import openai` raise.
            with patch.dict(sys.modules, {"openai": None}):
                result = self.adaptor.upload(Path(tmp.name), "test-key")
                self.assertFalse(result["success"])
                self.assertIn("openai", result["message"])

    @patch("openai.OpenAI")
    def test_upload_success_mocked(self, mock_openai_class):
        """A mocked OpenAI client validates the key and returns the platform URL."""
        mock_client = MagicMock()
        mock_response = MagicMock()
        mock_response.choices = [MagicMock()]
        mock_response.choices[0].message.content = "Ready"
        mock_client.chat.completions.create.return_value = mock_response
        mock_openai_class.return_value = mock_client

        with tempfile.TemporaryDirectory() as temp_dir:
            skill_dir = Path(temp_dir) / "test-skill"
            skill_dir.mkdir()
            (skill_dir / "SKILL.md").write_text("Test")
            (skill_dir / "references").mkdir()
            (skill_dir / "references" / "test.md").write_text("# Test")

            output_dir = Path(temp_dir) / "output"
            output_dir.mkdir()

            package_path = self.adaptor.package(skill_dir, output_dir)
            result = self.adaptor.upload(package_path, "test-long-api-key-string")

            self.assertTrue(result["success"])
            self.assertEqual(result["url"], "https://test.example.com/")
            self.assertIn("validated", result["message"])

    def test_read_reference_files(self):
        """All markdown files in the references directory are read."""
        with tempfile.TemporaryDirectory() as temp_dir:
            refs_dir = Path(temp_dir)
            (refs_dir / "guide.md").write_text("# Guide\nContent")
            (refs_dir / "api.md").write_text("# API\nDocs")

            refs = self.adaptor._read_reference_files(refs_dir)
            self.assertEqual(len(refs), 2)

    def test_read_reference_files_truncation(self):
        """Oversized reference files are truncated with a marker."""
        with tempfile.TemporaryDirectory() as temp_dir:
            (Path(temp_dir) / "large.md").write_text("x" * 50000)
            refs = self.adaptor._read_reference_files(Path(temp_dir))
            self.assertIn("truncated", refs["large.md"])
            self.assertLessEqual(len(refs["large.md"]), 31000)

    def test_build_enhancement_prompt_uses_platform_name(self):
        """The enhancement prompt names the platform via PLATFORM_NAME."""
        refs = {"test.md": "# Test\nContent"}
        prompt = self.adaptor._build_enhancement_prompt("skill", refs, None)
        self.assertIn("Test Platform", prompt)

    @patch("openai.OpenAI")
    def test_enhance_success_mocked(self, mock_openai_class):
        """enhance() rewrites SKILL.md from the LLM response and keeps a backup."""
        mock_client = MagicMock()
        mock_response = MagicMock()
        mock_response.choices = [MagicMock()]
        mock_response.choices[0].message.content = "Enhanced content"
        mock_client.chat.completions.create.return_value = mock_response
        mock_openai_class.return_value = mock_client

        with tempfile.TemporaryDirectory() as temp_dir:
            skill_dir = Path(temp_dir)
            refs_dir = skill_dir / "references"
            refs_dir.mkdir()
            (refs_dir / "test.md").write_text("# Test\nContent")
            (skill_dir / "SKILL.md").write_text("Original")

            success = self.adaptor.enhance(skill_dir, "test-api-key")

            self.assertTrue(success)
            self.assertEqual((skill_dir / "SKILL.md").read_text(), "Enhanced content")
            self.assertTrue((skill_dir / "SKILL.md.backup").exists())

    def test_enhance_missing_references(self):
        """enhance() fails gracefully when there is no references directory."""
        with tempfile.TemporaryDirectory() as temp_dir:
            self.assertFalse(self.adaptor.enhance(Path(temp_dir), "key"))


if __name__ == "__main__":
    unittest.main()
210
tests/test_adaptors/test_opencode_adaptor.py
Normal file
210
tests/test_adaptors/test_opencode_adaptor.py
Normal file
@@ -0,0 +1,210 @@
|
||||
#!/usr/bin/env python3
|
||||
"""
|
||||
Tests for OpenCode adaptor
|
||||
"""
|
||||
|
||||
import tempfile
|
||||
import unittest
|
||||
from pathlib import Path
|
||||
|
||||
from skill_seekers.cli.adaptors import get_adaptor, is_platform_available
|
||||
from skill_seekers.cli.adaptors.base import SkillMetadata
|
||||
from skill_seekers.cli.adaptors.opencode import OpenCodeAdaptor
|
||||
|
||||
|
||||
class TestOpenCodeAdaptor(unittest.TestCase):
    """Test OpenCode adaptor functionality"""

    def setUp(self):
        # Fresh adaptor per test so no state leaks between test methods.
        self.adaptor = get_adaptor("opencode")

    def test_platform_info(self):
        """Platform constants match OpenCode; it is local-only, so no API endpoint."""
        self.assertEqual(self.adaptor.PLATFORM, "opencode")
        self.assertEqual(self.adaptor.PLATFORM_NAME, "OpenCode")
        self.assertIsNone(self.adaptor.DEFAULT_API_ENDPOINT)

    def test_platform_available(self):
        """The adaptor registry reports the platform as available."""
        self.assertTrue(is_platform_available("opencode"))

    def test_validate_api_key_always_true(self):
        """No API key is required, so any value (including empty) validates."""
        self.assertTrue(self.adaptor.validate_api_key(""))
        self.assertTrue(self.adaptor.validate_api_key("anything"))

    def test_no_enhancement_support(self):
        """OpenCode has no LLM endpoint, so enhancement is unsupported."""
        self.assertFalse(self.adaptor.supports_enhancement())

    def test_upload_returns_local_path(self):
        """upload() is a no-op that succeeds and reports a local install."""
        result = self.adaptor.upload(Path("/some/path"), "")
        self.assertTrue(result["success"])
        self.assertIn("local", result["message"].lower())

    # --- Kebab-case conversion ---

    def test_kebab_case_spaces(self):
        """Spaces become hyphens and letters are lowercased."""
        self.assertEqual(OpenCodeAdaptor._to_kebab_case("My Cool Skill"), "my-cool-skill")

    def test_kebab_case_underscores(self):
        """Underscores become hyphens."""
        self.assertEqual(OpenCodeAdaptor._to_kebab_case("my_cool_skill"), "my-cool-skill")

    def test_kebab_case_special_chars(self):
        """Punctuation collapses into single hyphens without leading/trailing ones."""
        self.assertEqual(OpenCodeAdaptor._to_kebab_case("My Skill! (v2.0)"), "my-skill-v2-0")

    def test_kebab_case_uppercase(self):
        """All-caps input is lowercased."""
        self.assertEqual(OpenCodeAdaptor._to_kebab_case("ALLCAPS"), "allcaps")

    def test_kebab_case_truncation(self):
        """Converted names are capped at 64 characters."""
        long_name = "a" * 100
        result = OpenCodeAdaptor._to_kebab_case(long_name)
        self.assertLessEqual(len(result), 64)

    def test_kebab_case_empty(self):
        """Input with no usable characters falls back to the literal "skill"."""
        self.assertEqual(OpenCodeAdaptor._to_kebab_case("!!!"), "skill")

    def test_kebab_case_valid_regex(self):
        """All converted names must match OpenCode's regex"""
        test_names = [
            "My Skill",
            "test_skill_v2",
            "UPPERCASE NAME",
            "special!@#chars",
            "dots.and.periods",
            "a",
        ]
        for name in test_names:
            result = OpenCodeAdaptor._to_kebab_case(name)
            self.assertRegex(result, r"^[a-z0-9]+(-[a-z0-9]+)*$", f"Failed for: {name}")

    # --- Format ---

    def test_format_skill_md_has_frontmatter(self):
        """Formatted output starts with YAML frontmatter carrying OpenCode fields."""
        with tempfile.TemporaryDirectory() as temp_dir:
            skill_dir = Path(temp_dir)
            (skill_dir / "references").mkdir()
            (skill_dir / "references" / "test.md").write_text("# Test content")

            metadata = SkillMetadata(name="test-skill", description="Test description")
            formatted = self.adaptor.format_skill_md(skill_dir, metadata)

            self.assertTrue(formatted.startswith("---"))
            self.assertIn("name: test-skill", formatted)
            self.assertIn("compatibility: opencode", formatted)
            self.assertIn("generated-by: skill-seekers", formatted)

    def test_format_description_truncation(self):
        """Overlong descriptions are capped at 1024 chars in the frontmatter."""
        with tempfile.TemporaryDirectory() as temp_dir:
            skill_dir = Path(temp_dir)
            long_desc = "x" * 2000
            metadata = SkillMetadata(name="test", description=long_desc)
            formatted = self.adaptor.format_skill_md(skill_dir, metadata)

            # The description in frontmatter should be truncated to 1024 chars
            # (plus YAML quotes around it)
            lines = formatted.split("\n")
            for line in lines:
                if line.startswith("description:"):
                    desc_value = line[len("description:") :].strip()
                    # Strip surrounding quotes for length check
                    inner = desc_value.strip('"')
                    self.assertLessEqual(len(inner), 1024)
                    break

    def test_format_with_existing_content(self):
        """Existing SKILL.md body is preserved below the added frontmatter."""
        with tempfile.TemporaryDirectory() as temp_dir:
            skill_dir = Path(temp_dir)
            existing = "# Existing Content\n\n" + "x" * 200
            (skill_dir / "SKILL.md").write_text(existing)

            metadata = SkillMetadata(name="test", description="Test")
            formatted = self.adaptor.format_skill_md(skill_dir, metadata)

            self.assertTrue(formatted.startswith("---"))
            self.assertIn("Existing Content", formatted)

    # --- Package ---

    def test_package_creates_directory(self):
        """package() emits a directory (not an archive) named after the platform."""
        with tempfile.TemporaryDirectory() as temp_dir:
            skill_dir = Path(temp_dir) / "test-skill"
            skill_dir.mkdir()
            (skill_dir / "SKILL.md").write_text("# Test")
            (skill_dir / "references").mkdir()
            (skill_dir / "references" / "guide.md").write_text("# Guide")

            output_dir = Path(temp_dir) / "output"
            output_dir.mkdir()

            result_path = self.adaptor.package(skill_dir, output_dir)

            self.assertTrue(result_path.exists())
            self.assertTrue(result_path.is_dir())
            self.assertIn("opencode", result_path.name)

    def test_package_contains_skill_md(self):
        """SKILL.md is copied into the package verbatim."""
        with tempfile.TemporaryDirectory() as temp_dir:
            skill_dir = Path(temp_dir) / "test-skill"
            skill_dir.mkdir()
            (skill_dir / "SKILL.md").write_text("# Test content")

            output_dir = Path(temp_dir) / "output"
            output_dir.mkdir()

            result_path = self.adaptor.package(skill_dir, output_dir)

            self.assertTrue((result_path / "SKILL.md").exists())
            content = (result_path / "SKILL.md").read_text()
            self.assertEqual(content, "# Test content")

    def test_package_copies_references(self):
        """All reference files are copied into the package."""
        with tempfile.TemporaryDirectory() as temp_dir:
            skill_dir = Path(temp_dir) / "test-skill"
            skill_dir.mkdir()
            (skill_dir / "SKILL.md").write_text("# Test")
            refs = skill_dir / "references"
            refs.mkdir()
            (refs / "guide.md").write_text("# Guide")
            (refs / "api.md").write_text("# API")

            output_dir = Path(temp_dir) / "output"
            output_dir.mkdir()

            result_path = self.adaptor.package(skill_dir, output_dir)

            self.assertTrue((result_path / "references" / "guide.md").exists())
            self.assertTrue((result_path / "references" / "api.md").exists())

    def test_package_excludes_backup_files(self):
        """*.backup files are not copied into the package."""
        with tempfile.TemporaryDirectory() as temp_dir:
            skill_dir = Path(temp_dir) / "test-skill"
            skill_dir.mkdir()
            (skill_dir / "SKILL.md").write_text("# Test")
            refs = skill_dir / "references"
            refs.mkdir()
            (refs / "guide.md").write_text("# Guide")
            (refs / "guide.md.backup").write_text("# Old")

            output_dir = Path(temp_dir) / "output"
            output_dir.mkdir()

            result_path = self.adaptor.package(skill_dir, output_dir)

            self.assertTrue((result_path / "references" / "guide.md").exists())
            self.assertFalse((result_path / "references" / "guide.md.backup").exists())

    def test_package_without_references(self):
        """A skill with no references still packages; no empty references dir is made."""
        with tempfile.TemporaryDirectory() as temp_dir:
            skill_dir = Path(temp_dir) / "test-skill"
            skill_dir.mkdir()
            (skill_dir / "SKILL.md").write_text("# Test")

            output_dir = Path(temp_dir) / "output"
            output_dir.mkdir()

            result_path = self.adaptor.package(skill_dir, output_dir)

            self.assertTrue(result_path.exists())
            self.assertTrue((result_path / "SKILL.md").exists())
            self.assertFalse((result_path / "references").exists())


if __name__ == "__main__":
    unittest.main()
51
tests/test_adaptors/test_openrouter_adaptor.py
Normal file
51
tests/test_adaptors/test_openrouter_adaptor.py
Normal file
@@ -0,0 +1,51 @@
|
||||
#!/usr/bin/env python3
|
||||
"""Tests for OpenRouter adaptor"""
|
||||
|
||||
import json
|
||||
import tempfile
|
||||
import unittest
|
||||
import zipfile
|
||||
from pathlib import Path
|
||||
|
||||
from skill_seekers.cli.adaptors import get_adaptor, is_platform_available
|
||||
|
||||
|
||||
class TestOpenRouterAdaptor(unittest.TestCase):
    """Tests for the OpenRouter adaptor registered under the "openrouter" key."""

    def setUp(self):
        # Fresh adaptor per test so no state leaks between test methods.
        self.adaptor = get_adaptor("openrouter")

    def test_platform_info(self):
        """Platform constants match the OpenRouter service definition."""
        self.assertEqual(self.adaptor.PLATFORM, "openrouter")
        self.assertEqual(self.adaptor.PLATFORM_NAME, "OpenRouter")
        self.assertIn("openrouter", self.adaptor.DEFAULT_API_ENDPOINT)
        self.assertEqual(self.adaptor.DEFAULT_MODEL, "openrouter/auto")

    def test_platform_available(self):
        """The adaptor registry reports the platform as available."""
        self.assertTrue(is_platform_available("openrouter"))

    def test_env_var_name(self):
        """The API key is read from the OPENROUTER_API_KEY environment variable."""
        self.assertEqual(self.adaptor.get_env_var_name(), "OPENROUTER_API_KEY")

    def test_supports_enhancement(self):
        """OpenRouter supports LLM-based skill enhancement."""
        self.assertTrue(self.adaptor.supports_enhancement())

    def test_package_metadata(self):
        """Packaging produces a zip whose metadata JSON carries the platform constants."""
        with tempfile.TemporaryDirectory() as temp_dir:
            skill_dir = Path(temp_dir) / "test-skill"
            skill_dir.mkdir()
            (skill_dir / "SKILL.md").write_text("Test")

            output_dir = Path(temp_dir) / "output"
            output_dir.mkdir()

            pkg = self.adaptor.package(skill_dir, output_dir)
            self.assertIn("openrouter", pkg.name)

            with zipfile.ZipFile(pkg) as zf:
                meta = json.loads(zf.read("openrouter_metadata.json"))
                self.assertEqual(meta["platform"], "openrouter")
                self.assertIn("openrouter", meta["api_base"])


if __name__ == "__main__":
    unittest.main()
51
tests/test_adaptors/test_qwen_adaptor.py
Normal file
51
tests/test_adaptors/test_qwen_adaptor.py
Normal file
@@ -0,0 +1,51 @@
|
||||
#!/usr/bin/env python3
|
||||
"""Tests for Qwen (Alibaba) adaptor"""
|
||||
|
||||
import json
|
||||
import tempfile
|
||||
import unittest
|
||||
import zipfile
|
||||
from pathlib import Path
|
||||
|
||||
from skill_seekers.cli.adaptors import get_adaptor, is_platform_available
|
||||
|
||||
|
||||
class TestQwenAdaptor(unittest.TestCase):
    """Tests for the Qwen (Alibaba) adaptor registered under the "qwen" key."""

    def setUp(self):
        # Fresh adaptor per test so no state leaks between test methods.
        self.adaptor = get_adaptor("qwen")

    def test_platform_info(self):
        """Platform constants match the Alibaba DashScope service definition."""
        self.assertEqual(self.adaptor.PLATFORM, "qwen")
        self.assertEqual(self.adaptor.PLATFORM_NAME, "Qwen (Alibaba)")
        self.assertIn("dashscope", self.adaptor.DEFAULT_API_ENDPOINT)
        self.assertEqual(self.adaptor.DEFAULT_MODEL, "qwen-max")

    def test_platform_available(self):
        """The adaptor registry reports the platform as available."""
        self.assertTrue(is_platform_available("qwen"))

    def test_env_var_name(self):
        """The API key is read from the DASHSCOPE_API_KEY environment variable."""
        self.assertEqual(self.adaptor.get_env_var_name(), "DASHSCOPE_API_KEY")

    def test_supports_enhancement(self):
        """Qwen supports LLM-based skill enhancement."""
        self.assertTrue(self.adaptor.supports_enhancement())

    def test_package_metadata(self):
        """Packaging produces a zip whose metadata JSON carries the platform constants."""
        with tempfile.TemporaryDirectory() as temp_dir:
            skill_dir = Path(temp_dir) / "test-skill"
            skill_dir.mkdir()
            (skill_dir / "SKILL.md").write_text("Test")

            output_dir = Path(temp_dir) / "output"
            output_dir.mkdir()

            pkg = self.adaptor.package(skill_dir, output_dir)
            self.assertIn("qwen", pkg.name)

            with zipfile.ZipFile(pkg) as zf:
                meta = json.loads(zf.read("qwen_metadata.json"))
                self.assertEqual(meta["platform"], "qwen")
                # API base points at DashScope, not the "qwen" brand name.
                self.assertIn("dashscope", meta["api_base"])


if __name__ == "__main__":
    unittest.main()
51
tests/test_adaptors/test_together_adaptor.py
Normal file
51
tests/test_adaptors/test_together_adaptor.py
Normal file
@@ -0,0 +1,51 @@
|
||||
#!/usr/bin/env python3
|
||||
"""Tests for Together AI adaptor"""
|
||||
|
||||
import json
|
||||
import tempfile
|
||||
import unittest
|
||||
import zipfile
|
||||
from pathlib import Path
|
||||
|
||||
from skill_seekers.cli.adaptors import get_adaptor, is_platform_available
|
||||
|
||||
|
||||
class TestTogetherAdaptor(unittest.TestCase):
    """Tests for the Together AI adaptor registered under the "together" key."""

    def setUp(self):
        # Fresh adaptor per test so no state leaks between test methods.
        self.adaptor = get_adaptor("together")

    def test_platform_info(self):
        """Platform constants match the Together AI service definition."""
        self.assertEqual(self.adaptor.PLATFORM, "together")
        self.assertEqual(self.adaptor.PLATFORM_NAME, "Together AI")
        self.assertIn("together", self.adaptor.DEFAULT_API_ENDPOINT)
        # Default model is a Llama variant; only assert the family, not the exact tag.
        self.assertIn("llama", self.adaptor.DEFAULT_MODEL.lower())

    def test_platform_available(self):
        """The adaptor registry reports the platform as available."""
        self.assertTrue(is_platform_available("together"))

    def test_env_var_name(self):
        """The API key is read from the TOGETHER_API_KEY environment variable."""
        self.assertEqual(self.adaptor.get_env_var_name(), "TOGETHER_API_KEY")

    def test_supports_enhancement(self):
        """Together AI supports LLM-based skill enhancement."""
        self.assertTrue(self.adaptor.supports_enhancement())

    def test_package_metadata(self):
        """Packaging produces a zip whose metadata JSON carries the platform constants."""
        with tempfile.TemporaryDirectory() as temp_dir:
            skill_dir = Path(temp_dir) / "test-skill"
            skill_dir.mkdir()
            (skill_dir / "SKILL.md").write_text("Test")

            output_dir = Path(temp_dir) / "output"
            output_dir.mkdir()

            pkg = self.adaptor.package(skill_dir, output_dir)
            self.assertIn("together", pkg.name)

            with zipfile.ZipFile(pkg) as zf:
                meta = json.loads(zf.read("together_metadata.json"))
                self.assertEqual(meta["platform"], "together")
                self.assertIn("together", meta["api_base"])


if __name__ == "__main__":
    unittest.main()
Reference in New Issue
Block a user