feat: expand platform coverage with 8 new adaptors, 7 new CLI agents, and OpenCode skill tools

Phase 1 - OpenCode Integration:
- Add OpenCodeAdaptor with directory-based packaging and dual-format YAML frontmatter
- Kebab-case name validation matching OpenCode's regex spec

Phase 2 - OpenAI-Compatible LLM Platforms:
- Extract OpenAICompatibleAdaptor base class from MiniMax (shared format/package/upload/enhance)
- Refactor MiniMax to ~20 lines of constants inheriting from base
- Add 6 new LLM adaptors: Kimi, DeepSeek, Qwen, OpenRouter, Together AI, Fireworks AI
- All use OpenAI-compatible API with platform-specific constants

Phase 3 - CLI Agent Expansion:
- Add 7 new install-agent paths: roo, cline, aider, bolt, kilo, continue, kimi-code
- Total agents: 11 -> 18

Phase 4 - Advanced Features:
- OpenCode skill splitter (auto-split large docs into focused sub-skills with router)
- Bi-directional skill format converter (import/export between OpenCode and any platform)
- GitHub Actions template for automated skill updates

Totals: 12 --target platforms, 18 --agent paths, 2915 tests passing

Co-Authored-By: Claude Opus 4.6 (1M context) <noreply@anthropic.com>
This commit is contained in:
yusyus
2026-03-21 20:31:51 +03:00
parent 1d3d7389d7
commit cd7b322b5e
24 changed files with 2482 additions and 499 deletions

View File

@@ -0,0 +1,51 @@
#!/usr/bin/env python3
"""Tests for DeepSeek AI adaptor"""
import json
import tempfile
import unittest
import zipfile
from pathlib import Path
from skill_seekers.cli.adaptors import get_adaptor, is_platform_available
class TestDeepSeekAdaptor(unittest.TestCase):
    """Verify registration, platform constants, and packaging of the DeepSeek adaptor."""

    def setUp(self):
        self.adaptor = get_adaptor("deepseek")

    def test_platform_info(self):
        """Platform identity constants match the DeepSeek spec."""
        adaptor = self.adaptor
        self.assertEqual(adaptor.PLATFORM, "deepseek")
        self.assertEqual(adaptor.PLATFORM_NAME, "DeepSeek AI")
        self.assertIn("deepseek", adaptor.DEFAULT_API_ENDPOINT)
        self.assertEqual(adaptor.DEFAULT_MODEL, "deepseek-chat")

    def test_platform_available(self):
        """The adaptor registry reports deepseek as an available target."""
        self.assertTrue(is_platform_available("deepseek"))

    def test_env_var_name(self):
        """The API key is sourced from DEEPSEEK_API_KEY."""
        self.assertEqual(self.adaptor.get_env_var_name(), "DEEPSEEK_API_KEY")

    def test_supports_enhancement(self):
        """DeepSeek supports LLM-based skill enhancement."""
        self.assertTrue(self.adaptor.supports_enhancement())

    def test_package_metadata(self):
        """Packaging yields a zip whose metadata carries the platform constants."""
        with tempfile.TemporaryDirectory() as temp_dir:
            base = Path(temp_dir)
            skill_dir = base / "test-skill"
            output_dir = base / "output"
            for directory in (skill_dir, output_dir):
                directory.mkdir()
            (skill_dir / "SKILL.md").write_text("Test")
            pkg = self.adaptor.package(skill_dir, output_dir)
            self.assertIn("deepseek", pkg.name)
            with zipfile.ZipFile(pkg) as zf:
                meta = json.loads(zf.read("deepseek_metadata.json"))
            self.assertEqual(meta["platform"], "deepseek")
            self.assertIn("deepseek", meta["api_base"])
if __name__ == "__main__":
unittest.main()

View File

@@ -0,0 +1,51 @@
#!/usr/bin/env python3
"""Tests for Fireworks AI adaptor"""
import json
import tempfile
import unittest
import zipfile
from pathlib import Path
from skill_seekers.cli.adaptors import get_adaptor, is_platform_available
class TestFireworksAdaptor(unittest.TestCase):
    """Verify registration, platform constants, and packaging of the Fireworks AI adaptor."""

    def setUp(self):
        self.adaptor = get_adaptor("fireworks")

    def test_platform_info(self):
        """Platform identity constants match the Fireworks AI spec."""
        adaptor = self.adaptor
        self.assertEqual(adaptor.PLATFORM, "fireworks")
        self.assertEqual(adaptor.PLATFORM_NAME, "Fireworks AI")
        self.assertIn("fireworks", adaptor.DEFAULT_API_ENDPOINT)
        self.assertIn("llama", adaptor.DEFAULT_MODEL.lower())

    def test_platform_available(self):
        """The adaptor registry reports fireworks as an available target."""
        self.assertTrue(is_platform_available("fireworks"))

    def test_env_var_name(self):
        """The API key is sourced from FIREWORKS_API_KEY."""
        self.assertEqual(self.adaptor.get_env_var_name(), "FIREWORKS_API_KEY")

    def test_supports_enhancement(self):
        """Fireworks supports LLM-based skill enhancement."""
        self.assertTrue(self.adaptor.supports_enhancement())

    def test_package_metadata(self):
        """Packaging yields a zip whose metadata carries the platform constants."""
        with tempfile.TemporaryDirectory() as temp_dir:
            base = Path(temp_dir)
            skill_dir = base / "test-skill"
            output_dir = base / "output"
            for directory in (skill_dir, output_dir):
                directory.mkdir()
            (skill_dir / "SKILL.md").write_text("Test")
            pkg = self.adaptor.package(skill_dir, output_dir)
            self.assertIn("fireworks", pkg.name)
            with zipfile.ZipFile(pkg) as zf:
                meta = json.loads(zf.read("fireworks_metadata.json"))
            self.assertEqual(meta["platform"], "fireworks")
            self.assertIn("fireworks", meta["api_base"])
if __name__ == "__main__":
unittest.main()

View File

@@ -0,0 +1,51 @@
#!/usr/bin/env python3
"""Tests for Kimi (Moonshot AI) adaptor"""
import json
import tempfile
import unittest
import zipfile
from pathlib import Path
from skill_seekers.cli.adaptors import get_adaptor, is_platform_available
class TestKimiAdaptor(unittest.TestCase):
    """Verify registration, platform constants, and packaging of the Kimi (Moonshot AI) adaptor."""

    def setUp(self):
        self.adaptor = get_adaptor("kimi")

    def test_platform_info(self):
        """Platform identity constants match the Moonshot AI spec."""
        adaptor = self.adaptor
        self.assertEqual(adaptor.PLATFORM, "kimi")
        self.assertEqual(adaptor.PLATFORM_NAME, "Kimi (Moonshot AI)")
        self.assertIn("moonshot", adaptor.DEFAULT_API_ENDPOINT)
        self.assertEqual(adaptor.DEFAULT_MODEL, "moonshot-v1-128k")

    def test_platform_available(self):
        """The adaptor registry reports kimi as an available target."""
        self.assertTrue(is_platform_available("kimi"))

    def test_env_var_name(self):
        """The API key is sourced from MOONSHOT_API_KEY."""
        self.assertEqual(self.adaptor.get_env_var_name(), "MOONSHOT_API_KEY")

    def test_supports_enhancement(self):
        """Kimi supports LLM-based skill enhancement."""
        self.assertTrue(self.adaptor.supports_enhancement())

    def test_package_metadata(self):
        """Packaging yields a zip whose metadata carries the platform constants."""
        with tempfile.TemporaryDirectory() as temp_dir:
            base = Path(temp_dir)
            skill_dir = base / "test-skill"
            output_dir = base / "output"
            for directory in (skill_dir, output_dir):
                directory.mkdir()
            (skill_dir / "SKILL.md").write_text("Test")
            pkg = self.adaptor.package(skill_dir, output_dir)
            self.assertIn("kimi", pkg.name)
            with zipfile.ZipFile(pkg) as zf:
                meta = json.loads(zf.read("kimi_metadata.json"))
            self.assertEqual(meta["platform"], "kimi")
            self.assertIn("moonshot", meta["api_base"])
if __name__ == "__main__":
unittest.main()

View File

@@ -0,0 +1,224 @@
#!/usr/bin/env python3
"""
Tests for OpenAI-compatible base adaptor class.
Tests shared behavior across all OpenAI-compatible platforms.
"""
import json
import sys
import tempfile
import unittest
import zipfile
from pathlib import Path
from unittest.mock import MagicMock, patch
from skill_seekers.cli.adaptors.openai_compatible import OpenAICompatibleAdaptor
from skill_seekers.cli.adaptors.base import SkillMetadata
class ConcreteTestAdaptor(OpenAICompatibleAdaptor):
    """Concrete subclass for testing the base class."""
    # Identity/config constants the shared base-class logic reads; the values
    # are deliberately fake so tests can assert they flow through unchanged.
    PLATFORM = "testplatform"
    PLATFORM_NAME = "Test Platform"
    DEFAULT_API_ENDPOINT = "https://api.test.example.com/v1"
    DEFAULT_MODEL = "test-model-v1"
    ENV_VAR_NAME = "TEST_PLATFORM_API_KEY"
    PLATFORM_URL = "https://test.example.com/"
class TestOpenAICompatibleBase(unittest.TestCase):
    """Test shared OpenAI-compatible base behavior"""
    def setUp(self):
        # Use the fake concrete subclass so assertions can target known constants.
        self.adaptor = ConcreteTestAdaptor()
    def test_constants_used_in_env_var(self):
        """The ENV_VAR_NAME class constant is surfaced via get_env_var_name()."""
        self.assertEqual(self.adaptor.get_env_var_name(), "TEST_PLATFORM_API_KEY")
    def test_supports_enhancement(self):
        """OpenAI-compatible adaptors advertise enhancement support."""
        self.assertTrue(self.adaptor.supports_enhancement())
    def test_validate_api_key_valid(self):
        """A reasonably long key string is accepted."""
        self.assertTrue(self.adaptor.validate_api_key("sk-some-long-api-key-string"))
    def test_validate_api_key_invalid(self):
        """Empty, whitespace-only, and very short keys are rejected."""
        self.assertFalse(self.adaptor.validate_api_key(""))
        self.assertFalse(self.adaptor.validate_api_key(" "))
        self.assertFalse(self.adaptor.validate_api_key("short"))
    def test_format_skill_md_no_frontmatter(self):
        """Formatted output is a plain system prompt (no YAML frontmatter)."""
        with tempfile.TemporaryDirectory() as temp_dir:
            skill_dir = Path(temp_dir)
            (skill_dir / "references").mkdir()
            (skill_dir / "references" / "test.md").write_text("# Test")
            metadata = SkillMetadata(name="test-skill", description="Test description")
            formatted = self.adaptor.format_skill_md(skill_dir, metadata)
            self.assertFalse(formatted.startswith("---"))
            self.assertIn("You are an expert assistant", formatted)
            self.assertIn("test-skill", formatted)
    def test_format_skill_md_with_existing_content(self):
        """An existing SKILL.md still yields system-prompt-style output."""
        with tempfile.TemporaryDirectory() as temp_dir:
            skill_dir = Path(temp_dir)
            existing = "# Existing\n\n" + "x" * 200
            (skill_dir / "SKILL.md").write_text(existing)
            metadata = SkillMetadata(name="test", description="Test")
            formatted = self.adaptor.format_skill_md(skill_dir, metadata)
            self.assertIn("You are an expert assistant", formatted)
    def test_package_creates_zip_with_platform_name(self):
        """package() creates a .zip whose filename embeds the platform id."""
        with tempfile.TemporaryDirectory() as temp_dir:
            skill_dir = Path(temp_dir) / "test-skill"
            skill_dir.mkdir()
            (skill_dir / "SKILL.md").write_text("Test instructions")
            (skill_dir / "references").mkdir()
            (skill_dir / "references" / "guide.md").write_text("# Guide")
            output_dir = Path(temp_dir) / "output"
            output_dir.mkdir()
            package_path = self.adaptor.package(skill_dir, output_dir)
            self.assertTrue(package_path.exists())
            self.assertTrue(str(package_path).endswith(".zip"))
            self.assertIn("testplatform", package_path.name)
    def test_package_metadata_uses_constants(self):
        """Packaged metadata JSON reflects the subclass constants verbatim."""
        with tempfile.TemporaryDirectory() as temp_dir:
            skill_dir = Path(temp_dir) / "test-skill"
            skill_dir.mkdir()
            (skill_dir / "SKILL.md").write_text("Test")
            (skill_dir / "references").mkdir()
            (skill_dir / "references" / "guide.md").write_text("# Guide")
            output_dir = Path(temp_dir) / "output"
            output_dir.mkdir()
            package_path = self.adaptor.package(skill_dir, output_dir)
            with zipfile.ZipFile(package_path, "r") as zf:
                metadata_content = zf.read("testplatform_metadata.json").decode("utf-8")
                metadata = json.loads(metadata_content)
                self.assertEqual(metadata["platform"], "testplatform")
                self.assertEqual(metadata["model"], "test-model-v1")
                self.assertEqual(metadata["api_base"], "https://api.test.example.com/v1")
    def test_package_zip_structure(self):
        """The zip contains instructions, metadata, and a knowledge_files tree."""
        with tempfile.TemporaryDirectory() as temp_dir:
            skill_dir = Path(temp_dir) / "test-skill"
            skill_dir.mkdir()
            (skill_dir / "SKILL.md").write_text("Test")
            (skill_dir / "references").mkdir()
            (skill_dir / "references" / "test.md").write_text("# Test")
            output_dir = Path(temp_dir) / "output"
            output_dir.mkdir()
            package_path = self.adaptor.package(skill_dir, output_dir)
            with zipfile.ZipFile(package_path, "r") as zf:
                names = zf.namelist()
                self.assertIn("system_instructions.txt", names)
                self.assertIn("testplatform_metadata.json", names)
                self.assertTrue(any("knowledge_files" in n for n in names))
    def test_upload_missing_file(self):
        """upload() fails gracefully when the package path does not exist."""
        result = self.adaptor.upload(Path("/nonexistent/file.zip"), "test-key")
        self.assertFalse(result["success"])
        self.assertIn("not found", result["message"].lower())
    def test_upload_wrong_format(self):
        """upload() rejects non-zip packages with a descriptive message."""
        with tempfile.NamedTemporaryFile(suffix=".tar.gz") as tmp:
            result = self.adaptor.upload(Path(tmp.name), "test-key")
            self.assertFalse(result["success"])
            self.assertIn("not a zip", result["message"].lower())
    def test_upload_missing_library(self):
        """upload() reports a helpful error when the openai package is absent."""
        with tempfile.NamedTemporaryFile(suffix=".zip") as tmp:
            # Setting sys.modules["openai"] to None makes `import openai` raise,
            # simulating an environment without the library installed.
            with patch.dict(sys.modules, {"openai": None}):
                result = self.adaptor.upload(Path(tmp.name), "test-key")
                self.assertFalse(result["success"])
                self.assertIn("openai", result["message"])
    @patch("openai.OpenAI")
    def test_upload_success_mocked(self, mock_openai_class):
        """With a mocked client, upload() validates the key and returns PLATFORM_URL."""
        mock_client = MagicMock()
        mock_response = MagicMock()
        mock_response.choices = [MagicMock()]
        mock_response.choices[0].message.content = "Ready"
        mock_client.chat.completions.create.return_value = mock_response
        mock_openai_class.return_value = mock_client
        with tempfile.TemporaryDirectory() as temp_dir:
            skill_dir = Path(temp_dir) / "test-skill"
            skill_dir.mkdir()
            (skill_dir / "SKILL.md").write_text("Test")
            (skill_dir / "references").mkdir()
            (skill_dir / "references" / "test.md").write_text("# Test")
            output_dir = Path(temp_dir) / "output"
            output_dir.mkdir()
            package_path = self.adaptor.package(skill_dir, output_dir)
            result = self.adaptor.upload(package_path, "test-long-api-key-string")
            self.assertTrue(result["success"])
            self.assertEqual(result["url"], "https://test.example.com/")
            self.assertIn("validated", result["message"])
    def test_read_reference_files(self):
        """_read_reference_files returns one entry per markdown file."""
        with tempfile.TemporaryDirectory() as temp_dir:
            refs_dir = Path(temp_dir)
            (refs_dir / "guide.md").write_text("# Guide\nContent")
            (refs_dir / "api.md").write_text("# API\nDocs")
            refs = self.adaptor._read_reference_files(refs_dir)
            self.assertEqual(len(refs), 2)
    def test_read_reference_files_truncation(self):
        """Oversized reference files are truncated and flagged as such."""
        with tempfile.TemporaryDirectory() as temp_dir:
            (Path(temp_dir) / "large.md").write_text("x" * 50000)
            refs = self.adaptor._read_reference_files(Path(temp_dir))
            self.assertIn("truncated", refs["large.md"])
            self.assertLessEqual(len(refs["large.md"]), 31000)
    def test_build_enhancement_prompt_uses_platform_name(self):
        """The enhancement prompt embeds the human-readable platform name."""
        refs = {"test.md": "# Test\nContent"}
        prompt = self.adaptor._build_enhancement_prompt("skill", refs, None)
        self.assertIn("Test Platform", prompt)
    @patch("openai.OpenAI")
    def test_enhance_success_mocked(self, mock_openai_class):
        """With a mocked client, enhance() rewrites SKILL.md and keeps a backup."""
        mock_client = MagicMock()
        mock_response = MagicMock()
        mock_response.choices = [MagicMock()]
        mock_response.choices[0].message.content = "Enhanced content"
        mock_client.chat.completions.create.return_value = mock_response
        mock_openai_class.return_value = mock_client
        with tempfile.TemporaryDirectory() as temp_dir:
            skill_dir = Path(temp_dir)
            refs_dir = skill_dir / "references"
            refs_dir.mkdir()
            (refs_dir / "test.md").write_text("# Test\nContent")
            (skill_dir / "SKILL.md").write_text("Original")
            success = self.adaptor.enhance(skill_dir, "test-api-key")
            self.assertTrue(success)
            self.assertEqual((skill_dir / "SKILL.md").read_text(), "Enhanced content")
            self.assertTrue((skill_dir / "SKILL.md.backup").exists())
    def test_enhance_missing_references(self):
        """enhance() fails cleanly when there is no references directory."""
        with tempfile.TemporaryDirectory() as temp_dir:
            self.assertFalse(self.adaptor.enhance(Path(temp_dir), "key"))
if __name__ == "__main__":
unittest.main()

View File

@@ -0,0 +1,210 @@
#!/usr/bin/env python3
"""
Tests for OpenCode adaptor
"""
import tempfile
import unittest
from pathlib import Path
from skill_seekers.cli.adaptors import get_adaptor, is_platform_available
from skill_seekers.cli.adaptors.base import SkillMetadata
from skill_seekers.cli.adaptors.opencode import OpenCodeAdaptor
class TestOpenCodeAdaptor(unittest.TestCase):
    """Test OpenCode adaptor functionality"""
    def setUp(self):
        self.adaptor = get_adaptor("opencode")
    def test_platform_info(self):
        """OpenCode is a local-only target: correct identity, no API endpoint."""
        self.assertEqual(self.adaptor.PLATFORM, "opencode")
        self.assertEqual(self.adaptor.PLATFORM_NAME, "OpenCode")
        self.assertIsNone(self.adaptor.DEFAULT_API_ENDPOINT)
    def test_platform_available(self):
        """The adaptor registry reports opencode as an available target."""
        self.assertTrue(is_platform_available("opencode"))
    def test_validate_api_key_always_true(self):
        """No API key is needed for local packaging, so any value validates."""
        self.assertTrue(self.adaptor.validate_api_key(""))
        self.assertTrue(self.adaptor.validate_api_key("anything"))
    def test_no_enhancement_support(self):
        """OpenCode has no LLM endpoint, so enhancement is unsupported."""
        self.assertFalse(self.adaptor.supports_enhancement())
    def test_upload_returns_local_path(self):
        """upload() is a no-op that reports a local installation path."""
        result = self.adaptor.upload(Path("/some/path"), "")
        self.assertTrue(result["success"])
        self.assertIn("local", result["message"].lower())
    # --- Kebab-case conversion ---
    def test_kebab_case_spaces(self):
        """Spaces become hyphens, letters are lowercased."""
        self.assertEqual(OpenCodeAdaptor._to_kebab_case("My Cool Skill"), "my-cool-skill")
    def test_kebab_case_underscores(self):
        """Underscores become hyphens."""
        self.assertEqual(OpenCodeAdaptor._to_kebab_case("my_cool_skill"), "my-cool-skill")
    def test_kebab_case_special_chars(self):
        """Punctuation is collapsed into single hyphens."""
        self.assertEqual(OpenCodeAdaptor._to_kebab_case("My Skill! (v2.0)"), "my-skill-v2-0")
    def test_kebab_case_uppercase(self):
        """All-uppercase input is lowercased."""
        self.assertEqual(OpenCodeAdaptor._to_kebab_case("ALLCAPS"), "allcaps")
    def test_kebab_case_truncation(self):
        """Converted names are capped at OpenCode's 64-character limit."""
        long_name = "a" * 100
        result = OpenCodeAdaptor._to_kebab_case(long_name)
        self.assertLessEqual(len(result), 64)
    def test_kebab_case_empty(self):
        """Input with no alphanumerics falls back to the literal 'skill'."""
        self.assertEqual(OpenCodeAdaptor._to_kebab_case("!!!"), "skill")
    def test_kebab_case_valid_regex(self):
        """All converted names must match OpenCode's regex"""
        test_names = [
            "My Skill",
            "test_skill_v2",
            "UPPERCASE NAME",
            "special!@#chars",
            "dots.and.periods",
            "a",
        ]
        for name in test_names:
            result = OpenCodeAdaptor._to_kebab_case(name)
            self.assertRegex(result, r"^[a-z0-9]+(-[a-z0-9]+)*$", f"Failed for: {name}")
    # --- Format ---
    def test_format_skill_md_has_frontmatter(self):
        """Formatted SKILL.md starts with YAML frontmatter carrying OpenCode keys."""
        with tempfile.TemporaryDirectory() as temp_dir:
            skill_dir = Path(temp_dir)
            (skill_dir / "references").mkdir()
            (skill_dir / "references" / "test.md").write_text("# Test content")
            metadata = SkillMetadata(name="test-skill", description="Test description")
            formatted = self.adaptor.format_skill_md(skill_dir, metadata)
            self.assertTrue(formatted.startswith("---"))
            self.assertIn("name: test-skill", formatted)
            self.assertIn("compatibility: opencode", formatted)
            self.assertIn("generated-by: skill-seekers", formatted)
    def test_format_description_truncation(self):
        """Frontmatter descriptions are truncated to OpenCode's 1024-char limit."""
        with tempfile.TemporaryDirectory() as temp_dir:
            skill_dir = Path(temp_dir)
            long_desc = "x" * 2000
            metadata = SkillMetadata(name="test", description=long_desc)
            formatted = self.adaptor.format_skill_md(skill_dir, metadata)
            # The description in frontmatter should be truncated to 1024 chars
            # (plus YAML quotes around it)
            lines = formatted.split("\n")
            for line in lines:
                if line.startswith("description:"):
                    desc_value = line[len("description:") :].strip()
                    # Strip surrounding quotes for length check
                    inner = desc_value.strip('"')
                    self.assertLessEqual(len(inner), 1024)
                    break
            else:
                # Bug fix: previously this test passed silently when no
                # description line was emitted at all.
                self.fail("No 'description:' line found in formatted frontmatter")
    def test_format_with_existing_content(self):
        """Existing SKILL.md content is preserved below the new frontmatter."""
        with tempfile.TemporaryDirectory() as temp_dir:
            skill_dir = Path(temp_dir)
            existing = "# Existing Content\n\n" + "x" * 200
            (skill_dir / "SKILL.md").write_text(existing)
            metadata = SkillMetadata(name="test", description="Test")
            formatted = self.adaptor.format_skill_md(skill_dir, metadata)
            self.assertTrue(formatted.startswith("---"))
            self.assertIn("Existing Content", formatted)
    # --- Package ---
    def test_package_creates_directory(self):
        """package() produces a directory (not a zip) named after the platform."""
        with tempfile.TemporaryDirectory() as temp_dir:
            skill_dir = Path(temp_dir) / "test-skill"
            skill_dir.mkdir()
            (skill_dir / "SKILL.md").write_text("# Test")
            (skill_dir / "references").mkdir()
            (skill_dir / "references" / "guide.md").write_text("# Guide")
            output_dir = Path(temp_dir) / "output"
            output_dir.mkdir()
            result_path = self.adaptor.package(skill_dir, output_dir)
            self.assertTrue(result_path.exists())
            self.assertTrue(result_path.is_dir())
            self.assertIn("opencode", result_path.name)
    def test_package_contains_skill_md(self):
        """SKILL.md content is copied into the package verbatim."""
        with tempfile.TemporaryDirectory() as temp_dir:
            skill_dir = Path(temp_dir) / "test-skill"
            skill_dir.mkdir()
            (skill_dir / "SKILL.md").write_text("# Test content")
            output_dir = Path(temp_dir) / "output"
            output_dir.mkdir()
            result_path = self.adaptor.package(skill_dir, output_dir)
            self.assertTrue((result_path / "SKILL.md").exists())
            content = (result_path / "SKILL.md").read_text()
            self.assertEqual(content, "# Test content")
    def test_package_copies_references(self):
        """All reference files are copied into the packaged references/ tree."""
        with tempfile.TemporaryDirectory() as temp_dir:
            skill_dir = Path(temp_dir) / "test-skill"
            skill_dir.mkdir()
            (skill_dir / "SKILL.md").write_text("# Test")
            refs = skill_dir / "references"
            refs.mkdir()
            (refs / "guide.md").write_text("# Guide")
            (refs / "api.md").write_text("# API")
            output_dir = Path(temp_dir) / "output"
            output_dir.mkdir()
            result_path = self.adaptor.package(skill_dir, output_dir)
            self.assertTrue((result_path / "references" / "guide.md").exists())
            self.assertTrue((result_path / "references" / "api.md").exists())
    def test_package_excludes_backup_files(self):
        """*.backup files are filtered out during packaging."""
        with tempfile.TemporaryDirectory() as temp_dir:
            skill_dir = Path(temp_dir) / "test-skill"
            skill_dir.mkdir()
            (skill_dir / "SKILL.md").write_text("# Test")
            refs = skill_dir / "references"
            refs.mkdir()
            (refs / "guide.md").write_text("# Guide")
            (refs / "guide.md.backup").write_text("# Old")
            output_dir = Path(temp_dir) / "output"
            output_dir.mkdir()
            result_path = self.adaptor.package(skill_dir, output_dir)
            self.assertTrue((result_path / "references" / "guide.md").exists())
            self.assertFalse((result_path / "references" / "guide.md.backup").exists())
    def test_package_without_references(self):
        """A skill without references packages cleanly with only SKILL.md."""
        with tempfile.TemporaryDirectory() as temp_dir:
            skill_dir = Path(temp_dir) / "test-skill"
            skill_dir.mkdir()
            (skill_dir / "SKILL.md").write_text("# Test")
            output_dir = Path(temp_dir) / "output"
            output_dir.mkdir()
            result_path = self.adaptor.package(skill_dir, output_dir)
            self.assertTrue(result_path.exists())
            self.assertTrue((result_path / "SKILL.md").exists())
            self.assertFalse((result_path / "references").exists())
if __name__ == "__main__":
unittest.main()

View File

@@ -0,0 +1,51 @@
#!/usr/bin/env python3
"""Tests for OpenRouter adaptor"""
import json
import tempfile
import unittest
import zipfile
from pathlib import Path
from skill_seekers.cli.adaptors import get_adaptor, is_platform_available
class TestOpenRouterAdaptor(unittest.TestCase):
    """Verify registration, platform constants, and packaging of the OpenRouter adaptor."""

    def setUp(self):
        self.adaptor = get_adaptor("openrouter")

    def test_platform_info(self):
        """Platform identity constants match the OpenRouter spec."""
        adaptor = self.adaptor
        self.assertEqual(adaptor.PLATFORM, "openrouter")
        self.assertEqual(adaptor.PLATFORM_NAME, "OpenRouter")
        self.assertIn("openrouter", adaptor.DEFAULT_API_ENDPOINT)
        self.assertEqual(adaptor.DEFAULT_MODEL, "openrouter/auto")

    def test_platform_available(self):
        """The adaptor registry reports openrouter as an available target."""
        self.assertTrue(is_platform_available("openrouter"))

    def test_env_var_name(self):
        """The API key is sourced from OPENROUTER_API_KEY."""
        self.assertEqual(self.adaptor.get_env_var_name(), "OPENROUTER_API_KEY")

    def test_supports_enhancement(self):
        """OpenRouter supports LLM-based skill enhancement."""
        self.assertTrue(self.adaptor.supports_enhancement())

    def test_package_metadata(self):
        """Packaging yields a zip whose metadata carries the platform constants."""
        with tempfile.TemporaryDirectory() as temp_dir:
            base = Path(temp_dir)
            skill_dir = base / "test-skill"
            output_dir = base / "output"
            for directory in (skill_dir, output_dir):
                directory.mkdir()
            (skill_dir / "SKILL.md").write_text("Test")
            pkg = self.adaptor.package(skill_dir, output_dir)
            self.assertIn("openrouter", pkg.name)
            with zipfile.ZipFile(pkg) as zf:
                meta = json.loads(zf.read("openrouter_metadata.json"))
            self.assertEqual(meta["platform"], "openrouter")
            self.assertIn("openrouter", meta["api_base"])
if __name__ == "__main__":
unittest.main()

View File

@@ -0,0 +1,51 @@
#!/usr/bin/env python3
"""Tests for Qwen (Alibaba) adaptor"""
import json
import tempfile
import unittest
import zipfile
from pathlib import Path
from skill_seekers.cli.adaptors import get_adaptor, is_platform_available
class TestQwenAdaptor(unittest.TestCase):
    """Verify registration, platform constants, and packaging of the Qwen (Alibaba) adaptor."""

    def setUp(self):
        self.adaptor = get_adaptor("qwen")

    def test_platform_info(self):
        """Platform identity constants match the DashScope/Qwen spec."""
        adaptor = self.adaptor
        self.assertEqual(adaptor.PLATFORM, "qwen")
        self.assertEqual(adaptor.PLATFORM_NAME, "Qwen (Alibaba)")
        self.assertIn("dashscope", adaptor.DEFAULT_API_ENDPOINT)
        self.assertEqual(adaptor.DEFAULT_MODEL, "qwen-max")

    def test_platform_available(self):
        """The adaptor registry reports qwen as an available target."""
        self.assertTrue(is_platform_available("qwen"))

    def test_env_var_name(self):
        """The API key is sourced from DASHSCOPE_API_KEY."""
        self.assertEqual(self.adaptor.get_env_var_name(), "DASHSCOPE_API_KEY")

    def test_supports_enhancement(self):
        """Qwen supports LLM-based skill enhancement."""
        self.assertTrue(self.adaptor.supports_enhancement())

    def test_package_metadata(self):
        """Packaging yields a zip whose metadata carries the platform constants."""
        with tempfile.TemporaryDirectory() as temp_dir:
            base = Path(temp_dir)
            skill_dir = base / "test-skill"
            output_dir = base / "output"
            for directory in (skill_dir, output_dir):
                directory.mkdir()
            (skill_dir / "SKILL.md").write_text("Test")
            pkg = self.adaptor.package(skill_dir, output_dir)
            self.assertIn("qwen", pkg.name)
            with zipfile.ZipFile(pkg) as zf:
                meta = json.loads(zf.read("qwen_metadata.json"))
            self.assertEqual(meta["platform"], "qwen")
            self.assertIn("dashscope", meta["api_base"])
if __name__ == "__main__":
unittest.main()

View File

@@ -0,0 +1,51 @@
#!/usr/bin/env python3
"""Tests for Together AI adaptor"""
import json
import tempfile
import unittest
import zipfile
from pathlib import Path
from skill_seekers.cli.adaptors import get_adaptor, is_platform_available
class TestTogetherAdaptor(unittest.TestCase):
    """Verify registration, platform constants, and packaging of the Together AI adaptor."""

    def setUp(self):
        self.adaptor = get_adaptor("together")

    def test_platform_info(self):
        """Platform identity constants match the Together AI spec."""
        adaptor = self.adaptor
        self.assertEqual(adaptor.PLATFORM, "together")
        self.assertEqual(adaptor.PLATFORM_NAME, "Together AI")
        self.assertIn("together", adaptor.DEFAULT_API_ENDPOINT)
        self.assertIn("llama", adaptor.DEFAULT_MODEL.lower())

    def test_platform_available(self):
        """The adaptor registry reports together as an available target."""
        self.assertTrue(is_platform_available("together"))

    def test_env_var_name(self):
        """The API key is sourced from TOGETHER_API_KEY."""
        self.assertEqual(self.adaptor.get_env_var_name(), "TOGETHER_API_KEY")

    def test_supports_enhancement(self):
        """Together AI supports LLM-based skill enhancement."""
        self.assertTrue(self.adaptor.supports_enhancement())

    def test_package_metadata(self):
        """Packaging yields a zip whose metadata carries the platform constants."""
        with tempfile.TemporaryDirectory() as temp_dir:
            base = Path(temp_dir)
            skill_dir = base / "test-skill"
            output_dir = base / "output"
            for directory in (skill_dir, output_dir):
                directory.mkdir()
            (skill_dir / "SKILL.md").write_text("Test")
            pkg = self.adaptor.package(skill_dir, output_dir)
            self.assertIn("together", pkg.name)
            with zipfile.ZipFile(pkg) as zf:
                meta = json.loads(zf.read("together_metadata.json"))
            self.assertEqual(meta["platform"], "together")
            self.assertIn("together", meta["api_base"])
if __name__ == "__main__":
unittest.main()

View File

@@ -66,17 +66,38 @@ class TestAgentPathMapping:
get_agent_path("invalid_agent")
def test_get_available_agents(self):
"""Test that all 11 agents are listed."""
"""Test that all 18 agents are listed."""
agents = get_available_agents()
assert len(agents) == 11
assert len(agents) == 18
assert "claude" in agents
assert "cursor" in agents
assert "vscode" in agents
assert "amp" in agents
assert "goose" in agents
assert "neovate" in agents
assert "roo" in agents
assert "cline" in agents
assert "aider" in agents
assert "bolt" in agents
assert "kilo" in agents
assert "continue" in agents
assert "kimi-code" in agents
assert sorted(agents) == agents # Should be sorted
def test_new_agents_project_relative(self):
"""Test that project-relative new agents resolve correctly."""
for agent in ["roo", "cline", "bolt", "kilo"]:
path = get_agent_path(agent)
assert path.is_absolute()
assert str(Path.cwd()) in str(path)
def test_new_agents_global(self):
"""Test that global new agents resolve to home directory."""
for agent in ["aider", "continue", "kimi-code"]:
path = get_agent_path(agent)
assert path.is_absolute()
assert str(path).startswith(str(Path.home()))
def test_agent_path_case_insensitive(self):
"""Test that agent names are case-insensitive."""
path_lower = get_agent_path("claude")
@@ -340,7 +361,7 @@ class TestInstallToAllAgents:
shutil.rmtree(self.tmpdir, ignore_errors=True)
def test_install_to_all_success(self):
"""Test that install_to_all_agents attempts all 11 agents."""
"""Test that install_to_all_agents attempts all 18 agents."""
with tempfile.TemporaryDirectory() as agent_tmpdir:
def mock_get_agent_path(agent_name, _project_root=None):
@@ -352,7 +373,7 @@ class TestInstallToAllAgents:
):
results = install_to_all_agents(self.skill_dir, force=True)
assert len(results) == 11
assert len(results) == 18
assert "claude" in results
assert "cursor" in results
@@ -362,7 +383,7 @@ class TestInstallToAllAgents:
results = install_to_all_agents(self.skill_dir, dry_run=True)
# All should succeed in dry-run mode
assert len(results) == 11
assert len(results) == 18
for _agent_name, (success, message) in results.items():
assert success is True
assert "DRY RUN" in message
@@ -399,7 +420,7 @@ class TestInstallToAllAgents:
results = install_to_all_agents(self.skill_dir, dry_run=True)
assert isinstance(results, dict)
assert len(results) == 11
assert len(results) == 18
for agent_name, (success, message) in results.items():
assert isinstance(success, bool)

View File

@@ -0,0 +1,280 @@
#!/usr/bin/env python3
"""
Tests for OpenCode skill splitter and converter.
"""
import tempfile
import unittest
from pathlib import Path
from skill_seekers.cli.opencode_skill_splitter import (
OpenCodeSkillConverter,
OpenCodeSkillSplitter,
)
class TestOpenCodeSkillSplitter(unittest.TestCase):
    """Test skill splitting for OpenCode"""
    def _create_skill(self, temp_dir, name="test-skill", content=None, refs=None):
        """Helper to create a test skill directory.

        Writes a SKILL.md (default: three H2 sections) and, if *refs* is given,
        a references/ directory with one file per (name, content) pair.
        Returns the skill directory path.
        """
        skill_dir = Path(temp_dir) / name
        skill_dir.mkdir()
        if content is None:
            content = "# Test Skill\n\n## Section A\n\nContent A\n\n## Section B\n\nContent B\n\n## Section C\n\nContent C"
        (skill_dir / "SKILL.md").write_text(content)
        if refs:
            refs_dir = skill_dir / "references"
            refs_dir.mkdir()
            for fname, fcontent in refs.items():
                (refs_dir / fname).write_text(fcontent)
        return skill_dir
    def test_needs_splitting_small(self):
        """Content under max_chars does not trigger a split."""
        with tempfile.TemporaryDirectory() as tmp:
            skill_dir = self._create_skill(tmp, content="Small content")
            splitter = OpenCodeSkillSplitter(skill_dir, max_chars=50000)
            self.assertFalse(splitter.needs_splitting())
    def test_needs_splitting_large(self):
        """Content over max_chars triggers a split."""
        with tempfile.TemporaryDirectory() as tmp:
            skill_dir = self._create_skill(tmp, content="x" * 60000)
            splitter = OpenCodeSkillSplitter(skill_dir, max_chars=50000)
            self.assertTrue(splitter.needs_splitting())
    def test_extract_sections(self):
        """H2 headings partition the document into sections."""
        with tempfile.TemporaryDirectory() as tmp:
            skill_dir = self._create_skill(tmp)
            splitter = OpenCodeSkillSplitter(skill_dir)
            content = (skill_dir / "SKILL.md").read_text()
            sections = splitter._extract_sections(content)
            # Should have: overview + Section A + Section B + Section C
            self.assertGreaterEqual(len(sections), 3)
    def test_extract_sections_strips_frontmatter(self):
        """YAML frontmatter is not treated as document content."""
        with tempfile.TemporaryDirectory() as tmp:
            content = "---\nname: test\n---\n\n## Section A\n\nContent A"
            skill_dir = self._create_skill(tmp, content=content)
            splitter = OpenCodeSkillSplitter(skill_dir)
            sections = splitter._extract_sections(content)
            self.assertEqual(len(sections), 1)
            self.assertEqual(sections[0]["title"], "Section A")
    def test_split_creates_sub_skills(self):
        """Splitting yields a router plus sub-skill dirs, each with a SKILL.md."""
        with tempfile.TemporaryDirectory() as tmp:
            skill_dir = self._create_skill(tmp)
            # Tiny max_chars forces every section into its own sub-skill.
            splitter = OpenCodeSkillSplitter(skill_dir, max_chars=10)
            output_dir = Path(tmp) / "output"
            result = splitter.split(output_dir)
            # Should create router + sub-skills
            self.assertGreater(len(result), 1)
            # Each should have SKILL.md
            for d in result:
                self.assertTrue((d / "SKILL.md").exists())
    def test_split_router_has_frontmatter(self):
        """The router skill (first result) carries is-router frontmatter."""
        with tempfile.TemporaryDirectory() as tmp:
            skill_dir = self._create_skill(tmp)
            splitter = OpenCodeSkillSplitter(skill_dir, max_chars=10)
            output_dir = Path(tmp) / "output"
            result = splitter.split(output_dir)
            # Router is first
            router_content = (result[0] / "SKILL.md").read_text()
            self.assertTrue(router_content.startswith("---"))
            self.assertIn("is-router: true", router_content)
    def test_split_sub_skills_have_frontmatter(self):
        """Every sub-skill carries OpenCode frontmatter with a parent-skill link."""
        with tempfile.TemporaryDirectory() as tmp:
            skill_dir = self._create_skill(tmp)
            splitter = OpenCodeSkillSplitter(skill_dir, max_chars=10)
            output_dir = Path(tmp) / "output"
            result = splitter.split(output_dir)
            # Sub-skills (skip router at index 0)
            for d in result[1:]:
                content = (d / "SKILL.md").read_text()
                self.assertTrue(content.startswith("---"))
                self.assertIn("compatibility: opencode", content)
                self.assertIn("parent-skill:", content)
    def test_split_by_references(self):
        """Without H2 sections, the splitter falls back to per-reference sub-skills."""
        with tempfile.TemporaryDirectory() as tmp:
            # Skill with no H2 sections but multiple reference files
            skill_dir = self._create_skill(
                tmp,
                content="# Simple Skill\n\nJust one paragraph.",
                refs={
                    "getting-started.md": "# Getting Started\n\nContent here",
                    "api-reference.md": "# API Reference\n\nAPI docs",
                    "advanced-topics.md": "# Advanced Topics\n\nAdvanced content",
                },
            )
            splitter = OpenCodeSkillSplitter(skill_dir, max_chars=10)
            output_dir = Path(tmp) / "output"
            result = splitter.split(output_dir)
            # Should split by references: router + 3 sub-skills
            self.assertEqual(len(result), 4)
    def test_no_split_needed(self):
        """A small skill is returned unchanged as a single entry."""
        with tempfile.TemporaryDirectory() as tmp:
            skill_dir = self._create_skill(tmp, content="# Simple\n\nSmall content")
            splitter = OpenCodeSkillSplitter(skill_dir, max_chars=100000)
            output_dir = Path(tmp) / "output"
            result = splitter.split(output_dir)
            # Should return original skill dir (no split)
            self.assertEqual(len(result), 1)
    def test_group_small_sections(self):
        """Adjacent small sections are merged; large ones stay separate."""
        with tempfile.TemporaryDirectory() as tmp:
            skill_dir = self._create_skill(tmp)
            splitter = OpenCodeSkillSplitter(skill_dir, max_chars=100000)
            sections = [
                {"title": "a", "content": "short"},
                {"title": "b", "content": "also short"},
                {"title": "c", "content": "x" * 50000},
            ]
            grouped = splitter._group_small_sections(sections)
            # a and b should be merged, c stays separate
            self.assertEqual(len(grouped), 2)
class TestOpenCodeSkillConverter(unittest.TestCase):
    """Exercise the bi-directional skill format converter."""

    def test_import_opencode_skill(self):
        """Frontmatter fields, body content, and references survive the import."""
        with tempfile.TemporaryDirectory() as root:
            src = Path(root) / "my-skill"
            src.mkdir()
            (src / "SKILL.md").write_text(
                "---\nname: my-skill\ndescription: Test skill\nversion: 2.0.0\n---\n\n# Content\n\nHello"
            )
            refs_dir = src / "references"
            refs_dir.mkdir()
            (refs_dir / "guide.md").write_text("# Guide")
            imported = OpenCodeSkillConverter.import_opencode_skill(src)
            self.assertEqual(imported["name"], "my-skill")
            self.assertEqual(imported["description"], "Test skill")
            self.assertEqual(imported["version"], "2.0.0")
            self.assertIn("# Content", imported["content"])
            self.assertIn("guide.md", imported["references"])
            self.assertEqual(imported["source_format"], "opencode")

    def test_import_opencode_skill_no_frontmatter(self):
        """Without frontmatter, the skill name falls back to the directory name."""
        with tempfile.TemporaryDirectory() as root:
            src = Path(root) / "plain-skill"
            src.mkdir()
            (src / "SKILL.md").write_text("# Plain content\n\nNo frontmatter")
            imported = OpenCodeSkillConverter.import_opencode_skill(src)
            self.assertEqual(imported["name"], "plain-skill")
            self.assertIn("Plain content", imported["content"])

    def test_import_missing_skill(self):
        """Importing a nonexistent path raises FileNotFoundError."""
        with self.assertRaises(FileNotFoundError):
            OpenCodeSkillConverter.import_opencode_skill("/nonexistent/path")

    def test_export_to_claude(self):
        """OpenCode -> Claude export produces a directory containing SKILL.md."""
        with tempfile.TemporaryDirectory() as root:
            src = Path(root) / "source"
            src.mkdir()
            (src / "SKILL.md").write_text("---\nname: test\ndescription: Test\n---\n\n# Content")
            imported = OpenCodeSkillConverter.import_opencode_skill(src)
            exported = OpenCodeSkillConverter.export_to_target(
                imported, "claude", Path(root) / "output"
            )
            self.assertTrue(exported.exists())
            self.assertTrue((exported / "SKILL.md").exists())

    def test_export_to_markdown(self):
        """Plain-markdown export also yields a directory with SKILL.md."""
        with tempfile.TemporaryDirectory() as root:
            src = Path(root) / "source"
            src.mkdir()
            (src / "SKILL.md").write_text("# Simple content")
            imported = OpenCodeSkillConverter.import_opencode_skill(src)
            exported = OpenCodeSkillConverter.export_to_target(
                imported, "markdown", Path(root) / "output"
            )
            self.assertTrue(exported.exists())
            self.assertTrue((exported / "SKILL.md").exists())

    def test_roundtrip_opencode(self):
        """Test import from OpenCode -> export to OpenCode preserves content."""
        with tempfile.TemporaryDirectory() as root:
            origin = Path(root) / "original"
            origin.mkdir()
            (origin / "SKILL.md").write_text(
                "---\nname: roundtrip-test\ndescription: Roundtrip test\n---\n\n# Roundtrip Content\n\nImportant data here."
            )
            refs_dir = origin / "references"
            refs_dir.mkdir()
            (refs_dir / "ref.md").write_text("# Reference")
            imported = OpenCodeSkillConverter.import_opencode_skill(origin)
            exported = OpenCodeSkillConverter.export_to_target(
                imported, "opencode", Path(root) / "output"
            )
            round_tripped = (exported / "SKILL.md").read_text()
            self.assertIn("roundtrip-test", round_tripped)
            self.assertIn("compatibility: opencode", round_tripped)
class TestGitHubActionsTemplate(unittest.TestCase):
"""Test that GitHub Actions template exists and is valid YAML."""
def test_template_exists(self):
template = (
Path(__file__).parent.parent / "templates" / "github-actions" / "update-skills.yml"
)
self.assertTrue(template.exists(), f"Template not found at {template}")
def test_template_has_required_keys(self):
template = (
Path(__file__).parent.parent / "templates" / "github-actions" / "update-skills.yml"
)
content = template.read_text()
self.assertIn("name:", content)
self.assertIn("on:", content)
self.assertIn("jobs:", content)
self.assertIn("skill-seekers", content)
self.assertIn("schedule:", content)
self.assertIn("workflow_dispatch:", content)
def test_template_lists_all_targets(self):
template = (
Path(__file__).parent.parent / "templates" / "github-actions" / "update-skills.yml"
)
content = template.read_text()
for target in ["claude", "opencode", "gemini", "openai", "kimi", "deepseek", "qwen"]:
self.assertIn(target, content, f"Target '{target}' not found in template")
if __name__ == "__main__":
unittest.main()