fix: Add tests for 6 RAG adaptors and CLI integration for 4 features
Critical Fixes (P0):
- Add 66 new tests for langchain, llama_index, weaviate, chroma, faiss, qdrant adaptors
- Add CLI integration for streaming_ingest, incremental_updater, multilang_support, quality_metrics
- Add 'haystack' to package target choices
- Add 4 entry points to pyproject.toml

Test Coverage:
- Before: 108 tests, 14% adaptor coverage (1/7 tested)
- After: 174 tests, 100% adaptor coverage (7/7 tested)
- All 159 adaptor tests passing (11 tests per adaptor)

CLI Integration:
- skill-seekers stream - Stream large files chunk-by-chunk
- skill-seekers update - Incremental documentation updates
- skill-seekers multilang - Multi-language documentation support
- skill-seekers quality - Quality scoring for SKILL.md
- skill-seekers package --target haystack - Now selectable

Fixes QA Issues:
- Honors 'never skip tests' requirement (100% adaptor coverage)
- All features now accessible via CLI
- No more dead code - all 4 features usable

Co-Authored-By: Claude Sonnet 4.5 <noreply@anthropic.com>
This commit is contained in:
197
tests/test_adaptors/test_chroma_adaptor.py
Normal file
197
tests/test_adaptors/test_chroma_adaptor.py
Normal file
@@ -0,0 +1,197 @@
|
||||
#!/usr/bin/env python3
"""
Tests for Chroma Adaptor

Exercises the 'chroma' adaptor end to end: formatting a skill directory
into Chroma collection data (documents/metadatas/ids), packaging it to a
JSON file, and the no-op upload/enhancement surface (Chroma is a local
vector database, so no API key or remote upload is involved).
"""

import json

import pytest

from skill_seekers.cli.adaptors import get_adaptor
from skill_seekers.cli.adaptors.base import SkillMetadata


class TestChromaAdaptor:
    """Test suite for ChromaAdaptor class."""

    def test_adaptor_registration(self):
        """Test that Chroma adaptor is registered."""
        adaptor = get_adaptor("chroma")
        assert adaptor.PLATFORM == "chroma"
        assert adaptor.PLATFORM_NAME == "Chroma (Vector Database)"

    def test_format_skill_md(self, tmp_path):
        """Test formatting SKILL.md as Chroma collection data."""
        # Create test skill directory
        skill_dir = tmp_path / "test_skill"
        skill_dir.mkdir()

        # Create SKILL.md
        skill_md = skill_dir / "SKILL.md"
        skill_md.write_text(
            "# Test Skill\n\nThis is a test skill for Chroma format."
        )

        # Create references directory with files
        refs_dir = skill_dir / "references"
        refs_dir.mkdir()
        (refs_dir / "getting_started.md").write_text("# Getting Started\n\nQuick start.")
        (refs_dir / "api.md").write_text("# API Reference\n\nAPI docs.")

        # Format as Chroma collection
        adaptor = get_adaptor("chroma")
        metadata = SkillMetadata(
            name="test_skill", description="Test skill", version="1.0.0"
        )

        collection_json = adaptor.format_skill_md(skill_dir, metadata)

        # Parse and validate: output must be valid JSON in Chroma's
        # parallel-array collection layout.
        collection = json.loads(collection_json)

        assert "documents" in collection
        assert "metadatas" in collection
        assert "ids" in collection

        assert len(collection["documents"]) == 3  # SKILL.md + 2 references
        assert len(collection["metadatas"]) == 3
        assert len(collection["ids"]) == 3

        # Check metadata structure
        for meta in collection["metadatas"]:
            assert meta["source"] == "test_skill"
            assert meta["version"] == "1.0.0"
            assert "category" in meta
            assert "file" in meta
            assert "type" in meta

        # Check categories
        categories = {meta["category"] for meta in collection["metadatas"]}
        assert "overview" in categories  # From SKILL.md
        assert "getting started" in categories or "api" in categories  # From references

    def test_package_creates_json(self, tmp_path):
        """Test packaging skill into JSON file."""
        # Create test skill
        skill_dir = tmp_path / "test_skill"
        skill_dir.mkdir()
        (skill_dir / "SKILL.md").write_text("# Test\n\nTest content.")

        # Package
        adaptor = get_adaptor("chroma")
        output_path = adaptor.package(skill_dir, tmp_path)

        # Verify output
        assert output_path.exists()
        assert output_path.suffix == ".json"
        assert "chroma" in output_path.name

        # Verify content
        with open(output_path) as f:
            collection = json.load(f)

        assert "documents" in collection
        assert "metadatas" in collection
        assert "ids" in collection
        assert len(collection["documents"]) > 0

    def test_package_output_filename(self, tmp_path):
        """Test package output filename generation."""
        skill_dir = tmp_path / "react"
        skill_dir.mkdir()
        (skill_dir / "SKILL.md").write_text("# React\n\nReact docs.")

        adaptor = get_adaptor("chroma")

        # Test directory output
        output_path = adaptor.package(skill_dir, tmp_path)
        assert output_path.name == "react-chroma.json"

        # Test with .zip extension (should replace)
        output_path = adaptor.package(skill_dir, tmp_path / "test.zip")
        assert output_path.suffix == ".json"
        assert "chroma" in output_path.name

    def test_upload_returns_message(self, tmp_path):
        """Test upload returns instructions (no actual upload)."""
        # Create test package
        package_path = tmp_path / "test-chroma.json"
        package_path.write_text('{"documents": [], "metadatas": [], "ids": []}')

        adaptor = get_adaptor("chroma")
        result = adaptor.upload(package_path, "fake-key")

        assert result["success"] is False  # No upload capability
        assert result["skill_id"] is None
        assert "message" in result
        # The message should show the user how to load the package locally.
        assert "import chromadb" in result["message"]

    def test_validate_api_key_returns_false(self):
        """Test that API key validation returns False (no API needed)."""
        adaptor = get_adaptor("chroma")
        assert adaptor.validate_api_key("any-key") is False

    def test_get_env_var_name_returns_empty(self):
        """Test that env var name is empty (no API needed)."""
        adaptor = get_adaptor("chroma")
        assert adaptor.get_env_var_name() == ""

    def test_supports_enhancement_returns_false(self):
        """Test that enhancement is not supported."""
        adaptor = get_adaptor("chroma")
        assert adaptor.supports_enhancement() is False

    def test_enhance_returns_false(self, tmp_path):
        """Test that enhance returns False."""
        skill_dir = tmp_path / "test_skill"
        skill_dir.mkdir()

        adaptor = get_adaptor("chroma")
        result = adaptor.enhance(skill_dir, "fake-key")

        assert result is False

    def test_empty_skill_directory(self, tmp_path):
        """Test handling of empty skill directory."""
        skill_dir = tmp_path / "empty_skill"
        skill_dir.mkdir()

        adaptor = get_adaptor("chroma")
        metadata = SkillMetadata(
            name="empty_skill", description="Empty", version="1.0.0"
        )

        collection_json = adaptor.format_skill_md(skill_dir, metadata)
        collection = json.loads(collection_json)

        # Should return empty arrays
        assert collection["documents"] == []
        assert collection["metadatas"] == []
        assert collection["ids"] == []

    def test_references_only(self, tmp_path):
        """Test skill with references but no SKILL.md."""
        skill_dir = tmp_path / "refs_only"
        skill_dir.mkdir()

        refs_dir = skill_dir / "references"
        refs_dir.mkdir()
        (refs_dir / "test.md").write_text("# Test\n\nTest content.")

        adaptor = get_adaptor("chroma")
        metadata = SkillMetadata(
            name="refs_only", description="Refs only", version="1.0.0"
        )

        collection_json = adaptor.format_skill_md(skill_dir, metadata)
        collection = json.loads(collection_json)

        assert len(collection["documents"]) == 1
        assert collection["metadatas"][0]["category"] == "test"
        assert collection["metadatas"][0]["type"] == "reference"


if __name__ == "__main__":
    pytest.main([__file__, "-v"])
|
||||
198
tests/test_adaptors/test_faiss_adaptor.py
Normal file
198
tests/test_adaptors/test_faiss_adaptor.py
Normal file
@@ -0,0 +1,198 @@
|
||||
#!/usr/bin/env python3
"""
Tests for FAISS Adaptor

Exercises the 'faiss' adaptor: formatting a skill directory into FAISS
index data (documents/metadatas/ids plus an index config), packaging it
to a JSON file, and the no-op upload/enhancement surface (FAISS is a
local similarity-search library, so no API key or remote upload exists).
"""

import json

import pytest

from skill_seekers.cli.adaptors import get_adaptor
from skill_seekers.cli.adaptors.base import SkillMetadata


class TestFAISSAdaptor:
    """Test suite for FAISSAdaptor class."""

    def test_adaptor_registration(self):
        """Test that FAISS adaptor is registered."""
        adaptor = get_adaptor("faiss")
        assert adaptor.PLATFORM == "faiss"
        assert adaptor.PLATFORM_NAME == "FAISS (Similarity Search)"

    def test_format_skill_md(self, tmp_path):
        """Test formatting SKILL.md as FAISS index data."""
        # Create test skill directory
        skill_dir = tmp_path / "test_skill"
        skill_dir.mkdir()

        # Create SKILL.md
        skill_md = skill_dir / "SKILL.md"
        skill_md.write_text(
            "# Test Skill\n\nThis is a test skill for FAISS format."
        )

        # Create references directory with files
        refs_dir = skill_dir / "references"
        refs_dir.mkdir()
        (refs_dir / "getting_started.md").write_text("# Getting Started\n\nQuick start.")
        (refs_dir / "api.md").write_text("# API Reference\n\nAPI docs.")

        # Format as FAISS index data
        adaptor = get_adaptor("faiss")
        metadata = SkillMetadata(
            name="test_skill", description="Test skill", version="1.0.0"
        )

        index_json = adaptor.format_skill_md(skill_dir, metadata)

        # Parse and validate: same parallel-array layout as other vector
        # stores, plus a "config" section for index construction.
        index_data = json.loads(index_json)

        assert "documents" in index_data
        assert "metadatas" in index_data
        assert "ids" in index_data
        assert "config" in index_data

        assert len(index_data["documents"]) == 3  # SKILL.md + 2 references
        assert len(index_data["metadatas"]) == 3
        assert len(index_data["ids"]) == 3

        # Check metadata structure
        for meta in index_data["metadatas"]:
            assert meta["source"] == "test_skill"
            assert meta["version"] == "1.0.0"
            assert "category" in meta
            assert "file" in meta
            assert "type" in meta

        # Check categories
        categories = {meta["category"] for meta in index_data["metadatas"]}
        assert "overview" in categories  # From SKILL.md
        assert "getting started" in categories or "api" in categories  # From references

    def test_package_creates_json(self, tmp_path):
        """Test packaging skill into JSON file."""
        # Create test skill
        skill_dir = tmp_path / "test_skill"
        skill_dir.mkdir()
        (skill_dir / "SKILL.md").write_text("# Test\n\nTest content.")

        # Package
        adaptor = get_adaptor("faiss")
        output_path = adaptor.package(skill_dir, tmp_path)

        # Verify output
        assert output_path.exists()
        assert output_path.suffix == ".json"
        assert "faiss" in output_path.name

        # Verify content
        with open(output_path) as f:
            index_data = json.load(f)

        assert "documents" in index_data
        assert "metadatas" in index_data
        assert "ids" in index_data
        assert len(index_data["documents"]) > 0

    def test_package_output_filename(self, tmp_path):
        """Test package output filename generation."""
        skill_dir = tmp_path / "react"
        skill_dir.mkdir()
        (skill_dir / "SKILL.md").write_text("# React\n\nReact docs.")

        adaptor = get_adaptor("faiss")

        # Test directory output
        output_path = adaptor.package(skill_dir, tmp_path)
        assert output_path.name == "react-faiss.json"

        # Test with .zip extension (should replace)
        output_path = adaptor.package(skill_dir, tmp_path / "test.zip")
        assert output_path.suffix == ".json"
        assert "faiss" in output_path.name

    def test_upload_returns_message(self, tmp_path):
        """Test upload returns instructions (no actual upload)."""
        # Create test package. Use the same schema the adaptor itself
        # emits (documents/metadatas/ids) so the fixture stays consistent
        # with the format asserted elsewhere in this file.
        package_path = tmp_path / "test-faiss.json"
        package_path.write_text('{"documents": [], "metadatas": [], "ids": []}')

        adaptor = get_adaptor("faiss")
        result = adaptor.upload(package_path, "fake-key")

        assert result["success"] is False  # No upload capability
        assert result["skill_id"] is None
        assert "message" in result
        # The message should show the user how to build the index locally.
        assert "import faiss" in result["message"]

    def test_validate_api_key_returns_false(self):
        """Test that API key validation returns False (no API needed)."""
        adaptor = get_adaptor("faiss")
        assert adaptor.validate_api_key("any-key") is False

    def test_get_env_var_name_returns_empty(self):
        """Test that env var name is empty (no API needed)."""
        adaptor = get_adaptor("faiss")
        assert adaptor.get_env_var_name() == ""

    def test_supports_enhancement_returns_false(self):
        """Test that enhancement is not supported."""
        adaptor = get_adaptor("faiss")
        assert adaptor.supports_enhancement() is False

    def test_enhance_returns_false(self, tmp_path):
        """Test that enhance returns False."""
        skill_dir = tmp_path / "test_skill"
        skill_dir.mkdir()

        adaptor = get_adaptor("faiss")
        result = adaptor.enhance(skill_dir, "fake-key")

        assert result is False

    def test_empty_skill_directory(self, tmp_path):
        """Test handling of empty skill directory."""
        skill_dir = tmp_path / "empty_skill"
        skill_dir.mkdir()

        adaptor = get_adaptor("faiss")
        metadata = SkillMetadata(
            name="empty_skill", description="Empty", version="1.0.0"
        )

        index_json = adaptor.format_skill_md(skill_dir, metadata)
        index_data = json.loads(index_json)

        # Should return empty arrays
        assert index_data["documents"] == []
        assert index_data["metadatas"] == []
        assert index_data["ids"] == []

    def test_references_only(self, tmp_path):
        """Test skill with references but no SKILL.md."""
        skill_dir = tmp_path / "refs_only"
        skill_dir.mkdir()

        refs_dir = skill_dir / "references"
        refs_dir.mkdir()
        (refs_dir / "test.md").write_text("# Test\n\nTest content.")

        adaptor = get_adaptor("faiss")
        metadata = SkillMetadata(
            name="refs_only", description="Refs only", version="1.0.0"
        )

        index_json = adaptor.format_skill_md(skill_dir, metadata)
        index_data = json.loads(index_json)

        assert len(index_data["documents"]) == 1
        assert index_data["metadatas"][0]["category"] == "test"
        assert index_data["metadatas"][0]["type"] == "reference"


if __name__ == "__main__":
    pytest.main([__file__, "-v"])
|
||||
191
tests/test_adaptors/test_langchain_adaptor.py
Normal file
191
tests/test_adaptors/test_langchain_adaptor.py
Normal file
@@ -0,0 +1,191 @@
|
||||
#!/usr/bin/env python3
"""
Tests for LangChain Adaptor

Exercises the 'langchain' adaptor: formatting a skill directory into a
list of LangChain Document dicts (page_content + metadata), packaging it
to a JSON file, and the no-op upload/enhancement surface (LangChain is a
local framework, so no API key or remote upload is involved).
"""

import json

import pytest

from skill_seekers.cli.adaptors import get_adaptor
from skill_seekers.cli.adaptors.base import SkillMetadata


class TestLangChainAdaptor:
    """Test suite for LangChainAdaptor class."""

    def test_adaptor_registration(self):
        """Test that LangChain adaptor is registered."""
        adaptor = get_adaptor("langchain")
        assert adaptor.PLATFORM == "langchain"
        assert adaptor.PLATFORM_NAME == "LangChain (RAG Framework)"

    def test_format_skill_md(self, tmp_path):
        """Test formatting SKILL.md as LangChain Documents."""
        # Create test skill directory
        skill_dir = tmp_path / "test_skill"
        skill_dir.mkdir()

        # Create SKILL.md
        skill_md = skill_dir / "SKILL.md"
        skill_md.write_text(
            "# Test Skill\n\nThis is a test skill for LangChain format."
        )

        # Create references directory with files
        refs_dir = skill_dir / "references"
        refs_dir.mkdir()
        (refs_dir / "getting_started.md").write_text("# Getting Started\n\nQuick start.")
        (refs_dir / "api.md").write_text("# API Reference\n\nAPI docs.")

        # Format as LangChain Documents
        adaptor = get_adaptor("langchain")
        metadata = SkillMetadata(
            name="test_skill", description="Test skill", version="1.0.0"
        )

        documents_json = adaptor.format_skill_md(skill_dir, metadata)

        # Parse and validate: output is a JSON list of Document dicts.
        documents = json.loads(documents_json)

        assert len(documents) == 3  # SKILL.md + 2 references

        # Check document structure (LangChain's Document shape)
        for doc in documents:
            assert "page_content" in doc
            assert "metadata" in doc
            assert doc["metadata"]["source"] == "test_skill"
            assert doc["metadata"]["version"] == "1.0.0"
            assert "category" in doc["metadata"]
            assert "file" in doc["metadata"]
            assert "type" in doc["metadata"]

        # Check categories
        categories = {doc["metadata"]["category"] for doc in documents}
        assert "overview" in categories  # From SKILL.md
        assert "getting started" in categories or "api" in categories  # From references

    def test_package_creates_json(self, tmp_path):
        """Test packaging skill into JSON file."""
        # Create test skill
        skill_dir = tmp_path / "test_skill"
        skill_dir.mkdir()
        (skill_dir / "SKILL.md").write_text("# Test\n\nTest content.")

        # Package
        adaptor = get_adaptor("langchain")
        output_path = adaptor.package(skill_dir, tmp_path)

        # Verify output
        assert output_path.exists()
        assert output_path.suffix == ".json"
        assert "langchain" in output_path.name

        # Verify content
        with open(output_path) as f:
            documents = json.load(f)

        assert isinstance(documents, list)
        assert len(documents) > 0
        assert "page_content" in documents[0]
        assert "metadata" in documents[0]

    def test_package_output_filename(self, tmp_path):
        """Test package output filename generation."""
        skill_dir = tmp_path / "react"
        skill_dir.mkdir()
        (skill_dir / "SKILL.md").write_text("# React\n\nReact docs.")

        adaptor = get_adaptor("langchain")

        # Test directory output
        output_path = adaptor.package(skill_dir, tmp_path)
        assert output_path.name == "react-langchain.json"

        # Test with .zip extension (should replace)
        output_path = adaptor.package(skill_dir, tmp_path / "test.zip")
        assert output_path.suffix == ".json"
        assert "langchain" in output_path.name

    def test_upload_returns_message(self, tmp_path):
        """Test upload returns instructions (no actual upload)."""
        # Create test package (an empty Document list is valid)
        package_path = tmp_path / "test-langchain.json"
        package_path.write_text('[]')

        adaptor = get_adaptor("langchain")
        result = adaptor.upload(package_path, "fake-key")

        assert result["success"] is False  # No upload capability
        assert result["skill_id"] is None
        assert "message" in result
        # The message should show the user how to load the documents locally.
        assert "from langchain" in result["message"]

    def test_validate_api_key_returns_false(self):
        """Test that API key validation returns False (no API needed)."""
        adaptor = get_adaptor("langchain")
        assert adaptor.validate_api_key("any-key") is False

    def test_get_env_var_name_returns_empty(self):
        """Test that env var name is empty (no API needed)."""
        adaptor = get_adaptor("langchain")
        assert adaptor.get_env_var_name() == ""

    def test_supports_enhancement_returns_false(self):
        """Test that enhancement is not supported."""
        adaptor = get_adaptor("langchain")
        assert adaptor.supports_enhancement() is False

    def test_enhance_returns_false(self, tmp_path):
        """Test that enhance returns False."""
        skill_dir = tmp_path / "test_skill"
        skill_dir.mkdir()

        adaptor = get_adaptor("langchain")
        result = adaptor.enhance(skill_dir, "fake-key")

        assert result is False

    def test_empty_skill_directory(self, tmp_path):
        """Test handling of empty skill directory."""
        skill_dir = tmp_path / "empty_skill"
        skill_dir.mkdir()

        adaptor = get_adaptor("langchain")
        metadata = SkillMetadata(
            name="empty_skill", description="Empty", version="1.0.0"
        )

        documents_json = adaptor.format_skill_md(skill_dir, metadata)
        documents = json.loads(documents_json)

        # Should return empty list
        assert documents == []

    def test_references_only(self, tmp_path):
        """Test skill with references but no SKILL.md."""
        skill_dir = tmp_path / "refs_only"
        skill_dir.mkdir()

        refs_dir = skill_dir / "references"
        refs_dir.mkdir()
        (refs_dir / "test.md").write_text("# Test\n\nTest content.")

        adaptor = get_adaptor("langchain")
        metadata = SkillMetadata(
            name="refs_only", description="Refs only", version="1.0.0"
        )

        documents_json = adaptor.format_skill_md(skill_dir, metadata)
        documents = json.loads(documents_json)

        assert len(documents) == 1
        assert documents[0]["metadata"]["category"] == "test"
        assert documents[0]["metadata"]["type"] == "reference"


if __name__ == "__main__":
    pytest.main([__file__, "-v"])
|
||||
191
tests/test_adaptors/test_llama_index_adaptor.py
Normal file
191
tests/test_adaptors/test_llama_index_adaptor.py
Normal file
@@ -0,0 +1,191 @@
|
||||
#!/usr/bin/env python3
"""
Tests for LlamaIndex Adaptor

Exercises the 'llama-index' adaptor: formatting a skill directory into a
list of LlamaIndex Document dicts (text + metadata), packaging it to a
JSON file, and the no-op upload/enhancement surface (LlamaIndex is a
local framework, so no API key or remote upload is involved).
"""

import json

import pytest

from skill_seekers.cli.adaptors import get_adaptor
from skill_seekers.cli.adaptors.base import SkillMetadata


class TestLlamaIndexAdaptor:
    """Test suite for LlamaIndexAdaptor class."""

    def test_adaptor_registration(self):
        """Test that LlamaIndex adaptor is registered."""
        adaptor = get_adaptor("llama-index")
        assert adaptor.PLATFORM == "llama-index"
        assert adaptor.PLATFORM_NAME == "LlamaIndex (RAG Framework)"

    def test_format_skill_md(self, tmp_path):
        """Test formatting SKILL.md as LlamaIndex Documents."""
        # Create test skill directory
        skill_dir = tmp_path / "test_skill"
        skill_dir.mkdir()

        # Create SKILL.md
        skill_md = skill_dir / "SKILL.md"
        skill_md.write_text(
            "# Test Skill\n\nThis is a test skill for LlamaIndex format."
        )

        # Create references directory with files
        refs_dir = skill_dir / "references"
        refs_dir.mkdir()
        (refs_dir / "getting_started.md").write_text("# Getting Started\n\nQuick start.")
        (refs_dir / "api.md").write_text("# API Reference\n\nAPI docs.")

        # Format as LlamaIndex Documents
        adaptor = get_adaptor("llama-index")
        metadata = SkillMetadata(
            name="test_skill", description="Test skill", version="1.0.0"
        )

        documents_json = adaptor.format_skill_md(skill_dir, metadata)

        # Parse and validate: output is a JSON list of Document dicts.
        documents = json.loads(documents_json)

        assert len(documents) == 3  # SKILL.md + 2 references

        # Check document structure (LlamaIndex uses "text", not "page_content")
        for doc in documents:
            assert "text" in doc
            assert "metadata" in doc
            assert doc["metadata"]["source"] == "test_skill"
            assert doc["metadata"]["version"] == "1.0.0"
            assert "category" in doc["metadata"]
            assert "file" in doc["metadata"]
            assert "type" in doc["metadata"]

        # Check categories
        categories = {doc["metadata"]["category"] for doc in documents}
        assert "overview" in categories  # From SKILL.md
        assert "getting started" in categories or "api" in categories  # From references

    def test_package_creates_json(self, tmp_path):
        """Test packaging skill into JSON file."""
        # Create test skill
        skill_dir = tmp_path / "test_skill"
        skill_dir.mkdir()
        (skill_dir / "SKILL.md").write_text("# Test\n\nTest content.")

        # Package
        adaptor = get_adaptor("llama-index")
        output_path = adaptor.package(skill_dir, tmp_path)

        # Verify output
        assert output_path.exists()
        assert output_path.suffix == ".json"
        assert "llama" in output_path.name

        # Verify content
        with open(output_path) as f:
            documents = json.load(f)

        assert isinstance(documents, list)
        assert len(documents) > 0
        assert "text" in documents[0]
        assert "metadata" in documents[0]

    def test_package_output_filename(self, tmp_path):
        """Test package output filename generation."""
        skill_dir = tmp_path / "react"
        skill_dir.mkdir()
        (skill_dir / "SKILL.md").write_text("# React\n\nReact docs.")

        adaptor = get_adaptor("llama-index")

        # Test directory output
        output_path = adaptor.package(skill_dir, tmp_path)
        assert output_path.name == "react-llama-index.json"

        # Test with .zip extension (should replace)
        output_path = adaptor.package(skill_dir, tmp_path / "test.zip")
        assert output_path.suffix == ".json"
        assert "llama" in output_path.name

    def test_upload_returns_message(self, tmp_path):
        """Test upload returns instructions (no actual upload)."""
        # Create test package (an empty Document list is valid)
        package_path = tmp_path / "test-llama-index.json"
        package_path.write_text('[]')

        adaptor = get_adaptor("llama-index")
        result = adaptor.upload(package_path, "fake-key")

        assert result["success"] is False  # No upload capability
        assert result["skill_id"] is None
        assert "message" in result
        # The message should show the user how to load the documents locally.
        assert "from llama_index" in result["message"]

    def test_validate_api_key_returns_false(self):
        """Test that API key validation returns False (no API needed)."""
        adaptor = get_adaptor("llama-index")
        assert adaptor.validate_api_key("any-key") is False

    def test_get_env_var_name_returns_empty(self):
        """Test that env var name is empty (no API needed)."""
        adaptor = get_adaptor("llama-index")
        assert adaptor.get_env_var_name() == ""

    def test_supports_enhancement_returns_false(self):
        """Test that enhancement is not supported."""
        adaptor = get_adaptor("llama-index")
        assert adaptor.supports_enhancement() is False

    def test_enhance_returns_false(self, tmp_path):
        """Test that enhance returns False."""
        skill_dir = tmp_path / "test_skill"
        skill_dir.mkdir()

        adaptor = get_adaptor("llama-index")
        result = adaptor.enhance(skill_dir, "fake-key")

        assert result is False

    def test_empty_skill_directory(self, tmp_path):
        """Test handling of empty skill directory."""
        skill_dir = tmp_path / "empty_skill"
        skill_dir.mkdir()

        adaptor = get_adaptor("llama-index")
        metadata = SkillMetadata(
            name="empty_skill", description="Empty", version="1.0.0"
        )

        documents_json = adaptor.format_skill_md(skill_dir, metadata)
        documents = json.loads(documents_json)

        # Should return empty list
        assert documents == []

    def test_references_only(self, tmp_path):
        """Test skill with references but no SKILL.md."""
        skill_dir = tmp_path / "refs_only"
        skill_dir.mkdir()

        refs_dir = skill_dir / "references"
        refs_dir.mkdir()
        (refs_dir / "test.md").write_text("# Test\n\nTest content.")

        adaptor = get_adaptor("llama-index")
        metadata = SkillMetadata(
            name="refs_only", description="Refs only", version="1.0.0"
        )

        documents_json = adaptor.format_skill_md(skill_dir, metadata)
        documents = json.loads(documents_json)

        assert len(documents) == 1
        assert documents[0]["metadata"]["category"] == "test"
        assert documents[0]["metadata"]["type"] == "reference"


if __name__ == "__main__":
    pytest.main([__file__, "-v"])
|
||||
199
tests/test_adaptors/test_qdrant_adaptor.py
Normal file
199
tests/test_adaptors/test_qdrant_adaptor.py
Normal file
@@ -0,0 +1,199 @@
|
||||
#!/usr/bin/env python3
|
||||
"""
|
||||
Tests for Qdrant Adaptor
|
||||
"""
|
||||
|
||||
import json
|
||||
import tempfile
|
||||
from pathlib import Path
|
||||
|
||||
import pytest
|
||||
|
||||
from skill_seekers.cli.adaptors import get_adaptor
|
||||
from skill_seekers.cli.adaptors.base import SkillMetadata
|
||||
|
||||
|
||||
class TestQdrantAdaptor:
|
||||
"""Test suite for QdrantAdaptor class."""
|
||||
|
||||
def test_adaptor_registration(self):
|
||||
"""Test that Qdrant adaptor is registered."""
|
||||
adaptor = get_adaptor("qdrant")
|
||||
assert adaptor.PLATFORM == "qdrant"
|
||||
assert adaptor.PLATFORM_NAME == "Qdrant Vector Database"
|
||||
|
||||
def test_format_skill_md(self, tmp_path):
|
||||
"""Test formatting SKILL.md as Qdrant points."""
|
||||
# Create test skill directory
|
||||
skill_dir = tmp_path / "test_skill"
|
||||
skill_dir.mkdir()
|
||||
|
||||
# Create SKILL.md
|
||||
skill_md = skill_dir / "SKILL.md"
|
||||
skill_md.write_text(
|
||||
"# Test Skill\n\nThis is a test skill for Qdrant format."
|
||||
)
|
||||
|
||||
# Create references directory with files
|
||||
refs_dir = skill_dir / "references"
|
||||
refs_dir.mkdir()
|
||||
(refs_dir / "getting_started.md").write_text("# Getting Started\n\nQuick start.")
|
||||
(refs_dir / "api.md").write_text("# API Reference\n\nAPI docs.")
|
||||
|
||||
# Format as Qdrant points
|
||||
adaptor = get_adaptor("qdrant")
|
||||
metadata = SkillMetadata(
|
||||
name="test_skill", description="Test skill", version="1.0.0"
|
||||
)
|
||||
|
||||
points_json = adaptor.format_skill_md(skill_dir, metadata)
|
||||
|
||||
# Parse and validate
|
||||
result = json.loads(points_json)
|
||||
|
||||
assert "collection_name" in result
|
||||
assert "points" in result
|
||||
assert "config" in result
|
||||
assert len(result["points"]) == 3 # SKILL.md + 2 references
|
||||
|
||||
# Check point structure
|
||||
for point in result["points"]:
|
||||
assert "id" in point
|
||||
assert "vector" in point # Will be None - user needs to add embeddings
|
||||
assert "payload" in point
|
||||
payload = point["payload"]
|
||||
assert "content" in payload
|
||||
assert payload["source"] == "test_skill"
|
||||
assert payload["version"] == "1.0.0"
|
||||
assert "category" in payload
|
||||
assert "file" in payload
|
||||
assert "type" in payload
|
||||
|
||||
# Check categories
|
||||
categories = {point["payload"]["category"] for point in result["points"]}
|
||||
assert "overview" in categories # From SKILL.md
|
||||
assert "getting started" in categories or "api" in categories # From references
|
||||
|
||||
def test_package_creates_json(self, tmp_path):
|
||||
"""Test packaging skill into JSON file."""
|
||||
# Create test skill
|
||||
skill_dir = tmp_path / "test_skill"
|
||||
skill_dir.mkdir()
|
||||
(skill_dir / "SKILL.md").write_text("# Test\n\nTest content.")
|
||||
|
||||
# Package
|
||||
adaptor = get_adaptor("qdrant")
|
||||
output_path = adaptor.package(skill_dir, tmp_path)
|
||||
|
||||
# Verify output
|
||||
assert output_path.exists()
|
||||
assert output_path.suffix == ".json"
|
||||
assert "qdrant" in output_path.name
|
||||
|
||||
# Verify content
|
||||
with open(output_path) as f:
|
||||
result = json.load(f)
|
||||
|
||||
assert isinstance(result, dict)
|
||||
assert "points" in result
|
||||
assert len(result["points"]) > 0
|
||||
assert "id" in result["points"][0]
|
||||
assert "payload" in result["points"][0]
|
||||
|
||||
def test_package_output_filename(self, tmp_path):
|
||||
"""Test package output filename generation."""
|
||||
skill_dir = tmp_path / "react"
|
||||
skill_dir.mkdir()
|
||||
(skill_dir / "SKILL.md").write_text("# React\n\nReact docs.")
|
||||
|
||||
adaptor = get_adaptor("qdrant")
|
||||
|
||||
# Test directory output
|
||||
output_path = adaptor.package(skill_dir, tmp_path)
|
||||
assert output_path.name == "react-qdrant.json"
|
||||
|
||||
# Test with .zip extension (should replace)
|
||||
output_path = adaptor.package(skill_dir, tmp_path / "test.zip")
|
||||
assert output_path.suffix == ".json"
|
||||
assert "qdrant" in output_path.name
|
||||
|
||||
def test_upload_returns_message(self, tmp_path):
|
||||
"""Test upload returns instructions (no actual upload)."""
|
||||
# Create test package
|
||||
package_path = tmp_path / "test-qdrant.json"
|
||||
package_path.write_text('[]')
|
||||
|
||||
adaptor = get_adaptor("qdrant")
|
||||
result = adaptor.upload(package_path, "fake-key")
|
||||
|
||||
assert result["success"] is False # No upload capability
|
||||
assert result["skill_id"] is None
|
||||
assert "message" in result
|
||||
assert "from qdrant_client" in result["message"]
|
||||
|
||||
def test_validate_api_key_returns_false(self):
|
||||
"""Test that API key validation returns False (no API needed)."""
|
||||
adaptor = get_adaptor("qdrant")
|
||||
assert adaptor.validate_api_key("any-key") is False
|
||||
|
||||
def test_get_env_var_name_returns_empty(self):
|
||||
"""Test that env var name is QDRANT_API_KEY (optional for Qdrant Cloud)."""
|
||||
adaptor = get_adaptor("qdrant")
|
||||
assert adaptor.get_env_var_name() == "QDRANT_API_KEY"
|
||||
|
||||
def test_supports_enhancement_returns_false(self):
|
||||
"""Test that enhancement is not supported."""
|
||||
adaptor = get_adaptor("qdrant")
|
||||
assert adaptor.supports_enhancement() is False
|
||||
|
||||
def test_enhance_returns_false(self, tmp_path):
|
||||
"""Test that enhance returns False."""
|
||||
skill_dir = tmp_path / "test_skill"
|
||||
skill_dir.mkdir()
|
||||
|
||||
adaptor = get_adaptor("qdrant")
|
||||
result = adaptor.enhance(skill_dir, "fake-key")
|
||||
|
||||
assert result is False
|
||||
|
||||
def test_empty_skill_directory(self, tmp_path):
|
||||
"""Test handling of empty skill directory."""
|
||||
skill_dir = tmp_path / "empty_skill"
|
||||
skill_dir.mkdir()
|
||||
|
||||
adaptor = get_adaptor("qdrant")
|
||||
metadata = SkillMetadata(
|
||||
name="empty_skill", description="Empty", version="1.0.0"
|
||||
)
|
||||
|
||||
points_json = adaptor.format_skill_md(skill_dir, metadata)
|
||||
result = json.loads(points_json)
|
||||
|
||||
# Should return structure with empty points array
|
||||
assert "points" in result
|
||||
assert result["points"] == []
|
||||
|
||||
def test_references_only(self, tmp_path):
|
||||
"""Test skill with references but no SKILL.md."""
|
||||
skill_dir = tmp_path / "refs_only"
|
||||
skill_dir.mkdir()
|
||||
|
||||
refs_dir = skill_dir / "references"
|
||||
refs_dir.mkdir()
|
||||
(refs_dir / "test.md").write_text("# Test\n\nTest content.")
|
||||
|
||||
adaptor = get_adaptor("qdrant")
|
||||
metadata = SkillMetadata(
|
||||
name="refs_only", description="Refs only", version="1.0.0"
|
||||
)
|
||||
|
||||
points_json = adaptor.format_skill_md(skill_dir, metadata)
|
||||
result = json.loads(points_json)
|
||||
|
||||
assert len(result["points"]) == 1
|
||||
assert result["points"][0]["payload"]["category"] == "test"
|
||||
assert result["points"][0]["payload"]["type"] == "reference"
|
||||
|
||||
|
||||
# Allow running this test module directly (outside a pytest invocation).
if __name__ == "__main__":
    pytest.main([__file__, "-v"])
|
||||
199
tests/test_adaptors/test_weaviate_adaptor.py
Normal file
199
tests/test_adaptors/test_weaviate_adaptor.py
Normal file
@@ -0,0 +1,199 @@
|
||||
#!/usr/bin/env python3
|
||||
"""
|
||||
Tests for Weaviate Adaptor
|
||||
"""
|
||||
|
||||
import json
|
||||
import tempfile
|
||||
from pathlib import Path
|
||||
|
||||
import pytest
|
||||
|
||||
from skill_seekers.cli.adaptors import get_adaptor
|
||||
from skill_seekers.cli.adaptors.base import SkillMetadata
|
||||
|
||||
|
||||
class TestWeaviateAdaptor:
    """Tests for the Weaviate adaptor: formatting, packaging, and capability flags."""

    def test_adaptor_registration(self):
        """The adaptor registry resolves the "weaviate" platform key."""
        adaptor = get_adaptor("weaviate")
        assert adaptor.PLATFORM == "weaviate"
        assert adaptor.PLATFORM_NAME == "Weaviate (Vector Database)"

    def test_format_skill_md(self, tmp_path):
        """Formatting a skill directory yields a Weaviate schema plus objects."""
        # Build a minimal skill layout: SKILL.md plus two reference docs.
        skill_dir = tmp_path / "test_skill"
        skill_dir.mkdir()
        (skill_dir / "SKILL.md").write_text(
            "# Test Skill\n\nThis is a test skill for Weaviate format."
        )
        refs_dir = skill_dir / "references"
        refs_dir.mkdir()
        (refs_dir / "getting_started.md").write_text("# Getting Started\n\nQuick start.")
        (refs_dir / "api.md").write_text("# API Reference\n\nAPI docs.")

        metadata = SkillMetadata(
            name="test_skill", description="Test skill", version="1.0.0"
        )
        result = json.loads(
            get_adaptor("weaviate").format_skill_md(skill_dir, metadata)
        )

        # Top-level structure of the Weaviate import payload.
        for key in ("schema", "objects", "class_name"):
            assert key in result
        assert len(result["objects"]) == 3  # SKILL.md + 2 references

        # Every object carries an id and a fully-populated properties map.
        for obj in result["objects"]:
            assert "id" in obj
            assert "properties" in obj
            props = obj["properties"]
            assert "content" in props
            assert "source" in props
            assert props["source"] == "test_skill"
            assert props["version"] == "1.0.0"
            for field in ("category", "file", "type"):
                assert field in props

        # Categories derive from SKILL.md and the reference file names.
        categories = {obj["properties"]["category"] for obj in result["objects"]}
        assert "overview" in categories  # From SKILL.md
        assert "getting started" in categories or "api" in categories  # From references

    def test_package_creates_json(self, tmp_path):
        """Packaging should write a weaviate-named JSON file containing objects."""
        skill_dir = tmp_path / "test_skill"
        skill_dir.mkdir()
        (skill_dir / "SKILL.md").write_text("# Test\n\nTest content.")

        output_path = get_adaptor("weaviate").package(skill_dir, tmp_path)

        # File exists and follows the <name>-weaviate.json convention.
        assert output_path.exists()
        assert output_path.suffix == ".json"
        assert "weaviate" in output_path.name

        # Round-trip the file and spot-check the first object.
        result = json.loads(output_path.read_text())
        assert isinstance(result, dict)
        assert "objects" in result
        assert len(result["objects"]) > 0
        first = result["objects"][0]
        assert "id" in first
        assert "properties" in first

    def test_package_output_filename(self, tmp_path):
        """Output name is derived from the skill; a .zip target becomes .json."""
        skill_dir = tmp_path / "react"
        skill_dir.mkdir()
        (skill_dir / "SKILL.md").write_text("# React\n\nReact docs.")

        adaptor = get_adaptor("weaviate")

        # Packaging into a directory picks <skill>-weaviate.json.
        assert adaptor.package(skill_dir, tmp_path).name == "react-weaviate.json"

        # An explicit .zip target is rewritten to a JSON file.
        zip_target = adaptor.package(skill_dir, tmp_path / "test.zip")
        assert zip_target.suffix == ".json"
        assert "weaviate" in zip_target.name

    def test_upload_returns_message(self, tmp_path):
        """upload() is a no-op that returns manual-import instructions."""
        package_path = tmp_path / "test-weaviate.json"
        package_path.write_text('[]')

        result = get_adaptor("weaviate").upload(package_path, "fake-key")

        assert result["success"] is False  # No upload capability
        assert result["skill_id"] is None
        assert "message" in result
        assert "import weaviate" in result["message"]

    def test_validate_api_key_returns_false(self):
        """API key validation is unsupported and always reports False."""
        assert get_adaptor("weaviate").validate_api_key("any-key") is False

    def test_get_env_var_name_returns_empty(self):
        """No API key env var is required, so the name is empty."""
        assert get_adaptor("weaviate").get_env_var_name() == ""

    def test_supports_enhancement_returns_false(self):
        """AI enhancement is not offered by the Weaviate adaptor."""
        assert get_adaptor("weaviate").supports_enhancement() is False

    def test_enhance_returns_false(self, tmp_path):
        """Calling enhance() is a no-op that returns False."""
        skill_dir = tmp_path / "test_skill"
        skill_dir.mkdir()

        assert get_adaptor("weaviate").enhance(skill_dir, "fake-key") is False

    def test_empty_skill_directory(self, tmp_path):
        """A skill with no files still yields a valid, empty objects list."""
        skill_dir = tmp_path / "empty_skill"
        skill_dir.mkdir()

        metadata = SkillMetadata(
            name="empty_skill", description="Empty", version="1.0.0"
        )
        result = json.loads(
            get_adaptor("weaviate").format_skill_md(skill_dir, metadata)
        )

        # Structure is intact even though nothing was indexed.
        assert "objects" in result
        assert result["objects"] == []

    def test_references_only(self, tmp_path):
        """References without a SKILL.md still become reference-typed objects."""
        skill_dir = tmp_path / "refs_only"
        skill_dir.mkdir()
        refs_dir = skill_dir / "references"
        refs_dir.mkdir()
        (refs_dir / "test.md").write_text("# Test\n\nTest content.")

        metadata = SkillMetadata(
            name="refs_only", description="Refs only", version="1.0.0"
        )
        result = json.loads(
            get_adaptor("weaviate").format_skill_md(skill_dir, metadata)
        )

        assert len(result["objects"]) == 1
        props = result["objects"][0]["properties"]
        assert props["category"] == "test"
        assert props["type"] == "reference"
||||
|
||||
|
||||
# Allow running this test module directly (outside a pytest invocation).
if __name__ == "__main__":
    pytest.main([__file__, "-v"])
|
||||
Reference in New Issue
Block a user