change max length

This commit is contained in:
Pablo Estevez
2026-01-17 17:48:15 +00:00
parent 97e597d9db
commit c33c6f9073
118 changed files with 3546 additions and 960 deletions

View File

@@ -282,7 +282,12 @@ Pass data to components:
def test_e2e_package_format_validation(self):
"""Test that each platform creates correct package format"""
test_cases = [("claude", ".zip"), ("gemini", ".tar.gz"), ("openai", ".zip"), ("markdown", ".zip")]
test_cases = [
("claude", ".zip"),
("gemini", ".tar.gz"),
("openai", ".zip"),
("markdown", ".zip"),
]
for platform, expected_ext in test_cases:
adaptor = get_adaptor(platform)
@@ -290,9 +295,13 @@ Pass data to components:
# Verify extension
if expected_ext == ".tar.gz":
self.assertTrue(str(package_path).endswith(".tar.gz"), f"{platform} should create .tar.gz file")
self.assertTrue(
str(package_path).endswith(".tar.gz"), f"{platform} should create .tar.gz file"
)
else:
self.assertTrue(str(package_path).endswith(".zip"), f"{platform} should create .zip file")
self.assertTrue(
str(package_path).endswith(".zip"), f"{platform} should create .zip file"
)
def test_e2e_package_filename_convention(self):
"""Test that package filenames follow convention"""
@@ -308,7 +317,9 @@ Pass data to components:
package_path = adaptor.package(self.skill_dir, self.output_dir)
# Verify filename
self.assertEqual(package_path.name, expected_name, f"{platform} package filename incorrect")
self.assertEqual(
package_path.name, expected_name, f"{platform} package filename incorrect"
)
def test_e2e_all_platforms_preserve_references(self):
"""Test that all platforms preserve reference files"""
@@ -324,7 +335,8 @@ Pass data to components:
names = tar.getnames()
for ref_file in ref_files:
self.assertTrue(
any(ref_file in name for name in names), f"{platform}: {ref_file} not found in package"
any(ref_file in name for name in names),
f"{platform}: {ref_file} not found in package",
)
else:
with zipfile.ZipFile(package_path, "r") as zf:
@@ -338,7 +350,8 @@ Pass data to components:
)
else:
self.assertTrue(
any(ref_file in name for name in names), f"{platform}: {ref_file} not found in package"
any(ref_file in name for name in names),
f"{platform}: {ref_file} not found in package",
)
def test_e2e_metadata_consistency(self):
@@ -357,7 +370,9 @@ Pass data to components:
metadata = json.loads(metadata_file.read().decode("utf-8"))
else:
with zipfile.ZipFile(package_path, "r") as zf:
metadata_filename = f"{platform}_metadata.json" if platform == "openai" else "metadata.json"
metadata_filename = (
f"{platform}_metadata.json" if platform == "openai" else "metadata.json"
)
metadata_content = zf.read(metadata_filename).decode("utf-8")
metadata = json.loads(metadata_content)
@@ -467,7 +482,9 @@ class TestAdaptorsWorkflowIntegration(unittest.TestCase):
# Should respect custom path
self.assertTrue(package_path.exists())
self.assertTrue("my-package" in package_path.name or package_path.parent.name == "custom")
self.assertTrue(
"my-package" in package_path.name or package_path.parent.name == "custom"
)
def test_workflow_api_key_validation(self):
"""Test API key validation for each platform"""
@@ -485,7 +502,9 @@ class TestAdaptorsWorkflowIntegration(unittest.TestCase):
for platform, api_key, expected in test_cases:
adaptor = get_adaptor(platform)
result = adaptor.validate_api_key(api_key)
self.assertEqual(result, expected, f"{platform}: validate_api_key('{api_key}') should be {expected}")
self.assertEqual(
result, expected, f"{platform}: validate_api_key('{api_key}') should be {expected}"
)
class TestAdaptorsErrorHandling(unittest.TestCase):

View File

@@ -58,7 +58,9 @@ class TestClaudeAdaptor(unittest.TestCase):
(skill_dir / "references").mkdir()
(skill_dir / "references" / "test.md").write_text("# Test content")
metadata = SkillMetadata(name="test-skill", description="Test skill description", version="1.0.0")
metadata = SkillMetadata(
name="test-skill", description="Test skill description", version="1.0.0"
)
formatted = self.adaptor.format_skill_md(skill_dir, metadata)
@@ -221,7 +223,9 @@ This is existing skill content that should be preserved.
self.assertTrue(package_path.exists())
# Should respect custom naming if provided
self.assertTrue("my-package" in package_path.name or package_path.parent.name == "custom")
self.assertTrue(
"my-package" in package_path.name or package_path.parent.name == "custom"
)
def test_package_to_directory(self):
"""Test packaging to directory (should auto-name)"""

View File

@@ -95,7 +95,9 @@ class TestAPIReferenceBuilder(unittest.TestCase):
"functions": [
{
"name": "calculate_sum",
"parameters": [{"name": "numbers", "type_hint": "list", "default": None}],
"parameters": [
{"name": "numbers", "type_hint": "list", "default": None}
],
"return_type": "int",
"docstring": "Calculate sum of numbers.",
"is_async": False,
@@ -166,7 +168,14 @@ class TestAPIReferenceBuilder(unittest.TestCase):
{
"file": "module.py",
"language": "Python",
"classes": [{"name": "TestClass", "docstring": "Test class.", "base_classes": [], "methods": []}],
"classes": [
{
"name": "TestClass",
"docstring": "Test class.",
"base_classes": [],
"methods": [],
}
],
"functions": [
{
"name": "test_func",

View File

@@ -192,9 +192,15 @@ How to use async tools.
with (
patch.object(GitHubThreeStreamFetcher, "clone_repo", return_value=mock_github_repo),
patch.object(
GitHubThreeStreamFetcher, "fetch_github_metadata", return_value=mock_github_api_data["metadata"]
GitHubThreeStreamFetcher,
"fetch_github_metadata",
return_value=mock_github_api_data["metadata"],
),
patch.object(
GitHubThreeStreamFetcher,
"fetch_issues",
return_value=mock_github_api_data["issues"],
),
patch.object(GitHubThreeStreamFetcher, "fetch_issues", return_value=mock_github_api_data["issues"]),
):
fetcher = GitHubThreeStreamFetcher("https://github.com/jlowin/fastmcp")
three_streams = fetcher.fetch()
@@ -227,10 +233,18 @@ How to use async tools.
with (
patch.object(GitHubThreeStreamFetcher, "clone_repo", return_value=mock_github_repo),
patch.object(
GitHubThreeStreamFetcher, "fetch_github_metadata", return_value=mock_github_api_data["metadata"]
GitHubThreeStreamFetcher,
"fetch_github_metadata",
return_value=mock_github_api_data["metadata"],
),
patch.object(GitHubThreeStreamFetcher, "fetch_issues", return_value=mock_github_api_data["issues"]),
patch("skill_seekers.cli.unified_codebase_analyzer.UnifiedCodebaseAnalyzer.c3x_analysis") as mock_c3x,
patch.object(
GitHubThreeStreamFetcher,
"fetch_issues",
return_value=mock_github_api_data["issues"],
),
patch(
"skill_seekers.cli.unified_codebase_analyzer.UnifiedCodebaseAnalyzer.c3x_analysis"
) as mock_c3x,
):
# Mock C3.x analysis to return sample data
mock_c3x.return_value = {
@@ -247,7 +261,9 @@ How to use async tools.
"c3_2_examples_count": 2,
"c3_3_guides": [{"title": "OAuth Setup Guide", "file": "docs/oauth.md"}],
"c3_4_configs": [],
"c3_7_architecture": [{"pattern": "Service Layer", "description": "OAuth provider abstraction"}],
"c3_7_architecture": [
{"pattern": "Service Layer", "description": "OAuth provider abstraction"}
],
}
analyzer = UnifiedCodebaseAnalyzer()
@@ -316,7 +332,13 @@ How to use async tools.
"description": "Python framework for MCP servers",
},
common_problems=[
{"number": 42, "title": "OAuth setup fails", "labels": ["oauth"], "comments": 15, "state": "open"},
{
"number": 42,
"title": "OAuth setup fails",
"labels": ["oauth"],
"comments": 15,
"state": "open",
},
{
"number": 38,
"title": "Async tools not working",
@@ -344,7 +366,9 @@ How to use async tools.
# Generate router
generator = RouterGenerator(
config_paths=[str(config1), str(config2)], router_name="fastmcp", github_streams=mock_streams
config_paths=[str(config1), str(config2)],
router_name="fastmcp",
github_streams=mock_streams,
)
skill_md = generator.generate_skill_md()
@@ -536,15 +560,21 @@ class TestScenario2MultiSource:
source1_data = {"api": [{"name": "GoogleProvider", "params": ["app_id", "app_secret"]}]}
# Mock source 2 (GitHub C3.x)
source2_data = {"api": [{"name": "GoogleProvider", "params": ["client_id", "client_secret"]}]}
source2_data = {
"api": [{"name": "GoogleProvider", "params": ["client_id", "client_secret"]}]
}
# Mock GitHub streams
github_streams = ThreeStreamData(
code_stream=CodeStream(directory=Path("/tmp"), files=[]),
docs_stream=DocsStream(readme="Use client_id and client_secret", contributing=None, docs_files=[]),
docs_stream=DocsStream(
readme="Use client_id and client_secret", contributing=None, docs_files=[]
),
insights_stream=InsightsStream(
metadata={"stars": 1000},
common_problems=[{"number": 42, "title": "OAuth parameter confusion", "labels": ["oauth"]}],
common_problems=[
{"number": 42, "title": "OAuth parameter confusion", "labels": ["oauth"]}
],
known_solutions=[],
top_labels=[],
),
@@ -633,7 +663,9 @@ def test_connection():
"""Test basic analysis of local codebase."""
analyzer = UnifiedCodebaseAnalyzer()
result = analyzer.analyze(source=str(local_codebase), depth="basic", fetch_github_metadata=False)
result = analyzer.analyze(
source=str(local_codebase), depth="basic", fetch_github_metadata=False
)
# Verify result
assert isinstance(result, AnalysisResult)
@@ -653,7 +685,9 @@ def test_connection():
"""Test C3.x analysis of local codebase."""
analyzer = UnifiedCodebaseAnalyzer()
with patch("skill_seekers.cli.unified_codebase_analyzer.UnifiedCodebaseAnalyzer.c3x_analysis") as mock_c3x:
with patch(
"skill_seekers.cli.unified_codebase_analyzer.UnifiedCodebaseAnalyzer.c3x_analysis"
) as mock_c3x:
# Mock C3.x to return sample data
mock_c3x.return_value = {
"files": ["database.py", "api.py"],
@@ -666,7 +700,9 @@ def test_connection():
"c3_7_architecture": [],
}
result = analyzer.analyze(source=str(local_codebase), depth="c3x", fetch_github_metadata=False)
result = analyzer.analyze(
source=str(local_codebase), depth="c3x", fetch_github_metadata=False
)
# Verify result
assert result.source_type == "local"
@@ -814,7 +850,12 @@ Based on analysis of GitHub issues:
github_overhead += 1
continue
if in_repo_info:
if line.startswith("**") or "github.com" in line or "" in line or "FastMCP is" in line:
if (
line.startswith("**")
or "github.com" in line
or "" in line
or "FastMCP is" in line
):
github_overhead += 1
if line.startswith("##"):
in_repo_info = False
@@ -894,7 +935,9 @@ provider = GitHubProvider(client_id="...", client_secret="...")
# Check minimum 3 code examples
code_blocks = sub_skill_md.count("```")
assert code_blocks >= 6, f"Need at least 3 code examples (6 markers), found {code_blocks // 2}"
assert code_blocks >= 6, (
f"Need at least 3 code examples (6 markers), found {code_blocks // 2}"
)
# Check language tags
assert "```python" in sub_skill_md, "Code blocks must have language tags"
@@ -909,7 +952,9 @@ provider = GitHubProvider(client_id="...", client_secret="...")
# Check solution indicators for closed issues
if "closed" in sub_skill_md.lower():
assert "" in sub_skill_md or "Solution" in sub_skill_md, "Closed issues should indicate solution found"
assert "" in sub_skill_md or "Solution" in sub_skill_md, (
"Closed issues should indicate solution found"
)
class TestTokenEfficiencyCalculation:
@@ -946,7 +991,9 @@ class TestTokenEfficiencyCalculation:
# With selective loading and caching, achieve 35-40%
# Even conservative estimate shows 29.5%, actual usage patterns show 35-40%
assert reduction_percent >= 29, f"Token reduction {reduction_percent:.1f}% below 29% (conservative target)"
assert reduction_percent >= 29, (
f"Token reduction {reduction_percent:.1f}% below 29% (conservative target)"
)
if __name__ == "__main__":

View File

@@ -92,7 +92,11 @@ class TestAsyncScrapeMethods(unittest.TestCase):
def test_scrape_page_async_exists(self):
"""Test scrape_page_async method exists"""
config = {"name": "test", "base_url": "https://example.com/", "selectors": {"main_content": "article"}}
config = {
"name": "test",
"base_url": "https://example.com/",
"selectors": {"main_content": "article"},
}
with tempfile.TemporaryDirectory() as tmpdir:
try:
@@ -105,7 +109,11 @@ class TestAsyncScrapeMethods(unittest.TestCase):
def test_scrape_all_async_exists(self):
"""Test scrape_all_async method exists"""
config = {"name": "test", "base_url": "https://example.com/", "selectors": {"main_content": "article"}}
config = {
"name": "test",
"base_url": "https://example.com/",
"selectors": {"main_content": "article"},
}
with tempfile.TemporaryDirectory() as tmpdir:
try:
@@ -144,7 +152,9 @@ class TestAsyncRouting(unittest.TestCase):
converter = DocToSkillConverter(config, dry_run=True)
# Mock scrape_all_async to verify it gets called
with patch.object(converter, "scrape_all_async", new_callable=AsyncMock) as mock_async:
with patch.object(
converter, "scrape_all_async", new_callable=AsyncMock
) as mock_async:
converter.scrape_all()
# Verify async version was called
mock_async.assert_called_once()
@@ -167,7 +177,9 @@ class TestAsyncRouting(unittest.TestCase):
converter = DocToSkillConverter(config, dry_run=True)
# Mock scrape_all_async to verify it does NOT get called
with patch.object(converter, "scrape_all_async", new_callable=AsyncMock) as mock_async:
with patch.object(
converter, "scrape_all_async", new_callable=AsyncMock
) as mock_async:
with patch.object(converter, "_try_llms_txt", return_value=False):
converter.scrape_all()
# Verify async version was NOT called
@@ -249,7 +261,9 @@ class TestAsyncErrorHandling(unittest.TestCase):
# Mock client.get to raise exception
with patch.object(client, "get", side_effect=httpx.HTTPError("Test error")):
# Should not raise exception, just log error
await converter.scrape_page_async("https://example.com/test", semaphore, client)
await converter.scrape_page_async(
"https://example.com/test", semaphore, client
)
# Run async test
asyncio.run(run_test())

View File

@@ -38,18 +38,16 @@ def project_root():
@pytest.fixture
def run_bootstrap(project_root):
"""Execute bootstrap script and return result"""
def _run(timeout=600):
script = project_root / "scripts" / "bootstrap_skill.sh"
result = subprocess.run(
["bash", str(script)],
cwd=project_root,
capture_output=True,
text=True,
timeout=timeout
["bash", str(script)], cwd=project_root, capture_output=True, text=True, timeout=timeout
)
return result
return _run
@@ -95,7 +93,7 @@ class TestBootstrapSkillE2E:
assert content.startswith("---"), "Missing frontmatter start"
# Find closing delimiter
lines = content.split('\n')
lines = content.split("\n")
closing_found = False
for i, line in enumerate(lines[1:], 1):
if line.strip() == "---":
@@ -129,11 +127,7 @@ class TestBootstrapSkillE2E:
# Create venv
venv_path = tmp_path / "test_venv"
subprocess.run(
[sys.executable, "-m", "venv", str(venv_path)],
check=True,
timeout=60
)
subprocess.run([sys.executable, "-m", "venv", str(venv_path)], check=True, timeout=60)
# Install skill in venv
pip_path = venv_path / "bin" / "pip"
@@ -142,7 +136,7 @@ class TestBootstrapSkillE2E:
cwd=output_skill_dir.parent.parent,
capture_output=True,
text=True,
timeout=120
timeout=120,
)
# Should install successfully
@@ -156,13 +150,13 @@ class TestBootstrapSkillE2E:
# Try to package with claude adaptor (simplest)
from skill_seekers.cli.adaptors import get_adaptor
adaptor = get_adaptor('claude')
adaptor = get_adaptor("claude")
# Should be able to package without errors
try:
package_path = adaptor.package(
skill_dir=output_skill_dir, # Path object, not str
output_path=tmp_path # Path object, not str
output_path=tmp_path, # Path object, not str
)
assert Path(package_path).exists(), "Package not created"

View File

@@ -111,7 +111,10 @@ class TestC3Integration:
}
],
"ai_enhancements": {
"overall_insights": {"security_issues_found": 1, "recommended_actions": ["Move secrets to .env"]}
"overall_insights": {
"security_issues_found": 1,
"recommended_actions": ["Move secrets to .env"],
}
},
},
"architecture": {
@@ -120,7 +123,11 @@ class TestC3Integration:
"pattern_name": "MVC",
"confidence": 0.89,
"framework": "Flask",
"evidence": ["models/ directory", "views/ directory", "controllers/ directory"],
"evidence": [
"models/ directory",
"views/ directory",
"controllers/ directory",
],
}
],
"frameworks_detected": ["Flask", "SQLAlchemy"],
@@ -173,7 +180,9 @@ class TestC3Integration:
"""Test ARCHITECTURE.md is generated with all 8 sections."""
# Create skill builder with C3.x data (multi-source list format)
github_data = {"readme": "Test README", "c3_analysis": mock_c3_data}
scraped_data = {"github": [{"repo": "test/repo", "repo_id": "test_repo", "idx": 0, "data": github_data}]}
scraped_data = {
"github": [{"repo": "test/repo", "repo_id": "test_repo", "idx": 0, "data": github_data}]
}
builder = UnifiedSkillBuilder(mock_config, scraped_data)
builder.skill_dir = temp_dir
@@ -212,7 +221,9 @@ class TestC3Integration:
"""Test correct C3.x reference directory structure is created."""
# Create skill builder with C3.x data (multi-source list format)
github_data = {"readme": "Test README", "c3_analysis": mock_c3_data}
scraped_data = {"github": [{"repo": "test/repo", "repo_id": "test_repo", "idx": 0, "data": github_data}]}
scraped_data = {
"github": [{"repo": "test/repo", "repo_id": "test_repo", "idx": 0, "data": github_data}]
}
builder = UnifiedSkillBuilder(mock_config, scraped_data)
builder.skill_dir = temp_dir
@@ -261,7 +272,11 @@ class TestC3Integration:
# Mock GitHubScraper (correct module path for import)
with patch("skill_seekers.cli.github_scraper.GitHubScraper") as mock_github:
mock_github.return_value.scrape.return_value = {"readme": "Test README", "issues": [], "releases": []}
mock_github.return_value.scrape.return_value = {
"readme": "Test README",
"issues": [],
"releases": [],
}
scraper = UnifiedScraper(config_path)
@@ -278,7 +293,14 @@ class TestC3Integration:
config = {
"name": "test",
"description": "Test",
"sources": [{"type": "github", "repo": "test/repo", "enable_codebase_analysis": True, "ai_mode": "auto"}],
"sources": [
{
"type": "github",
"repo": "test/repo",
"enable_codebase_analysis": True,
"ai_mode": "auto",
}
],
}
# Save config

View File

@@ -19,7 +19,9 @@ class TestModernCLICommands(unittest.TestCase):
def test_doc_scraper_uses_modern_commands(self):
"""Test doc_scraper.py uses skill-seekers commands"""
script_path = Path(__file__).parent.parent / "src" / "skill_seekers" / "cli" / "doc_scraper.py"
script_path = (
Path(__file__).parent.parent / "src" / "skill_seekers" / "cli" / "doc_scraper.py"
)
with open(script_path) as f:
content = f.read()
@@ -32,7 +34,13 @@ class TestModernCLICommands(unittest.TestCase):
def test_enhance_skill_local_uses_modern_commands(self):
"""Test enhance_skill_local.py uses skill-seekers commands"""
script_path = Path(__file__).parent.parent / "src" / "skill_seekers" / "cli" / "enhance_skill_local.py"
script_path = (
Path(__file__).parent.parent
/ "src"
/ "skill_seekers"
/ "cli"
/ "enhance_skill_local.py"
)
with open(script_path) as f:
content = f.read()
@@ -45,7 +53,9 @@ class TestModernCLICommands(unittest.TestCase):
def test_estimate_pages_uses_modern_commands(self):
"""Test estimate_pages.py uses skill-seekers commands"""
script_path = Path(__file__).parent.parent / "src" / "skill_seekers" / "cli" / "estimate_pages.py"
script_path = (
Path(__file__).parent.parent / "src" / "skill_seekers" / "cli" / "estimate_pages.py"
)
with open(script_path) as f:
content = f.read()
@@ -58,7 +68,9 @@ class TestModernCLICommands(unittest.TestCase):
def test_package_skill_uses_modern_commands(self):
"""Test package_skill.py uses skill-seekers commands"""
script_path = Path(__file__).parent.parent / "src" / "skill_seekers" / "cli" / "package_skill.py"
script_path = (
Path(__file__).parent.parent / "src" / "skill_seekers" / "cli" / "package_skill.py"
)
with open(script_path) as f:
content = f.read()
@@ -71,7 +83,9 @@ class TestModernCLICommands(unittest.TestCase):
def test_github_scraper_uses_modern_commands(self):
"""Test github_scraper.py uses skill-seekers commands"""
script_path = Path(__file__).parent.parent / "src" / "skill_seekers" / "cli" / "github_scraper.py"
script_path = (
Path(__file__).parent.parent / "src" / "skill_seekers" / "cli" / "github_scraper.py"
)
with open(script_path) as f:
content = f.read()
@@ -89,10 +103,16 @@ class TestUnifiedCLIEntryPoints(unittest.TestCase):
def test_main_cli_help_output(self):
"""Test skill-seekers --help works"""
try:
result = subprocess.run(["skill-seekers", "--help"], capture_output=True, text=True, timeout=5)
result = subprocess.run(
["skill-seekers", "--help"], capture_output=True, text=True, timeout=5
)
# Should return successfully
self.assertIn(result.returncode, [0, 2], f"skill-seekers --help failed with code {result.returncode}")
self.assertIn(
result.returncode,
[0, 2],
f"skill-seekers --help failed with code {result.returncode}",
)
# Should show subcommands
output = result.stdout + result.stderr
@@ -107,14 +127,18 @@ class TestUnifiedCLIEntryPoints(unittest.TestCase):
def test_main_cli_version_output(self):
"""Test skill-seekers --version works"""
try:
result = subprocess.run(["skill-seekers", "--version"], capture_output=True, text=True, timeout=5)
result = subprocess.run(
["skill-seekers", "--version"], capture_output=True, text=True, timeout=5
)
# Should return successfully
self.assertEqual(result.returncode, 0, f"skill-seekers --version failed: {result.stderr}")
self.assertEqual(
result.returncode, 0, f"skill-seekers --version failed: {result.stderr}"
)
# Should show version
output = result.stdout + result.stderr
self.assertIn('2.7.0', output)
self.assertIn("2.7.0", output)
except FileNotFoundError:
# If skill-seekers is not installed, skip this test
@@ -140,7 +164,9 @@ class TestNoHardcodedPaths(unittest.TestCase):
for hardcoded_path in hardcoded_paths:
self.assertNotIn(
hardcoded_path, content, f"{script_path.name} contains hardcoded path: {hardcoded_path}"
hardcoded_path,
content,
f"{script_path.name} contains hardcoded path: {hardcoded_path}",
)

View File

@@ -173,7 +173,10 @@ API_KEY=secret123
PORT=8000
"""
config_file = ConfigFile(
file_path=str(Path(self.temp_dir) / ".env"), relative_path=".env", config_type="env", purpose="unknown"
file_path=str(Path(self.temp_dir) / ".env"),
relative_path=".env",
config_type="env",
purpose="unknown",
)
file_path = Path(self.temp_dir) / ".env"
@@ -313,7 +316,8 @@ endpoint = "https://api.example.com"
# Check if parsing failed due to missing toml/tomli
if config_file.parse_errors and (
"toml" in str(config_file.parse_errors).lower() and "not installed" in str(config_file.parse_errors)
"toml" in str(config_file.parse_errors).lower()
and "not installed" in str(config_file.parse_errors)
):
self.skipTest("toml/tomli not installed")
@@ -337,7 +341,11 @@ class TestConfigPatternDetector(unittest.TestCase):
]
config_file = ConfigFile(
file_path="test.json", relative_path="test.json", config_type="json", purpose="unknown", settings=settings
file_path="test.json",
relative_path="test.json",
config_type="json",
purpose="unknown",
settings=settings,
)
patterns = self.detector.detect_patterns(config_file)
@@ -353,7 +361,11 @@ class TestConfigPatternDetector(unittest.TestCase):
]
config_file = ConfigFile(
file_path="test.json", relative_path="test.json", config_type="json", purpose="unknown", settings=settings
file_path="test.json",
relative_path="test.json",
config_type="json",
purpose="unknown",
settings=settings,
)
patterns = self.detector.detect_patterns(config_file)
@@ -369,7 +381,11 @@ class TestConfigPatternDetector(unittest.TestCase):
]
config_file = ConfigFile(
file_path="test.json", relative_path="test.json", config_type="json", purpose="unknown", settings=settings
file_path="test.json",
relative_path="test.json",
config_type="json",
purpose="unknown",
settings=settings,
)
patterns = self.detector.detect_patterns(config_file)
@@ -385,7 +401,11 @@ class TestConfigPatternDetector(unittest.TestCase):
]
config_file = ConfigFile(
file_path="test.json", relative_path="test.json", config_type="json", purpose="unknown", settings=settings
file_path="test.json",
relative_path="test.json",
config_type="json",
purpose="unknown",
settings=settings,
)
patterns = self.detector.detect_patterns(config_file)
@@ -402,7 +422,11 @@ class TestConfigPatternDetector(unittest.TestCase):
]
config_file = ConfigFile(
file_path="test.json", relative_path="test.json", config_type="json", purpose="unknown", settings=settings
file_path="test.json",
relative_path="test.json",
config_type="json",
purpose="unknown",
settings=settings,
)
patterns = self.detector.detect_patterns(config_file)
@@ -418,7 +442,11 @@ class TestConfigPatternDetector(unittest.TestCase):
]
config_file = ConfigFile(
file_path="test.json", relative_path="test.json", config_type="json", purpose="unknown", settings=settings
file_path="test.json",
relative_path="test.json",
config_type="json",
purpose="unknown",
settings=settings,
)
patterns = self.detector.detect_patterns(config_file)
@@ -434,7 +462,11 @@ class TestConfigPatternDetector(unittest.TestCase):
]
config_file = ConfigFile(
file_path="test.json", relative_path="test.json", config_type="json", purpose="unknown", settings=settings
file_path="test.json",
relative_path="test.json",
config_type="json",
purpose="unknown",
settings=settings,
)
patterns = self.detector.detect_patterns(config_file)

View File

@@ -30,7 +30,11 @@ class TestConfigValidation(unittest.TestCase):
"name": "godot",
"base_url": "https://docs.godotengine.org/en/stable/",
"description": "Godot Engine documentation",
"selectors": {"main_content": 'div[role="main"]', "title": "title", "code_blocks": "pre code"},
"selectors": {
"main_content": 'div[role="main"]',
"title": "title",
"code_blocks": "pre code",
},
"url_patterns": {"include": ["/guide/", "/api/"], "exclude": ["/blog/"]},
"categories": {"getting_started": ["intro", "tutorial"], "api": ["api", "reference"]},
"rate_limit": 0.5,
@@ -84,7 +88,9 @@ class TestConfigValidation(unittest.TestCase):
"""Test invalid selectors (not a dictionary)"""
config = {"name": "test", "base_url": "https://example.com/", "selectors": "invalid"}
errors, _ = validate_config(config)
self.assertTrue(any("selectors" in error.lower() and "dictionary" in error.lower() for error in errors))
self.assertTrue(
any("selectors" in error.lower() and "dictionary" in error.lower() for error in errors)
)
def test_missing_recommended_selectors(self):
"""Test warning for missing recommended selectors"""
@@ -104,25 +110,44 @@ class TestConfigValidation(unittest.TestCase):
"""Test invalid url_patterns (not a dictionary)"""
config = {"name": "test", "base_url": "https://example.com/", "url_patterns": []}
errors, _ = validate_config(config)
self.assertTrue(any("url_patterns" in error.lower() and "dictionary" in error.lower() for error in errors))
self.assertTrue(
any(
"url_patterns" in error.lower() and "dictionary" in error.lower()
for error in errors
)
)
def test_invalid_url_patterns_include_not_list(self):
"""Test invalid url_patterns.include (not a list)"""
config = {"name": "test", "base_url": "https://example.com/", "url_patterns": {"include": "not-a-list"}}
config = {
"name": "test",
"base_url": "https://example.com/",
"url_patterns": {"include": "not-a-list"},
}
errors, _ = validate_config(config)
self.assertTrue(any("include" in error.lower() and "list" in error.lower() for error in errors))
self.assertTrue(
any("include" in error.lower() and "list" in error.lower() for error in errors)
)
def test_invalid_categories_not_dict(self):
"""Test invalid categories (not a dictionary)"""
config = {"name": "test", "base_url": "https://example.com/", "categories": []}
errors, _ = validate_config(config)
self.assertTrue(any("categories" in error.lower() and "dictionary" in error.lower() for error in errors))
self.assertTrue(
any("categories" in error.lower() and "dictionary" in error.lower() for error in errors)
)
def test_invalid_category_keywords_not_list(self):
"""Test invalid category keywords (not a list)"""
config = {"name": "test", "base_url": "https://example.com/", "categories": {"getting_started": "not-a-list"}}
config = {
"name": "test",
"base_url": "https://example.com/",
"categories": {"getting_started": "not-a-list"},
}
errors, _ = validate_config(config)
self.assertTrue(any("getting_started" in error.lower() and "list" in error.lower() for error in errors))
self.assertTrue(
any("getting_started" in error.lower() and "list" in error.lower() for error in errors)
)
def test_invalid_rate_limit_negative(self):
"""Test invalid rate_limit (negative)"""
@@ -178,13 +203,23 @@ class TestConfigValidation(unittest.TestCase):
def test_invalid_start_urls_not_list(self):
"""Test invalid start_urls (not a list)"""
config = {"name": "test", "base_url": "https://example.com/", "start_urls": "https://example.com/page1"}
config = {
"name": "test",
"base_url": "https://example.com/",
"start_urls": "https://example.com/page1",
}
errors, _ = validate_config(config)
self.assertTrue(any("start_urls" in error.lower() and "list" in error.lower() for error in errors))
self.assertTrue(
any("start_urls" in error.lower() and "list" in error.lower() for error in errors)
)
def test_invalid_start_urls_bad_protocol(self):
"""Test invalid start_urls (bad protocol)"""
config = {"name": "test", "base_url": "https://example.com/", "start_urls": ["ftp://example.com/page1"]}
config = {
"name": "test",
"base_url": "https://example.com/",
"start_urls": ["ftp://example.com/page1"],
}
errors, _ = validate_config(config)
self.assertTrue(any("start_url" in error.lower() for error in errors))
@@ -193,7 +228,11 @@ class TestConfigValidation(unittest.TestCase):
config = {
"name": "test",
"base_url": "https://example.com/",
"start_urls": ["https://example.com/page1", "http://example.com/page2", "https://example.com/api/docs"],
"start_urls": [
"https://example.com/page1",
"http://example.com/page2",
"https://example.com/api/docs",
],
}
errors, _ = validate_config(config)
url_errors = [e for e in errors if "start_url" in e.lower()]

View File

@@ -153,7 +153,9 @@ class TestConstantsExports(unittest.TestCase):
self.assertTrue(hasattr(constants, "__all__"))
for name in constants.__all__:
self.assertTrue(hasattr(constants, name), f"Constant '{name}' in __all__ but not defined")
self.assertTrue(
hasattr(constants, name), f"Constant '{name}' in __all__ but not defined"
)
def test_all_exports_count(self):
"""Test that __all__ has expected number of exports."""

View File

@@ -54,7 +54,9 @@ function greet(name) {
""")
# Create mock three-stream data
code_stream = CodeStream(directory=tmp_path, files=[tmp_path / "main.py", tmp_path / "utils.js"])
code_stream = CodeStream(
directory=tmp_path, files=[tmp_path / "main.py", tmp_path / "utils.js"]
)
docs_stream = DocsStream(
readme="""# Test Project
@@ -74,10 +76,17 @@ hello()
```
""",
contributing="# Contributing\n\nPull requests welcome!",
docs_files=[{"path": "docs/guide.md", "content": "# User Guide\n\nHow to use this project."}],
docs_files=[
{"path": "docs/guide.md", "content": "# User Guide\n\nHow to use this project."}
],
)
insights_stream = InsightsStream(
metadata={"stars": 1234, "forks": 56, "language": "Python", "description": "A test project"},
metadata={
"stars": 1234,
"forks": 56,
"language": "Python",
"description": "A test project",
},
common_problems=[
{
"title": "Installation fails on Windows",
@@ -95,7 +104,13 @@ hello()
},
],
known_solutions=[
{"title": "Fixed: Module not found", "number": 35, "state": "closed", "comments": 8, "labels": ["bug"]}
{
"title": "Fixed: Module not found",
"number": 35,
"state": "closed",
"comments": 8,
"labels": ["bug"],
}
],
top_labels=[
{"label": "bug", "count": 25},
@@ -108,7 +123,9 @@ hello()
# Step 2: Run unified analyzer with basic depth
analyzer = UnifiedCodebaseAnalyzer()
result = analyzer.analyze(source="https://github.com/test/project", depth="basic", fetch_github_metadata=True)
result = analyzer.analyze(
source="https://github.com/test/project", depth="basic", fetch_github_metadata=True
)
# Step 3: Validate all three streams present
assert result.source_type == "github"
@@ -151,7 +168,13 @@ hello()
"comments": 15,
"labels": ["oauth", "token"],
},
{"title": "Async deadlock", "number": 40, "state": "open", "comments": 12, "labels": ["async", "bug"]},
{
"title": "Async deadlock",
"number": 40,
"state": "open",
"comments": 12,
"labels": ["async", "bug"],
},
{
"title": "Database connection lost",
"number": 35,
@@ -162,8 +185,20 @@ hello()
]
solutions = [
{"title": "Fixed OAuth flow", "number": 30, "state": "closed", "comments": 8, "labels": ["oauth"]},
{"title": "Resolved async race", "number": 25, "state": "closed", "comments": 6, "labels": ["async"]},
{
"title": "Fixed OAuth flow",
"number": 30,
"state": "closed",
"comments": 8,
"labels": ["oauth"],
},
{
"title": "Resolved async race",
"number": 25,
"state": "closed",
"comments": 6,
"labels": ["async"],
},
]
topics = ["oauth", "auth", "authentication"]
@@ -174,7 +209,9 @@ hello()
# Validate categorization
assert "oauth" in categorized or "auth" in categorized or "authentication" in categorized
oauth_issues = (
categorized.get("oauth", []) + categorized.get("auth", []) + categorized.get("authentication", [])
categorized.get("oauth", [])
+ categorized.get("auth", [])
+ categorized.get("authentication", [])
)
# Should have 3 OAuth-related issues (2 problems + 1 solution)
@@ -245,7 +282,12 @@ testproject.run()
docs_files=[],
)
insights_stream = InsightsStream(
metadata={"stars": 5000, "forks": 250, "language": "Python", "description": "Fast test framework"},
metadata={
"stars": 5000,
"forks": 250,
"language": "Python",
"description": "Fast test framework",
},
common_problems=[
{
"title": "OAuth setup fails",
@@ -254,8 +296,20 @@ testproject.run()
"comments": 30,
"labels": ["bug", "oauth"],
},
{"title": "Async deadlock", "number": 142, "state": "open", "comments": 25, "labels": ["async", "bug"]},
{"title": "Token refresh issue", "number": 130, "state": "open", "comments": 20, "labels": ["oauth"]},
{
"title": "Async deadlock",
"number": 142,
"state": "open",
"comments": 25,
"labels": ["async", "bug"],
},
{
"title": "Token refresh issue",
"number": 130,
"state": "open",
"comments": 20,
"labels": ["oauth"],
},
],
known_solutions=[
{
@@ -265,7 +319,13 @@ testproject.run()
"comments": 15,
"labels": ["oauth"],
},
{"title": "Resolved async race", "number": 110, "state": "closed", "comments": 12, "labels": ["async"]},
{
"title": "Resolved async race",
"number": 110,
"state": "closed",
"comments": 12,
"labels": ["async"],
},
],
top_labels=[
{"label": "oauth", "count": 45},
@@ -276,7 +336,9 @@ testproject.run()
github_streams = ThreeStreamData(code_stream, docs_stream, insights_stream)
# Generate router
generator = RouterGenerator([str(config_path1), str(config_path2)], github_streams=github_streams)
generator = RouterGenerator(
[str(config_path1), str(config_path2)], github_streams=github_streams
)
# Step 1: Validate GitHub metadata extracted
assert generator.github_metadata is not None
@@ -308,8 +370,14 @@ testproject.run()
# Validate examples section with converted questions (Fix 1)
assert "## Examples" in skill_md
# Issues converted to natural questions
assert "how do i fix oauth setup" in skill_md.lower() or "how do i handle oauth setup" in skill_md.lower()
assert "how do i handle async deadlock" in skill_md.lower() or "how do i fix async deadlock" in skill_md.lower()
assert (
"how do i fix oauth setup" in skill_md.lower()
or "how do i handle oauth setup" in skill_md.lower()
)
assert (
"how do i handle async deadlock" in skill_md.lower()
or "how do i fix async deadlock" in skill_md.lower()
)
# Common Issues section may still exist with other issues
# Note: Issue numbers may appear in Common Issues or Common Patterns sections
@@ -356,12 +424,26 @@ class TestE2EQualityMetrics:
# Create GitHub streams with realistic data
code_stream = CodeStream(directory=tmp_path, files=[])
docs_stream = DocsStream(readme="# Test\n\nA short README.", contributing=None, docs_files=[])
docs_stream = DocsStream(
readme="# Test\n\nA short README.", contributing=None, docs_files=[]
)
insights_stream = InsightsStream(
metadata={"stars": 100, "forks": 10, "language": "Python", "description": "Test"},
common_problems=[
{"title": "Issue 1", "number": 1, "state": "open", "comments": 5, "labels": ["bug"]},
{"title": "Issue 2", "number": 2, "state": "open", "comments": 3, "labels": ["bug"]},
{
"title": "Issue 1",
"number": 1,
"state": "open",
"comments": 5,
"labels": ["bug"],
},
{
"title": "Issue 2",
"number": 2,
"state": "open",
"comments": 3,
"labels": ["bug"],
},
],
known_solutions=[],
top_labels=[{"label": "bug", "count": 10}],
@@ -382,7 +464,9 @@ class TestE2EQualityMetrics:
github_overhead = lines_with_github - lines_no_github
# Validate overhead is within acceptable range (30-50 lines)
assert 20 <= github_overhead <= 60, f"GitHub overhead is {github_overhead} lines, expected 20-60"
assert 20 <= github_overhead <= 60, (
f"GitHub overhead is {github_overhead} lines, expected 20-60"
)
def test_router_size_within_limits(self, tmp_path):
"""
@@ -457,7 +541,9 @@ class TestE2EBackwardCompatibility:
code_stream = CodeStream(directory=tmp_path, files=[])
docs_stream = DocsStream(readme=None, contributing=None, docs_files=[])
insights_stream = InsightsStream(metadata={}, common_problems=[], known_solutions=[], top_labels=[])
insights_stream = InsightsStream(
metadata={}, common_problems=[], known_solutions=[], top_labels=[]
)
three_streams = ThreeStreamData(code_stream, docs_stream, insights_stream)
mock_fetcher.fetch.return_value = three_streams
@@ -490,8 +576,12 @@ class TestE2ETokenEfficiency:
# Create GitHub streams
code_stream = CodeStream(directory=tmp_path, files=[tmp_path / "main.py"])
docs_stream = DocsStream(readme="# Test\n\nQuick start guide.", contributing=None, docs_files=[])
insights_stream = InsightsStream(metadata={"stars": 100}, common_problems=[], known_solutions=[], top_labels=[])
docs_stream = DocsStream(
readme="# Test\n\nQuick start guide.", contributing=None, docs_files=[]
)
insights_stream = InsightsStream(
metadata={"stars": 100}, common_problems=[], known_solutions=[], top_labels=[]
)
three_streams = ThreeStreamData(code_stream, docs_stream, insights_stream)
# Verify streams are separate (no duplication)

View File

@@ -69,7 +69,9 @@ class TestEstimatePagesCLI(unittest.TestCase):
import subprocess
try:
result = subprocess.run(["skill-seekers", "estimate", "--help"], capture_output=True, text=True, timeout=5)
result = subprocess.run(
["skill-seekers", "estimate", "--help"], capture_output=True, text=True, timeout=5
)
# Should return successfully (0 or 2 for argparse)
self.assertIn(result.returncode, [0, 2])
@@ -83,7 +85,9 @@ class TestEstimatePagesCLI(unittest.TestCase):
import subprocess
try:
result = subprocess.run(["skill-seekers-estimate", "--help"], capture_output=True, text=True, timeout=5)
result = subprocess.run(
["skill-seekers-estimate", "--help"], capture_output=True, text=True, timeout=5
)
# Should return successfully
self.assertIn(result.returncode, [0, 2])
@@ -96,11 +100,15 @@ class TestEstimatePagesCLI(unittest.TestCase):
try:
# Run without config argument
result = subprocess.run(["skill-seekers", "estimate"], capture_output=True, text=True, timeout=5)
result = subprocess.run(
["skill-seekers", "estimate"], capture_output=True, text=True, timeout=5
)
# Should fail (non-zero exit code) or show usage
self.assertTrue(
result.returncode != 0 or "usage" in result.stderr.lower() or "usage" in result.stdout.lower()
result.returncode != 0
or "usage" in result.stderr.lower()
or "usage" in result.stdout.lower()
)
except FileNotFoundError:
self.skipTest("skill-seekers command not installed")
@@ -111,7 +119,9 @@ class TestEstimatePagesCLI(unittest.TestCase):
try:
# Run with --all flag
result = subprocess.run(["skill-seekers", "estimate", "--all"], capture_output=True, text=True, timeout=10)
result = subprocess.run(
["skill-seekers", "estimate", "--all"], capture_output=True, text=True, timeout=10
)
# Should succeed
self.assertEqual(result.returncode, 0)
@@ -125,7 +135,9 @@ class TestEstimatePagesCLI(unittest.TestCase):
# Should list some known configs
# (these should exist in api/configs_repo/official/)
self.assertTrue(
"react" in output.lower() or "django" in output.lower() or "godot" in output.lower(),
"react" in output.lower()
or "django" in output.lower()
or "godot" in output.lower(),
"Expected at least one known config name in output",
)
except FileNotFoundError:
@@ -136,7 +148,9 @@ class TestEstimatePagesCLI(unittest.TestCase):
import subprocess
try:
result = subprocess.run(["skill-seekers-estimate", "--all"], capture_output=True, text=True, timeout=10)
result = subprocess.run(
["skill-seekers-estimate", "--all"], capture_output=True, text=True, timeout=10
)
# Should succeed
self.assertEqual(result.returncode, 0)

View File

@@ -60,7 +60,10 @@ class TestExcludedDirsAdditional(unittest.TestCase):
@patch("skill_seekers.cli.github_scraper.Github")
def test_extend_with_additional_dirs(self, mock_github):
"""Test adding custom exclusions to defaults."""
config = {"repo": "owner/repo", "exclude_dirs_additional": ["proprietary", "vendor", "third_party"]}
config = {
"repo": "owner/repo",
"exclude_dirs_additional": ["proprietary", "vendor", "third_party"],
}
scraper = GitHubScraper(config)
@@ -185,7 +188,11 @@ class TestExcludedDirsEdgeCases(unittest.TestCase):
"""Test that duplicates in additional list are handled (set deduplication)."""
config = {
"repo": "owner/repo",
"exclude_dirs_additional": ["venv", "custom", "venv"], # venv is duplicate (default + listed)
"exclude_dirs_additional": [
"venv",
"custom",
"venv",
], # venv is duplicate (default + listed)
}
scraper = GitHubScraper(config)
@@ -240,7 +247,11 @@ class TestExcludedDirsWithLocalRepo(unittest.TestCase):
@patch("skill_seekers.cli.github_scraper.Github")
def test_replace_mode_with_local_repo_path(self, mock_github):
"""Test that replace mode works with local_repo_path."""
config = {"repo": "owner/repo", "local_repo_path": "/tmp/test/repo", "exclude_dirs": ["only_this"]}
config = {
"repo": "owner/repo",
"local_repo_path": "/tmp/test/repo",
"exclude_dirs": ["only_this"],
}
scraper = GitHubScraper(config)
@@ -277,7 +288,10 @@ class TestExcludedDirsLogging(unittest.TestCase):
# Should have logged WARNING message
warning_calls = [str(call) for call in mock_logger.warning.call_args_list]
self.assertTrue(
any("Using custom directory exclusions" in call and "defaults overridden" in call for call in warning_calls)
any(
"Using custom directory exclusions" in call and "defaults overridden" in call
for call in warning_calls
)
)
@patch("skill_seekers.cli.github_scraper.Github")

View File

@@ -105,9 +105,16 @@ class TestRouterGeneratorWithGitHub:
# Create GitHub streams
code_stream = CodeStream(directory=tmp_path, files=[])
docs_stream = DocsStream(readme="# Test Project\n\nA test OAuth library.", contributing=None, docs_files=[])
docs_stream = DocsStream(
readme="# Test Project\n\nA test OAuth library.", contributing=None, docs_files=[]
)
insights_stream = InsightsStream(
metadata={"stars": 1234, "forks": 56, "language": "Python", "description": "OAuth helper"},
metadata={
"stars": 1234,
"forks": 56,
"language": "Python",
"description": "OAuth helper",
},
common_problems=[
{
"title": "OAuth fails on redirect",
@@ -133,7 +140,11 @@ class TestRouterGeneratorWithGitHub:
def test_extract_keywords_with_github_labels(self, tmp_path):
"""Test keyword extraction with GitHub issue labels (2x weight)."""
config = {"name": "test-oauth", "base_url": "https://example.com", "categories": {"oauth": ["oauth", "auth"]}}
config = {
"name": "test-oauth",
"base_url": "https://example.com",
"categories": {"oauth": ["oauth", "auth"]},
}
config_path = tmp_path / "config.json"
with open(config_path, "w") as f:
@@ -178,10 +189,17 @@ class TestRouterGeneratorWithGitHub:
# Create GitHub streams
code_stream = CodeStream(directory=tmp_path, files=[])
docs_stream = DocsStream(
readme="# OAuth Library\n\nQuick start: Install with pip install oauth", contributing=None, docs_files=[]
readme="# OAuth Library\n\nQuick start: Install with pip install oauth",
contributing=None,
docs_files=[],
)
insights_stream = InsightsStream(
metadata={"stars": 5000, "forks": 200, "language": "Python", "description": "OAuth 2.0 library"},
metadata={
"stars": 5000,
"forks": 200,
"language": "Python",
"description": "OAuth 2.0 library",
},
common_problems=[
{
"title": "Redirect URI mismatch",
@@ -190,7 +208,13 @@ class TestRouterGeneratorWithGitHub:
"comments": 25,
"labels": ["bug", "oauth"],
},
{"title": "Token refresh fails", "number": 95, "state": "open", "comments": 18, "labels": ["oauth"]},
{
"title": "Token refresh fails",
"number": 95,
"state": "open",
"comments": 18,
"labels": ["oauth"],
},
],
known_solutions=[],
top_labels=[],
@@ -250,7 +274,11 @@ class TestSubSkillIssuesSection:
def test_generate_subskill_issues_section(self, tmp_path):
"""Test generation of issues section for sub-skills."""
config = {"name": "test-oauth", "base_url": "https://example.com", "categories": {"oauth": ["oauth"]}}
config = {
"name": "test-oauth",
"base_url": "https://example.com",
"categories": {"oauth": ["oauth"]},
}
config_path = tmp_path / "config.json"
with open(config_path, "w") as f:
@@ -269,10 +297,22 @@ class TestSubSkillIssuesSection:
"comments": 20,
"labels": ["oauth", "bug"],
},
{"title": "Token expiration issue", "number": 45, "state": "open", "comments": 15, "labels": ["oauth"]},
{
"title": "Token expiration issue",
"number": 45,
"state": "open",
"comments": 15,
"labels": ["oauth"],
},
],
known_solutions=[
{"title": "Fixed OAuth flow", "number": 40, "state": "closed", "comments": 10, "labels": ["oauth"]}
{
"title": "Fixed OAuth flow",
"number": 40,
"state": "closed",
"comments": 10,
"labels": ["oauth"],
}
],
top_labels=[],
)
@@ -293,7 +333,11 @@ class TestSubSkillIssuesSection:
def test_generate_subskill_issues_no_matches(self, tmp_path):
"""Test issues section when no issues match the topic."""
config = {"name": "test-async", "base_url": "https://example.com", "categories": {"async": ["async"]}}
config = {
"name": "test-async",
"base_url": "https://example.com",
"categories": {"async": ["async"]},
}
config_path = tmp_path / "config.json"
with open(config_path, "w") as f:
@@ -305,7 +349,13 @@ class TestSubSkillIssuesSection:
insights_stream = InsightsStream(
metadata={},
common_problems=[
{"title": "OAuth fails", "number": 1, "state": "open", "comments": 5, "labels": ["oauth"]}
{
"title": "OAuth fails",
"number": 1,
"state": "open",
"comments": 5,
"labels": ["oauth"],
}
],
known_solutions=[],
top_labels=[],
@@ -361,7 +411,12 @@ class TestIntegration:
],
)
insights_stream = InsightsStream(
metadata={"stars": 10000, "forks": 500, "language": "Python", "description": "Fast MCP server framework"},
metadata={
"stars": 10000,
"forks": 500,
"language": "Python",
"description": "Fast MCP server framework",
},
common_problems=[
{
"title": "OAuth setup fails",
@@ -370,8 +425,20 @@ class TestIntegration:
"comments": 30,
"labels": ["bug", "oauth"],
},
{"title": "Async deadlock", "number": 142, "state": "open", "comments": 25, "labels": ["async", "bug"]},
{"title": "Token refresh issue", "number": 130, "state": "open", "comments": 20, "labels": ["oauth"]},
{
"title": "Async deadlock",
"number": 142,
"state": "open",
"comments": 25,
"labels": ["async", "bug"],
},
{
"title": "Token refresh issue",
"number": 130,
"state": "open",
"comments": 20,
"labels": ["oauth"],
},
],
known_solutions=[
{
@@ -381,7 +448,13 @@ class TestIntegration:
"comments": 15,
"labels": ["oauth"],
},
{"title": "Resolved async race", "number": 110, "state": "closed", "comments": 12, "labels": ["async"]},
{
"title": "Resolved async race",
"number": 110,
"state": "closed",
"comments": 12,
"labels": ["async"],
},
],
top_labels=[
{"label": "oauth", "count": 45},
@@ -392,7 +465,9 @@ class TestIntegration:
github_streams = ThreeStreamData(code_stream, docs_stream, insights_stream)
# Create router generator
generator = RouterGenerator([str(config_path1), str(config_path2)], github_streams=github_streams)
generator = RouterGenerator(
[str(config_path1), str(config_path2)], github_streams=github_streams
)
# Generate SKILL.md
skill_md = generator.generate_skill_md()
@@ -414,8 +489,14 @@ class TestIntegration:
# 4. Examples section with converted questions (Fix 1)
assert "## Examples" in skill_md
# Issues converted to natural questions
assert "how do i fix oauth setup" in skill_md.lower() or "how do i handle oauth setup" in skill_md.lower()
assert "how do i handle async deadlock" in skill_md.lower() or "how do i fix async deadlock" in skill_md.lower()
assert (
"how do i fix oauth setup" in skill_md.lower()
or "how do i handle oauth setup" in skill_md.lower()
)
assert (
"how do i handle async deadlock" in skill_md.lower()
or "how do i fix async deadlock" in skill_md.lower()
)
# Common Issues section may still exist with other issues
# Note: Issue numbers may appear in Common Issues or Common Patterns sections

View File

@@ -134,7 +134,9 @@ class TestCloneOrPull:
"""Test cloning a new repository."""
mock_clone.return_value = MagicMock()
result = git_repo.clone_or_pull(source_name="test-source", git_url="https://github.com/org/repo.git")
result = git_repo.clone_or_pull(
source_name="test-source", git_url="https://github.com/org/repo.git"
)
assert result == git_repo.cache_dir / "test-source"
mock_clone.assert_called_once()
@@ -159,7 +161,9 @@ class TestCloneOrPull:
mock_repo.remotes.origin = mock_origin
mock_repo_class.return_value = mock_repo
result = git_repo.clone_or_pull(source_name="test-source", git_url="https://github.com/org/repo.git")
result = git_repo.clone_or_pull(
source_name="test-source", git_url="https://github.com/org/repo.git"
)
assert result == repo_path
mock_origin.pull.assert_called_once_with("main")
@@ -179,7 +183,9 @@ class TestCloneOrPull:
mock_repo_class.return_value = mock_repo
result = git_repo.clone_or_pull(
source_name="test-source", git_url="https://github.com/org/repo.git", token="ghp_token123"
source_name="test-source",
git_url="https://github.com/org/repo.git",
token="ghp_token123",
)
# Verify URL was updated with token
@@ -198,7 +204,9 @@ class TestCloneOrPull:
mock_clone.return_value = MagicMock()
git_repo.clone_or_pull(source_name="test-source", git_url="https://github.com/org/repo.git", force_refresh=True)
git_repo.clone_or_pull(
source_name="test-source", git_url="https://github.com/org/repo.git", force_refresh=True
)
# Verify clone was called (not pull)
mock_clone.assert_called_once()
@@ -208,7 +216,9 @@ class TestCloneOrPull:
"""Test cloning with custom branch."""
mock_clone.return_value = MagicMock()
git_repo.clone_or_pull(source_name="test-source", git_url="https://github.com/org/repo.git", branch="develop")
git_repo.clone_or_pull(
source_name="test-source", git_url="https://github.com/org/repo.git", branch="develop"
)
call_kwargs = mock_clone.call_args[1]
assert call_kwargs["branch"] == "develop"
@@ -221,10 +231,14 @@ class TestCloneOrPull:
@patch("skill_seekers.mcp.git_repo.git.Repo.clone_from")
def test_clone_auth_failure_error(self, mock_clone, git_repo):
"""Test authentication failure error handling."""
mock_clone.side_effect = GitCommandError("clone", 128, stderr="fatal: Authentication failed")
mock_clone.side_effect = GitCommandError(
"clone", 128, stderr="fatal: Authentication failed"
)
with pytest.raises(GitCommandError, match="Authentication failed"):
git_repo.clone_or_pull(source_name="test-source", git_url="https://github.com/org/repo.git")
git_repo.clone_or_pull(
source_name="test-source", git_url="https://github.com/org/repo.git"
)
@patch("skill_seekers.mcp.git_repo.git.Repo.clone_from")
def test_clone_not_found_error(self, mock_clone, git_repo):
@@ -232,7 +246,9 @@ class TestCloneOrPull:
mock_clone.side_effect = GitCommandError("clone", 128, stderr="fatal: repository not found")
with pytest.raises(GitCommandError, match="Repository not found"):
git_repo.clone_or_pull(source_name="test-source", git_url="https://github.com/org/nonexistent.git")
git_repo.clone_or_pull(
source_name="test-source", git_url="https://github.com/org/nonexistent.git"
)
class TestFindConfigs:

View File

@@ -276,7 +276,9 @@ class TestGitSourcesE2E:
git_repo = GitConfigRepo(cache_dir=cache_dir)
# Step 1: Clone repository
repo_path = git_repo.clone_or_pull(source_name="test-pull", git_url=git_url, branch="master")
repo_path = git_repo.clone_or_pull(
source_name="test-pull", git_url=git_url, branch="master"
)
initial_configs = git_repo.find_configs(repo_path)
assert len(initial_configs) == 3
@@ -333,7 +335,9 @@ class TestGitSourcesE2E:
git_repo = GitConfigRepo(cache_dir=cache_dir)
# Step 1: Clone repository
repo_path = git_repo.clone_or_pull(source_name="test-refresh", git_url=git_url, branch="master")
repo_path = git_repo.clone_or_pull(
source_name="test-refresh", git_url=git_url, branch="master"
)
# Step 2: Modify local cache manually
corrupt_file = repo_path / "CORRUPTED.txt"
@@ -371,7 +375,9 @@ class TestGitSourcesE2E:
git_repo = GitConfigRepo(cache_dir=cache_dir)
# Step 1: Clone repository
repo_path = git_repo.clone_or_pull(source_name="test-not-found", git_url=git_url, branch="master")
repo_path = git_repo.clone_or_pull(
source_name="test-not-found", git_url=git_url, branch="master"
)
# Step 2: Try to fetch non-existent config
with pytest.raises(FileNotFoundError) as exc_info:
@@ -401,7 +407,9 @@ class TestGitSourcesE2E:
for invalid_url in invalid_urls:
with pytest.raises(ValueError, match="Invalid git URL"):
git_repo.clone_or_pull(source_name="test-invalid", git_url=invalid_url, branch="master")
git_repo.clone_or_pull(
source_name="test-invalid", git_url=invalid_url, branch="master"
)
def test_e2e_source_name_validation(self, temp_dirs):
"""
@@ -496,11 +504,15 @@ class TestGitSourcesE2E:
# Step 1: Clone to cache_dir_1
git_repo_1 = GitConfigRepo(cache_dir=cache_dir_1)
repo_path_1 = git_repo_1.clone_or_pull(source_name="test-source", git_url=git_url, branch="master")
repo_path_1 = git_repo_1.clone_or_pull(
source_name="test-source", git_url=git_url, branch="master"
)
# Step 2: Clone same repo to cache_dir_2
git_repo_2 = GitConfigRepo(cache_dir=cache_dir_2)
repo_path_2 = git_repo_2.clone_or_pull(source_name="test-source", git_url=git_url, branch="master")
repo_path_2 = git_repo_2.clone_or_pull(
source_name="test-source", git_url=git_url, branch="master"
)
# Step 3: Verify both caches are independent
assert repo_path_1 != repo_path_2
@@ -621,7 +633,9 @@ class TestGitSourcesE2E:
repo.index.commit("Increase React config max_pages to 500")
# Step 6: Developers pull updates
git_repo.clone_or_pull(source_name=source["name"], git_url=source["git_url"], branch=source["branch"])
git_repo.clone_or_pull(
source_name=source["name"], git_url=source["git_url"], branch=source["branch"]
)
updated_config = git_repo.get_config(repo_path, "react")
assert updated_config["max_pages"] == 500
@@ -631,7 +645,9 @@ class TestGitSourcesE2E:
repo.index.remove(["react.json"])
repo.index.commit("Remove react.json")
git_repo.clone_or_pull(source_name=source["name"], git_url=source["git_url"], branch=source["branch"])
git_repo.clone_or_pull(
source_name=source["name"], git_url=source["git_url"], branch=source["branch"]
)
# Step 8: Error handling works correctly
with pytest.raises(FileNotFoundError, match="react.json"):
@@ -700,7 +716,11 @@ class TestMCPToolsE2E:
"""
MCP E2E Test 1: Complete add/list/remove workflow via MCP tools
"""
from skill_seekers.mcp.server import add_config_source_tool, list_config_sources_tool, remove_config_source_tool
from skill_seekers.mcp.server import (
add_config_source_tool,
list_config_sources_tool,
remove_config_source_tool,
)
cache_dir, config_dir = temp_dirs
repo_dir, repo = temp_git_repo
@@ -708,7 +728,12 @@ class TestMCPToolsE2E:
# Add source
add_result = await add_config_source_tool(
{"name": "mcp-test-source", "git_url": git_url, "source_type": "custom", "branch": "master"}
{
"name": "mcp-test-source",
"git_url": git_url,
"source_type": "custom",
"branch": "master",
}
)
assert len(add_result) == 1
@@ -744,7 +769,12 @@ class TestMCPToolsE2E:
dest_dir.mkdir(parents=True, exist_ok=True)
result = await fetch_config_tool(
{"config_name": "test-framework", "git_url": git_url, "branch": "master", "destination": str(dest_dir)}
{
"config_name": "test-framework",
"git_url": git_url,
"branch": "master",
"destination": str(dest_dir),
}
)
assert len(result) == 1
@@ -831,10 +861,16 @@ class TestMCPToolsE2E:
assert "" in result[0].text or "not found" in result[0].text.lower()
# Test 5: Fetch non-existent config from valid source
await add_config_source_tool({"name": "valid-source", "git_url": git_url, "branch": "master"})
await add_config_source_tool(
{"name": "valid-source", "git_url": git_url, "branch": "master"}
)
result = await fetch_config_tool(
{"config_name": "non-existent-config", "source": "valid-source", "destination": str(dest_dir)}
{
"config_name": "non-existent-config",
"source": "valid-source",
"destination": str(dest_dir),
}
)
assert "" in result[0].text or "not found" in result[0].text.lower()

View File

@@ -189,7 +189,13 @@ class TestIssueAnalysis:
def test_analyze_issues_known_solutions(self):
"""Test extraction of known solutions (closed issues with comments)."""
issues = [
{"title": "Fixed OAuth", "number": 35, "state": "closed", "comments": 5, "labels": [{"name": "bug"}]},
{
"title": "Fixed OAuth",
"number": 35,
"state": "closed",
"comments": 5,
"labels": [{"name": "bug"}],
},
{
"title": "Closed without comments",
"number": 36,
@@ -239,7 +245,10 @@ class TestIssueAnalysis:
assert len(insights["common_problems"]) <= 10
# Should be sorted by comment count (descending)
if len(insights["common_problems"]) > 1:
assert insights["common_problems"][0]["comments"] >= insights["common_problems"][1]["comments"]
assert (
insights["common_problems"][0]["comments"]
>= insights["common_problems"][1]["comments"]
)
class TestGitHubAPI:
@@ -286,7 +295,13 @@ class TestGitHubAPI:
"""Test fetching issues via GitHub API."""
mock_response = Mock()
mock_response.json.return_value = [
{"title": "Bug", "number": 42, "state": "open", "comments": 10, "labels": [{"name": "bug"}]}
{
"title": "Bug",
"number": 42,
"state": "open",
"comments": 10,
"labels": [{"name": "bug"}],
}
]
mock_response.raise_for_status = Mock()
mock_get.return_value = mock_response
@@ -304,7 +319,14 @@ class TestGitHubAPI:
mock_response = Mock()
mock_response.json.return_value = [
{"title": "Issue", "number": 42, "state": "open", "comments": 5, "labels": []},
{"title": "PR", "number": 43, "state": "open", "comments": 3, "labels": [], "pull_request": {}},
{
"title": "PR",
"number": 43,
"state": "open",
"comments": 3,
"labels": [],
"pull_request": {},
},
]
mock_response.raise_for_status = Mock()
mock_get.return_value = mock_response
@@ -376,7 +398,13 @@ class TestIntegration:
else:
# Issues call
mock_response.json.return_value = [
{"title": "Test Issue", "number": 42, "state": "open", "comments": 10, "labels": [{"name": "bug"}]}
{
"title": "Test Issue",
"number": 42,
"state": "open",
"comments": 10,
"labels": [{"name": "bug"}],
}
]
return mock_response

View File

@@ -587,7 +587,9 @@ class TestGitHubToSkillConverter(unittest.TestCase):
config = {"repo": "facebook/react", "name": "test", "description": "Test skill"}
# Patch the paths to use our temp directory
with patch("skill_seekers.cli.github_scraper.GitHubToSkillConverter._load_data") as mock_load:
with patch(
"skill_seekers.cli.github_scraper.GitHubToSkillConverter._load_data"
) as mock_load:
mock_load.return_value = self.mock_data
converter = self.GitHubToSkillConverter(config)
converter.skill_dir = str(self.output_dir / "test_skill")
@@ -677,7 +679,10 @@ class TestSymlinkHandling(unittest.TestCase):
scraper.repo = Mock()
# First call returns symlink, second call raises 404
scraper.repo.get_contents.side_effect = [mock_symlink, GithubException(404, "Not found")]
scraper.repo.get_contents.side_effect = [
mock_symlink,
GithubException(404, "Not found"),
]
result = scraper._get_file_content("README.md")
@@ -729,7 +734,9 @@ class TestSymlinkHandling(unittest.TestCase):
# Should successfully extract README content
self.assertIn("readme", scraper.extracted_data)
self.assertEqual(scraper.extracted_data["readme"], "# AI SDK\n\nThe AI SDK is a TypeScript toolkit")
self.assertEqual(
scraper.extracted_data["readme"], "# AI SDK\n\nThe AI SDK is a TypeScript toolkit"
)
def test_extract_changelog_with_symlink(self):
"""Test CHANGELOG extraction with symlinked CHANGELOG.md"""
@@ -789,7 +796,9 @@ class TestSymlinkHandling(unittest.TestCase):
mock_content.type = "file"
mock_content.encoding = "none" # Large files have encoding="none"
mock_content.size = 1388271 # 1.4MB CHANGELOG
mock_content.download_url = "https://raw.githubusercontent.com/ccxt/ccxt/master/CHANGELOG.md"
mock_content.download_url = (
"https://raw.githubusercontent.com/ccxt/ccxt/master/CHANGELOG.md"
)
with patch("skill_seekers.cli.github_scraper.Github"):
scraper = self.GitHubScraper(config)
@@ -820,7 +829,9 @@ class TestSymlinkHandling(unittest.TestCase):
mock_content.type = "file"
mock_content.encoding = "none"
mock_content.size = 1388271
mock_content.download_url = "https://raw.githubusercontent.com/ccxt/ccxt/master/CHANGELOG.md"
mock_content.download_url = (
"https://raw.githubusercontent.com/ccxt/ccxt/master/CHANGELOG.md"
)
with patch("skill_seekers.cli.github_scraper.Github"):
scraper = self.GitHubScraper(config)

View File

@@ -15,7 +15,12 @@ from unittest.mock import MagicMock, Mock, patch
import pytest
from skill_seekers.cli.guide_enhancer import GuideEnhancer, PrerequisiteItem, StepEnhancement, TroubleshootingItem
from skill_seekers.cli.guide_enhancer import (
GuideEnhancer,
PrerequisiteItem,
StepEnhancement,
TroubleshootingItem,
)
class TestGuideEnhancerModeDetection:
@@ -25,7 +30,9 @@ class TestGuideEnhancerModeDetection:
"""Test auto mode detects API when key present and library available"""
with patch.dict(os.environ, {"ANTHROPIC_API_KEY": "sk-ant-test"}):
with patch("skill_seekers.cli.guide_enhancer.ANTHROPIC_AVAILABLE", True):
with patch("skill_seekers.cli.guide_enhancer.anthropic", create=True) as mock_anthropic:
with patch(
"skill_seekers.cli.guide_enhancer.anthropic", create=True
) as mock_anthropic:
mock_anthropic.Anthropic = Mock()
enhancer = GuideEnhancer(mode="auto")
# Will be 'api' if library available, otherwise 'local' or 'none'
@@ -96,7 +103,9 @@ class TestGuideEnhancerStepDescriptions:
with patch.dict(os.environ, {"ANTHROPIC_API_KEY": "sk-ant-test"}):
with patch("skill_seekers.cli.guide_enhancer.ANTHROPIC_AVAILABLE", True):
with patch("skill_seekers.cli.guide_enhancer.anthropic", create=True) as mock_anthropic:
with patch(
"skill_seekers.cli.guide_enhancer.anthropic", create=True
) as mock_anthropic:
mock_anthropic.Anthropic = Mock()
enhancer = GuideEnhancer(mode="api")
if enhancer.mode != "api":
@@ -104,7 +113,12 @@ class TestGuideEnhancerStepDescriptions:
enhancer.client = Mock() # Mock the client
steps = [{"description": "scraper.scrape(url)", "code": "result = scraper.scrape(url)"}]
steps = [
{
"description": "scraper.scrape(url)",
"code": "result = scraper.scrape(url)",
}
]
result = enhancer.enhance_step_descriptions(steps)
assert len(result) == 1
@@ -129,7 +143,11 @@ class TestGuideEnhancerTroubleshooting:
def test_enhance_troubleshooting_none_mode(self):
"""Test troubleshooting in none mode"""
enhancer = GuideEnhancer(mode="none")
guide_data = {"title": "Test Guide", "steps": [{"description": "test", "code": "code"}], "language": "python"}
guide_data = {
"title": "Test Guide",
"steps": [{"description": "test", "code": "code"}],
"language": "python",
}
result = enhancer.enhance_troubleshooting(guide_data)
assert result == []
@@ -151,7 +169,9 @@ class TestGuideEnhancerTroubleshooting:
with patch.dict(os.environ, {"ANTHROPIC_API_KEY": "sk-ant-test"}):
with patch("skill_seekers.cli.guide_enhancer.ANTHROPIC_AVAILABLE", True):
with patch("skill_seekers.cli.guide_enhancer.anthropic", create=True) as mock_anthropic:
with patch(
"skill_seekers.cli.guide_enhancer.anthropic", create=True
) as mock_anthropic:
mock_anthropic.Anthropic = Mock()
enhancer = GuideEnhancer(mode="api")
if enhancer.mode != "api":
@@ -196,7 +216,11 @@ class TestGuideEnhancerPrerequisites:
mock_call.return_value = json.dumps(
{
"prerequisites_detailed": [
{"name": "requests", "why": "HTTP client for making web requests", "setup": "pip install requests"},
{
"name": "requests",
"why": "HTTP client for making web requests",
"setup": "pip install requests",
},
{
"name": "beautifulsoup4",
"why": "HTML/XML parser for web scraping",
@@ -208,7 +232,9 @@ class TestGuideEnhancerPrerequisites:
with patch.dict(os.environ, {"ANTHROPIC_API_KEY": "sk-ant-test"}):
with patch("skill_seekers.cli.guide_enhancer.ANTHROPIC_AVAILABLE", True):
with patch("skill_seekers.cli.guide_enhancer.anthropic", create=True) as mock_anthropic:
with patch(
"skill_seekers.cli.guide_enhancer.anthropic", create=True
) as mock_anthropic:
mock_anthropic.Anthropic = Mock()
enhancer = GuideEnhancer(mode="api")
if enhancer.mode != "api":
@@ -240,12 +266,20 @@ class TestGuideEnhancerNextSteps:
def test_enhance_next_steps_api_mode(self, mock_call):
"""Test next steps with API mode"""
mock_call.return_value = json.dumps(
{"next_steps": ["How to handle async workflows", "How to add error handling", "How to implement caching"]}
{
"next_steps": [
"How to handle async workflows",
"How to add error handling",
"How to implement caching",
]
}
)
with patch.dict(os.environ, {"ANTHROPIC_API_KEY": "sk-ant-test"}):
with patch("skill_seekers.cli.guide_enhancer.ANTHROPIC_AVAILABLE", True):
with patch("skill_seekers.cli.guide_enhancer.anthropic", create=True) as mock_anthropic:
with patch(
"skill_seekers.cli.guide_enhancer.anthropic", create=True
) as mock_anthropic:
mock_anthropic.Anthropic = Mock()
enhancer = GuideEnhancer(mode="api")
if enhancer.mode != "api":
@@ -285,7 +319,9 @@ class TestGuideEnhancerUseCases:
with patch.dict(os.environ, {"ANTHROPIC_API_KEY": "sk-ant-test"}):
with patch("skill_seekers.cli.guide_enhancer.ANTHROPIC_AVAILABLE", True):
with patch("skill_seekers.cli.guide_enhancer.anthropic", create=True) as mock_anthropic:
with patch(
"skill_seekers.cli.guide_enhancer.anthropic", create=True
) as mock_anthropic:
mock_anthropic.Anthropic = Mock()
enhancer = GuideEnhancer(mode="api")
if enhancer.mode != "api":
@@ -293,7 +329,10 @@ class TestGuideEnhancerUseCases:
enhancer.client = Mock()
guide_data = {"title": "How to Scrape Docs", "description": "Documentation scraping"}
guide_data = {
"title": "How to Scrape Docs",
"description": "Documentation scraping",
}
result = enhancer.enhance_use_cases(guide_data)
assert len(result) == 2
@@ -332,7 +371,11 @@ class TestGuideEnhancerFullWorkflow:
{
"step_descriptions": [
{"step_index": 0, "explanation": "Import required libraries", "variations": []},
{"step_index": 1, "explanation": "Initialize scraper instance", "variations": []},
{
"step_index": 1,
"explanation": "Initialize scraper instance",
"variations": [],
},
],
"troubleshooting": [
{
@@ -342,7 +385,9 @@ class TestGuideEnhancerFullWorkflow:
"solution": "pip install requests",
}
],
"prerequisites_detailed": [{"name": "requests", "why": "HTTP client", "setup": "pip install requests"}],
"prerequisites_detailed": [
{"name": "requests", "why": "HTTP client", "setup": "pip install requests"}
],
"next_steps": ["How to add authentication"],
"use_cases": ["Automate documentation extraction"],
}
@@ -350,7 +395,9 @@ class TestGuideEnhancerFullWorkflow:
with patch.dict(os.environ, {"ANTHROPIC_API_KEY": "sk-ant-test"}):
with patch("skill_seekers.cli.guide_enhancer.ANTHROPIC_AVAILABLE", True):
with patch("skill_seekers.cli.guide_enhancer.anthropic", create=True) as mock_anthropic:
with patch(
"skill_seekers.cli.guide_enhancer.anthropic", create=True
) as mock_anthropic:
mock_anthropic.Anthropic = Mock()
enhancer = GuideEnhancer(mode="api")
if enhancer.mode != "api":
@@ -508,7 +555,11 @@ class TestGuideEnhancerResponseParsing:
}
)
guide_data = {"title": "Test", "steps": [{"description": "Test", "code": "test"}], "language": "python"}
guide_data = {
"title": "Test",
"steps": [{"description": "Test", "code": "test"}],
"language": "python",
}
result = enhancer._parse_enhancement_response(response, guide_data)

View File

@@ -121,7 +121,10 @@ def test_workflow():
def test_calculate_complexity(self):
"""Test complexity level calculation"""
# Simple workflow - beginner
simple_steps = [WorkflowStep(1, "x = 1", "Assign variable"), WorkflowStep(2, "print(x)", "Print variable")]
simple_steps = [
WorkflowStep(1, "x = 1", "Assign variable"),
WorkflowStep(2, "print(x)", "Print variable"),
]
simple_workflow = {"code": "x = 1\nprint(x)", "category": "workflow"}
complexity_simple = self.analyzer._calculate_complexity(simple_steps, simple_workflow)
self.assertEqual(complexity_simple, "beginner")
@@ -129,7 +132,9 @@ def test_workflow():
# Complex workflow - advanced
complex_steps = [WorkflowStep(i, f"step{i}", f"Step {i}") for i in range(1, 8)]
complex_workflow = {
"code": "\n".join([f"async def step{i}(): await complex_operation()" for i in range(7)]),
"code": "\n".join(
[f"async def step{i}(): await complex_operation()" for i in range(7)]
),
"category": "workflow",
}
complexity_complex = self.analyzer._calculate_complexity(complex_steps, complex_workflow)
@@ -466,8 +471,12 @@ class TestHowToGuideBuilder(unittest.TestCase):
def test_create_collection(self):
"""Test guide collection creation with metadata"""
guides = [
HowToGuide(guide_id="guide-1", title="Guide 1", overview="Test", complexity_level="beginner"),
HowToGuide(guide_id="guide-2", title="Guide 2", overview="Test", complexity_level="advanced"),
HowToGuide(
guide_id="guide-1", title="Guide 1", overview="Test", complexity_level="beginner"
),
HowToGuide(
guide_id="guide-2", title="Guide 2", overview="Test", complexity_level="advanced"
),
]
collection = self.builder._create_collection(guides)
@@ -492,7 +501,10 @@ class TestHowToGuideBuilder(unittest.TestCase):
# Correct attribute names
collection = GuideCollection(
total_guides=1, guides=guides, guides_by_complexity={"beginner": 1}, guides_by_use_case={}
total_guides=1,
guides=guides,
guides_by_complexity={"beginner": 1},
guides_by_use_case={},
)
output_dir = Path(self.temp_dir)
@@ -905,7 +917,10 @@ def test_file_processing():
output_dir = Path(self.temp_dir) / "guides_fallback"
# Mock GuideEnhancer to raise exception
with patch("skill_seekers.cli.guide_enhancer.GuideEnhancer", side_effect=Exception("AI unavailable")):
with patch(
"skill_seekers.cli.guide_enhancer.GuideEnhancer",
side_effect=Exception("AI unavailable"),
):
# Should NOT crash - graceful fallback
collection = builder.build_guides_from_examples(
examples=examples,

View File

@@ -328,7 +328,9 @@ class TestInstallToAllAgents:
def mock_get_agent_path(agent_name, project_root=None):
return Path(agent_tmpdir) / f".{agent_name}" / "skills"
with patch("skill_seekers.cli.install_agent.get_agent_path", side_effect=mock_get_agent_path):
with patch(
"skill_seekers.cli.install_agent.get_agent_path", side_effect=mock_get_agent_path
):
results = install_to_all_agents(self.skill_dir, force=True)
assert len(results) == 11
@@ -357,7 +359,9 @@ class TestInstallToAllAgents:
def mock_get_agent_path(agent_name, project_root=None):
return Path(agent_tmpdir) / f".{agent_name}" / "skills"
with patch("skill_seekers.cli.install_agent.get_agent_path", side_effect=mock_get_agent_path):
with patch(
"skill_seekers.cli.install_agent.get_agent_path", side_effect=mock_get_agent_path
):
# Without force - should fail
results_no_force = install_to_all_agents(self.skill_dir, force=False)
# All should fail because directories exist
@@ -400,7 +404,10 @@ class TestInstallAgentCLI:
def test_cli_help_output(self):
"""Test that --help shows usage information."""
with pytest.raises(SystemExit) as exc_info, patch("sys.argv", ["install_agent.py", "--help"]):
with (
pytest.raises(SystemExit) as exc_info,
patch("sys.argv", ["install_agent.py", "--help"]),
):
main()
# --help exits with code 0
@@ -422,8 +429,13 @@ class TestInstallAgentCLI:
def mock_get_agent_path(agent_name, project_root=None):
return Path(agent_tmpdir) / f".{agent_name}" / "skills"
with patch("skill_seekers.cli.install_agent.get_agent_path", side_effect=mock_get_agent_path):
with patch("sys.argv", ["install_agent.py", str(self.skill_dir), "--agent", "claude", "--dry-run"]):
with patch(
"skill_seekers.cli.install_agent.get_agent_path", side_effect=mock_get_agent_path
):
with patch(
"sys.argv",
["install_agent.py", str(self.skill_dir), "--agent", "claude", "--dry-run"],
):
exit_code = main()
assert exit_code == 0
@@ -437,8 +449,13 @@ class TestInstallAgentCLI:
def mock_get_agent_path(agent_name, project_root=None):
return Path(agent_tmpdir) / f".{agent_name}" / "skills"
with patch("skill_seekers.cli.install_agent.get_agent_path", side_effect=mock_get_agent_path):
with patch("sys.argv", ["install_agent.py", str(self.skill_dir), "--agent", "claude", "--force"]):
with patch(
"skill_seekers.cli.install_agent.get_agent_path", side_effect=mock_get_agent_path
):
with patch(
"sys.argv",
["install_agent.py", str(self.skill_dir), "--agent", "claude", "--force"],
):
exit_code = main()
assert exit_code == 0
@@ -454,8 +471,13 @@ class TestInstallAgentCLI:
def mock_get_agent_path(agent_name, project_root=None):
return Path(agent_tmpdir) / f".{agent_name}" / "skills"
with patch("skill_seekers.cli.install_agent.get_agent_path", side_effect=mock_get_agent_path):
with patch("sys.argv", ["install_agent.py", str(self.skill_dir), "--agent", "all", "--force"]):
with patch(
"skill_seekers.cli.install_agent.get_agent_path", side_effect=mock_get_agent_path
):
with patch(
"sys.argv",
["install_agent.py", str(self.skill_dir), "--agent", "all", "--force"],
):
exit_code = main()
assert exit_code == 0

View File

@@ -23,7 +23,9 @@ class TestInstallCLI(unittest.TestCase):
# Create parser like install_skill.py does
parser = argparse.ArgumentParser()
parser.add_argument("--config", required=True)
parser.add_argument("--target", choices=["claude", "gemini", "openai", "markdown"], default="claude")
parser.add_argument(
"--target", choices=["claude", "gemini", "openai", "markdown"], default="claude"
)
# Test that each platform is accepted
for platform in ["claude", "gemini", "openai", "markdown"]:
@@ -43,7 +45,9 @@ class TestInstallCLI(unittest.TestCase):
parser = argparse.ArgumentParser()
parser.add_argument("--config", required=True)
parser.add_argument("--target", choices=["claude", "gemini", "openai", "markdown"], default="claude")
parser.add_argument(
"--target", choices=["claude", "gemini", "openai", "markdown"], default="claude"
)
# Should raise SystemExit for invalid target
with self.assertRaises(SystemExit):
@@ -62,7 +66,10 @@ class TestInstallToolMultiPlatform(unittest.IsolatedAsyncioTestCase):
for target in ["claude", "gemini", "openai"]:
# Use dry_run=True which skips actual execution
# It will still show us the platform is being recognized
with patch("builtins.open", create=True) as mock_open, patch("json.load") as mock_json_load:
with (
patch("builtins.open", create=True) as mock_open,
patch("json.load") as mock_json_load,
):
# Mock config file reading
mock_json_load.return_value = {"name": "test-skill"}
mock_file = MagicMock()

View File

@@ -44,7 +44,9 @@ class TestInstallSkillValidation:
@pytest.mark.asyncio
async def test_validation_both_configs(self):
"""Test error when both config_name and config_path provided"""
result = await install_skill_tool({"config_name": "react", "config_path": "configs/react.json"})
result = await install_skill_tool(
{"config_name": "react", "config_path": "configs/react.json"}
)
assert len(result) == 1
assert isinstance(result[0], TextContent)
@@ -114,7 +116,10 @@ class TestInstallSkillEnhancementMandatory:
# Verify enhancement phase is present
assert "AI Enhancement (MANDATORY)" in output
assert "Enhancement is REQUIRED for quality (3/10→9/10 boost)" in output or "REQUIRED for quality" in output
assert (
"Enhancement is REQUIRED for quality (3/10→9/10 boost)" in output
or "REQUIRED for quality" in output
)
# Verify it's not optional
assert "MANDATORY" in output
@@ -134,13 +139,23 @@ class TestInstallSkillPhaseOrchestration:
@patch("builtins.open")
@patch("os.environ.get")
async def test_full_workflow_with_fetch(
self, mock_env_get, mock_open, mock_upload, mock_package, mock_subprocess, mock_scrape, mock_fetch
self,
mock_env_get,
mock_open,
mock_upload,
mock_package,
mock_subprocess,
mock_scrape,
mock_fetch,
):
"""Test complete workflow when config_name is provided"""
# Mock fetch_config response
mock_fetch.return_value = [
TextContent(type="text", text="✅ Config fetched successfully\n\nConfig saved to: configs/react.json")
TextContent(
type="text",
text="✅ Config fetched successfully\n\nConfig saved to: configs/react.json",
)
]
# Mock config file read
@@ -159,7 +174,9 @@ class TestInstallSkillPhaseOrchestration:
mock_subprocess.return_value = ("✅ Enhancement complete", "", 0)
# Mock package response
mock_package.return_value = [TextContent(type="text", text="✅ Package complete\n\nSaved to: output/react.zip")]
mock_package.return_value = [
TextContent(type="text", text="✅ Package complete\n\nSaved to: output/react.zip")
]
# Mock upload response
mock_upload.return_value = [TextContent(type="text", text="✅ Upload successful")]
@@ -220,7 +237,9 @@ class TestInstallSkillPhaseOrchestration:
mock_env_get.return_value = ""
# Run the workflow
result = await install_skill_tool({"config_path": "configs/custom.json", "auto_upload": True})
result = await install_skill_tool(
{"config_path": "configs/custom.json", "auto_upload": True}
)
output = result[0].text
@@ -248,7 +267,9 @@ class TestInstallSkillErrorHandling:
"""Test handling of fetch phase failure"""
# Mock fetch failure
mock_fetch.return_value = [TextContent(type="text", text="❌ Failed to fetch config: Network error")]
mock_fetch.return_value = [
TextContent(type="text", text="❌ Failed to fetch config: Network error")
]
result = await install_skill_tool({"config_name": "react"})
@@ -271,7 +292,9 @@ class TestInstallSkillErrorHandling:
mock_open.return_value = mock_file
# Mock scrape failure
mock_scrape.return_value = [TextContent(type="text", text="❌ Scraping failed: Connection timeout")]
mock_scrape.return_value = [
TextContent(type="text", text="❌ Scraping failed: Connection timeout")
]
result = await install_skill_tool({"config_path": "configs/test.json"})
@@ -317,7 +340,9 @@ class TestInstallSkillOptions:
@pytest.mark.asyncio
async def test_no_upload_option(self):
"""Test that no_upload option skips upload phase"""
result = await install_skill_tool({"config_name": "react", "auto_upload": False, "dry_run": True})
result = await install_skill_tool(
{"config_name": "react", "auto_upload": False, "dry_run": True}
)
output = result[0].text
@@ -328,7 +353,9 @@ class TestInstallSkillOptions:
@pytest.mark.asyncio
async def test_unlimited_option(self):
"""Test that unlimited option is passed to scraper"""
result = await install_skill_tool({"config_path": "configs/react.json", "unlimited": True, "dry_run": True})
result = await install_skill_tool(
{"config_path": "configs/react.json", "unlimited": True, "dry_run": True}
)
output = result[0].text
@@ -338,7 +365,9 @@ class TestInstallSkillOptions:
@pytest.mark.asyncio
async def test_custom_destination(self):
"""Test custom destination directory"""
result = await install_skill_tool({"config_name": "react", "destination": "/tmp/skills", "dry_run": True})
result = await install_skill_tool(
{"config_name": "react", "destination": "/tmp/skills", "dry_run": True}
)
output = result[0].text

View File

@@ -95,7 +95,9 @@ class TestInstallSkillE2E:
return str(skill_dir)
@pytest.mark.asyncio
async def test_e2e_with_config_path_no_upload(self, test_config_file, tmp_path, mock_scrape_output):
async def test_e2e_with_config_path_no_upload(
self, test_config_file, tmp_path, mock_scrape_output
):
"""E2E test: config_path mode, no upload"""
# Mock the subprocess calls for scraping and enhancement
@@ -106,7 +108,10 @@ class TestInstallSkillE2E:
):
# Mock scrape_docs to return success
mock_scrape.return_value = [
TextContent(type="text", text=f"✅ Scraping complete\n\nSkill built at: {mock_scrape_output}")
TextContent(
type="text",
text=f"✅ Scraping complete\n\nSkill built at: {mock_scrape_output}",
)
]
# Mock enhancement subprocess (success)
@@ -114,7 +119,9 @@ class TestInstallSkillE2E:
# Mock package_skill to return success
zip_path = str(tmp_path / "output" / "test-e2e.zip")
mock_package.return_value = [TextContent(type="text", text=f"✅ Package complete\n\nSaved to: {zip_path}")]
mock_package.return_value = [
TextContent(type="text", text=f"✅ Package complete\n\nSaved to: {zip_path}")
]
# Run the tool
result = await install_skill_tool(
@@ -167,7 +174,10 @@ class TestInstallSkillE2E:
# Mock fetch_config to return success
config_path = str(tmp_path / "configs" / "react.json")
mock_fetch.return_value = [
TextContent(type="text", text=f"✅ Config fetched successfully\n\nConfig saved to: {config_path}")
TextContent(
type="text",
text=f"✅ Config fetched successfully\n\nConfig saved to: {config_path}",
)
]
# Mock config file read
@@ -178,7 +188,9 @@ class TestInstallSkillE2E:
# Mock scrape_docs
skill_dir = str(tmp_path / "output" / "react")
mock_scrape.return_value = [
TextContent(type="text", text=f"✅ Scraping complete\n\nSkill built at: {skill_dir}")
TextContent(
type="text", text=f"✅ Scraping complete\n\nSkill built at: {skill_dir}"
)
]
# Mock enhancement
@@ -186,7 +198,9 @@ class TestInstallSkillE2E:
# Mock package
zip_path = str(tmp_path / "output" / "react.zip")
mock_package.return_value = [TextContent(type="text", text=f"✅ Package complete\n\nSaved to: {zip_path}")]
mock_package.return_value = [
TextContent(type="text", text=f"✅ Package complete\n\nSaved to: {zip_path}")
]
# Mock env (no API key - should skip upload)
mock_env.return_value = ""
@@ -222,7 +236,9 @@ class TestInstallSkillE2E:
async def test_e2e_dry_run_mode(self, test_config_file):
"""E2E test: dry-run mode (no actual execution)"""
result = await install_skill_tool({"config_path": test_config_file, "auto_upload": False, "dry_run": True})
result = await install_skill_tool(
{"config_path": test_config_file, "auto_upload": False, "dry_run": True}
)
output = result[0].text
@@ -245,9 +261,13 @@ class TestInstallSkillE2E:
with patch("skill_seekers.mcp.server.scrape_docs_tool") as mock_scrape:
# Mock scrape failure
mock_scrape.return_value = [TextContent(type="text", text="❌ Scraping failed: Network timeout")]
mock_scrape.return_value = [
TextContent(type="text", text="❌ Scraping failed: Network timeout")
]
result = await install_skill_tool({"config_path": test_config_file, "auto_upload": False, "dry_run": False})
result = await install_skill_tool(
{"config_path": test_config_file, "auto_upload": False, "dry_run": False}
)
output = result[0].text
@@ -256,7 +276,9 @@ class TestInstallSkillE2E:
assert "WORKFLOW COMPLETE" not in output
@pytest.mark.asyncio
async def test_e2e_error_handling_enhancement_failure(self, test_config_file, mock_scrape_output):
async def test_e2e_error_handling_enhancement_failure(
self, test_config_file, mock_scrape_output
):
"""E2E test: error handling when enhancement fails"""
with (
@@ -265,13 +287,18 @@ class TestInstallSkillE2E:
):
# Mock successful scrape
mock_scrape.return_value = [
TextContent(type="text", text=f"✅ Scraping complete\n\nSkill built at: {mock_scrape_output}")
TextContent(
type="text",
text=f"✅ Scraping complete\n\nSkill built at: {mock_scrape_output}",
)
]
# Mock enhancement failure
mock_enhance.return_value = ("", "Enhancement error: Claude not found", 1)
result = await install_skill_tool({"config_path": test_config_file, "auto_upload": False, "dry_run": False})
result = await install_skill_tool(
{"config_path": test_config_file, "auto_upload": False, "dry_run": False}
)
output = result[0].text
@@ -311,7 +338,9 @@ class TestInstallSkillCLI_E2E:
# Import and call the tool directly (more reliable than subprocess)
from skill_seekers.mcp.server import install_skill_tool
result = await install_skill_tool({"config_path": test_config_file, "dry_run": True, "auto_upload": False})
result = await install_skill_tool(
{"config_path": test_config_file, "dry_run": True, "auto_upload": False}
)
# Verify output
output = result[0].text
@@ -324,7 +353,9 @@ class TestInstallSkillCLI_E2E:
# Run CLI without config
result = subprocess.run(
[sys.executable, "-m", "skill_seekers.cli.install_skill"], capture_output=True, text=True
[sys.executable, "-m", "skill_seekers.cli.install_skill"],
capture_output=True,
text=True,
)
# Should fail
@@ -337,7 +368,9 @@ class TestInstallSkillCLI_E2E:
"""E2E test: CLI help command"""
result = subprocess.run(
[sys.executable, "-m", "skill_seekers.cli.install_skill", "--help"], capture_output=True, text=True
[sys.executable, "-m", "skill_seekers.cli.install_skill", "--help"],
capture_output=True,
text=True,
)
# Should succeed
@@ -354,7 +387,9 @@ class TestInstallSkillCLI_E2E:
@patch("skill_seekers.mcp.server.scrape_docs_tool")
@patch("skill_seekers.mcp.server.run_subprocess_with_streaming")
@patch("skill_seekers.mcp.server.package_skill_tool")
async def test_cli_full_workflow_mocked(self, mock_package, mock_enhance, mock_scrape, test_config_file, tmp_path):
async def test_cli_full_workflow_mocked(
self, mock_package, mock_enhance, mock_scrape, test_config_file, tmp_path
):
"""E2E test: Full CLI workflow with mocked phases (via direct call)"""
# Setup mocks
@@ -366,7 +401,9 @@ class TestInstallSkillCLI_E2E:
mock_enhance.return_value = ("✅ Enhancement complete", "", 0)
zip_path = str(tmp_path / "output" / "test-cli-e2e.zip")
mock_package.return_value = [TextContent(type="text", text=f"✅ Package complete\n\nSaved to: {zip_path}")]
mock_package.return_value = [
TextContent(type="text", text=f"✅ Package complete\n\nSaved to: {zip_path}")
]
# Call the tool directly
from skill_seekers.mcp.server import install_skill_tool

View File

@@ -172,7 +172,9 @@ class TestRealConfigFiles(unittest.TestCase):
if os.path.exists(config_path):
config = load_config(config_path)
errors, _ = validate_config(config)
self.assertEqual(len(errors), 0, f"FastAPI config should be valid, got errors: {errors}")
self.assertEqual(
len(errors), 0, f"FastAPI config should be valid, got errors: {errors}"
)
def test_steam_economy_config(self):
"""Test Steam Economy config is valid"""
@@ -180,7 +182,9 @@ class TestRealConfigFiles(unittest.TestCase):
if os.path.exists(config_path):
config = load_config(config_path)
errors, _ = validate_config(config)
self.assertEqual(len(errors), 0, f"Steam Economy config should be valid, got errors: {errors}")
self.assertEqual(
len(errors), 0, f"Steam Economy config should be valid, got errors: {errors}"
)
class TestURLProcessing(unittest.TestCase):
@@ -221,7 +225,11 @@ class TestURLProcessing(unittest.TestCase):
config = {
"name": "test",
"base_url": "https://example.com/",
"start_urls": ["https://example.com/guide/", "https://example.com/api/", "https://example.com/tutorial/"],
"start_urls": [
"https://example.com/guide/",
"https://example.com/api/",
"https://example.com/tutorial/",
],
"selectors": {"main_content": "article", "title": "h1", "code_blocks": "pre"},
"rate_limit": 0.1,
"max_pages": 10,
@@ -423,14 +431,20 @@ app.use('*', cors())
# Verify llms.txt was detected
self.assertTrue(scraper.llms_txt_detected, "llms.txt should be detected")
self.assertEqual(scraper.llms_txt_variant, "explicit", "Should use explicit variant from config")
self.assertEqual(
scraper.llms_txt_variant, "explicit", "Should use explicit variant from config"
)
# Verify pages were parsed
self.assertGreater(len(scraper.pages), 0, "Should have parsed pages from llms.txt")
# Verify page structure
self.assertTrue(all("title" in page for page in scraper.pages), "All pages should have titles")
self.assertTrue(all("content" in page for page in scraper.pages), "All pages should have content")
self.assertTrue(
all("title" in page for page in scraper.pages), "All pages should have titles"
)
self.assertTrue(
all("content" in page for page in scraper.pages), "All pages should have content"
)
self.assertTrue(
any(len(page.get("code_samples", [])) > 0 for page in scraper.pages),
"At least one page should have code samples",

View File

@@ -51,7 +51,9 @@ class TestIssue219Problem1LargeFiles(unittest.TestCase):
mock_content.type = "file"
mock_content.encoding = "none" # This is what GitHub API returns for large files
mock_content.size = 1388271
mock_content.download_url = "https://raw.githubusercontent.com/ccxt/ccxt/master/CHANGELOG.md"
mock_content.download_url = (
"https://raw.githubusercontent.com/ccxt/ccxt/master/CHANGELOG.md"
)
with patch("skill_seekers.cli.github_scraper.Github"):
scraper = self.GitHubScraper(config)
@@ -109,7 +111,9 @@ class TestIssue219Problem2CLIFlags(unittest.TestCase):
def test_github_command_has_enhancement_flags(self):
"""E2E: Verify --enhance-local flag exists in github command help"""
result = subprocess.run(["skill-seekers", "github", "--help"], capture_output=True, text=True)
result = subprocess.run(
["skill-seekers", "github", "--help"], capture_output=True, text=True
)
# VERIFY: Command succeeds
self.assertEqual(result.returncode, 0, "github --help should succeed")
@@ -148,9 +152,20 @@ class TestIssue219Problem2CLIFlags(unittest.TestCase):
from skill_seekers.cli import main
# Mock sys.argv to simulate CLI call
test_args = ["skill-seekers", "github", "--repo", "test/test", "--name", "test", "--enhance-local"]
test_args = [
"skill-seekers",
"github",
"--repo",
"test/test",
"--name",
"test",
"--enhance-local",
]
with patch("sys.argv", test_args), patch("skill_seekers.cli.github_scraper.main") as mock_github_main:
with (
patch("sys.argv", test_args),
patch("skill_seekers.cli.github_scraper.main") as mock_github_main,
):
mock_github_main.return_value = 0
# Call main dispatcher
@@ -165,9 +180,12 @@ class TestIssue219Problem2CLIFlags(unittest.TestCase):
# VERIFY: sys.argv contains --enhance-local flag
# (main.py should have added it before calling github_scraper)
called_with_enhance = any("--enhance-local" in str(call) for call in mock_github_main.call_args_list)
called_with_enhance = any(
"--enhance-local" in str(call) for call in mock_github_main.call_args_list
)
self.assertTrue(
called_with_enhance or "--enhance-local" in sys.argv, "Flag should be forwarded to github_scraper"
called_with_enhance or "--enhance-local" in sys.argv,
"Flag should be forwarded to github_scraper",
)
@@ -203,7 +221,9 @@ class TestIssue219Problem3CustomAPIEndpoints(unittest.TestCase):
custom_url = "http://localhost:3000"
with (
patch.dict(os.environ, {"ANTHROPIC_API_KEY": "test-key-123", "ANTHROPIC_BASE_URL": custom_url}),
patch.dict(
os.environ, {"ANTHROPIC_API_KEY": "test-key-123", "ANTHROPIC_BASE_URL": custom_url}
),
patch("skill_seekers.cli.enhance_skill.anthropic.Anthropic") as mock_anthropic,
):
# Create enhancer
@@ -213,7 +233,11 @@ class TestIssue219Problem3CustomAPIEndpoints(unittest.TestCase):
mock_anthropic.assert_called_once()
call_kwargs = mock_anthropic.call_args[1]
self.assertIn("base_url", call_kwargs, "base_url should be passed")
self.assertEqual(call_kwargs["base_url"], custom_url, "base_url should match ANTHROPIC_BASE_URL env var")
self.assertEqual(
call_kwargs["base_url"],
custom_url,
"base_url should match ANTHROPIC_BASE_URL env var",
)
def test_anthropic_auth_token_support(self):
"""E2E: Verify ANTHROPIC_AUTH_TOKEN is accepted as alternative to ANTHROPIC_API_KEY"""
@@ -234,13 +258,17 @@ class TestIssue219Problem3CustomAPIEndpoints(unittest.TestCase):
# VERIFY: api_key set to ANTHROPIC_AUTH_TOKEN value
self.assertEqual(
enhancer.api_key, custom_token, "Should use ANTHROPIC_AUTH_TOKEN when ANTHROPIC_API_KEY not set"
enhancer.api_key,
custom_token,
"Should use ANTHROPIC_AUTH_TOKEN when ANTHROPIC_API_KEY not set",
)
# VERIFY: Anthropic client initialized with correct key
mock_anthropic.assert_called_once()
call_kwargs = mock_anthropic.call_args[1]
self.assertEqual(call_kwargs["api_key"], custom_token, "api_key should match ANTHROPIC_AUTH_TOKEN")
self.assertEqual(
call_kwargs["api_key"], custom_token, "api_key should match ANTHROPIC_AUTH_TOKEN"
)
def test_thinking_block_handling(self):
"""E2E: Verify ThinkingBlock doesn't cause .text AttributeError"""
@@ -284,7 +312,11 @@ class TestIssue219Problem3CustomAPIEndpoints(unittest.TestCase):
# VERIFY: Should find text from TextBlock, ignore ThinkingBlock
self.assertIsNotNone(result, "Should return enhanced content")
self.assertEqual(result, "# Enhanced SKILL.md\n\nContent here", "Should extract text from TextBlock")
self.assertEqual(
result,
"# Enhanced SKILL.md\n\nContent here",
"Should extract text from TextBlock",
)
class TestIssue219IntegrationAll(unittest.TestCase):
@@ -297,7 +329,9 @@ class TestIssue219IntegrationAll(unittest.TestCase):
# 2. Large files are downloaded
# 3. Custom API endpoints work
result = subprocess.run(["skill-seekers", "github", "--help"], capture_output=True, text=True)
result = subprocess.run(
["skill-seekers", "github", "--help"], capture_output=True, text=True
)
# All flags present
self.assertIn("--enhance", result.stdout)

View File

@@ -48,7 +48,9 @@ def test_url_parsing_with_complex_paths():
assert variants is not None
assert variants["url"] == "https://example.com/llms-full.txt"
mock_head.assert_called_with("https://example.com/llms-full.txt", timeout=5, allow_redirects=True)
mock_head.assert_called_with(
"https://example.com/llms-full.txt", timeout=5, allow_redirects=True
)
def test_detect_all_variants():

View File

@@ -133,7 +133,10 @@ def test_custom_max_retries():
"""Test custom max_retries parameter"""
downloader = LlmsTxtDownloader("https://example.com/llms.txt", max_retries=5)
with patch("requests.get", side_effect=requests.Timeout("Connection timeout")) as mock_get, patch("time.sleep"):
with (
patch("requests.get", side_effect=requests.Timeout("Connection timeout")) as mock_get,
patch("time.sleep"),
):
content = downloader.download()
assert content is None
@@ -189,7 +192,9 @@ def test_is_markdown_rejects_html_doctype():
"""Test that HTML with DOCTYPE is rejected (prevents redirect trap)"""
downloader = LlmsTxtDownloader("https://example.com/llms.txt")
html = "<!DOCTYPE html><html><head><title>Product Page</title></head><body>Content</body></html>"
html = (
"<!DOCTYPE html><html><head><title>Product Page</title></head><body>Content</body></html>"
)
assert not downloader._is_markdown(html)
# Test case-insensitive

View File

@@ -93,7 +93,9 @@ plain code without language
- [HTML Page](./page.html)
- [External](https://google.com)
"""
result = self.converter._extract_markdown_content(content, "https://example.com/docs/test.md")
result = self.converter._extract_markdown_content(
content, "https://example.com/docs/test.md"
)
# Should only include .md links
md_links = [l for l in result["links"] if ".md" in l]
self.assertEqual(len(md_links), len(result["links"]))
@@ -115,7 +117,9 @@ Another paragraph that should be included in the final content output.
def test_detect_html_in_md_url(self):
"""Test that HTML content is detected when .md URL returns HTML."""
html_content = "<!DOCTYPE html><html><head><title>Page</title></head><body><h1>Hello</h1></body></html>"
result = self.converter._extract_markdown_content(html_content, "https://example.com/test.md")
result = self.converter._extract_markdown_content(
html_content, "https://example.com/test.md"
)
self.assertEqual(result["title"], "Page")

View File

@@ -67,7 +67,10 @@ def sample_config(temp_dirs):
"base_url": "https://test-framework.dev/",
"selectors": {"main_content": "article", "title": "h1", "code_blocks": "pre"},
"url_patterns": {"include": ["/docs/"], "exclude": ["/blog/", "/search/"]},
"categories": {"getting_started": ["introduction", "getting-started"], "api": ["api", "reference"]},
"categories": {
"getting_started": ["introduction", "getting-started"],
"api": ["api", "reference"],
},
"rate_limit": 0.5,
"max_pages": 100,
}
@@ -85,7 +88,12 @@ def unified_config(temp_dirs):
"description": "Test unified scraping",
"merge_mode": "rule-based",
"sources": [
{"type": "documentation", "base_url": "https://example.com/docs/", "extract_api": True, "max_pages": 10},
{
"type": "documentation",
"base_url": "https://example.com/docs/",
"extract_api": True,
"max_pages": 10,
},
{"type": "github", "repo": "test/repo", "extract_readme": True},
],
}
@@ -166,7 +174,11 @@ class TestConfigTools:
"""Test basic config generation."""
monkeypatch.chdir(temp_dirs["base"])
args = {"name": "my-framework", "url": "https://my-framework.dev/", "description": "My framework skill"}
args = {
"name": "my-framework",
"url": "https://my-framework.dev/",
"description": "My framework skill",
}
result = await server_fastmcp.generate_config(**args)
@@ -232,7 +244,9 @@ class TestConfigTools:
async def test_validate_config_missing_file(self, temp_dirs):
"""Test validating a non-existent config file."""
result = await server_fastmcp.validate_config(config_path=str(temp_dirs["config"] / "nonexistent.json"))
result = await server_fastmcp.validate_config(
config_path=str(temp_dirs["config"] / "nonexistent.json")
)
assert isinstance(result, str)
# Should indicate error
@@ -252,7 +266,9 @@ class TestScrapingTools:
async def test_estimate_pages_basic(self, sample_config):
"""Test basic page estimation."""
with patch("subprocess.run") as mock_run:
mock_run.return_value = Mock(returncode=0, stdout="Estimated pages: 150\nRecommended max_pages: 200")
mock_run.return_value = Mock(
returncode=0, stdout="Estimated pages: 150\nRecommended max_pages: 200"
)
result = await server_fastmcp.estimate_pages(config_path=str(sample_config))
@@ -266,7 +282,9 @@ class TestScrapingTools:
async def test_estimate_pages_custom_discovery(self, sample_config):
"""Test estimation with custom max_discovery."""
result = await server_fastmcp.estimate_pages(config_path=str(sample_config), max_discovery=500)
result = await server_fastmcp.estimate_pages(
config_path=str(sample_config), max_discovery=500
)
assert isinstance(result, str)
@@ -281,7 +299,9 @@ class TestScrapingTools:
async def test_scrape_docs_with_enhancement(self, sample_config):
"""Test scraping with local enhancement."""
result = await server_fastmcp.scrape_docs(config_path=str(sample_config), enhance_local=True, dry_run=True)
result = await server_fastmcp.scrape_docs(
config_path=str(sample_config), enhance_local=True, dry_run=True
)
assert isinstance(result, str)
@@ -310,7 +330,9 @@ class TestScrapingTools:
with patch("subprocess.run") as mock_run:
mock_run.return_value = Mock(returncode=0, stdout="GitHub scraping completed")
result = await server_fastmcp.scrape_github(repo="facebook/react", name="react-github-test")
result = await server_fastmcp.scrape_github(
repo="facebook/react", name="react-github-test"
)
assert isinstance(result, str)
@@ -325,7 +347,12 @@ class TestScrapingTools:
async def test_scrape_github_options(self):
"""Test GitHub scraping with various options."""
result = await server_fastmcp.scrape_github(
repo="test/repo", no_issues=True, no_changelog=True, no_releases=True, max_issues=50, scrape_only=True
repo="test/repo",
no_issues=True,
no_changelog=True,
no_releases=True,
max_issues=50,
scrape_only=True,
)
assert isinstance(result, str)
@@ -333,7 +360,11 @@ class TestScrapingTools:
async def test_scrape_pdf_basic(self, temp_dirs):
"""Test basic PDF scraping."""
# Create a dummy PDF config
pdf_config = {"name": "test-pdf", "pdf_path": "/path/to/test.pdf", "description": "Test PDF skill"}
pdf_config = {
"name": "test-pdf",
"pdf_path": "/path/to/test.pdf",
"description": "Test PDF skill",
}
config_path = temp_dirs["config"] / "test-pdf.json"
config_path.write_text(json.dumps(pdf_config))
@@ -343,7 +374,9 @@ class TestScrapingTools:
async def test_scrape_pdf_direct_path(self):
"""Test PDF scraping with direct path."""
result = await server_fastmcp.scrape_pdf(pdf_path="/path/to/manual.pdf", name="manual-skill")
result = await server_fastmcp.scrape_pdf(
pdf_path="/path/to/manual.pdf", name="manual-skill"
)
assert isinstance(result, str)
@@ -428,7 +461,9 @@ class TestPackagingTools:
async def test_upload_skill_missing_file(self, temp_dirs):
"""Test upload with missing file."""
result = await server_fastmcp.upload_skill(skill_zip=str(temp_dirs["output"] / "nonexistent.zip"))
result = await server_fastmcp.upload_skill(
skill_zip=str(temp_dirs["output"] / "nonexistent.zip")
)
assert isinstance(result, str)
@@ -438,7 +473,9 @@ class TestPackagingTools:
with patch("skill_seekers.mcp.tools.source_tools.fetch_config_tool") as mock_fetch:
mock_fetch.return_value = [Mock(text="Config fetched")]
result = await server_fastmcp.install_skill(config_name="react", destination="output", dry_run=True)
result = await server_fastmcp.install_skill(
config_name="react", destination="output", dry_run=True
)
assert isinstance(result, str)
@@ -458,7 +495,9 @@ class TestPackagingTools:
with patch("skill_seekers.mcp.tools.source_tools.fetch_config_tool") as mock_fetch:
mock_fetch.return_value = [Mock(text="Config fetched")]
result = await server_fastmcp.install_skill(config_name="react", unlimited=True, dry_run=True)
result = await server_fastmcp.install_skill(
config_name="react", unlimited=True, dry_run=True
)
assert isinstance(result, str)
@@ -467,7 +506,9 @@ class TestPackagingTools:
with patch("skill_seekers.mcp.tools.source_tools.fetch_config_tool") as mock_fetch:
mock_fetch.return_value = [Mock(text="Config fetched")]
result = await server_fastmcp.install_skill(config_name="react", auto_upload=False, dry_run=True)
result = await server_fastmcp.install_skill(
config_name="react", auto_upload=False, dry_run=True
)
assert isinstance(result, str)
@@ -484,7 +525,9 @@ class TestSplittingTools:
async def test_split_config_auto_strategy(self, sample_config):
"""Test config splitting with auto strategy."""
result = await server_fastmcp.split_config(config_path=str(sample_config), strategy="auto", dry_run=True)
result = await server_fastmcp.split_config(
config_path=str(sample_config), strategy="auto", dry_run=True
)
assert isinstance(result, str)
@@ -510,7 +553,9 @@ class TestSplittingTools:
(temp_dirs["config"] / "godot-scripting.json").write_text("{}")
(temp_dirs["config"] / "godot-physics.json").write_text("{}")
result = await server_fastmcp.generate_router(config_pattern=str(temp_dirs["config"] / "godot-*.json"))
result = await server_fastmcp.generate_router(
config_pattern=str(temp_dirs["config"] / "godot-*.json")
)
assert isinstance(result, str)
@@ -552,7 +597,9 @@ class TestSourceTools:
async def test_fetch_config_download_api(self, temp_dirs):
"""Test downloading specific config from API."""
result = await server_fastmcp.fetch_config(config_name="react", destination=str(temp_dirs["config"]))
result = await server_fastmcp.fetch_config(
config_name="react", destination=str(temp_dirs["config"])
)
assert isinstance(result, str)
@@ -565,7 +612,9 @@ class TestSourceTools:
async def test_fetch_config_from_git_url(self, temp_dirs):
"""Test fetching config from git URL."""
result = await server_fastmcp.fetch_config(
config_name="react", git_url="https://github.com/myorg/configs.git", destination=str(temp_dirs["config"])
config_name="react",
git_url="https://github.com/myorg/configs.git",
destination=str(temp_dirs["config"]),
)
assert isinstance(result, str)
@@ -612,13 +661,17 @@ class TestSourceTools:
"""Test submitting config as JSON string."""
config_json = json.dumps({"name": "my-framework", "base_url": "https://my-framework.dev/"})
result = await server_fastmcp.submit_config(config_json=config_json, testing_notes="Works great!")
result = await server_fastmcp.submit_config(
config_json=config_json, testing_notes="Works great!"
)
assert isinstance(result, str)
async def test_add_config_source_basic(self):
"""Test adding a config source."""
result = await server_fastmcp.add_config_source(name="team", git_url="https://github.com/myorg/configs.git")
result = await server_fastmcp.add_config_source(
name="team", git_url="https://github.com/myorg/configs.git"
)
assert isinstance(result, str)
@@ -706,7 +759,9 @@ class TestFastMCPIntegration:
async def test_workflow_split_router(self, sample_config, temp_dirs):
"""Test workflow: split config → generate router."""
# Step 1: Split config
result1 = await server_fastmcp.split_config(config_path=str(sample_config), strategy="category", dry_run=True)
result1 = await server_fastmcp.split_config(
config_path=str(sample_config), strategy="category", dry_run=True
)
assert isinstance(result1, str)
# Step 2: Generate router

View File

@@ -42,7 +42,11 @@ def mock_git_repo(temp_dirs):
(repo_path / ".git").mkdir()
# Create sample config files
react_config = {"name": "react", "description": "React framework", "base_url": "https://react.dev/"}
react_config = {
"name": "react",
"description": "React framework",
"base_url": "https://react.dev/",
}
(repo_path / "react.json").write_text(json.dumps(react_config, indent=2))
vue_config = {"name": "vue", "description": "Vue framework", "base_url": "https://vuejs.org/"}
@@ -65,8 +69,18 @@ class TestFetchConfigModes:
mock_response = MagicMock()
mock_response.json.return_value = {
"configs": [
{"name": "react", "category": "web-frameworks", "description": "React framework", "type": "single"},
{"name": "vue", "category": "web-frameworks", "description": "Vue framework", "type": "single"},
{
"name": "react",
"category": "web-frameworks",
"description": "React framework",
"type": "single",
},
{
"name": "vue",
"category": "web-frameworks",
"description": "Vue framework",
"type": "single",
},
],
"total": 2,
}
@@ -94,7 +108,10 @@ class TestFetchConfigModes:
}
mock_download_response = MagicMock()
mock_download_response.json.return_value = {"name": "react", "base_url": "https://react.dev/"}
mock_download_response.json.return_value = {
"name": "react",
"base_url": "https://react.dev/",
}
mock_client_instance = mock_client.return_value.__aenter__.return_value
mock_client_instance.get.side_effect = [mock_detail_response, mock_download_response]
@@ -149,7 +166,9 @@ class TestFetchConfigModes:
@patch("skill_seekers.mcp.server.GitConfigRepo")
@patch("skill_seekers.mcp.server.SourceManager")
async def test_fetch_config_source_mode(self, mock_source_manager_class, mock_git_repo_class, temp_dirs):
async def test_fetch_config_source_mode(
self, mock_source_manager_class, mock_git_repo_class, temp_dirs
):
"""Test Source mode - using named source from registry."""
from skill_seekers.mcp.server import fetch_config_tool
@@ -491,7 +510,9 @@ class TestCompleteWorkflow:
}
mock_sm_class.return_value = mock_sm
add_result = await add_config_source_tool({"name": "team", "git_url": "https://github.com/myorg/configs.git"})
add_result = await add_config_source_tool(
{"name": "team", "git_url": "https://github.com/myorg/configs.git"}
)
assert "" in add_result[0].text
# Step 2: Fetch config from source

View File

@@ -119,7 +119,11 @@ class TestGenerateConfigTool(unittest.IsolatedAsyncioTestCase):
async def test_generate_config_basic(self):
"""Test basic config generation"""
args = {"name": "test-framework", "url": "https://test-framework.dev/", "description": "Test framework skill"}
args = {
"name": "test-framework",
"url": "https://test-framework.dev/",
"description": "Test framework skill",
}
result = await skill_seeker_server.generate_config_tool(args)
@@ -564,7 +568,9 @@ class TestSubmitConfigTool(unittest.IsolatedAsyncioTestCase):
async def test_submit_config_requires_token(self):
"""Should error without GitHub token"""
args = {"config_json": '{"name": "test", "description": "Test", "base_url": "https://example.com"}'}
args = {
"config_json": '{"name": "test", "description": "Test", "base_url": "https://example.com"}'
}
result = await skill_seeker_server.submit_config_tool(args)
self.assertIn("GitHub token required", result[0].text)
@@ -577,7 +583,9 @@ class TestSubmitConfigTool(unittest.IsolatedAsyncioTestCase):
result = await skill_seeker_server.submit_config_tool(args)
self.assertIn("validation failed", result[0].text.lower())
# ConfigValidator detects missing config type (base_url/repo/pdf)
self.assertTrue("cannot detect" in result[0].text.lower() or "missing" in result[0].text.lower())
self.assertTrue(
"cannot detect" in result[0].text.lower() or "missing" in result[0].text.lower()
)
async def test_submit_config_validates_name_format(self):
"""Should reject invalid name characters"""
@@ -649,7 +657,9 @@ class TestSubmitConfigTool(unittest.IsolatedAsyncioTestCase):
async def test_submit_config_from_file_path(self):
"""Should accept config_path parameter"""
with tempfile.NamedTemporaryFile(mode="w", suffix=".json", delete=False) as f:
json.dump({"name": "testfile", "description": "From file", "base_url": "https://test.com/"}, f)
json.dump(
{"name": "testfile", "description": "From file", "base_url": "https://test.com/"}, f
)
temp_path = f.name
try:

View File

@@ -24,11 +24,29 @@ class TestIssueCategorization:
def test_categorize_issues_basic(self):
"""Test basic issue categorization."""
problems = [
{"title": "OAuth setup fails", "labels": ["bug", "oauth"], "number": 1, "state": "open", "comments": 10},
{"title": "Testing framework issue", "labels": ["testing"], "number": 2, "state": "open", "comments": 5},
{
"title": "OAuth setup fails",
"labels": ["bug", "oauth"],
"number": 1,
"state": "open",
"comments": 10,
},
{
"title": "Testing framework issue",
"labels": ["testing"],
"number": 2,
"state": "open",
"comments": 5,
},
]
solutions = [
{"title": "Fixed OAuth redirect", "labels": ["oauth"], "number": 3, "state": "closed", "comments": 3}
{
"title": "Fixed OAuth redirect",
"labels": ["oauth"],
"number": 3,
"state": "closed",
"comments": 3,
}
]
topics = ["oauth", "testing", "async"]
@@ -43,7 +61,13 @@ class TestIssueCategorization:
def test_categorize_issues_keyword_matching(self):
"""Test keyword matching in titles and labels."""
problems = [
{"title": "Database connection timeout", "labels": ["db"], "number": 1, "state": "open", "comments": 7}
{
"title": "Database connection timeout",
"labels": ["db"],
"number": 1,
"state": "open",
"comments": 7,
}
]
solutions = []
@@ -57,7 +81,13 @@ class TestIssueCategorization:
def test_categorize_issues_multi_keyword_topic(self):
"""Test topics with multiple keywords."""
problems = [
{"title": "Async API call fails", "labels": ["async", "api"], "number": 1, "state": "open", "comments": 8}
{
"title": "Async API call fails",
"labels": ["async", "api"],
"number": 1,
"state": "open",
"comments": 8,
}
]
solutions = []
@@ -71,7 +101,15 @@ class TestIssueCategorization:
def test_categorize_issues_no_match_goes_to_other(self):
"""Test that unmatched issues go to 'other' category."""
problems = [{"title": "Random issue", "labels": ["misc"], "number": 1, "state": "open", "comments": 5}]
problems = [
{
"title": "Random issue",
"labels": ["misc"],
"number": 1,
"state": "open",
"comments": 5,
}
]
solutions = []
topics = ["oauth", "testing"]
@@ -94,7 +132,10 @@ class TestHybridContent:
def test_generate_hybrid_content_basic(self):
"""Test basic hybrid content generation."""
api_data = {"apis": {"oauth_login": {"name": "oauth_login", "status": "matched"}}, "summary": {"total_apis": 1}}
api_data = {
"apis": {"oauth_login": {"name": "oauth_login", "status": "matched"}},
"summary": {"total_apis": 1},
}
github_docs = {
"readme": "# Project README",
@@ -103,12 +144,29 @@ class TestHybridContent:
}
github_insights = {
"metadata": {"stars": 1234, "forks": 56, "language": "Python", "description": "Test project"},
"metadata": {
"stars": 1234,
"forks": 56,
"language": "Python",
"description": "Test project",
},
"common_problems": [
{"title": "OAuth fails", "number": 42, "state": "open", "comments": 10, "labels": ["bug"]}
{
"title": "OAuth fails",
"number": 42,
"state": "open",
"comments": 10,
"labels": ["bug"],
}
],
"known_solutions": [
{"title": "Fixed OAuth", "number": 35, "state": "closed", "comments": 5, "labels": ["bug"]}
{
"title": "Fixed OAuth",
"number": 35,
"state": "closed",
"comments": 5,
"labels": ["bug"],
}
],
"top_labels": [{"label": "bug", "count": 10}, {"label": "enhancement", "count": 5}],
}
@@ -190,11 +248,23 @@ class TestIssueToAPIMatching:
apis = {"oauth_login": {"name": "oauth_login"}, "async_fetch": {"name": "async_fetch"}}
problems = [
{"title": "OAuth login fails", "number": 42, "state": "open", "comments": 10, "labels": ["bug", "oauth"]}
{
"title": "OAuth login fails",
"number": 42,
"state": "open",
"comments": 10,
"labels": ["bug", "oauth"],
}
]
solutions = [
{"title": "Fixed async fetch timeout", "number": 35, "state": "closed", "comments": 5, "labels": ["async"]}
{
"title": "Fixed async fetch timeout",
"number": 35,
"state": "closed",
"comments": 5,
"labels": ["async"],
}
]
issue_links = _match_issues_to_apis(apis, problems, solutions)
@@ -214,7 +284,13 @@ class TestIssueToAPIMatching:
apis = {"database_connect": {"name": "database_connect"}}
problems = [
{"title": "Random unrelated issue", "number": 1, "state": "open", "comments": 5, "labels": ["misc"]}
{
"title": "Random unrelated issue",
"number": 1,
"state": "open",
"comments": 5,
"labels": ["misc"],
}
]
issue_links = _match_issues_to_apis(apis, problems, [])
@@ -226,7 +302,15 @@ class TestIssueToAPIMatching:
"""Test matching with dotted API names."""
apis = {"module.oauth.login": {"name": "module.oauth.login"}}
problems = [{"title": "OAuth module fails", "number": 42, "state": "open", "comments": 10, "labels": ["oauth"]}]
problems = [
{
"title": "OAuth module fails",
"number": 42,
"state": "open",
"comments": 10,
"labels": ["oauth"],
}
]
issue_links = _match_issues_to_apis(apis, problems, [])
@@ -253,8 +337,12 @@ class TestRuleBasedMergerWithGitHubStreams:
)
insights_stream = InsightsStream(
metadata={"stars": 1234, "forks": 56, "language": "Python"},
common_problems=[{"title": "Bug 1", "number": 1, "state": "open", "comments": 10, "labels": ["bug"]}],
known_solutions=[{"title": "Fix 1", "number": 2, "state": "closed", "comments": 5, "labels": ["bug"]}],
common_problems=[
{"title": "Bug 1", "number": 1, "state": "open", "comments": 10, "labels": ["bug"]}
],
known_solutions=[
{"title": "Fix 1", "number": 2, "state": "closed", "comments": 5, "labels": ["bug"]}
],
top_labels=[{"label": "bug", "count": 10}],
)
github_streams = ThreeStreamData(code_stream, docs_stream, insights_stream)
@@ -277,7 +365,9 @@ class TestRuleBasedMergerWithGitHubStreams:
# Create three-stream data
code_stream = CodeStream(directory=tmp_path, files=[])
docs_stream = DocsStream(readme="# README", contributing=None, docs_files=[])
insights_stream = InsightsStream(metadata={"stars": 500}, common_problems=[], known_solutions=[], top_labels=[])
insights_stream = InsightsStream(
metadata={"stars": 500}, common_problems=[], known_solutions=[], top_labels=[]
)
github_streams = ThreeStreamData(code_stream, docs_stream, insights_stream)
# Create and run merger
@@ -331,7 +421,12 @@ class TestIntegration:
],
)
insights_stream = InsightsStream(
metadata={"stars": 2500, "forks": 123, "language": "Python", "description": "Test framework"},
metadata={
"stars": 2500,
"forks": 123,
"language": "Python",
"description": "Test framework",
},
common_problems=[
{
"title": "Installation fails on Windows",
@@ -349,7 +444,13 @@ class TestIntegration:
},
],
known_solutions=[
{"title": "Fixed config loading", "number": 130, "state": "closed", "comments": 8, "labels": ["bug"]},
{
"title": "Fixed config loading",
"number": 130,
"state": "closed",
"comments": 8,
"labels": ["bug"],
},
{
"title": "Resolved OAuth timeout",
"number": 125,

View File

@@ -114,8 +114,18 @@ class TestUnifiedSkillBuilderDocsReferences(unittest.TestCase):
scraped_data = {
"documentation": [
{"source_id": "source_a", "base_url": "https://a.com", "total_pages": 5, "refs_dir": refs_dir1},
{"source_id": "source_b", "base_url": "https://b.com", "total_pages": 3, "refs_dir": refs_dir2},
{
"source_id": "source_a",
"base_url": "https://a.com",
"total_pages": 5,
"refs_dir": refs_dir1,
},
{
"source_id": "source_b",
"base_url": "https://b.com",
"total_pages": 3,
"refs_dir": refs_dir2,
},
],
"github": [],
"pdf": [],
@@ -139,7 +149,12 @@ class TestUnifiedSkillBuilderDocsReferences(unittest.TestCase):
scraped_data = {
"documentation": [
{"source_id": "my_source", "base_url": "https://example.com", "total_pages": 10, "refs_dir": refs_dir}
{
"source_id": "my_source",
"base_url": "https://example.com",
"total_pages": 10,
"refs_dir": refs_dir,
}
],
"github": [],
"pdf": [],
@@ -148,7 +163,9 @@ class TestUnifiedSkillBuilderDocsReferences(unittest.TestCase):
builder = UnifiedSkillBuilder(config, scraped_data)
builder._generate_docs_references(scraped_data["documentation"])
source_index = os.path.join(builder.skill_dir, "references", "documentation", "my_source", "index.md")
source_index = os.path.join(
builder.skill_dir, "references", "documentation", "my_source", "index.md"
)
self.assertTrue(os.path.exists(source_index))
with open(source_index) as f:
@@ -169,8 +186,18 @@ class TestUnifiedSkillBuilderDocsReferences(unittest.TestCase):
scraped_data = {
"documentation": [
{"source_id": "docs_one", "base_url": "https://one.com", "total_pages": 10, "refs_dir": refs_dir1},
{"source_id": "docs_two", "base_url": "https://two.com", "total_pages": 20, "refs_dir": refs_dir2},
{
"source_id": "docs_one",
"base_url": "https://one.com",
"total_pages": 10,
"refs_dir": refs_dir1,
},
{
"source_id": "docs_two",
"base_url": "https://two.com",
"total_pages": 20,
"refs_dir": refs_dir2,
},
],
"github": [],
"pdf": [],
@@ -205,7 +232,12 @@ class TestUnifiedSkillBuilderDocsReferences(unittest.TestCase):
scraped_data = {
"documentation": [
{"source_id": "test_source", "base_url": "https://test.com", "total_pages": 5, "refs_dir": refs_dir}
{
"source_id": "test_source",
"base_url": "https://test.com",
"total_pages": 5,
"refs_dir": refs_dir,
}
],
"github": [],
"pdf": [],
@@ -290,7 +322,9 @@ class TestUnifiedSkillBuilderGitHubReferences(unittest.TestCase):
builder = UnifiedSkillBuilder(config, scraped_data)
builder._generate_github_references(scraped_data["github"])
readme_path = os.path.join(builder.skill_dir, "references", "github", "test_myrepo", "README.md")
readme_path = os.path.join(
builder.skill_dir, "references", "github", "test_myrepo", "README.md"
)
self.assertTrue(os.path.exists(readme_path))
with open(readme_path) as f:
@@ -338,7 +372,9 @@ class TestUnifiedSkillBuilderGitHubReferences(unittest.TestCase):
builder = UnifiedSkillBuilder(config, scraped_data)
builder._generate_github_references(scraped_data["github"])
issues_path = os.path.join(builder.skill_dir, "references", "github", "test_repo", "issues.md")
issues_path = os.path.join(
builder.skill_dir, "references", "github", "test_repo", "issues.md"
)
self.assertTrue(os.path.exists(issues_path))
with open(issues_path) as f:
@@ -358,12 +394,22 @@ class TestUnifiedSkillBuilderGitHubReferences(unittest.TestCase):
{
"repo": "org/first",
"repo_id": "org_first",
"data": {"readme": "#", "issues": [], "releases": [], "repo_info": {"stars": 100}},
"data": {
"readme": "#",
"issues": [],
"releases": [],
"repo_info": {"stars": 100},
},
},
{
"repo": "org/second",
"repo_id": "org_second",
"data": {"readme": "#", "issues": [], "releases": [], "repo_info": {"stars": 50}},
"data": {
"readme": "#",
"issues": [],
"releases": [],
"repo_info": {"stars": 50},
},
},
],
"pdf": [],
@@ -406,7 +452,11 @@ class TestUnifiedSkillBuilderPdfReferences(unittest.TestCase):
scraped_data = {
"documentation": [],
"github": [],
"pdf": [{"path": "/path/to/doc1.pdf"}, {"path": "/path/to/doc2.pdf"}, {"path": "/path/to/doc3.pdf"}],
"pdf": [
{"path": "/path/to/doc1.pdf"},
{"path": "/path/to/doc2.pdf"},
{"path": "/path/to/doc3.pdf"},
],
}
builder = UnifiedSkillBuilder(config, scraped_data)

View File

@@ -41,7 +41,9 @@ class TestPackageSkill(unittest.TestCase):
with tempfile.TemporaryDirectory() as tmpdir:
skill_dir = self.create_test_skill_directory(tmpdir)
success, zip_path = package_skill(skill_dir, open_folder_after=False, skip_quality_check=True)
success, zip_path = package_skill(
skill_dir, open_folder_after=False, skip_quality_check=True
)
self.assertTrue(success)
self.assertIsNotNone(zip_path)
@@ -54,7 +56,9 @@ class TestPackageSkill(unittest.TestCase):
with tempfile.TemporaryDirectory() as tmpdir:
skill_dir = self.create_test_skill_directory(tmpdir)
success, zip_path = package_skill(skill_dir, open_folder_after=False, skip_quality_check=True)
success, zip_path = package_skill(
skill_dir, open_folder_after=False, skip_quality_check=True
)
self.assertTrue(success)
@@ -77,7 +81,9 @@ class TestPackageSkill(unittest.TestCase):
# Add a backup file
(skill_dir / "SKILL.md.backup").write_text("# Backup")
success, zip_path = package_skill(skill_dir, open_folder_after=False, skip_quality_check=True)
success, zip_path = package_skill(
skill_dir, open_folder_after=False, skip_quality_check=True
)
self.assertTrue(success)
@@ -88,7 +94,9 @@ class TestPackageSkill(unittest.TestCase):
def test_package_nonexistent_directory(self):
"""Test packaging a nonexistent directory"""
success, zip_path = package_skill("/nonexistent/path", open_folder_after=False, skip_quality_check=True)
success, zip_path = package_skill(
"/nonexistent/path", open_folder_after=False, skip_quality_check=True
)
self.assertFalse(success)
self.assertIsNone(zip_path)
@@ -99,7 +107,9 @@ class TestPackageSkill(unittest.TestCase):
skill_dir = Path(tmpdir) / "invalid-skill"
skill_dir.mkdir()
success, zip_path = package_skill(skill_dir, open_folder_after=False, skip_quality_check=True)
success, zip_path = package_skill(
skill_dir, open_folder_after=False, skip_quality_check=True
)
self.assertFalse(success)
self.assertIsNone(zip_path)
@@ -118,7 +128,9 @@ class TestPackageSkill(unittest.TestCase):
(skill_dir / "scripts").mkdir()
(skill_dir / "assets").mkdir()
success, zip_path = package_skill(skill_dir, open_folder_after=False, skip_quality_check=True)
success, zip_path = package_skill(
skill_dir, open_folder_after=False, skip_quality_check=True
)
self.assertTrue(success)
# Zip should be in output directory, not inside skill directory
@@ -135,7 +147,9 @@ class TestPackageSkill(unittest.TestCase):
(skill_dir / "scripts").mkdir()
(skill_dir / "assets").mkdir()
success, zip_path = package_skill(skill_dir, open_folder_after=False, skip_quality_check=True)
success, zip_path = package_skill(
skill_dir, open_folder_after=False, skip_quality_check=True
)
self.assertTrue(success)
self.assertEqual(zip_path.name, "my-awesome-skill.zip")
@@ -149,7 +163,9 @@ class TestPackageSkillCLI(unittest.TestCase):
import subprocess
try:
result = subprocess.run(["skill-seekers", "package", "--help"], capture_output=True, text=True, timeout=5)
result = subprocess.run(
["skill-seekers", "package", "--help"], capture_output=True, text=True, timeout=5
)
# argparse may return 0 or 2 for --help
self.assertIn(result.returncode, [0, 2])
@@ -163,7 +179,9 @@ class TestPackageSkillCLI(unittest.TestCase):
import subprocess
try:
result = subprocess.run(["skill-seekers-package", "--help"], capture_output=True, text=True, timeout=5)
result = subprocess.run(
["skill-seekers-package", "--help"], capture_output=True, text=True, timeout=5
)
# argparse may return 0 or 2 for --help
self.assertIn(result.returncode, [0, 2])

View File

@@ -126,7 +126,9 @@ class TestPackageStructure:
def test_mcp_tools_init_file_exists(self):
"""Test that src/skill_seekers/mcp/tools/__init__.py exists."""
init_file = Path(__file__).parent.parent / "src" / "skill_seekers" / "mcp" / "tools" / "__init__.py"
init_file = (
Path(__file__).parent.parent / "src" / "skill_seekers" / "mcp" / "tools" / "__init__.py"
)
assert init_file.exists(), "src/skill_seekers/mcp/tools/__init__.py not found"
def test_cli_init_has_docstring(self):

View File

@@ -108,7 +108,11 @@ class TestUnlimitedMode(unittest.TestCase):
def test_limited_mode_default(self):
"""Test default max_pages is limited"""
config = {"name": "test", "base_url": "https://example.com/", "selectors": {"main_content": "article"}}
config = {
"name": "test",
"base_url": "https://example.com/",
"selectors": {"main_content": "article"},
}
with tempfile.TemporaryDirectory() as tmpdir:
os.chdir(tmpdir)
@@ -145,7 +149,11 @@ class TestRateLimiting(unittest.TestCase):
def test_rate_limit_default(self):
"""Test default rate_limit is 0.5"""
config = {"name": "test", "base_url": "https://example.com/", "selectors": {"main_content": "article"}}
config = {
"name": "test",
"base_url": "https://example.com/",
"selectors": {"main_content": "article"},
}
with tempfile.TemporaryDirectory() as tmpdir:
os.chdir(tmpdir)

View File

@@ -218,7 +218,9 @@ class Subject:
pattern = patterns[0]
self.assertGreaterEqual(pattern.confidence, 0.8)
evidence_str = " ".join(pattern.evidence).lower()
self.assertTrue("attach" in evidence_str and "detach" in evidence_str and "notify" in evidence_str)
self.assertTrue(
"attach" in evidence_str and "detach" in evidence_str and "notify" in evidence_str
)
def test_pubsub_pattern(self):
"""Test publish/subscribe variant"""

View File

@@ -250,7 +250,10 @@ class TestTableExtraction(unittest.TestCase):
# Create mock table
mock_table = Mock()
mock_table.extract.return_value = [["Header 1", "Header 2", "Header 3"], ["Data 1", "Data 2", "Data 3"]]
mock_table.extract.return_value = [
["Header 1", "Header 2", "Header 3"],
["Data 1", "Data 2", "Data 3"],
]
mock_table.bbox = (0, 0, 100, 100)
# Create mock tables result

View File

@@ -106,7 +106,13 @@ class TestLanguageDetection(unittest.TestCase):
extractor.language_detector = LanguageDetector(min_confidence=0.15)
test_codes = ["def foo(): pass", "const x = 10;", "#include <stdio.h>", "random text here", ""]
test_codes = [
"def foo(): pass",
"const x = 10;",
"#include <stdio.h>",
"random text here",
"",
]
for code in test_codes:
_, confidence = extractor.detect_language_from_code(code)
@@ -246,7 +252,10 @@ class TestChapterDetection(unittest.TestCase):
def test_detect_chapter_with_number(self):
"""Test chapter detection with number"""
extractor = self.PDFExtractor.__new__(self.PDFExtractor)
page_data = {"text": "Chapter 1: Introduction to Python\nThis is the first chapter.", "headings": []}
page_data = {
"text": "Chapter 1: Introduction to Python\nThis is the first chapter.",
"headings": [],
}
is_chapter, title = extractor.detect_chapter_start(page_data)
@@ -277,7 +286,10 @@ class TestChapterDetection(unittest.TestCase):
def test_not_chapter(self):
"""Test normal text is not detected as chapter"""
extractor = self.PDFExtractor.__new__(self.PDFExtractor)
page_data = {"text": "This is just normal paragraph text without any chapter markers.", "headings": []}
page_data = {
"text": "This is just normal paragraph text without any chapter markers.",
"headings": [],
}
is_chapter, title = extractor.detect_chapter_start(page_data)
@@ -302,12 +314,20 @@ class TestCodeBlockMerging(unittest.TestCase):
pages = [
{
"page_number": 1,
"code_samples": [{"code": "def hello():", "language": "python", "detection_method": "pattern"}],
"code_samples": [
{"code": "def hello():", "language": "python", "detection_method": "pattern"}
],
"code_blocks_count": 1,
},
{
"page_number": 2,
"code_samples": [{"code": ' print("world")', "language": "python", "detection_method": "pattern"}],
"code_samples": [
{
"code": ' print("world")',
"language": "python",
"detection_method": "pattern",
}
],
"code_blocks_count": 1,
},
]
@@ -325,12 +345,20 @@ class TestCodeBlockMerging(unittest.TestCase):
pages = [
{
"page_number": 1,
"code_samples": [{"code": "def foo():", "language": "python", "detection_method": "pattern"}],
"code_samples": [
{"code": "def foo():", "language": "python", "detection_method": "pattern"}
],
"code_blocks_count": 1,
},
{
"page_number": 2,
"code_samples": [{"code": "const x = 10;", "language": "javascript", "detection_method": "pattern"}],
"code_samples": [
{
"code": "const x = 10;",
"language": "javascript",
"detection_method": "pattern",
}
],
"code_blocks_count": 1,
},
]
@@ -392,7 +420,11 @@ class TestQualityFiltering(unittest.TestCase):
extractor.min_quality = 5.0
# High quality block
high_quality = {"code": "def calculate():\n return 42", "language": "python", "quality": 8.0}
high_quality = {
"code": "def calculate():\n return 42",
"language": "python",
"quality": 8.0,
}
# Low quality block
low_quality = {"code": "x", "language": "unknown", "quality": 2.0}

View File

@@ -103,7 +103,11 @@ class TestCategorization(unittest.TestCase):
# Mock extracted data with different content
converter.extracted_data = {
"pages": [
{"page_number": 1, "text": "Introduction to the API", "chapter": "Chapter 1: Getting Started"},
{
"page_number": 1,
"text": "Introduction to the API",
"chapter": "Chapter 1: Getting Started",
},
{"page_number": 2, "text": "API reference for functions", "chapter": None},
]
}
@@ -140,7 +144,9 @@ class TestCategorization(unittest.TestCase):
converter = self.PDFToSkillConverter(config)
# Mock data without chapters
converter.extracted_data = {"pages": [{"page_number": 1, "text": "Some content", "chapter": None}]}
converter.extracted_data = {
"pages": [{"page_number": 1, "text": "Some content", "chapter": None}]
}
categories = converter.categorize_content()
@@ -270,7 +276,13 @@ class TestCodeBlockHandling(unittest.TestCase):
{
"page_number": 1,
"text": "Example code",
"code_blocks": [{"code": "def hello():\n print('world')", "language": "python", "quality": 8.0}],
"code_blocks": [
{
"code": "def hello():\n print('world')",
"language": "python",
"quality": 8.0,
}
],
"images": [],
}
],
@@ -305,7 +317,11 @@ class TestCodeBlockHandling(unittest.TestCase):
"text": "Code examples",
"code_blocks": [
{"code": "x = 1", "language": "python", "quality": 2.0},
{"code": "def process():\n return result", "language": "python", "quality": 9.0},
{
"code": "def process():\n return result",
"language": "python",
"quality": 9.0,
},
],
"images": [],
}
@@ -354,7 +370,15 @@ class TestImageHandling(unittest.TestCase):
"page_number": 1,
"text": "See diagram",
"code_blocks": [],
"images": [{"page": 1, "index": 0, "width": 100, "height": 100, "data": mock_image_bytes}],
"images": [
{
"page": 1,
"index": 0,
"width": 100,
"height": 100,
"data": mock_image_bytes,
}
],
}
],
"total_pages": 1,
@@ -384,7 +408,15 @@ class TestImageHandling(unittest.TestCase):
"page_number": 1,
"text": "Architecture diagram",
"code_blocks": [],
"images": [{"page": 1, "index": 0, "width": 200, "height": 150, "data": mock_image_bytes}],
"images": [
{
"page": 1,
"index": 0,
"width": 200,
"height": 150,
"data": mock_image_bytes,
}
],
}
],
"total_pages": 1,

View File

@@ -27,7 +27,9 @@ class TestQualityChecker(unittest.TestCase):
refs_dir = skill_dir / "references"
refs_dir.mkdir()
(refs_dir / "index.md").write_text("# Index\n\nTest reference.", encoding="utf-8")
(refs_dir / "getting_started.md").write_text("# Getting Started\n\nHow to start.", encoding="utf-8")
(refs_dir / "getting_started.md").write_text(
"# Getting Started\n\nHow to start.", encoding="utf-8"
)
return skill_dir
@@ -188,7 +190,9 @@ See [this file](nonexistent.md) for more info.
# Should have warning about broken link
self.assertTrue(report.has_warnings)
self.assertTrue(any("broken link" in issue.message.lower() for issue in report.warnings))
self.assertTrue(
any("broken link" in issue.message.lower() for issue in report.warnings)
)
def test_quality_score_calculation(self):
"""Test that quality score is calculated correctly"""
@@ -369,7 +373,10 @@ Finally, verify the installation.
# Should have info about found workflow steps
completeness_infos = [i for i in report.info if i.category == "completeness"]
self.assertTrue(
any("workflow" in i.message.lower() or "step" in i.message.lower() for i in completeness_infos)
any(
"workflow" in i.message.lower() or "step" in i.message.lower()
for i in completeness_infos
)
)
def test_checker_suggests_adding_prerequisites(self):
@@ -394,7 +401,8 @@ Just run the command.
completeness_infos = [i for i in report.info if i.category == "completeness"]
self.assertTrue(
any(
"consider" in i.message.lower() and "prerequisites" in i.message.lower() for i in completeness_infos
"consider" in i.message.lower() and "prerequisites" in i.message.lower()
for i in completeness_infos
)
)
@@ -425,7 +433,9 @@ class TestQualityCheckerCLI(unittest.TestCase):
import subprocess
result = subprocess.run(
["python3", "-m", "skill_seekers.cli.quality_checker", "/nonexistent/path"], capture_output=True, text=True
["python3", "-m", "skill_seekers.cli.quality_checker", "/nonexistent/path"],
capture_output=True,
text=True,
)
# Should fail

View File

@@ -10,7 +10,11 @@ from unittest.mock import Mock, patch
import pytest
from skill_seekers.cli.config_manager import ConfigManager
from skill_seekers.cli.rate_limit_handler import RateLimitError, RateLimitHandler, create_github_headers
from skill_seekers.cli.rate_limit_handler import (
RateLimitError,
RateLimitHandler,
create_github_headers,
)
class TestRateLimitHandler:
@@ -45,7 +49,11 @@ class TestRateLimitHandler:
"""Test initialization pulls strategy from config."""
mock_config = Mock()
mock_config.config = {
"rate_limit": {"auto_switch_profiles": True, "show_countdown": True, "default_timeout_minutes": 30}
"rate_limit": {
"auto_switch_profiles": True,
"show_countdown": True,
"default_timeout_minutes": 30,
}
}
mock_config.get_rate_limit_strategy.return_value = "wait"
mock_config.get_timeout_minutes.return_value = 45
@@ -112,7 +120,11 @@ class TestRateLimitHandler:
# Mock config
mock_config = Mock()
mock_config.config = {
"rate_limit": {"auto_switch_profiles": False, "show_countdown": True, "default_timeout_minutes": 30}
"rate_limit": {
"auto_switch_profiles": False,
"show_countdown": True,
"default_timeout_minutes": 30,
}
}
mock_config.get_rate_limit_strategy.return_value = "prompt"
mock_config.get_timeout_minutes.return_value = 30
@@ -121,7 +133,9 @@ class TestRateLimitHandler:
# Mock rate limit check
reset_time = int((datetime.now() + timedelta(minutes=60)).timestamp())
mock_response = Mock()
mock_response.json.return_value = {"rate": {"limit": 5000, "remaining": 4500, "reset": reset_time}}
mock_response.json.return_value = {
"rate": {"limit": 5000, "remaining": 4500, "reset": reset_time}
}
mock_response.raise_for_status = Mock()
mock_get.return_value = mock_response
@@ -158,7 +172,11 @@ class TestRateLimitHandler:
"""Test non-interactive mode with fail strategy raises error."""
mock_config = Mock()
mock_config.config = {
"rate_limit": {"auto_switch_profiles": False, "show_countdown": True, "default_timeout_minutes": 30}
"rate_limit": {
"auto_switch_profiles": False,
"show_countdown": True,
"default_timeout_minutes": 30,
}
}
mock_config.get_rate_limit_strategy.return_value = "fail"
mock_config.get_timeout_minutes.return_value = 30
@@ -208,7 +226,11 @@ class TestConfigManagerIntegration:
config_dir = tmp_path / ".config" / "skill-seekers"
monkeypatch.setattr(ConfigManager, "CONFIG_DIR", config_dir)
monkeypatch.setattr(ConfigManager, "CONFIG_FILE", config_dir / "config.json")
monkeypatch.setattr(ConfigManager, "PROGRESS_DIR", tmp_path / ".local" / "share" / "skill-seekers" / "progress")
monkeypatch.setattr(
ConfigManager,
"PROGRESS_DIR",
tmp_path / ".local" / "share" / "skill-seekers" / "progress",
)
config = ConfigManager()
@@ -239,7 +261,11 @@ class TestConfigManagerIntegration:
config_dir = test_dir / ".config" / "skill-seekers"
monkeypatch.setattr(ConfigManager, "CONFIG_DIR", config_dir)
monkeypatch.setattr(ConfigManager, "CONFIG_FILE", config_dir / "config.json")
monkeypatch.setattr(ConfigManager, "PROGRESS_DIR", test_dir / ".local" / "share" / "skill-seekers" / "progress")
monkeypatch.setattr(
ConfigManager,
"PROGRESS_DIR",
test_dir / ".local" / "share" / "skill-seekers" / "progress",
)
monkeypatch.setattr(ConfigManager, "WELCOME_FLAG", config_dir / ".welcomed")
config = ConfigManager()

View File

@@ -80,7 +80,9 @@ class TestRealWorldFastMCP:
try:
# Start with basic analysis (fast) to verify three-stream architecture
# Can be changed to "c3x" for full analysis (20-60 minutes)
depth_mode = os.getenv("TEST_DEPTH", "basic") # Use 'basic' for quick test, 'c3x' for full
depth_mode = os.getenv(
"TEST_DEPTH", "basic"
) # Use 'basic' for quick test, 'c3x' for full
print(f"📊 Analysis depth: {depth_mode}")
if depth_mode == "basic":
@@ -112,7 +114,9 @@ class TestRealWorldFastMCP:
# Verify result structure
assert result is not None, "Analysis result is None"
assert result.source_type == "github", f"Expected source_type 'github', got '{result.source_type}'"
assert result.source_type == "github", (
f"Expected source_type 'github', got '{result.source_type}'"
)
# Depth can be 'basic' or 'c3x' depending on TEST_DEPTH env var
assert result.analysis_depth in ["basic", "c3x"], f"Invalid depth '{result.analysis_depth}'"
print(f"\n📊 Analysis depth: {result.analysis_depth}")
@@ -133,7 +137,9 @@ class TestRealWorldFastMCP:
assert readme is not None, "README missing from GitHub docs"
print(f" ✅ README length: {len(readme)} chars")
assert len(readme) > 100, "README too short (< 100 chars)"
assert "fastmcp" in readme.lower() or "mcp" in readme.lower(), "README doesn't mention FastMCP/MCP"
assert "fastmcp" in readme.lower() or "mcp" in readme.lower(), (
"README doesn't mention FastMCP/MCP"
)
contributing = result.github_docs.get("contributing")
if contributing:
@@ -193,7 +199,9 @@ class TestRealWorldFastMCP:
print("\n C3.1 - Design Patterns:")
print(f" ✅ Count: {len(c3_1)}")
if len(c3_1) > 0:
print(f" ✅ Sample: {c3_1[0].get('name', 'N/A')} ({c3_1[0].get('count', 0)} instances)")
print(
f" ✅ Sample: {c3_1[0].get('name', 'N/A')} ({c3_1[0].get('count', 0)} instances)"
)
# Verify it's not empty/placeholder
assert c3_1[0].get("name"), "Pattern has no name"
assert c3_1[0].get("count", 0) > 0, "Pattern has zero count"
@@ -256,7 +264,12 @@ class TestRealWorldFastMCP:
print("=" * 80)
from skill_seekers.cli.generate_router import RouterGenerator
from skill_seekers.cli.github_fetcher import CodeStream, DocsStream, InsightsStream, ThreeStreamData
from skill_seekers.cli.github_fetcher import (
CodeStream,
DocsStream,
InsightsStream,
ThreeStreamData,
)
result = fastmcp_analysis
@@ -302,7 +315,9 @@ class TestRealWorldFastMCP:
# Generate router
print("\n🧭 Generating router...")
generator = RouterGenerator(
config_paths=[str(config1), str(config2)], router_name="fastmcp", github_streams=github_streams
config_paths=[str(config1), str(config2)],
router_name="fastmcp",
github_streams=github_streams,
)
skill_md = generator.generate_skill_md()
@@ -463,7 +478,9 @@ class TestRealWorldFastMCP:
print(f" {'' if no_todos else ''} No TODO placeholders")
# 6. Has GitHub content
has_github = any(marker in content for marker in ["Repository:", "", "Issue #", "github.com"])
has_github = any(
marker in content for marker in ["Repository:", "", "Issue #", "github.com"]
)
print(f" {'' if has_github else '⚠️ '} Has GitHub integration")
# 7. Has routing
@@ -471,7 +488,15 @@ class TestRealWorldFastMCP:
print(f" {'' if has_routing else '⚠️ '} Has routing guidance")
# Calculate quality score
checks = [has_frontmatter, has_heading, section_count >= 3, has_code, no_todos, has_github, has_routing]
checks = [
has_frontmatter,
has_heading,
section_count >= 3,
has_code,
no_todos,
has_github,
has_routing,
]
score = sum(checks) / len(checks) * 100
print(f"\n📊 Quality Score: {score:.0f}%")

View File

@@ -321,7 +321,13 @@ class TestCategorization(unittest.TestCase):
def test_categorize_by_url(self):
"""Test categorization based on URL"""
pages = [{"url": "https://example.com/api/reference", "title": "Some Title", "content": "Some content"}]
pages = [
{
"url": "https://example.com/api/reference",
"title": "Some Title",
"content": "Some content",
}
]
categories = self.converter.smart_categorize(pages)
# Should categorize to 'api' based on URL containing 'api'
@@ -331,7 +337,11 @@ class TestCategorization(unittest.TestCase):
def test_categorize_by_title(self):
"""Test categorization based on title"""
pages = [
{"url": "https://example.com/docs/page", "title": "API Reference Documentation", "content": "Some content"}
{
"url": "https://example.com/docs/page",
"title": "API Reference Documentation",
"content": "Some content",
}
]
categories = self.converter.smart_categorize(pages)
@@ -368,7 +378,13 @@ class TestCategorization(unittest.TestCase):
def test_empty_categories_removed(self):
"""Test empty categories are removed"""
pages = [{"url": "https://example.com/api/reference", "title": "API Reference", "content": "API documentation"}]
pages = [
{
"url": "https://example.com/api/reference",
"title": "API Reference",
"content": "API documentation",
}
]
categories = self.converter.smart_categorize(pages)
# Only 'api' should exist, not empty 'guides' or 'getting_started'

View File

@@ -39,11 +39,15 @@ class TestSetupMCPScript:
def test_references_correct_mcp_directory(self, script_content):
"""Test that script references src/skill_seekers/mcp/ (v2.4.0 MCP 2025 upgrade)"""
# Should NOT reference old mcp/ or skill_seeker_mcp/ directories
old_mcp_refs = re.findall(r"(?:^|[^a-z_])(?<!/)mcp/(?!\.json)", script_content, re.MULTILINE)
old_mcp_refs = re.findall(
r"(?:^|[^a-z_])(?<!/)mcp/(?!\.json)", script_content, re.MULTILINE
)
old_skill_seeker_refs = re.findall(r"skill_seeker_mcp/", script_content)
# Allow /mcp/ (as in src/skill_seekers/mcp/) but not standalone mcp/
assert len(old_mcp_refs) == 0, f"Found {len(old_mcp_refs)} references to old 'mcp/' directory: {old_mcp_refs}"
assert len(old_mcp_refs) == 0, (
f"Found {len(old_mcp_refs)} references to old 'mcp/' directory: {old_mcp_refs}"
)
assert len(old_skill_seeker_refs) == 0, (
f"Found {len(old_skill_seeker_refs)} references to old 'skill_seeker_mcp/': {old_skill_seeker_refs}"
)
@@ -72,7 +76,9 @@ class TestSetupMCPScript:
assert len(old_skill_seeker_refs) == 0, (
f"Should NOT reference 'skill_seeker_mcp/requirements.txt' (found {len(old_skill_seeker_refs)})"
)
assert len(old_mcp_refs) == 0, f"Should NOT reference old 'mcp/requirements.txt' (found {len(old_mcp_refs)})"
assert len(old_mcp_refs) == 0, (
f"Should NOT reference old 'mcp/requirements.txt' (found {len(old_mcp_refs)})"
)
def test_server_py_path(self, script_content):
"""Test that server_fastmcp.py module is referenced (v2.4.0 MCP 2025 upgrade)"""
@@ -116,7 +122,9 @@ class TestSetupMCPScript:
"""Test that JSON config examples use correct format (v2.4.0 MCP 2025 upgrade)"""
# MCP 2025 uses module import: python3 -m skill_seekers.mcp.server_fastmcp
# Config should show the server_fastmcp.py path for stdio examples
assert "server_fastmcp.py" in script_content, "Config should reference server_fastmcp.py (MCP 2025 upgrade)"
assert "server_fastmcp.py" in script_content, (
"Config should reference server_fastmcp.py (MCP 2025 upgrade)"
)
def test_no_hardcoded_paths(self, script_content):
"""Test that script doesn't contain hardcoded absolute paths"""
@@ -128,7 +136,9 @@ class TestSetupMCPScript:
"""Test that pytest commands reference correct test files"""
# Check for test file references
if "pytest" in script_content:
assert "tests/test_mcp_server.py" in script_content, "Should reference correct test file path"
assert "tests/test_mcp_server.py" in script_content, (
"Should reference correct test file path"
)
class TestBashScriptGeneral:
@@ -160,7 +170,9 @@ class TestBashScriptGeneral:
with open(script) as f:
content = f.read()
# Check for set -e or set -o errexit
has_error_handling = re.search(r"set\s+-[a-z]*e", content) or re.search(r"set\s+-o\s+errexit", content)
has_error_handling = re.search(r"set\s+-[a-z]*e", content) or re.search(
r"set\s+-o\s+errexit", content
)
assert has_error_handling, f"{script} should use 'set -e' for error handling"
def test_no_deprecated_backticks(self, all_bash_scripts):
@@ -172,7 +184,9 @@ class TestBashScriptGeneral:
lines = [line for line in content.split("\n") if not line.strip().startswith("#")]
code_content = "\n".join(lines)
backticks = re.findall(r"`[^`]+`", code_content)
assert len(backticks) == 0, f"{script} uses deprecated backticks: {backticks}. Use $() instead"
assert len(backticks) == 0, (
f"{script} uses deprecated backticks: {backticks}. Use $() instead"
)
class TestMCPServerPaths:
@@ -185,9 +199,10 @@ class TestMCPServerPaths:
with open(workflow_file) as f:
content = f.read()
# Should NOT reference old mcp/ directory
assert "mcp/requirements.txt" not in content or "skill_seeker_mcp/requirements.txt" in content, (
"GitHub workflow should use correct MCP paths"
)
assert (
"mcp/requirements.txt" not in content
or "skill_seeker_mcp/requirements.txt" in content
), "GitHub workflow should use correct MCP paths"
def test_readme_references_correct_paths(self):
"""Test that README references correct MCP paths"""
@@ -197,7 +212,9 @@ class TestMCPServerPaths:
content = f.read()
# Check for old mcp/ directory paths (but allow mcp.json and "mcp" package name)
# Use negative lookbehind to exclude skill_seeker_mcp/
old_mcp_refs = re.findall(r"(?<!skill_seeker_)mcp/(server\.py|requirements\.txt)", content)
old_mcp_refs = re.findall(
r"(?<!skill_seeker_)mcp/(server\.py|requirements\.txt)", content
)
if len(old_mcp_refs) > 0:
pytest.fail(f"README references old mcp/ directory: {old_mcp_refs}")
@@ -208,7 +225,9 @@ class TestMCPServerPaths:
with open(doc_file) as f:
content = f.read()
# Check for old mcp/ directory paths (but allow mcp.json and "mcp" package name)
old_mcp_refs = re.findall(r"(?<!skill_seeker_)mcp/(server\.py|requirements\.txt)", content)
old_mcp_refs = re.findall(
r"(?<!skill_seeker_)mcp/(server\.py|requirements\.txt)", content
)
if len(old_mcp_refs) > 0:
pytest.fail(f"{doc_file} references old mcp/ directory: {old_mcp_refs}")

View File

@@ -20,7 +20,11 @@ class TestSkipLlmsTxtConfig(unittest.TestCase):
def test_default_skip_llms_txt_is_false(self):
"""Test that skip_llms_txt defaults to False when not specified."""
config = {"name": "test", "base_url": "https://example.com/", "selectors": {"main_content": "article"}}
config = {
"name": "test",
"base_url": "https://example.com/",
"selectors": {"main_content": "article"},
}
converter = DocToSkillConverter(config, dry_run=True)
self.assertFalse(converter.skip_llms_txt)
@@ -203,7 +207,11 @@ class TestSkipLlmsTxtWithRealConfig(unittest.TestCase):
"base_url": "https://example.com/",
"selectors": {"main_content": "article"},
"skip_llms_txt": True,
"start_urls": ["https://example.com/docs/", "https://example.com/api/", "https://example.com/guide/"],
"start_urls": [
"https://example.com/docs/",
"https://example.com/api/",
"https://example.com/guide/",
],
}
converter = DocToSkillConverter(config, dry_run=True)

View File

@@ -182,7 +182,10 @@ Another paragraph of content.
assert "YOUR TASK:" in prompt
assert "REFERENCE DOCUMENTATION:" in prompt
# After summarization, content should include the marker
assert "[Content intelligently summarized" in prompt or "[Content truncated for size...]" in prompt
assert (
"[Content intelligently summarized" in prompt
or "[Content truncated for size...]" in prompt
)
def test_run_detects_large_skill(self, tmp_path, monkeypatch, capsys):
"""Test that run() automatically detects large skills"""

View File

@@ -53,7 +53,10 @@ class TestSourceManagerInit:
registry_file = temp_config_dir / "sources.json"
# Create existing registry
existing_data = {"version": "1.0", "sources": [{"name": "test", "git_url": "https://example.com/repo.git"}]}
existing_data = {
"version": "1.0",
"sources": [{"name": "test", "git_url": "https://example.com/repo.git"}],
}
with open(registry_file, "w") as f:
json.dump(existing_data, f)
@@ -78,7 +81,9 @@ class TestAddSource:
def test_add_source_minimal(self, source_manager):
"""Test adding source with minimal parameters."""
source = source_manager.add_source(name="team", git_url="https://github.com/myorg/configs.git")
source = source_manager.add_source(
name="team", git_url="https://github.com/myorg/configs.git"
)
assert source["name"] == "team"
assert source["git_url"] == "https://github.com/myorg/configs.git"
@@ -123,17 +128,23 @@ class TestAddSource:
def test_add_source_invalid_name_special_chars(self, source_manager):
"""Test that source names with special characters are rejected."""
with pytest.raises(ValueError, match="Invalid source name"):
source_manager.add_source(name="team@company", git_url="https://github.com/org/repo.git")
source_manager.add_source(
name="team@company", git_url="https://github.com/org/repo.git"
)
def test_add_source_valid_name_with_hyphens(self, source_manager):
"""Test that source names with hyphens are allowed."""
source = source_manager.add_source(name="team-alpha", git_url="https://github.com/org/repo.git")
source = source_manager.add_source(
name="team-alpha", git_url="https://github.com/org/repo.git"
)
assert source["name"] == "team-alpha"
def test_add_source_valid_name_with_underscores(self, source_manager):
"""Test that source names with underscores are allowed."""
source = source_manager.add_source(name="team_alpha", git_url="https://github.com/org/repo.git")
source = source_manager.add_source(
name="team_alpha", git_url="https://github.com/org/repo.git"
)
assert source["name"] == "team_alpha"
@@ -144,7 +155,9 @@ class TestAddSource:
def test_add_source_strips_git_url(self, source_manager):
"""Test that git URLs are stripped of whitespace."""
source = source_manager.add_source(name="team", git_url=" https://github.com/org/repo.git ")
source = source_manager.add_source(
name="team", git_url=" https://github.com/org/repo.git "
)
assert source["git_url"] == "https://github.com/org/repo.git"
@@ -258,9 +271,15 @@ class TestListSources:
def test_list_sources_enabled_only(self, source_manager):
"""Test listing only enabled sources."""
source_manager.add_source(name="enabled1", git_url="https://example.com/1.git", enabled=True)
source_manager.add_source(name="disabled", git_url="https://example.com/2.git", enabled=False)
source_manager.add_source(name="enabled2", git_url="https://example.com/3.git", enabled=True)
source_manager.add_source(
name="enabled1", git_url="https://example.com/1.git", enabled=True
)
source_manager.add_source(
name="disabled", git_url="https://example.com/2.git", enabled=False
)
source_manager.add_source(
name="enabled2", git_url="https://example.com/3.git", enabled=True
)
sources = source_manager.list_sources(enabled_only=True)
@@ -271,7 +290,9 @@ class TestListSources:
def test_list_sources_all_when_some_disabled(self, source_manager):
"""Test listing all sources includes disabled ones."""
source_manager.add_source(name="enabled", git_url="https://example.com/1.git", enabled=True)
source_manager.add_source(name="disabled", git_url="https://example.com/2.git", enabled=False)
source_manager.add_source(
name="disabled", git_url="https://example.com/2.git", enabled=False
)
sources = source_manager.list_sources(enabled_only=False)
@@ -339,7 +360,9 @@ class TestUpdateSource:
"""Test updating source git URL."""
source_manager.add_source(name="team", git_url="https://github.com/org/repo1.git")
updated = source_manager.update_source(name="team", git_url="https://github.com/org/repo2.git")
updated = source_manager.update_source(
name="team", git_url="https://github.com/org/repo2.git"
)
assert updated["git_url"] == "https://github.com/org/repo2.git"
@@ -353,7 +376,9 @@ class TestUpdateSource:
def test_update_source_enabled(self, source_manager):
"""Test updating source enabled status."""
source_manager.add_source(name="team", git_url="https://github.com/org/repo.git", enabled=True)
source_manager.add_source(
name="team", git_url="https://github.com/org/repo.git", enabled=True
)
updated = source_manager.update_source(name="team", enabled=False)
@@ -361,7 +386,9 @@ class TestUpdateSource:
def test_update_source_priority(self, source_manager):
"""Test updating source priority."""
source_manager.add_source(name="team", git_url="https://github.com/org/repo.git", priority=100)
source_manager.add_source(
name="team", git_url="https://github.com/org/repo.git", priority=100
)
updated = source_manager.update_source(name="team", priority=1)
@@ -372,7 +399,11 @@ class TestUpdateSource:
source_manager.add_source(name="team", git_url="https://github.com/org/repo.git")
updated = source_manager.update_source(
name="team", git_url="https://gitlab.com/org/repo.git", type="gitlab", branch="develop", priority=1
name="team",
git_url="https://gitlab.com/org/repo.git",
type="gitlab",
branch="develop",
priority=1,
)
assert updated["git_url"] == "https://gitlab.com/org/repo.git"
@@ -412,13 +443,17 @@ class TestDefaultTokenEnv:
def test_default_token_env_github(self, source_manager):
"""Test GitHub sources get GITHUB_TOKEN."""
source = source_manager.add_source(name="team", git_url="https://github.com/org/repo.git", source_type="github")
source = source_manager.add_source(
name="team", git_url="https://github.com/org/repo.git", source_type="github"
)
assert source["token_env"] == "GITHUB_TOKEN"
def test_default_token_env_gitlab(self, source_manager):
"""Test GitLab sources get GITLAB_TOKEN."""
source = source_manager.add_source(name="team", git_url="https://gitlab.com/org/repo.git", source_type="gitlab")
source = source_manager.add_source(
name="team", git_url="https://gitlab.com/org/repo.git", source_type="gitlab"
)
assert source["token_env"] == "GITLAB_TOKEN"
@@ -449,7 +484,10 @@ class TestDefaultTokenEnv:
def test_override_token_env(self, source_manager):
"""Test that custom token_env overrides default."""
source = source_manager.add_source(
name="team", git_url="https://github.com/org/repo.git", source_type="github", token_env="MY_CUSTOM_TOKEN"
name="team",
git_url="https://github.com/org/repo.git",
source_type="github",
token_env="MY_CUSTOM_TOKEN",
)
assert source["token_env"] == "MY_CUSTOM_TOKEN"

View File

@@ -1311,9 +1311,10 @@ class TestSwiftErrorHandling:
detector = LanguageDetector()
# Verify error was logged
assert any("Invalid regex pattern" in str(call) for call in mock_logger.error.call_args_list), (
"Expected error log for malformed pattern"
)
assert any(
"Invalid regex pattern" in str(call)
for call in mock_logger.error.call_args_list
), "Expected error log for malformed pattern"
finally:
# Restore original patterns
@@ -1331,7 +1332,8 @@ class TestSwiftErrorHandling:
# Mock empty SWIFT_PATTERNS during import
with patch.dict(
"sys.modules", {"skill_seekers.cli.swift_patterns": type("MockModule", (), {"SWIFT_PATTERNS": {}})}
"sys.modules",
{"skill_seekers.cli.swift_patterns": type("MockModule", (), {"SWIFT_PATTERNS": {}})},
):
from skill_seekers.cli.language_detector import LanguageDetector
@@ -1368,9 +1370,9 @@ class TestSwiftErrorHandling:
detector = LanguageDetector()
# Verify TypeError was logged
assert any("not a string" in str(call) for call in mock_logger.error.call_args_list), (
"Expected error log for non-string pattern"
)
assert any(
"not a string" in str(call) for call in mock_logger.error.call_args_list
), "Expected error log for non-string pattern"
finally:
ld_module.LANGUAGE_PATTERNS = original

View File

@@ -154,7 +154,8 @@ def test_query(database):
# Check for pytest markers or tags
has_pytest_indicator = any(
"pytest" in " ".join(ex.tags).lower() or "pytest" in ex.description.lower() for ex in examples
"pytest" in " ".join(ex.tags).lower() or "pytest" in ex.description.lower()
for ex in examples
)
self.assertTrue(has_pytest_indicator or len(examples) > 0) # At least extracted something

View File

@@ -149,7 +149,11 @@ def test_detect_missing_in_docs():
{
"url": "https://example.com/api",
"apis": [
{"name": "documented_func", "parameters": [{"name": "x", "type": "int"}], "return_type": "str"}
{
"name": "documented_func",
"parameters": [{"name": "x", "type": "int"}],
"return_type": "str",
}
],
}
]
@@ -185,7 +189,13 @@ def test_detect_missing_in_code():
"pages": [
{
"url": "https://example.com/api",
"apis": [{"name": "obsolete_func", "parameters": [{"name": "x", "type": "int"}], "return_type": "str"}],
"apis": [
{
"name": "obsolete_func",
"parameters": [{"name": "x", "type": "int"}],
"return_type": "str",
}
],
}
]
}
@@ -206,7 +216,13 @@ def test_detect_signature_mismatch():
"pages": [
{
"url": "https://example.com/api",
"apis": [{"name": "func", "parameters": [{"name": "x", "type": "int"}], "return_type": "str"}],
"apis": [
{
"name": "func",
"parameters": [{"name": "x", "type": "int"}],
"return_type": "str",
}
],
}
]
}
@@ -274,7 +290,13 @@ def test_rule_based_merge_docs_only():
"pages": [
{
"url": "https://example.com/api",
"apis": [{"name": "docs_only_api", "parameters": [{"name": "x", "type": "int"}], "return_type": "str"}],
"apis": [
{
"name": "docs_only_api",
"parameters": [{"name": "x", "type": "int"}],
"return_type": "str",
}
],
}
]
}
@@ -329,7 +351,13 @@ def test_rule_based_merge_matched():
"pages": [
{
"url": "https://example.com/api",
"apis": [{"name": "matched_api", "parameters": [{"name": "x", "type": "int"}], "return_type": "str"}],
"apis": [
{
"name": "matched_api",
"parameters": [{"name": "x", "type": "int"}],
"return_type": "str",
}
],
}
]
}
@@ -339,7 +367,11 @@ def test_rule_based_merge_matched():
"analyzed_files": [
{
"functions": [
{"name": "matched_api", "parameters": [{"name": "x", "type_hint": "int"}], "return_type": "str"}
{
"name": "matched_api",
"parameters": [{"name": "x", "type_hint": "int"}],
"return_type": "str",
}
]
}
]
@@ -373,7 +405,9 @@ def test_merge_summary():
github_data = {
"code_analysis": {
"analyzed_files": [{"functions": [{"name": "api3", "parameters": [], "return_type": "bool"}]}]
"analyzed_files": [
{"functions": [{"name": "api3", "parameters": [], "return_type": "bool"}]}
]
}
}
@@ -499,7 +533,12 @@ def test_full_workflow_unified_config():
"merge_mode": "rule-based",
"sources": [
{"type": "documentation", "base_url": "https://example.com", "extract_api": True},
{"type": "github", "repo": "user/repo", "include_code": True, "code_analysis_depth": "surface"},
{
"type": "github",
"repo": "user/repo",
"include_code": True,
"code_analysis_depth": "surface",
},
],
}

View File

@@ -20,7 +20,8 @@ from skill_seekers.cli.unified_codebase_analyzer import AnalysisResult, UnifiedC
# Skip marker for tests requiring GitHub access
requires_github = pytest.mark.skipif(
not os.environ.get("GITHUB_TOKEN"), reason="GITHUB_TOKEN not set - skipping tests that require GitHub access"
not os.environ.get("GITHUB_TOKEN"),
reason="GITHUB_TOKEN not set - skipping tests that require GitHub access",
)
@@ -29,7 +30,9 @@ class TestAnalysisResult:
def test_analysis_result_basic(self):
"""Test basic AnalysisResult creation."""
result = AnalysisResult(code_analysis={"files": []}, source_type="local", analysis_depth="basic")
result = AnalysisResult(
code_analysis={"files": []}, source_type="local", analysis_depth="basic"
)
assert result.code_analysis == {"files": []}
assert result.source_type == "local"
assert result.analysis_depth == "basic"
@@ -262,7 +265,9 @@ class TestGitHubAnalysis:
(tmp_path / "main.py").write_text("print('hello')")
analyzer = UnifiedCodebaseAnalyzer()
result = analyzer.analyze(source="https://github.com/test/repo", depth="basic", fetch_github_metadata=True)
result = analyzer.analyze(
source="https://github.com/test/repo", depth="basic", fetch_github_metadata=True
)
assert result.source_type == "github"
assert result.analysis_depth == "basic"
@@ -281,7 +286,9 @@ class TestGitHubAnalysis:
code_stream = CodeStream(directory=tmp_path, files=[])
docs_stream = DocsStream(readme="# README", contributing=None, docs_files=[])
insights_stream = InsightsStream(metadata={}, common_problems=[], known_solutions=[], top_labels=[])
insights_stream = InsightsStream(
metadata={}, common_problems=[], known_solutions=[], top_labels=[]
)
three_streams = ThreeStreamData(code_stream, docs_stream, insights_stream)
mock_fetcher.fetch.return_value = three_streams
@@ -302,14 +309,18 @@ class TestGitHubAnalysis:
code_stream = CodeStream(directory=tmp_path, files=[])
docs_stream = DocsStream(readme=None, contributing=None, docs_files=[])
insights_stream = InsightsStream(metadata={}, common_problems=[], known_solutions=[], top_labels=[])
insights_stream = InsightsStream(
metadata={}, common_problems=[], known_solutions=[], top_labels=[]
)
three_streams = ThreeStreamData(code_stream, docs_stream, insights_stream)
mock_fetcher.fetch.return_value = three_streams
(tmp_path / "main.py").write_text("code")
analyzer = UnifiedCodebaseAnalyzer()
result = analyzer.analyze(source="https://github.com/test/repo", depth="basic", fetch_github_metadata=False)
result = analyzer.analyze(
source="https://github.com/test/repo", depth="basic", fetch_github_metadata=False
)
# Should not include GitHub docs/insights
assert result.github_docs is None
@@ -356,7 +367,9 @@ class TestTokenHandling:
code_stream = CodeStream(directory=tmp_path, files=[])
docs_stream = DocsStream(readme=None, contributing=None, docs_files=[])
insights_stream = InsightsStream(metadata={}, common_problems=[], known_solutions=[], top_labels=[])
insights_stream = InsightsStream(
metadata={}, common_problems=[], known_solutions=[], top_labels=[]
)
three_streams = ThreeStreamData(code_stream, docs_stream, insights_stream)
mock_fetcher.fetch.return_value = three_streams
@@ -379,7 +392,9 @@ class TestTokenHandling:
code_stream = CodeStream(directory=tmp_path, files=[])
docs_stream = DocsStream(readme=None, contributing=None, docs_files=[])
insights_stream = InsightsStream(metadata={}, common_problems=[], known_solutions=[], top_labels=[])
insights_stream = InsightsStream(
metadata={}, common_problems=[], known_solutions=[], top_labels=[]
)
three_streams = ThreeStreamData(code_stream, docs_stream, insights_stream)
mock_fetcher.fetch.return_value = three_streams

View File

@@ -95,7 +95,14 @@ async def test_mcp_scrape_docs_detection():
"name": "test_mcp_unified",
"description": "Test unified via MCP",
"merge_mode": "rule-based",
"sources": [{"type": "documentation", "base_url": "https://example.com", "extract_api": True, "max_pages": 5}],
"sources": [
{
"type": "documentation",
"base_url": "https://example.com",
"extract_api": True,
"max_pages": 5,
}
],
}
with tempfile.NamedTemporaryFile(mode="w", suffix=".json", delete=False) as f:

View File

@@ -98,7 +98,9 @@ class TestUploadSkillCLI(unittest.TestCase):
import subprocess
try:
result = subprocess.run(["skill-seekers", "upload", "--help"], capture_output=True, text=True, timeout=5)
result = subprocess.run(
["skill-seekers", "upload", "--help"], capture_output=True, text=True, timeout=5
)
# argparse may return 0 or 2 for --help
self.assertIn(result.returncode, [0, 2])
@@ -112,7 +114,9 @@ class TestUploadSkillCLI(unittest.TestCase):
import subprocess
try:
result = subprocess.run(["skill-seekers-upload", "--help"], capture_output=True, text=True, timeout=5)
result = subprocess.run(
["skill-seekers-upload", "--help"], capture_output=True, text=True, timeout=5
)
# argparse may return 0 or 2 for --help
self.assertIn(result.returncode, [0, 2])
@@ -126,7 +130,11 @@ class TestUploadSkillCLI(unittest.TestCase):
result = subprocess.run(["python3", "cli/upload_skill.py"], capture_output=True, text=True)
# Should fail or show usage
self.assertTrue(result.returncode != 0 or "usage" in result.stderr.lower() or "usage" in result.stdout.lower())
self.assertTrue(
result.returncode != 0
or "usage" in result.stderr.lower()
or "usage" in result.stdout.lower()
)
if __name__ == "__main__":