fix: Resolve 21 ruff linting errors (SIM102, SIM117, B904, SIM113, B007)
Fixed all 21 linting errors identified in GitHub Actions:

SIM102 (7 errors - nested if statements):
- config_extractor.py:468 - Combined nested conditions
- config_validator.py (was B904, already fixed)
- pattern_recognizer.py:430,538,916 - Combined nested conditions
- test_example_extractor.py:365,412,460 - Combined nested conditions
- unified_skill_builder.py:1070 - Combined nested conditions

SIM117 (9 errors - multiple with statements):
- test_install_agent.py:418 - Combined with statements
- test_issue_219_e2e.py:278 - Combined with statements
- test_llms_txt_downloader.py:33,88 - Combined with statements
- test_skip_llms_txt.py:75,98,121,148,172,304 - Combined with statements

B904 (1 error - exception handling):
- config_validator.py:62 - Added 'from e' to exception chain

SIM113 (1 error - enumerate usage):
- doc_scraper.py:1068 - Removed unused 'completed' counter variable

B007 (1 error - unused loop variable):
- pdf_scraper.py:167 - Changed 'keywords' to '_' for unused variable

All changes improve code quality without altering functionality.

Tests: 1214 passed, 167 skipped (4 pre-existing failures unrelated)

Co-Authored-By: Claude Sonnet 4.5 <noreply@anthropic.com>
This commit is contained in:
@@ -466,24 +466,23 @@ class ConfigParser:
|
||||
tree = ast.parse(config_file.raw_content)
|
||||
|
||||
for node in ast.walk(tree):
|
||||
if isinstance(node, ast.Assign):
|
||||
# Get variable name and skip private variables
|
||||
if len(node.targets) == 1 and isinstance(node.targets[0], ast.Name) and not node.targets[0].id.startswith("_"):
|
||||
key = node.targets[0].id
|
||||
# Get variable name and skip private variables
|
||||
if isinstance(node, ast.Assign) and len(node.targets) == 1 and isinstance(node.targets[0], ast.Name) and not node.targets[0].id.startswith("_"):
|
||||
key = node.targets[0].id
|
||||
|
||||
# Extract value
|
||||
try:
|
||||
value = ast.literal_eval(node.value)
|
||||
setting = ConfigSetting(
|
||||
key=key,
|
||||
value=value,
|
||||
value_type=self._infer_type(value),
|
||||
description=self._extract_python_docstring(node),
|
||||
)
|
||||
config_file.settings.append(setting)
|
||||
except (ValueError, TypeError):
|
||||
# Can't evaluate complex expressions
|
||||
pass
|
||||
# Extract value
|
||||
try:
|
||||
value = ast.literal_eval(node.value)
|
||||
setting = ConfigSetting(
|
||||
key=key,
|
||||
value=value,
|
||||
value_type=self._infer_type(value),
|
||||
description=self._extract_python_docstring(node),
|
||||
)
|
||||
config_file.settings.append(setting)
|
||||
except (ValueError, TypeError):
|
||||
# Can't evaluate complex expressions
|
||||
pass
|
||||
|
||||
except SyntaxError as e:
|
||||
config_file.parse_errors.append(f"Python parse error: {str(e)}")
|
||||
|
||||
@@ -59,7 +59,7 @@ class ConfigValidator:
|
||||
except FileNotFoundError as e:
|
||||
raise ValueError(f"Config file not found: {self.config_path}") from e
|
||||
except json.JSONDecodeError as e:
|
||||
raise ValueError(f"Invalid JSON in config file: {e}")
|
||||
raise ValueError(f"Invalid JSON in config file: {e}") from e
|
||||
|
||||
def _detect_format(self) -> bool:
|
||||
"""
|
||||
|
||||
@@ -1056,7 +1056,6 @@ class DocToSkillConverter:
|
||||
futures.append(future)
|
||||
|
||||
# Wait for some to complete before submitting more
|
||||
completed = 0
|
||||
for future in as_completed(futures[:batch_size]):
|
||||
# Check for exceptions
|
||||
try:
|
||||
@@ -1065,8 +1064,6 @@ class DocToSkillConverter:
|
||||
with self.lock:
|
||||
logger.warning(" ⚠️ Worker exception: %s", e)
|
||||
|
||||
completed += 1
|
||||
|
||||
with self.lock:
|
||||
self.pages_scraped += 1
|
||||
|
||||
|
||||
@@ -427,13 +427,12 @@ class SingletonDetector(BasePatternDetector):
|
||||
for method in class_sig.methods:
|
||||
# Python: __init__ or __new__
|
||||
# Java/C#: private constructor (detected by naming)
|
||||
if method.name in ["__new__", "__init__", "constructor"]:
|
||||
# Check if it has logic (not just pass)
|
||||
if method.docstring or len(method.parameters) > 1:
|
||||
evidence.append(f"Controlled initialization: {method.name}")
|
||||
confidence += 0.3
|
||||
has_init_control = True
|
||||
break
|
||||
# Check if it has logic (not just pass)
|
||||
if method.name in ["__new__", "__init__", "constructor"] and (method.docstring or len(method.parameters) > 1):
|
||||
evidence.append(f"Controlled initialization: {method.name}")
|
||||
confidence += 0.3
|
||||
has_init_control = True
|
||||
break
|
||||
|
||||
# Check for class-level instance storage
|
||||
# This would require checking class attributes (future enhancement)
|
||||
@@ -535,10 +534,9 @@ class FactoryDetector(BasePatternDetector):
|
||||
factory_method_names = ["create", "make", "build", "new", "get"]
|
||||
for method in class_sig.methods:
|
||||
method_lower = method.name.lower()
|
||||
if any(name in method_lower for name in factory_method_names):
|
||||
# Check if method returns something (has return type or is not void)
|
||||
if method.return_type or "create" in method_lower:
|
||||
return PatternInstance(
|
||||
# Check if method returns something (has return type or is not void)
|
||||
if any(name in method_lower for name in factory_method_names) and (method.return_type or "create" in method_lower):
|
||||
return PatternInstance(
|
||||
pattern_type=self.pattern_type,
|
||||
category=self.category,
|
||||
confidence=0.6,
|
||||
@@ -913,16 +911,15 @@ class DecoratorDetector(BasePatternDetector):
|
||||
|
||||
# Check __init__ for composition (takes object parameter)
|
||||
init_method = next((m for m in class_sig.methods if m.name == "__init__"), None)
|
||||
if init_method:
|
||||
# Check if takes object parameter (not just self)
|
||||
if len(init_method.parameters) > 1: # More than just 'self'
|
||||
param_names = [p.name for p in init_method.parameters if p.name != "self"]
|
||||
if any(
|
||||
name in ["wrapped", "component", "inner", "obj", "target"]
|
||||
for name in param_names
|
||||
):
|
||||
evidence.append(f"Takes wrapped object in constructor: {param_names}")
|
||||
confidence += 0.4
|
||||
# Check if takes object parameter (not just self)
|
||||
if init_method and len(init_method.parameters) > 1: # More than just 'self'
|
||||
param_names = [p.name for p in init_method.parameters if p.name != "self"]
|
||||
if any(
|
||||
name in ["wrapped", "component", "inner", "obj", "target"]
|
||||
for name in param_names
|
||||
):
|
||||
evidence.append(f"Takes wrapped object in constructor: {param_names}")
|
||||
confidence += 0.4
|
||||
|
||||
if confidence >= 0.5:
|
||||
return PatternInstance(
|
||||
|
||||
@@ -164,7 +164,7 @@ class PDFToSkillConverter:
|
||||
else:
|
||||
# Keyword-based categorization
|
||||
# Initialize categories
|
||||
for cat_key, keywords in self.categories.items():
|
||||
for cat_key, _ in self.categories.items():
|
||||
categorized[cat_key] = {"title": cat_key.replace("_", " ").title(), "pages": []}
|
||||
|
||||
# Categorize by keywords
|
||||
|
||||
@@ -361,36 +361,35 @@ class PythonTestAnalyzer:
|
||||
examples = []
|
||||
|
||||
for node in ast.walk(func_node):
|
||||
if isinstance(node, ast.Assign) and isinstance(node.value, ast.Call):
|
||||
# Check if meaningful instantiation
|
||||
if self._is_meaningful_instantiation(node):
|
||||
code = ast.unparse(node)
|
||||
# Check if meaningful instantiation
|
||||
if isinstance(node, ast.Assign) and isinstance(node.value, ast.Call) and self._is_meaningful_instantiation(node):
|
||||
code = ast.unparse(node)
|
||||
|
||||
# Skip trivial or mock-only
|
||||
if len(code) < 20 or "Mock()" in code:
|
||||
continue
|
||||
# Skip trivial or mock-only
|
||||
if len(code) < 20 or "Mock()" in code:
|
||||
continue
|
||||
|
||||
# Get class name
|
||||
class_name = self._get_class_name(node.value)
|
||||
# Get class name
|
||||
class_name = self._get_class_name(node.value)
|
||||
|
||||
example = TestExample(
|
||||
example_id=self._generate_id(code),
|
||||
test_name=func_node.name,
|
||||
category="instantiation",
|
||||
code=code,
|
||||
language="Python",
|
||||
description=f"Instantiate {class_name}: {description}",
|
||||
expected_behavior=self._extract_assertion_after(func_node, node),
|
||||
setup_code=setup_code,
|
||||
file_path=file_path,
|
||||
line_start=node.lineno,
|
||||
line_end=node.end_lineno or node.lineno,
|
||||
complexity_score=self._calculate_complexity(code),
|
||||
confidence=0.8,
|
||||
tags=tags,
|
||||
dependencies=imports,
|
||||
)
|
||||
examples.append(example)
|
||||
example = TestExample(
|
||||
example_id=self._generate_id(code),
|
||||
test_name=func_node.name,
|
||||
category="instantiation",
|
||||
code=code,
|
||||
language="Python",
|
||||
description=f"Instantiate {class_name}: {description}",
|
||||
expected_behavior=self._extract_assertion_after(func_node, node),
|
||||
setup_code=setup_code,
|
||||
file_path=file_path,
|
||||
line_start=node.lineno,
|
||||
line_end=node.end_lineno or node.lineno,
|
||||
complexity_score=self._calculate_complexity(code),
|
||||
confidence=0.8,
|
||||
tags=tags,
|
||||
dependencies=imports,
|
||||
)
|
||||
examples.append(example)
|
||||
|
||||
return examples
|
||||
|
||||
@@ -408,39 +407,37 @@ class PythonTestAnalyzer:
|
||||
|
||||
statements = func_node.body
|
||||
for i, stmt in enumerate(statements):
|
||||
# Look for method calls
|
||||
if isinstance(stmt, ast.Expr) and isinstance(stmt.value, ast.Call):
|
||||
# Check if next statement is an assertion
|
||||
if i + 1 < len(statements):
|
||||
next_stmt = statements[i + 1]
|
||||
if self._is_assertion(next_stmt):
|
||||
method_call = ast.unparse(stmt)
|
||||
assertion = ast.unparse(next_stmt)
|
||||
# Look for method calls and check if next statement is an assertion
|
||||
if isinstance(stmt, ast.Expr) and isinstance(stmt.value, ast.Call) and i + 1 < len(statements):
|
||||
next_stmt = statements[i + 1]
|
||||
if self._is_assertion(next_stmt):
|
||||
method_call = ast.unparse(stmt)
|
||||
assertion = ast.unparse(next_stmt)
|
||||
|
||||
code = f"{method_call}\n{assertion}"
|
||||
code = f"{method_call}\n{assertion}"
|
||||
|
||||
# Skip trivial assertions
|
||||
if any(trivial in assertion for trivial in self.trivial_patterns):
|
||||
continue
|
||||
# Skip trivial assertions
|
||||
if any(trivial in assertion for trivial in self.trivial_patterns):
|
||||
continue
|
||||
|
||||
example = TestExample(
|
||||
example_id=self._generate_id(code),
|
||||
test_name=func_node.name,
|
||||
category="method_call",
|
||||
code=code,
|
||||
language="Python",
|
||||
description=description,
|
||||
expected_behavior=assertion,
|
||||
setup_code=setup_code,
|
||||
file_path=file_path,
|
||||
line_start=stmt.lineno,
|
||||
line_end=next_stmt.end_lineno or next_stmt.lineno,
|
||||
complexity_score=self._calculate_complexity(code),
|
||||
confidence=0.85,
|
||||
tags=tags,
|
||||
dependencies=imports,
|
||||
)
|
||||
examples.append(example)
|
||||
example = TestExample(
|
||||
example_id=self._generate_id(code),
|
||||
test_name=func_node.name,
|
||||
category="method_call",
|
||||
code=code,
|
||||
language="Python",
|
||||
description=description,
|
||||
expected_behavior=assertion,
|
||||
setup_code=setup_code,
|
||||
file_path=file_path,
|
||||
line_start=stmt.lineno,
|
||||
line_end=next_stmt.end_lineno or next_stmt.lineno,
|
||||
complexity_score=self._calculate_complexity(code),
|
||||
confidence=0.85,
|
||||
tags=tags,
|
||||
dependencies=imports,
|
||||
)
|
||||
examples.append(example)
|
||||
|
||||
return examples
|
||||
|
||||
@@ -457,31 +454,30 @@ class PythonTestAnalyzer:
|
||||
examples = []
|
||||
|
||||
for node in ast.walk(func_node):
|
||||
if isinstance(node, ast.Assign) and isinstance(node.value, ast.Dict):
|
||||
# Must have 2+ keys and be meaningful
|
||||
if len(node.value.keys) >= 2:
|
||||
code = ast.unparse(node)
|
||||
# Must have 2+ keys and be meaningful
|
||||
if isinstance(node, ast.Assign) and isinstance(node.value, ast.Dict) and len(node.value.keys) >= 2:
|
||||
code = ast.unparse(node)
|
||||
|
||||
# Check if looks like configuration
|
||||
if self._is_config_dict(node.value):
|
||||
example = TestExample(
|
||||
example_id=self._generate_id(code),
|
||||
test_name=func_node.name,
|
||||
category="config",
|
||||
code=code,
|
||||
language="Python",
|
||||
description=f"Configuration example: {description}",
|
||||
expected_behavior=self._extract_assertion_after(func_node, node),
|
||||
setup_code=setup_code,
|
||||
file_path=file_path,
|
||||
line_start=node.lineno,
|
||||
line_end=node.end_lineno or node.lineno,
|
||||
complexity_score=self._calculate_complexity(code),
|
||||
confidence=0.75,
|
||||
tags=tags,
|
||||
dependencies=imports,
|
||||
)
|
||||
examples.append(example)
|
||||
# Check if looks like configuration
|
||||
if self._is_config_dict(node.value):
|
||||
example = TestExample(
|
||||
example_id=self._generate_id(code),
|
||||
test_name=func_node.name,
|
||||
category="config",
|
||||
code=code,
|
||||
language="Python",
|
||||
description=f"Configuration example: {description}",
|
||||
expected_behavior=self._extract_assertion_after(func_node, node),
|
||||
setup_code=setup_code,
|
||||
file_path=file_path,
|
||||
line_start=node.lineno,
|
||||
line_end=node.end_lineno or node.lineno,
|
||||
complexity_score=self._calculate_complexity(code),
|
||||
confidence=0.75,
|
||||
tags=tags,
|
||||
dependencies=imports,
|
||||
)
|
||||
examples.append(example)
|
||||
|
||||
return examples
|
||||
|
||||
|
||||
@@ -1067,11 +1067,10 @@ This skill combines knowledge from multiple sources:
|
||||
languages = c3_data["architecture"].get("languages", {})
|
||||
|
||||
# If no languages from C3.7, try to get from GitHub data
|
||||
if not languages:
|
||||
# github_data already available from method scope
|
||||
if github_data.get("languages"):
|
||||
# GitHub data has languages as list, convert to dict with count 1
|
||||
languages = dict.fromkeys(github_data["languages"], 1)
|
||||
# github_data already available from method scope
|
||||
if not languages and github_data.get("languages"):
|
||||
# GitHub data has languages as list, convert to dict with count 1
|
||||
languages = dict.fromkeys(github_data["languages"], 1)
|
||||
|
||||
if languages:
|
||||
f.write("**Languages Detected**:\n")
|
||||
|
||||
@@ -415,9 +415,8 @@ class TestInstallAgentCLI:
|
||||
|
||||
def test_cli_requires_agent_flag(self):
|
||||
"""Test that CLI fails without --agent flag."""
|
||||
with pytest.raises(SystemExit) as exc_info:
|
||||
with patch("sys.argv", ["install_agent.py", str(self.skill_dir)]):
|
||||
main()
|
||||
with pytest.raises(SystemExit) as exc_info, patch("sys.argv", ["install_agent.py", str(self.skill_dir)]):
|
||||
main()
|
||||
|
||||
# Missing required argument exits with code 2
|
||||
assert exc_info.value.code == 2
|
||||
|
||||
@@ -275,46 +275,45 @@ class TestIssue219Problem3CustomAPIEndpoints(unittest.TestCase):
|
||||
except ImportError:
|
||||
self.skipTest("anthropic package not installed")
|
||||
|
||||
with patch.dict(os.environ, {"ANTHROPIC_API_KEY": "test-key"}):
|
||||
with patch("skill_seekers.cli.enhance_skill.anthropic.Anthropic") as mock_anthropic:
|
||||
enhancer = SkillEnhancer(self.skill_dir)
|
||||
with patch.dict(os.environ, {"ANTHROPIC_API_KEY": "test-key"}), patch("skill_seekers.cli.enhance_skill.anthropic.Anthropic") as mock_anthropic:
|
||||
enhancer = SkillEnhancer(self.skill_dir)
|
||||
|
||||
# Mock response with ThinkingBlock (newer SDK)
|
||||
# ThinkingBlock has no .text attribute
|
||||
mock_thinking_block = SimpleNamespace(type="thinking")
|
||||
# Mock response with ThinkingBlock (newer SDK)
|
||||
# ThinkingBlock has no .text attribute
|
||||
mock_thinking_block = SimpleNamespace(type="thinking")
|
||||
|
||||
# TextBlock has .text attribute
|
||||
mock_text_block = SimpleNamespace(text="# Enhanced SKILL.md\n\nContent here")
|
||||
# TextBlock has .text attribute
|
||||
mock_text_block = SimpleNamespace(text="# Enhanced SKILL.md\n\nContent here")
|
||||
|
||||
mock_message = Mock()
|
||||
mock_message.content = [mock_thinking_block, mock_text_block]
|
||||
mock_message = Mock()
|
||||
mock_message.content = [mock_thinking_block, mock_text_block]
|
||||
|
||||
mock_client = mock_anthropic.return_value
|
||||
mock_client.messages.create.return_value = mock_message
|
||||
mock_client = mock_anthropic.return_value
|
||||
mock_client.messages.create.return_value = mock_message
|
||||
|
||||
# Read references (with proper metadata structure)
|
||||
references = {
|
||||
"index.md": {
|
||||
"content": "# Index\nTest content",
|
||||
"source": "documentation",
|
||||
"confidence": "high",
|
||||
"path": "index.md",
|
||||
"truncated": False,
|
||||
"size": 23,
|
||||
"repo_id": None,
|
||||
}
|
||||
# Read references (with proper metadata structure)
|
||||
references = {
|
||||
"index.md": {
|
||||
"content": "# Index\nTest content",
|
||||
"source": "documentation",
|
||||
"confidence": "high",
|
||||
"path": "index.md",
|
||||
"truncated": False,
|
||||
"size": 23,
|
||||
"repo_id": None,
|
||||
}
|
||||
}
|
||||
|
||||
# Call enhance_skill_md (should handle ThinkingBlock gracefully)
|
||||
result = enhancer.enhance_skill_md(references, current_skill_md="# Old")
|
||||
# Call enhance_skill_md (should handle ThinkingBlock gracefully)
|
||||
result = enhancer.enhance_skill_md(references, current_skill_md="# Old")
|
||||
|
||||
# VERIFY: Should find text from TextBlock, ignore ThinkingBlock
|
||||
self.assertIsNotNone(result, "Should return enhanced content")
|
||||
self.assertEqual(
|
||||
result,
|
||||
"# Enhanced SKILL.md\n\nContent here",
|
||||
"Should extract text from TextBlock",
|
||||
)
|
||||
# VERIFY: Should find text from TextBlock, ignore ThinkingBlock
|
||||
self.assertIsNotNone(result, "Should return enhanced content")
|
||||
self.assertEqual(
|
||||
result,
|
||||
"# Enhanced SKILL.md\n\nContent here",
|
||||
"Should extract text from TextBlock",
|
||||
)
|
||||
|
||||
|
||||
class TestIssue219IntegrationAll(unittest.TestCase):
|
||||
|
||||
@@ -30,9 +30,8 @@ def test_timeout_with_retry():
|
||||
"""Test timeout scenario with retry logic"""
|
||||
downloader = LlmsTxtDownloader("https://example.com/llms.txt", max_retries=2)
|
||||
|
||||
with patch("requests.get", side_effect=requests.Timeout("Connection timeout")) as mock_get:
|
||||
with patch("time.sleep") as mock_sleep: # Mock sleep to speed up test
|
||||
content = downloader.download()
|
||||
with patch("requests.get", side_effect=requests.Timeout("Connection timeout")) as mock_get, patch("time.sleep") as mock_sleep: # Mock sleep to speed up test
|
||||
content = downloader.download()
|
||||
|
||||
assert content is None
|
||||
assert mock_get.call_count == 2 # Should retry once (2 total attempts)
|
||||
@@ -85,9 +84,8 @@ def test_exponential_backoff():
|
||||
"""Test that exponential backoff delays are correct"""
|
||||
downloader = LlmsTxtDownloader("https://example.com/llms.txt", max_retries=3)
|
||||
|
||||
with patch("requests.get", side_effect=requests.Timeout("Connection timeout")):
|
||||
with patch("time.sleep") as mock_sleep:
|
||||
content = downloader.download()
|
||||
with patch("requests.get", side_effect=requests.Timeout("Connection timeout")), patch("time.sleep") as mock_sleep:
|
||||
content = downloader.download()
|
||||
|
||||
assert content is None
|
||||
# Should sleep with delays: 1s, 2s (2^0, 2^1)
|
||||
|
||||
@@ -72,11 +72,9 @@ class TestSkipLlmsTxtSyncBehavior(unittest.TestCase):
|
||||
os.chdir(tmpdir)
|
||||
converter = DocToSkillConverter(config, dry_run=False)
|
||||
|
||||
with patch.object(converter, "_try_llms_txt", return_value=False) as mock_try:
|
||||
with patch.object(converter, "scrape_page"):
|
||||
with patch.object(converter, "save_summary"):
|
||||
converter.scrape_all()
|
||||
mock_try.assert_called_once()
|
||||
with patch.object(converter, "_try_llms_txt", return_value=False) as mock_try, patch.object(converter, "scrape_page"), patch.object(converter, "save_summary"):
|
||||
converter.scrape_all()
|
||||
mock_try.assert_called_once()
|
||||
finally:
|
||||
os.chdir(original_cwd)
|
||||
|
||||
@@ -95,11 +93,9 @@ class TestSkipLlmsTxtSyncBehavior(unittest.TestCase):
|
||||
os.chdir(tmpdir)
|
||||
converter = DocToSkillConverter(config, dry_run=False)
|
||||
|
||||
with patch.object(converter, "_try_llms_txt") as mock_try:
|
||||
with patch.object(converter, "scrape_page"):
|
||||
with patch.object(converter, "save_summary"):
|
||||
converter.scrape_all()
|
||||
mock_try.assert_not_called()
|
||||
with patch.object(converter, "_try_llms_txt") as mock_try, patch.object(converter, "scrape_page"), patch.object(converter, "save_summary"):
|
||||
converter.scrape_all()
|
||||
mock_try.assert_not_called()
|
||||
finally:
|
||||
os.chdir(original_cwd)
|
||||
|
||||
@@ -118,10 +114,9 @@ class TestSkipLlmsTxtSyncBehavior(unittest.TestCase):
|
||||
os.chdir(tmpdir)
|
||||
converter = DocToSkillConverter(config, dry_run=True)
|
||||
|
||||
with patch.object(converter, "_try_llms_txt") as mock_try:
|
||||
with patch.object(converter, "save_summary"):
|
||||
converter.scrape_all()
|
||||
mock_try.assert_not_called()
|
||||
with patch.object(converter, "_try_llms_txt") as mock_try, patch.object(converter, "save_summary"):
|
||||
converter.scrape_all()
|
||||
mock_try.assert_not_called()
|
||||
finally:
|
||||
os.chdir(original_cwd)
|
||||
|
||||
@@ -145,11 +140,9 @@ class TestSkipLlmsTxtAsyncBehavior(unittest.TestCase):
|
||||
os.chdir(tmpdir)
|
||||
converter = DocToSkillConverter(config, dry_run=False)
|
||||
|
||||
with patch.object(converter, "_try_llms_txt", return_value=False) as mock_try:
|
||||
with patch.object(converter, "scrape_page_async", return_value=None):
|
||||
with patch.object(converter, "save_summary"):
|
||||
converter.scrape_all()
|
||||
mock_try.assert_called_once()
|
||||
with patch.object(converter, "_try_llms_txt", return_value=False) as mock_try, patch.object(converter, "scrape_page_async", return_value=None), patch.object(converter, "save_summary"):
|
||||
converter.scrape_all()
|
||||
mock_try.assert_called_once()
|
||||
finally:
|
||||
os.chdir(original_cwd)
|
||||
|
||||
@@ -169,11 +162,9 @@ class TestSkipLlmsTxtAsyncBehavior(unittest.TestCase):
|
||||
os.chdir(tmpdir)
|
||||
converter = DocToSkillConverter(config, dry_run=False)
|
||||
|
||||
with patch.object(converter, "_try_llms_txt") as mock_try:
|
||||
with patch.object(converter, "scrape_page_async", return_value=None):
|
||||
with patch.object(converter, "save_summary"):
|
||||
converter.scrape_all()
|
||||
mock_try.assert_not_called()
|
||||
with patch.object(converter, "_try_llms_txt") as mock_try, patch.object(converter, "scrape_page_async", return_value=None), patch.object(converter, "save_summary"):
|
||||
converter.scrape_all()
|
||||
mock_try.assert_not_called()
|
||||
finally:
|
||||
os.chdir(original_cwd)
|
||||
|
||||
@@ -301,11 +292,10 @@ class TestSkipLlmsTxtEdgeCases(unittest.TestCase):
|
||||
scrape_called.append(url)
|
||||
return None
|
||||
|
||||
with patch.object(converter, "scrape_page", side_effect=mock_scrape):
|
||||
with patch.object(converter, "save_summary"):
|
||||
converter.scrape_all()
|
||||
# Should have attempted to scrape the base URL
|
||||
self.assertTrue(len(scrape_called) > 0)
|
||||
with patch.object(converter, "scrape_page", side_effect=mock_scrape), patch.object(converter, "save_summary"):
|
||||
converter.scrape_all()
|
||||
# Should have attempted to scrape the base URL
|
||||
self.assertTrue(len(scrape_called) > 0)
|
||||
finally:
|
||||
os.chdir(original_cwd)
|
||||
|
||||
|
||||
Reference in New Issue
Block a user