fix: resolve all CI ruff linting errors (F401, F821, F841, ARG001, ARG005, SIM117, SIM105, C408)

- Remove unused imports (F401): os/Path/json/threading in tests; os in estimate_pages;
  Path in install_skill; pytest in test_unified_scraper_orchestration
- Fix F821 undefined 'args' in unified_scraper._scrape_local() by storing
  self._cli_args = args in run() and reading via getattr in _scrape_local()
- Fix ARG001/ARG005 unused lambda/function arguments with _ prefix or # noqa:ARG001
  where parameter names must be preserved for keyword-argument compatibility
- Fix C408 unnecessary dict() calls → dict literals in test_enhance_command
- Fix F841 unused variable 'stub' in test_enhance_command
- Fix SIM117 nested with statements → single with in test_unified_scraper_orchestration
- Fix SIM105 try/except/pass → contextlib.suppress in test_unified_scraper_orchestration
- Rewrite TestScrapeLocal to test fixed behavior (not the NameError bug)

All 2267 tests pass, 11 skipped.

Co-Authored-By: Claude Sonnet 4.6 <noreply@anthropic.com>
This commit is contained in:
yusyus
2026-02-22 22:30:52 +03:00
parent f7117c35a9
commit efc722eeed
8 changed files with 89 additions and 94 deletions

View File

@@ -5,7 +5,6 @@ Quickly estimates how many pages a config will scrape without downloading conten
""" """
import json import json
import os
import sys import sys
import time import time
from pathlib import Path from pathlib import Path

View File

@@ -29,7 +29,6 @@ Examples:
import argparse import argparse
import asyncio import asyncio
import sys import sys
from pathlib import Path
# Import the MCP tool function (with lazy loading) # Import the MCP tool function (with lazy loading)
try: try:

View File

@@ -562,7 +562,8 @@ class UnifiedScraper:
# Note: Signal flow analysis is automatic for Godot projects (C3.10) # Note: Signal flow analysis is automatic for Godot projects (C3.10)
# AI enhancement settings (CLI --enhance-level overrides per-source config) # AI enhancement settings (CLI --enhance-level overrides per-source config)
cli_enhance_level = getattr(args, "enhance_level", None) if args is not None else None cli_args = getattr(self, "_cli_args", None)
cli_enhance_level = getattr(cli_args, "enhance_level", None) if cli_args is not None else None
enhance_level = cli_enhance_level if cli_enhance_level is not None else source.get("enhance_level", 0) enhance_level = cli_enhance_level if cli_enhance_level is not None else source.get("enhance_level", 0)
# Run codebase analysis # Run codebase analysis
@@ -953,6 +954,9 @@ class UnifiedScraper:
When provided, enhancement workflows (--enhance-workflow, When provided, enhancement workflows (--enhance-workflow,
--enhance-stage) are executed after the skill is built. --enhance-stage) are executed after the skill is built.
""" """
# Store CLI args so _scrape_local() can access --enhance-level override
self._cli_args = args
logger.info("\n" + "🚀 " * 20) logger.info("\n" + "🚀 " * 20)
logger.info(f"Unified Scraper: {self.config['name']}") logger.info(f"Unified Scraper: {self.config['name']}")
logger.info("🚀 " * 20 + "\n") logger.info("🚀 " * 20 + "\n")

View File

@@ -2,7 +2,6 @@
import argparse import argparse
import sys import sys
import types
import pytest import pytest
@@ -13,19 +12,19 @@ import pytest
def _make_args(**kwargs): def _make_args(**kwargs):
"""Build a fake Namespace with sensible defaults.""" """Build a fake Namespace with sensible defaults."""
defaults = dict( defaults = {
skill_directory="output/react", "skill_directory": "output/react",
target=None, "target": None,
api_key=None, "api_key": None,
dry_run=False, "dry_run": False,
agent=None, "agent": None,
agent_cmd=None, "agent_cmd": None,
interactive_enhancement=False, "interactive_enhancement": False,
background=False, "background": False,
daemon=False, "daemon": False,
no_force=False, "no_force": False,
timeout=600, "timeout": 600,
) }
defaults.update(kwargs) defaults.update(kwargs)
return argparse.Namespace(**defaults) return argparse.Namespace(**defaults)
@@ -169,8 +168,6 @@ class TestPickModeAutoDetect:
class TestPickModeConfigAgent: class TestPickModeConfigAgent:
def _patch_config(self, monkeypatch, agent: str | None): def _patch_config(self, monkeypatch, agent: str | None):
"""Patch get_config_manager to return a stub with get_default_agent().""" """Patch get_config_manager to return a stub with get_default_agent()."""
stub = types.SimpleNamespace(get_default_agent=lambda: agent)
monkeypatch.setattr( monkeypatch.setattr(
"skill_seekers.cli.enhance_command._get_config_default_agent", "skill_seekers.cli.enhance_command._get_config_default_agent",
lambda: agent, lambda: agent,
@@ -312,7 +309,7 @@ class TestEnhanceCommandMain:
# Patch _run_api_mode to avoid real API call # Patch _run_api_mode to avoid real API call
monkeypatch.setattr( monkeypatch.setattr(
"skill_seekers.cli.enhance_command._run_api_mode", "skill_seekers.cli.enhance_command._run_api_mode",
lambda args, target: 0, lambda *_: 0,
) )
sys_argv_backup = sys.argv.copy() sys_argv_backup = sys.argv.copy()

View File

@@ -1,7 +1,3 @@
import json
import os
import threading
from pathlib import Path
from unittest.mock import MagicMock, patch from unittest.mock import MagicMock, patch
import pytest import pytest
@@ -485,7 +481,7 @@ class TestRunHeadless:
mock_result.stdout = "" mock_result.stdout = ""
mock_result.stderr = "" mock_result.stderr = ""
def _fake_run(prompt_file, timeout, include_permissions_flag, quiet=False): def _fake_run(prompt_file, timeout, include_permissions_flag, quiet=False): # noqa: ARG001
# Simulate agent updating SKILL.md with more content # Simulate agent updating SKILL.md with more content
import time import time
time.sleep(0.01) time.sleep(0.01)
@@ -582,7 +578,7 @@ class TestRunBackground:
# Delay the headless run to confirm we don't block # Delay the headless run to confirm we don't block
import time import time
def _slow_run(*args, **kwargs): def _slow_run(*_args, **_kwargs):
time.sleep(0.5) time.sleep(0.5)
return True return True

View File

@@ -10,9 +10,6 @@ Covers all 5 tools:
from __future__ import annotations from __future__ import annotations
from pathlib import Path
from unittest.mock import patch
import pytest import pytest
import yaml import yaml
@@ -83,7 +80,7 @@ def bundled_fixture(monkeypatch):
) )
monkeypatch.setattr( monkeypatch.setattr(
"skill_seekers.mcp.tools.workflow_tools._read_bundled", "skill_seekers.mcp.tools.workflow_tools._read_bundled",
lambda name: bundled.get(name), lambda _name: bundled.get(_name),
) )
@@ -96,7 +93,7 @@ class TestListWorkflowsTool:
def test_empty_returns_empty_list(self, user_dir, bundled_names_empty, monkeypatch): def test_empty_returns_empty_list(self, user_dir, bundled_names_empty, monkeypatch):
monkeypatch.setattr( monkeypatch.setattr(
"skill_seekers.mcp.tools.workflow_tools._read_bundled", "skill_seekers.mcp.tools.workflow_tools._read_bundled",
lambda name: None, lambda _name: None,
) )
from skill_seekers.mcp.tools.workflow_tools import list_workflows_tool from skill_seekers.mcp.tools.workflow_tools import list_workflows_tool
@@ -125,7 +122,7 @@ class TestListWorkflowsTool:
def test_returns_user_workflows(self, user_dir, bundled_names_empty, monkeypatch): def test_returns_user_workflows(self, user_dir, bundled_names_empty, monkeypatch):
monkeypatch.setattr( monkeypatch.setattr(
"skill_seekers.mcp.tools.workflow_tools._read_bundled", "skill_seekers.mcp.tools.workflow_tools._read_bundled",
lambda name: None, lambda _name: None,
) )
user_dir.mkdir(parents=True) user_dir.mkdir(parents=True)
(user_dir / "my-workflow.yaml").write_text(VALID_WORKFLOW_YAML, encoding="utf-8") (user_dir / "my-workflow.yaml").write_text(VALID_WORKFLOW_YAML, encoding="utf-8")
@@ -159,7 +156,7 @@ class TestListWorkflowsTool:
def test_ignores_args_parameter(self, user_dir, bundled_names_empty, monkeypatch): def test_ignores_args_parameter(self, user_dir, bundled_names_empty, monkeypatch):
monkeypatch.setattr( monkeypatch.setattr(
"skill_seekers.mcp.tools.workflow_tools._read_bundled", "skill_seekers.mcp.tools.workflow_tools._read_bundled",
lambda name: None, lambda _name: None,
) )
from skill_seekers.mcp.tools.workflow_tools import list_workflows_tool from skill_seekers.mcp.tools.workflow_tools import list_workflows_tool
@@ -177,7 +174,7 @@ class TestGetWorkflowTool:
def test_missing_name_returns_error(self, user_dir, bundled_names_empty, monkeypatch): def test_missing_name_returns_error(self, user_dir, bundled_names_empty, monkeypatch):
monkeypatch.setattr( monkeypatch.setattr(
"skill_seekers.mcp.tools.workflow_tools._read_bundled", "skill_seekers.mcp.tools.workflow_tools._read_bundled",
lambda name: None, lambda _name: None,
) )
from skill_seekers.mcp.tools.workflow_tools import get_workflow_tool from skill_seekers.mcp.tools.workflow_tools import get_workflow_tool
@@ -188,7 +185,7 @@ class TestGetWorkflowTool:
def test_empty_name_returns_error(self, user_dir, bundled_names_empty, monkeypatch): def test_empty_name_returns_error(self, user_dir, bundled_names_empty, monkeypatch):
monkeypatch.setattr( monkeypatch.setattr(
"skill_seekers.mcp.tools.workflow_tools._read_bundled", "skill_seekers.mcp.tools.workflow_tools._read_bundled",
lambda name: None, lambda _name: None,
) )
from skill_seekers.mcp.tools.workflow_tools import get_workflow_tool from skill_seekers.mcp.tools.workflow_tools import get_workflow_tool
@@ -213,7 +210,7 @@ class TestGetWorkflowTool:
def test_returns_user_workflow_content(self, user_dir, bundled_names_empty, monkeypatch): def test_returns_user_workflow_content(self, user_dir, bundled_names_empty, monkeypatch):
monkeypatch.setattr( monkeypatch.setattr(
"skill_seekers.mcp.tools.workflow_tools._read_bundled", "skill_seekers.mcp.tools.workflow_tools._read_bundled",
lambda name: None, lambda _name: None,
) )
user_dir.mkdir(parents=True) user_dir.mkdir(parents=True)
(user_dir / "my-wf.yaml").write_text(VALID_WORKFLOW_YAML, encoding="utf-8") (user_dir / "my-wf.yaml").write_text(VALID_WORKFLOW_YAML, encoding="utf-8")
@@ -237,7 +234,7 @@ class TestGetWorkflowTool:
def test_not_found_no_available_shows_none(self, user_dir, bundled_names_empty, monkeypatch): def test_not_found_no_available_shows_none(self, user_dir, bundled_names_empty, monkeypatch):
monkeypatch.setattr( monkeypatch.setattr(
"skill_seekers.mcp.tools.workflow_tools._read_bundled", "skill_seekers.mcp.tools.workflow_tools._read_bundled",
lambda name: None, lambda _name: None,
) )
from skill_seekers.mcp.tools.workflow_tools import get_workflow_tool from skill_seekers.mcp.tools.workflow_tools import get_workflow_tool
@@ -362,7 +359,7 @@ class TestUpdateWorkflowTool:
def test_updates_existing_user_workflow(self, user_dir, bundled_names_empty, monkeypatch): def test_updates_existing_user_workflow(self, user_dir, bundled_names_empty, monkeypatch):
monkeypatch.setattr( monkeypatch.setattr(
"skill_seekers.mcp.tools.workflow_tools._read_bundled", "skill_seekers.mcp.tools.workflow_tools._read_bundled",
lambda name: None, lambda _name: None,
) )
user_dir.mkdir(parents=True) user_dir.mkdir(parents=True)
(user_dir / "existing.yaml").write_text(VALID_WORKFLOW_YAML, encoding="utf-8") (user_dir / "existing.yaml").write_text(VALID_WORKFLOW_YAML, encoding="utf-8")
@@ -389,7 +386,7 @@ class TestUpdateWorkflowTool:
def test_success_message_contains_name(self, user_dir, bundled_names_empty, monkeypatch): def test_success_message_contains_name(self, user_dir, bundled_names_empty, monkeypatch):
monkeypatch.setattr( monkeypatch.setattr(
"skill_seekers.mcp.tools.workflow_tools._read_bundled", "skill_seekers.mcp.tools.workflow_tools._read_bundled",
lambda name: None, lambda _name: None,
) )
user_dir.mkdir(parents=True) user_dir.mkdir(parents=True)
(user_dir / "my-wf.yaml").write_text(VALID_WORKFLOW_YAML, encoding="utf-8") (user_dir / "my-wf.yaml").write_text(VALID_WORKFLOW_YAML, encoding="utf-8")
@@ -429,7 +426,7 @@ class TestDeleteWorkflowTool:
def test_not_found_user_workflow_returns_error(self, user_dir, bundled_names_empty, monkeypatch): def test_not_found_user_workflow_returns_error(self, user_dir, bundled_names_empty, monkeypatch):
monkeypatch.setattr( monkeypatch.setattr(
"skill_seekers.mcp.tools.workflow_tools._read_bundled", "skill_seekers.mcp.tools.workflow_tools._read_bundled",
lambda name: None, lambda _name: None,
) )
from skill_seekers.mcp.tools.workflow_tools import delete_workflow_tool from skill_seekers.mcp.tools.workflow_tools import delete_workflow_tool
@@ -440,7 +437,7 @@ class TestDeleteWorkflowTool:
def test_deletes_user_yaml_file(self, user_dir, bundled_names_empty, monkeypatch): def test_deletes_user_yaml_file(self, user_dir, bundled_names_empty, monkeypatch):
monkeypatch.setattr( monkeypatch.setattr(
"skill_seekers.mcp.tools.workflow_tools._read_bundled", "skill_seekers.mcp.tools.workflow_tools._read_bundled",
lambda name: None, lambda _name: None,
) )
user_dir.mkdir(parents=True) user_dir.mkdir(parents=True)
wf_file = user_dir / "to-delete.yaml" wf_file = user_dir / "to-delete.yaml"
@@ -455,7 +452,7 @@ class TestDeleteWorkflowTool:
def test_deletes_user_yml_extension(self, user_dir, bundled_names_empty, monkeypatch): def test_deletes_user_yml_extension(self, user_dir, bundled_names_empty, monkeypatch):
monkeypatch.setattr( monkeypatch.setattr(
"skill_seekers.mcp.tools.workflow_tools._read_bundled", "skill_seekers.mcp.tools.workflow_tools._read_bundled",
lambda name: None, lambda _name: None,
) )
user_dir.mkdir(parents=True) user_dir.mkdir(parents=True)
wf_file = user_dir / "to-delete.yml" wf_file = user_dir / "to-delete.yml"
@@ -470,7 +467,7 @@ class TestDeleteWorkflowTool:
def test_success_message_contains_path(self, user_dir, bundled_names_empty, monkeypatch): def test_success_message_contains_path(self, user_dir, bundled_names_empty, monkeypatch):
monkeypatch.setattr( monkeypatch.setattr(
"skill_seekers.mcp.tools.workflow_tools._read_bundled", "skill_seekers.mcp.tools.workflow_tools._read_bundled",
lambda name: None, lambda _name: None,
) )
user_dir.mkdir(parents=True) user_dir.mkdir(parents=True)
(user_dir / "bye.yaml").write_text(VALID_WORKFLOW_YAML, encoding="utf-8") (user_dir / "bye.yaml").write_text(VALID_WORKFLOW_YAML, encoding="utf-8")
@@ -491,7 +488,7 @@ class TestWorkflowRoundTrip:
"""Create → list → get → update → delete a workflow end-to-end.""" """Create → list → get → update → delete a workflow end-to-end."""
monkeypatch.setattr( monkeypatch.setattr(
"skill_seekers.mcp.tools.workflow_tools._read_bundled", "skill_seekers.mcp.tools.workflow_tools._read_bundled",
lambda name: None, lambda _name: None,
) )
from skill_seekers.mcp.tools.workflow_tools import ( from skill_seekers.mcp.tools.workflow_tools import (
create_workflow_tool, create_workflow_tool,

View File

@@ -663,7 +663,7 @@ class TestWorkflowJsonConfig:
"""JSON 'workflows' list is used even when args=None.""" """JSON 'workflows' list is used even when args=None."""
captured = {} captured = {}
def fake_run_workflows(args, context=None): def fake_run_workflows(args, context=None): # noqa: ARG001
captured["enhance_workflow"] = getattr(args, "enhance_workflow", None) captured["enhance_workflow"] = getattr(args, "enhance_workflow", None)
monkeypatch.setattr( monkeypatch.setattr(

View File

@@ -11,11 +11,8 @@ Covers:
""" """
import json import json
import os
from pathlib import Path from pathlib import Path
from unittest.mock import MagicMock, call, patch from unittest.mock import MagicMock, patch
import pytest
from skill_seekers.cli.unified_scraper import UnifiedScraper from skill_seekers.cli.unified_scraper import UnifiedScraper
@@ -85,10 +82,10 @@ class TestScrapeAllSourcesRouting:
calls = {"documentation": 0, "github": 0, "pdf": 0, "local": 0} calls = {"documentation": 0, "github": 0, "pdf": 0, "local": 0}
monkeypatch.setattr(scraper, "_scrape_documentation", lambda s: calls.__setitem__("documentation", calls["documentation"] + 1)) monkeypatch.setattr(scraper, "_scrape_documentation", lambda _s: calls.__setitem__("documentation", calls["documentation"] + 1))
monkeypatch.setattr(scraper, "_scrape_github", lambda s: calls.__setitem__("github", calls["github"] + 1)) monkeypatch.setattr(scraper, "_scrape_github", lambda _s: calls.__setitem__("github", calls["github"] + 1))
monkeypatch.setattr(scraper, "_scrape_pdf", lambda s: calls.__setitem__("pdf", calls["pdf"] + 1)) monkeypatch.setattr(scraper, "_scrape_pdf", lambda _s: calls.__setitem__("pdf", calls["pdf"] + 1))
monkeypatch.setattr(scraper, "_scrape_local", lambda s: calls.__setitem__("local", calls["local"] + 1)) monkeypatch.setattr(scraper, "_scrape_local", lambda _s: calls.__setitem__("local", calls["local"] + 1))
scraper.scrape_all_sources() scraper.scrape_all_sources()
return calls return calls
@@ -149,10 +146,10 @@ class TestScrapeAllSourcesRouting:
] ]
calls = {"documentation": 0, "github": 0} calls = {"documentation": 0, "github": 0}
def raise_on_doc(s): def raise_on_doc(_s):
raise RuntimeError("simulated doc failure") raise RuntimeError("simulated doc failure")
def count_github(s): def count_github(_s):
calls["github"] += 1 calls["github"] += 1
monkeypatch.setattr(scraper, "_scrape_documentation", raise_on_doc) monkeypatch.setattr(scraper, "_scrape_documentation", raise_on_doc)
@@ -217,7 +214,7 @@ class TestScrapeDocumentation:
with ( with (
patch("skill_seekers.cli.unified_scraper.subprocess.run") as mock_run, patch("skill_seekers.cli.unified_scraper.subprocess.run") as mock_run,
patch("skill_seekers.cli.unified_scraper.json.dump", side_effect=lambda obj, f, **kw: written_configs.append(obj)), patch("skill_seekers.cli.unified_scraper.json.dump", side_effect=lambda obj, _f, **_kw: written_configs.append(obj)),
): ):
mock_run.return_value = MagicMock(returncode=1, stdout="", stderr="") mock_run.return_value = MagicMock(returncode=1, stdout="", stderr="")
scraper._scrape_documentation(source) scraper._scrape_documentation(source)
@@ -237,7 +234,7 @@ class TestScrapeDocumentation:
with ( with (
patch("skill_seekers.cli.unified_scraper.subprocess.run") as mock_run, patch("skill_seekers.cli.unified_scraper.subprocess.run") as mock_run,
patch("skill_seekers.cli.unified_scraper.json.dump", side_effect=lambda obj, f, **kw: written_configs.append(obj)), patch("skill_seekers.cli.unified_scraper.json.dump", side_effect=lambda obj, _f, **_kw: written_configs.append(obj)),
): ):
mock_run.return_value = MagicMock(returncode=1, stdout="", stderr="") mock_run.return_value = MagicMock(returncode=1, stdout="", stderr="")
scraper._scrape_documentation(source) scraper._scrape_documentation(source)
@@ -275,12 +272,13 @@ class TestScrapeGithub:
mock_cls, mock_inst = self._mock_github_scraper(monkeypatch) mock_cls, mock_inst = self._mock_github_scraper(monkeypatch)
with patch("skill_seekers.cli.unified_scraper.json.dump"): (tmp_path / "output").mkdir(parents=True, exist_ok=True)
with patch("skill_seekers.cli.unified_scraper.json.dumps", return_value="{}"): with (
# Need output dir for the converter data file write patch("skill_seekers.cli.unified_scraper.json.dump"),
(tmp_path / "output").mkdir(parents=True, exist_ok=True) patch("skill_seekers.cli.unified_scraper.json.dumps", return_value="{}"),
with patch("builtins.open", MagicMock()): patch("builtins.open", MagicMock()),
scraper._scrape_github(source) ):
scraper._scrape_github(source)
mock_cls.assert_called_once() mock_cls.assert_called_once()
init_call_config = mock_cls.call_args[0][0] init_call_config = mock_cls.call_args[0][0]
@@ -427,38 +425,45 @@ class TestScrapePdf:
class TestScrapeLocal: class TestScrapeLocal:
""" """_scrape_local() delegates to analyze_codebase and populates scraped_data."""
_scrape_local() contains a known bug: it references `args` which is not in
scope (it belongs to run()). The except block logs the error then re-raises it
(line 650: `raise`), so the NameError propagates to the caller.
These tests document that behaviour.
"""
def test_args_name_error_propagates(self, tmp_path): def test_source_counter_incremented(self, tmp_path, monkeypatch):
""" """Counter is incremented when _scrape_local() is called."""
Without patching, calling _scrape_local() raises NameError on 'args'.
The except block logs and re-raises the exception.
"""
scraper = _make_scraper(tmp_path=tmp_path)
source = {"type": "local", "path": str(tmp_path)}
with pytest.raises(NameError, match="args"):
scraper._scrape_local(source)
def test_source_counter_incremented_before_failure(self, tmp_path):
"""
Counter increment happens BEFORE the try block that raises, so the
counter is incremented even when the NameError propagates.
"""
scraper = _make_scraper(tmp_path=tmp_path) scraper = _make_scraper(tmp_path=tmp_path)
source = {"type": "local", "path": str(tmp_path)} source = {"type": "local", "path": str(tmp_path)}
assert scraper._source_counters["local"] == 0 assert scraper._source_counters["local"] == 0
with pytest.raises(NameError): monkeypatch.setattr(
scraper._scrape_local(source) "skill_seekers.cli.codebase_scraper.analyze_codebase",
MagicMock(),
)
scraper._scrape_local(source)
assert scraper._source_counters["local"] == 1 assert scraper._source_counters["local"] == 1
def test_enhance_level_uses_cli_args_override(self, tmp_path, monkeypatch):
"""CLI --enhance-level overrides per-source enhance_level."""
import argparse
scraper = _make_scraper(tmp_path=tmp_path)
source = {"type": "local", "path": str(tmp_path), "enhance_level": 1}
scraper._cli_args = argparse.Namespace(enhance_level=3)
captured_kwargs = {}
def fake_analyze(**kwargs):
captured_kwargs.update(kwargs)
monkeypatch.setattr(
"skill_seekers.cli.codebase_scraper.analyze_codebase",
fake_analyze,
)
scraper._scrape_local(source)
assert captured_kwargs.get("enhance_level") == 3
# =========================================================================== # ===========================================================================
# 6. run() orchestration # 6. run() orchestration
@@ -541,9 +546,11 @@ class TestRunOrchestration:
captured = {} captured = {}
def fake_run_workflows(args, context=None): def fake_run_workflows(args, context=None): # noqa: ARG001
captured["workflows"] = getattr(args, "enhance_workflow", None) captured["workflows"] = getattr(args, "enhance_workflow", None)
import contextlib
import skill_seekers.cli.unified_scraper as us_mod import skill_seekers.cli.unified_scraper as us_mod
import skill_seekers.cli.workflow_runner as wr_mod import skill_seekers.cli.workflow_runner as wr_mod
@@ -556,18 +563,14 @@ class TestRunOrchestration:
scraper.run(args=None) scraper.run(args=None)
finally: finally:
if orig_us is None: if orig_us is None:
try: with contextlib.suppress(AttributeError):
delattr(us_mod, "run_workflows") delattr(us_mod, "run_workflows")
except AttributeError:
pass
else: else:
us_mod.run_workflows = orig_us us_mod.run_workflows = orig_us
if orig_wr is None: if orig_wr is None:
try: with contextlib.suppress(AttributeError):
delattr(wr_mod, "run_workflows") delattr(wr_mod, "run_workflows")
except AttributeError:
pass
else: else:
wr_mod.run_workflows = orig_wr wr_mod.run_workflows = orig_wr