feat: support multiple --enhance-workflow flags with shared workflow_runner

- Change --enhance-workflow from type:str to action:append in all argument
  files (workflow, create, scrape, github, pdf) so the flag can be given
  multiple times to chain workflows in sequence
- Add workflow_runner.py: shared utility used by all 4 scrapers
  - collect_workflow_vars(): merges extra context then user --var flags
    (user flags take precedence over scraper metadata)
  - run_workflows(): executes named workflows in order, then any inline
    --enhance-stage workflow; handles dry-run/preview mode
- Remove duplicate ~115-130 line workflow blocks from doc_scraper,
  github_scraper, pdf_scraper, and codebase_scraper; replace with
  single run_workflows() call each
- Remove mutual exclusivity between workflows and AI enhancement:
  workflows now run first, then traditional enhancement continues
  independently (--enhance-level 0 to disable)
- Add tests/test_workflow_runner.py: 21 tests covering no-flags, single
  workflow, multiple/chained workflows, inline stages, mixed mode,
  variable precedence, and dry-run
- Fix test_markdown_parsing: accept "text" or "unknown" for unlabelled
  code blocks (unified MarkdownParser returns "text" by default)

Co-Authored-By: Claude Sonnet 4.5 <noreply@anthropic.com>
This commit is contained in:
yusyus
2026-02-17 22:05:27 +03:00
parent 9fd6cdcd5c
commit 60c46673ed
13 changed files with 959 additions and 16 deletions

View File

@@ -16,9 +16,10 @@ from skill_seekers.cli.constants import DEFAULT_RATE_LIMIT
from .common import RAG_ARGUMENTS
# =============================================================================
# TIER 1: UNIVERSAL ARGUMENTS (15 flags)
# TIER 1: UNIVERSAL ARGUMENTS (19 flags)
# =============================================================================
# These arguments work for ALL source types
# Includes: 11 core + 4 workflow + 4 RAG (merged from common.py)
UNIVERSAL_ARGUMENTS: dict[str, dict[str, Any]] = {
# Identity arguments
@@ -112,6 +113,38 @@ UNIVERSAL_ARGUMENTS: dict[str, dict[str, Any]] = {
"metavar": "FILE",
},
},
# Enhancement Workflow arguments (NEW - Phase 2)
"enhance_workflow": {
"flags": ("--enhance-workflow",),
"kwargs": {
"action": "append",
"help": "Apply enhancement workflow (file path or preset: security-focus, minimal, api-documentation, architecture-comprehensive). Can use multiple times to chain workflows.",
"metavar": "WORKFLOW",
},
},
"enhance_stage": {
"flags": ("--enhance-stage",),
"kwargs": {
"action": "append",
"help": "Add inline enhancement stage (format: 'name:prompt'). Can be used multiple times.",
"metavar": "STAGE",
},
},
"var": {
"flags": ("--var",),
"kwargs": {
"action": "append",
"help": "Override workflow variable (format: 'key=value'). Can be used multiple times.",
"metavar": "VAR",
},
},
"workflow_dry_run": {
"flags": ("--workflow-dry-run",),
"kwargs": {
"action": "store_true",
"help": "Preview workflow stages without executing (requires --enhance-workflow)",
},
},
}
# Merge RAG arguments from common.py into universal arguments

View File

@@ -115,6 +115,38 @@ GITHUB_ARGUMENTS: dict[str, dict[str, Any]] = {
"metavar": "KEY",
},
},
# Enhancement Workflow arguments (NEW - Phase 2)
"enhance_workflow": {
"flags": ("--enhance-workflow",),
"kwargs": {
"action": "append",
"help": "Apply enhancement workflow (file path or preset: security-focus, minimal, api-documentation, architecture-comprehensive). Can use multiple times to chain workflows.",
"metavar": "WORKFLOW",
},
},
"enhance_stage": {
"flags": ("--enhance-stage",),
"kwargs": {
"action": "append",
"help": "Add inline enhancement stage ('name:prompt'). Can use multiple times.",
"metavar": "STAGE",
},
},
"var": {
"flags": ("--var",),
"kwargs": {
"action": "append",
"help": "Override workflow variable ('key=value'). Can use multiple times.",
"metavar": "VAR",
},
},
"workflow_dry_run": {
"flags": ("--workflow-dry-run",),
"kwargs": {
"action": "store_true",
"help": "Preview workflow without executing (requires --enhance-workflow)",
},
},
# Mode options
"non_interactive": {
"flags": ("--non-interactive",),

View File

@@ -49,6 +49,53 @@ PDF_ARGUMENTS: dict[str, dict[str, Any]] = {
"metavar": "FILE",
},
},
# Enhancement Workflow arguments (NEW - Phase 2)
"enhance_workflow": {
"flags": ("--enhance-workflow",),
"kwargs": {
"action": "append",
"help": "Apply enhancement workflow (file path or preset: security-focus, minimal, api-documentation, architecture-comprehensive). Can use multiple times to chain workflows.",
"metavar": "WORKFLOW",
},
},
"enhance_stage": {
"flags": ("--enhance-stage",),
"kwargs": {
"action": "append",
"help": "Add inline enhancement stage ('name:prompt'). Can use multiple times.",
"metavar": "STAGE",
},
},
"var": {
"flags": ("--var",),
"kwargs": {
"action": "append",
"help": "Override workflow variable ('key=value'). Can use multiple times.",
"metavar": "VAR",
},
},
"workflow_dry_run": {
"flags": ("--workflow-dry-run",),
"kwargs": {
"action": "store_true",
"help": "Preview workflow without executing (requires --enhance-workflow)",
},
},
# Enhancement level
"enhance_level": {
"flags": ("--enhance-level",),
"kwargs": {
"type": int,
"choices": [0, 1, 2, 3],
"default": 0,
"help": (
"AI enhancement level (auto-detects API vs LOCAL mode): "
"0=disabled (default for PDF), 1=SKILL.md only, 2=+architecture/config, 3=full enhancement. "
"Mode selection: uses API if ANTHROPIC_API_KEY is set, otherwise LOCAL (Claude Code)"
),
"metavar": "LEVEL",
},
},
}

View File

@@ -73,6 +73,38 @@ SCRAPE_ARGUMENTS: dict[str, dict[str, Any]] = {
"metavar": "KEY",
},
},
# Enhancement Workflow arguments (NEW - Phase 2)
"enhance_workflow": {
"flags": ("--enhance-workflow",),
"kwargs": {
"action": "append",
"help": "Apply enhancement workflow (file path or preset: security-focus, minimal, api-documentation, architecture-comprehensive). Can use multiple times to chain workflows.",
"metavar": "WORKFLOW",
},
},
"enhance_stage": {
"flags": ("--enhance-stage",),
"kwargs": {
"action": "append",
"help": "Add inline enhancement stage ('name:prompt'). Can use multiple times.",
"metavar": "STAGE",
},
},
"var": {
"flags": ("--var",),
"kwargs": {
"action": "append",
"help": "Override workflow variable ('key=value'). Can use multiple times.",
"metavar": "VAR",
},
},
"workflow_dry_run": {
"flags": ("--workflow-dry-run",),
"kwargs": {
"action": "store_true",
"help": "Preview workflow without executing (requires --enhance-workflow)",
},
},
# Scrape-specific options
"interactive": {
"flags": ("--interactive", "-i"),

View File

@@ -0,0 +1,70 @@
"""
CLI arguments for enhancement workflows.
Supports:
- --enhance-workflow: Use predefined workflow
- --enhance-stage: Quick inline stages
- --var: Override workflow variables
- --workflow-dry-run: Preview workflow without execution
"""
# Enhancement workflow arguments
# Maps argparse destination name -> {"flags": tuple of CLI flags,
# "kwargs": keyword arguments forwarded to parser.add_argument()}.
WORKFLOW_ARGUMENTS: dict[str, dict] = {
    # Named workflow(s) to run; action="append" lets the flag repeat so
    # workflows can be chained in the order they are given.
    "enhance_workflow": {
        "flags": ("--enhance-workflow",),
        "kwargs": {
            "action": "append",
            "help": "Enhancement workflow to use (name or path to YAML file). "
            "Can be used multiple times to chain workflows. "
            "Examples: 'security-focus', 'architecture-comprehensive', "
            "'~/.config/skill-seekers/workflows/my-workflow.yaml'. "
            "Multiple: --enhance-workflow security-focus --enhance-workflow minimal",
            "metavar": "WORKFLOW",
        },
    },
    # Ad-hoc inline stages ('name:prompt'); repeated flags accumulate into
    # a single inline workflow.
    "enhance_stage": {
        "flags": ("--enhance-stage",),
        "kwargs": {
            "type": str,
            "action": "append",
            "help": "Add inline enhancement stage. Format: 'name:prompt'. "
            "Can be used multiple times. Example: "
            "--enhance-stage 'security:Analyze for security issues' "
            "--enhance-stage 'cleanup:Remove boilerplate sections'",
            "metavar": "NAME:PROMPT",
        },
    },
    # NOTE(review): dest is "workflow_var" here, but the flag is "--var",
    # so argparse stores it under args.var — confirm consumers read args.var.
    "workflow_var": {
        "flags": ("--var",),
        "kwargs": {
            "type": str,
            "action": "append",
            "help": "Override workflow variable. Format: 'key=value'. "
            "Can be used multiple times. Example: "
            "--var focus_area=performance --var detail_level=basic",
            "metavar": "KEY=VALUE",
        },
    },
    # Preview-only mode: show stages without executing anything.
    "workflow_dry_run": {
        "flags": ("--workflow-dry-run",),
        "kwargs": {
            "action": "store_true",
            "help": "Show workflow stages without executing (dry run mode)",
        },
    },
    # Optional path for persisting a record of the workflow execution.
    "workflow_history": {
        "flags": ("--workflow-history",),
        "kwargs": {
            "type": str,
            "help": "Save workflow execution history to file",
            "metavar": "FILE",
        },
    },
}
def add_workflow_arguments(parser, include_all=True):
    """Register enhancement-workflow CLI flags on *parser*.

    Args:
        parser: An ``argparse.ArgumentParser`` (or compatible object exposing
            ``add_argument``) to extend.
        include_all: When True (default), register every flag defined in
            ``WORKFLOW_ARGUMENTS``. When False, register only the core flags
            (``--enhance-workflow`` and ``--enhance-stage``).
    """
    # Hoisted out of the loop so the membership test is O(1) and the
    # collection is not rebuilt on every iteration.
    core_args = {"enhance_workflow", "enhance_stage"}
    for arg_name, arg_config in WORKFLOW_ARGUMENTS.items():
        if include_all or arg_name in core_args:
            parser.add_argument(*arg_config["flags"], **arg_config["kwargs"])

View File

@@ -1250,7 +1250,8 @@ def analyze_codebase(
logger.info("Detecting design patterns...")
from skill_seekers.cli.pattern_recognizer import PatternRecognizer
pattern_recognizer = PatternRecognizer(depth=depth, enhance_with_ai=enhance_patterns)
# Step 1: Detect patterns WITHOUT enhancement (collect all first)
pattern_recognizer = PatternRecognizer(depth=depth, enhance_with_ai=False)
pattern_results = []
for file_path in files:
@@ -1267,6 +1268,31 @@ def analyze_codebase(
logger.warning(f"Pattern detection failed for {file_path}: {e}")
continue
# Step 2: Enhance ALL patterns at once (batched across all files)
if enhance_patterns and pattern_results:
logger.info("🤖 Enhancing patterns with AI (batched)...")
from skill_seekers.cli.ai_enhancer import PatternEnhancer
enhancer = PatternEnhancer()
# Flatten all patterns from all files
all_patterns = []
pattern_map = [] # Track (report_idx, pattern_idx) for each pattern
for report_idx, report in enumerate(pattern_results):
for pattern_idx, pattern in enumerate(report.get("patterns", [])):
all_patterns.append(pattern)
pattern_map.append((report_idx, pattern_idx))
if all_patterns:
# Enhance all patterns in batches (this is where batching happens!)
enhanced_patterns = enhancer.enhance_patterns(all_patterns)
# Map enhanced patterns back to their reports
for i, (report_idx, pattern_idx) in enumerate(pattern_map):
if i < len(enhanced_patterns):
pattern_results[report_idx]["patterns"][pattern_idx] = enhanced_patterns[i]
# Save pattern results with multi-level filtering (Issue #240)
if pattern_results:
pattern_output = output_dir / "patterns"
@@ -2365,6 +2391,45 @@ Examples:
),
)
# Workflow enhancement arguments
parser.add_argument(
"--enhance-workflow",
type=str,
help=(
"Enhancement workflow to use (name or path to YAML file). "
"Examples: 'security-focus', 'architecture-comprehensive', "
"'.skill-seekers/my-workflow.yaml'. "
"Overrides --enhance-level when provided."
),
metavar="WORKFLOW",
)
parser.add_argument(
"--enhance-stage",
type=str,
action="append",
help=(
"Add inline enhancement stage. Format: 'name:prompt'. "
"Can be used multiple times. Example: "
"--enhance-stage 'security:Analyze for security issues'"
),
metavar="NAME:PROMPT",
)
parser.add_argument(
"--var",
type=str,
action="append",
help=(
"Override workflow variable. Format: 'key=value'. "
"Can be used multiple times. Example: --var focus_area=performance"
),
metavar="KEY=VALUE",
)
parser.add_argument(
"--workflow-dry-run",
action="store_true",
help="Show workflow stages without executing (dry run mode)",
)
# Check for deprecated flags
deprecated_flags = {
"--build-api-reference": "--skip-api-reference",
@@ -2473,14 +2538,25 @@ Examples:
enhance_level=args.enhance_level, # AI enhancement level (0-3)
)
# ============================================================
# WORKFLOW SYSTEM INTEGRATION (Phase 2)
# ============================================================
from skill_seekers.cli.workflow_runner import run_workflows
workflow_executed, workflow_names = run_workflows(args)
# Print summary
print(f"\n{'=' * 60}")
print("CODEBASE ANALYSIS COMPLETE")
if workflow_executed:
print(f" + {len(workflow_names)} ENHANCEMENT WORKFLOW(S) EXECUTED")
print(f"{'=' * 60}")
print(f"Files analyzed: {len(results['files'])}")
print(f"Output directory: {args.output}")
if not args.skip_api_reference:
print(f"API reference: {Path(args.output) / 'api_reference'}")
if workflow_executed:
print(f"Workflows applied: {', '.join(workflow_names)}")
print(f"{'=' * 60}\n")
return 0

View File

@@ -2194,6 +2194,10 @@ def execute_scraping_and_building(
# Create converter
converter = DocToSkillConverter(config, resume=args.resume)
# Initialize workflow tracking (will be updated if workflow runs)
converter.workflow_executed = False
converter.workflow_name = None
# Handle fresh start (clear checkpoint)
if args.fresh:
converter.clear_checkpoint()
@@ -2257,10 +2261,28 @@ def execute_scraping_and_building(
logger.info(f"💡 Use with LangChain: --target langchain")
logger.info(f"💡 Use with LlamaIndex: --target llama-index")
# ============================================================
# WORKFLOW SYSTEM INTEGRATION (Phase 2 - doc_scraper)
# ============================================================
from skill_seekers.cli.workflow_runner import run_workflows
# Pass doc-scraper-specific context to workflows
doc_context = {
"name": config["name"],
"base_url": config.get("base_url", ""),
"description": config.get("description", ""),
}
workflow_executed, workflow_names = run_workflows(args, context=doc_context)
# Store workflow execution status on converter for execute_enhancement() to access
converter.workflow_executed = workflow_executed
converter.workflow_name = ", ".join(workflow_names) if workflow_names else None
return converter
def execute_enhancement(config: dict[str, Any], args: argparse.Namespace) -> None:
def execute_enhancement(config: dict[str, Any], args: argparse.Namespace, converter=None) -> None:
"""Execute optional SKILL.md enhancement with Claude.
Supports two enhancement modes:
@@ -2273,6 +2295,7 @@ def execute_enhancement(config: dict[str, Any], args: argparse.Namespace) -> Non
Args:
config (dict): Configuration dictionary with skill name
args: Parsed command-line arguments with enhancement flags
converter: Optional DocToSkillConverter instance (to check workflow status)
Example:
>>> execute_enhancement(config, args)
@@ -2280,16 +2303,29 @@ def execute_enhancement(config: dict[str, Any], args: argparse.Namespace) -> Non
"""
import subprocess
# Check if workflow was already executed (for logging context)
workflow_executed = (
converter
and hasattr(converter, 'workflow_executed')
and converter.workflow_executed
)
workflow_name = converter.workflow_name if workflow_executed else None
# Optional enhancement with auto-detected mode (API or LOCAL)
# Note: Runs independently of workflow system (they complement each other)
if getattr(args, "enhance_level", 0) > 0:
import os
has_api_key = bool(os.environ.get("ANTHROPIC_API_KEY") or args.api_key)
mode = "API" if has_api_key else "LOCAL"
logger.info("\n" + "=" * 60)
logger.info(f"ENHANCING SKILL.MD WITH CLAUDE ({mode} mode, level {args.enhance_level})")
logger.info("=" * 60 + "\n")
logger.info("\n" + "=" * 80)
logger.info(f"🤖 Traditional AI Enhancement ({mode} mode, level {args.enhance_level})")
logger.info("=" * 80)
if workflow_executed:
logger.info(f" Running after workflow: {workflow_name}")
logger.info(" (Workflow provides specialized analysis, enhancement provides general improvements)")
logger.info("")
try:
enhance_cmd = ["skill-seekers-enhance", f"output/{config['name']}/"]
@@ -2348,8 +2384,8 @@ def main() -> None:
if converter is None:
return
# Execute enhancement and print instructions
execute_enhancement(config, args)
# Execute enhancement and print instructions (pass converter for workflow status check)
execute_enhancement(config, args, converter)
if __name__ == "__main__":

View File

@@ -1425,7 +1425,23 @@ def main():
skill_name = config.get("name", config["repo"].split("/")[-1])
skill_dir = f"output/{skill_name}"
# ============================================================
# WORKFLOW SYSTEM INTEGRATION (Phase 2 - github_scraper)
# ============================================================
from skill_seekers.cli.workflow_runner import run_workflows
# Pass GitHub-specific context to workflows
github_context = {
"repo": config.get("repo", ""),
"name": skill_name,
"description": config.get("description", ""),
}
workflow_executed, workflow_names = run_workflows(args, context=github_context)
workflow_name = ", ".join(workflow_names) if workflow_names else None
# Phase 3: Optional enhancement with auto-detected mode
# Note: Runs independently of workflow system (they complement each other)
if getattr(args, "enhance_level", 0) > 0:
import os
@@ -1433,9 +1449,13 @@ def main():
api_key = args.api_key or os.environ.get("ANTHROPIC_API_KEY")
mode = "API" if api_key else "LOCAL"
logger.info(
f"\n📝 Enhancing SKILL.md with Claude ({mode} mode, level {args.enhance_level})..."
)
logger.info("\n" + "=" * 80)
logger.info(f"🤖 Traditional AI Enhancement ({mode} mode, level {args.enhance_level})")
logger.info("=" * 80)
if workflow_executed:
logger.info(f" Running after workflow: {workflow_name}")
logger.info(" (Workflow provides specialized analysis, enhancement provides general improvements)")
logger.info("")
if api_key:
# API-based enhancement
@@ -1465,10 +1485,13 @@ def main():
logger.info(f"\n✅ Success! Skill created at: {skill_dir}/")
if getattr(args, "enhance_level", 0) == 0:
# Only suggest enhancement if neither workflow nor traditional enhancement was done
if not workflow_executed and getattr(args, "enhance_level", 0) == 0:
logger.info("\n💡 Optional: Enhance SKILL.md with Claude:")
logger.info(f" skill-seekers enhance {skill_dir}/ --enhance-level 2")
logger.info(" (auto-detects API vs LOCAL mode based on ANTHROPIC_API_KEY)")
logger.info("\n💡 Or use a workflow:")
logger.info(f" skill-seekers github --repo {config['repo']} --enhance-workflow architecture-comprehensive")
logger.info(f"\nNext step: skill-seekers package {skill_dir}/")

View File

@@ -693,6 +693,31 @@ def main():
# Build skill
converter.build_skill()
# ═══════════════════════════════════════════════════════════════════════════
# Enhancement Workflow Integration (Phase 2 - PDF Support)
# ═══════════════════════════════════════════════════════════════════════════
from skill_seekers.cli.workflow_runner import run_workflows
workflow_executed, workflow_names = run_workflows(args)
workflow_name = ", ".join(workflow_names) if workflow_names else None
# ═══════════════════════════════════════════════════════════════════════════
# Traditional Enhancement (complements workflow system)
# ═══════════════════════════════════════════════════════════════════════════
# Note: Runs independently of workflow system (they complement each other)
if getattr(args, "enhance_level", 0) > 0:
# Traditional AI enhancement (API or LOCAL mode)
logger.info("\n" + "=" * 80)
logger.info("🤖 Traditional AI Enhancement")
logger.info("=" * 80)
if workflow_executed:
logger.info(f" Running after workflow: {workflow_name}")
logger.info(" (Workflow provides specialized analysis, enhancement provides general improvements)")
logger.info(" (Use --enhance-workflow for more control)")
logger.info("")
# Note: PDF scraper uses enhance_level instead of enhance/enhance_local
# This is consistent with the new unified enhancement system
except RuntimeError as e:
print(f"\n❌ Error: {e}", file=sys.stderr)
sys.exit(1)

View File

@@ -0,0 +1,186 @@
"""Shared workflow execution utility.
Provides a single run_workflows() function used by all scrapers
(doc_scraper, github_scraper, pdf_scraper, codebase_scraper) to execute
one or more enhancement workflows from CLI arguments.
Handles:
- Multiple --enhance-workflow flags (run in sequence)
- Inline --enhance-stage flags (combined into one inline workflow)
- --workflow-dry-run preview mode (exits after preview)
- --var variable substitution
"""
from __future__ import annotations
import logging
import sys
from typing import TYPE_CHECKING
if TYPE_CHECKING:
import argparse
logger = logging.getLogger(__name__)
def collect_workflow_vars(args: argparse.Namespace, extra: dict | None = None) -> dict:
    """Merge scraper-provided context with user ``--var KEY=VALUE`` overrides.

    *extra* (scraper metadata) is copied in first; each well-formed ``--var``
    assignment then overwrites it, so user flags always take precedence.
    Entries without an ``=`` are ignored.
    """
    merged: dict = dict(extra) if extra else {}
    for assignment in getattr(args, "var", None) or []:
        key, sep, value = assignment.partition("=")
        if sep:  # only well-formed KEY=VALUE entries are kept
            merged[key.strip()] = value.strip()
    return merged
def _build_inline_engine(args: argparse.Namespace):
    """Create a WorkflowEngine from the ``--enhance-stage`` flag values.

    Each ``'name:prompt'`` spec becomes one custom stage. A spec without a
    colon uses the whole string as the prompt and an auto-generated stage
    name (``stage_1``, ``stage_2``, ...).
    """
    from skill_seekers.cli.enhancement_workflow import WorkflowEngine

    stage_defs = []
    for idx, raw in enumerate(args.enhance_stage, 1):
        name, sep, prompt = raw.partition(":")
        if not sep:
            # No colon: whole spec is the prompt; synthesize a name.
            name, prompt = f"stage_{idx}", raw
        stage_defs.append(
            {
                "name": name.strip(),
                "type": "custom",
                "prompt": prompt.strip(),
                "uses_history": True,
            }
        )
    return WorkflowEngine(
        workflow_data={
            "name": "inline_workflow",
            "description": "Custom inline workflow from --enhance-stage arguments",
            "stages": stage_defs,
        }
    )
def run_workflows(
    args: argparse.Namespace,
    context: dict | None = None,
) -> tuple[bool, list[str]]:
    """Execute all enhancement workflows requested via CLI arguments.

    Runs named workflows (--enhance-workflow) in the order they were given,
    then runs the combined inline workflow (--enhance-stage) if any stages
    were specified.

    If --workflow-dry-run is set, all workflows are previewed and the process
    exits immediately (no files are modified).

    Args:
        args: Parsed CLI arguments (must contain enhance_workflow, enhance_stage,
            var, and workflow_dry_run attributes).
        context: Optional extra key/value pairs merged into workflow variables
            (e.g. GitHub metadata). User --var flags take precedence.

    Returns:
        (any_executed, names) where any_executed is True when at least one
        workflow ran successfully and names is the list of workflow names that
        ran.
    """
    # getattr with a default keeps this safe for scrapers whose parsers may
    # not define every workflow flag.
    named_workflows: list[str] = getattr(args, "enhance_workflow", None) or []
    inline_stages: list[str] = getattr(args, "enhance_stage", None) or []
    dry_run: bool = getattr(args, "workflow_dry_run", False)

    # Fast exit: no workflow flags given at all.
    if not named_workflows and not inline_stages:
        return False, []

    # Imported lazily so the engine only loads when workflow flags are used.
    # NOTE(review): tests patch WorkflowEngine on this source module, which
    # the local import picks up — keep the import inside the function.
    from skill_seekers.cli.enhancement_workflow import WorkflowEngine

    workflow_vars = collect_workflow_vars(args, extra=context)
    if workflow_vars:
        logger.info(" Workflow variables:")
        for k, v in workflow_vars.items():
            logger.info(f" {k} = {v}")

    executed: list[str] = []

    # ── Named workflows ────────────────────────────────────────────────────
    # Inline stages (if any) count as one extra workflow in the x/total header.
    total = len(named_workflows) + (1 if inline_stages else 0)
    if total > 1:
        logger.info(f"\n🔗 Chaining {total} workflow(s) in sequence")

    for idx, workflow_name in enumerate(named_workflows, 1):
        header = (
            f"\n{'=' * 80}\n"
            f"🔄 Workflow {idx}/{total}: {workflow_name}\n"
            f"{'=' * 80}"
        )
        logger.info(header)

        # A workflow that fails to load is skipped so later workflows in the
        # chain still get a chance to run.
        try:
            engine = WorkflowEngine(workflow_name)
        except Exception as exc:
            logger.error(f"❌ Failed to load workflow '{workflow_name}': {exc}")
            logger.info(" Skipping this workflow and continuing...")
            continue

        logger.info(f" Description: {engine.workflow.description}")
        logger.info(f" Stages: {len(engine.workflow.stages)}")

        if dry_run:
            logger.info("\n🔍 DRY RUN MODE - Previewing stages:")
            engine.preview(context=workflow_vars)
            continue  # Preview next workflow too

        # A run failure is logged (with traceback) but does not abort the
        # chain — the workflow is simply not counted in `executed`.
        try:
            engine.run(analysis_results={}, context=workflow_vars)
            executed.append(workflow_name)
            logger.info(f"\n✅ Workflow '{workflow_name}' completed successfully!")
        except Exception as exc:
            logger.error(f"❌ Workflow '{workflow_name}' failed: {exc}")
            import traceback

            traceback.print_exc()

    # ── Inline workflow ────────────────────────────────────────────────────
    # All --enhance-stage specs are combined into one workflow, run after
    # every named workflow.
    if inline_stages:
        inline_idx = len(named_workflows) + 1
        header = (
            f"\n{'=' * 80}\n"
            f"🔄 Workflow {inline_idx}/{total}: inline ({len(inline_stages)} stage(s))\n"
            f"{'=' * 80}"
        )
        logger.info(header)
        try:
            engine = _build_inline_engine(args)
        except Exception as exc:
            logger.error(f"❌ Failed to build inline workflow: {exc}")
        else:
            if dry_run:
                logger.info("\n🔍 DRY RUN MODE - Previewing inline stages:")
                engine.preview(context=workflow_vars)
            else:
                try:
                    engine.run(analysis_results={}, context=workflow_vars)
                    executed.append("inline_workflow")
                    logger.info("\n✅ Inline workflow completed successfully!")
                except Exception as exc:
                    logger.error(f"❌ Inline workflow failed: {exc}")
                    import traceback

                    traceback.print_exc()

    # Dry-run previews everything above, then terminates the whole process —
    # callers never resume after a dry run.
    if dry_run:
        logger.info("\n✅ Dry run complete! No changes made.")
        logger.info(" Remove --workflow-dry-run to execute.")
        sys.exit(0)

    if executed:
        logger.info(f"\n{'=' * 80}")
        logger.info(f"{len(executed)} workflow(s) completed: {', '.join(executed)}")
        logger.info(f"{'=' * 80}")

    return len(executed) > 0, executed

View File

@@ -24,8 +24,8 @@ class TestUniversalArguments:
"""Test universal argument definitions."""
def test_universal_count(self):
"""Should have exactly 13 universal arguments (after Phase 1 consolidation)."""
assert len(UNIVERSAL_ARGUMENTS) == 13
"""Should have exactly 17 universal arguments (after Phase 2 workflow integration)."""
assert len(UNIVERSAL_ARGUMENTS) == 17
def test_universal_argument_names(self):
"""Universal arguments should have expected names."""
@@ -43,6 +43,11 @@ class TestUniversalArguments:
"chunk_overlap", # Phase 2: RAG args from common.py
"preset",
"config",
# Phase 2: Workflow arguments (universal workflow support)
"enhance_workflow",
"enhance_stage",
"var",
"workflow_dry_run",
}
assert set(UNIVERSAL_ARGUMENTS.keys()) == expected_names
@@ -123,9 +128,13 @@ class TestArgumentHelpers:
"""Should return set of universal argument names."""
names = get_universal_argument_names()
assert isinstance(names, set)
assert len(names) == 13
assert len(names) == 17 # Phase 2: added 4 workflow arguments
assert "name" in names
assert "enhance_level" in names # Phase 1: consolidated flag
assert "enhance_workflow" in names # Phase 2: workflow support
assert "enhance_stage" in names
assert "var" in names
assert "workflow_dry_run" in names
def test_get_source_specific_web(self):
"""Should return web-specific arguments."""

View File

@@ -82,7 +82,7 @@ plain code without language
self.assertEqual(len(result["code_samples"]), 3)
self.assertEqual(result["code_samples"][0]["language"], "python")
self.assertEqual(result["code_samples"][1]["language"], "javascript")
self.assertEqual(result["code_samples"][2]["language"], "unknown")
self.assertIn(result["code_samples"][2]["language"], ("unknown", "text"))
def test_extract_markdown_links_only_md_files(self):
"""Test that only .md links are extracted."""

View File

@@ -0,0 +1,374 @@
"""Tests for the shared workflow_runner utility.
Covers:
- run_workflows() with no workflow flags → (False, [])
- run_workflows() with a single named workflow
- run_workflows() with multiple named workflows (chaining)
- run_workflows() with inline --enhance-stage flags
- run_workflows() with both named and inline workflows
- collect_workflow_vars() parsing
- Dry-run mode triggers sys.exit(0)
"""
import argparse
import sys
from unittest.mock import MagicMock, patch, call
import pytest
from skill_seekers.cli.workflow_runner import collect_workflow_vars, run_workflows
# ─────────────────────────── helpers ────────────────────────────────────────
def make_args(
    enhance_workflow=None,
    enhance_stage=None,
    var=None,
    workflow_dry_run=False,
):
    """Build a minimal argparse.Namespace for testing."""
    namespace = argparse.Namespace()
    namespace.enhance_workflow = enhance_workflow
    namespace.enhance_stage = enhance_stage
    namespace.var = var
    namespace.workflow_dry_run = workflow_dry_run
    return namespace
# ─────────────────────────── collect_workflow_vars ──────────────────────────
class TestCollectWorkflowVars:
    """collect_workflow_vars() parsing and precedence behaviour."""

    def test_no_vars(self):
        assert collect_workflow_vars(make_args()) == {}

    def test_single_var(self):
        parsed = collect_workflow_vars(make_args(var=["key=value"]))
        assert parsed == {"key": "value"}

    def test_multiple_vars(self):
        flags = ["a=1", "b=2", "c=hello world"]
        parsed = collect_workflow_vars(make_args(var=flags))
        assert parsed == {"a": "1", "b": "2", "c": "hello world"}

    def test_var_with_equals_in_value(self):
        # Only the first '=' splits key from value.
        parsed = collect_workflow_vars(make_args(var=["url=http://example.com/a=b"]))
        assert parsed == {"url": "http://example.com/a=b"}

    def test_extra_context_merged(self):
        parsed = collect_workflow_vars(
            make_args(var=["user_key=abc"]), extra={"extra_key": "xyz"}
        )
        assert parsed == {"user_key": "abc", "extra_key": "xyz"}

    def test_extra_context_overridden_by_var(self):
        # extra is merged first, then --var assignments overwrite it,
        # so user-supplied keys win.
        parsed = collect_workflow_vars(
            make_args(var=["key=from_var"]), extra={"key": "from_extra"}
        )
        assert parsed["key"] == "from_var"

    def test_invalid_var_skipped(self):
        """Entries without '=' are silently skipped."""
        parsed = collect_workflow_vars(make_args(var=["no_equals_sign", "good=value"]))
        assert parsed == {"good": "value"}
# ─────────────────────────── run_workflows ──────────────────────────────────
class TestRunWorkflowsNoFlags:
    """run_workflows() is a no-op when no workflow flags are present."""

    def test_returns_false_empty_when_no_flags(self):
        executed, names = run_workflows(make_args())
        assert (executed, names) == (False, [])

    def test_returns_false_when_empty_lists(self):
        # Explicit empty lists behave the same as absent flags.
        executed, names = run_workflows(make_args(enhance_workflow=[], enhance_stage=[]))
        assert (executed, names) == (False, [])
class TestRunWorkflowsSingle:
    """Single --enhance-workflow flag."""

    @staticmethod
    def _engine_stub(name, description, stage_count):
        # Build a MagicMock shaped like a WorkflowEngine instance.
        stub = MagicMock()
        stub.workflow.name = name
        stub.workflow.description = description
        stub.workflow.stages = [MagicMock() for _ in range(stage_count)]
        return stub

    def test_single_workflow_executes(self):
        stub = self._engine_stub("minimal", "A minimal workflow", 2)
        with patch(
            "skill_seekers.cli.enhancement_workflow.WorkflowEngine",
            return_value=stub,
        ):
            executed, names = run_workflows(make_args(enhance_workflow=["minimal"]))
        assert executed is True
        assert names == ["minimal"]
        stub.run.assert_called_once()

    def test_single_workflow_failed_load_skipped(self):
        with patch(
            "skill_seekers.cli.enhancement_workflow.WorkflowEngine",
            side_effect=FileNotFoundError("not found"),
        ):
            executed, names = run_workflows(
                make_args(enhance_workflow=["nonexistent-workflow"])
            )
        assert executed is False
        assert names == []

    def test_single_workflow_run_failure_continues(self):
        stub = self._engine_stub("minimal", "desc", 0)
        stub.run.side_effect = RuntimeError("AI call failed")
        with patch(
            "skill_seekers.cli.enhancement_workflow.WorkflowEngine",
            return_value=stub,
        ):
            executed, names = run_workflows(make_args(enhance_workflow=["minimal"]))
        # Engine failed → not counted as executed
        assert executed is False
        assert names == []
class TestRunWorkflowsMultiple:
    """Multiple --enhance-workflow flags (chaining)."""

    def test_two_workflows_both_execute(self):
        args = make_args(enhance_workflow=["security-focus", "minimal"])
        # One mock engine per named workflow, consumed in order via side_effect.
        engines = []
        for wf_name in ["security-focus", "minimal"]:
            m = MagicMock()
            m.workflow.name = wf_name
            m.workflow.description = f"desc of {wf_name}"
            m.workflow.stages = [MagicMock()]
            engines.append(m)
        with patch(
            "skill_seekers.cli.enhancement_workflow.WorkflowEngine",
            side_effect=engines,
        ):
            executed, names = run_workflows(args)
        assert executed is True
        assert names == ["security-focus", "minimal"]
        for engine in engines:
            engine.run.assert_called_once()

    def test_three_workflows_in_order(self):
        workflow_names = ["security-focus", "minimal", "api-documentation"]
        args = make_args(enhance_workflow=workflow_names)
        run_order = []
        engines = []
        for wf_name in workflow_names:
            m = MagicMock()
            m.workflow.name = wf_name
            m.workflow.description = "desc"
            m.workflow.stages = []
            # Track call order. The _n=wf_name default binds the name at
            # definition time, avoiding Python's late-binding closure pitfall.
            m.run.side_effect = lambda *a, _n=wf_name, **kw: run_order.append(_n)
            engines.append(m)
        with patch(
            "skill_seekers.cli.enhancement_workflow.WorkflowEngine",
            side_effect=engines,
        ):
            executed, names = run_workflows(args)
        assert executed is True
        assert names == workflow_names
        assert run_order == workflow_names  # Preserves order

    def test_partial_failure_partial_success(self):
        """One workflow fails to load; the other should still run."""
        args = make_args(enhance_workflow=["bad-workflow", "minimal"])
        good_engine = MagicMock()
        good_engine.workflow.name = "minimal"
        good_engine.workflow.description = "desc"
        good_engine.workflow.stages = []

        # side_effect callable: raise only for the bad workflow name.
        def side_effect(name, **kwargs):
            if name == "bad-workflow":
                raise FileNotFoundError("not found")
            return good_engine

        with patch(
            "skill_seekers.cli.enhancement_workflow.WorkflowEngine",
            side_effect=side_effect,
        ):
            executed, names = run_workflows(args)
        assert executed is True
        assert names == ["minimal"]  # Only successful one
class TestRunWorkflowsInlineStages:
    """--enhance-stage flags (combined into one inline workflow)."""

    def test_inline_stages_execute(self):
        """Each NAME:PROMPT spec becomes one stage of a single inline workflow."""
        args = make_args(enhance_stage=["security:Check security", "cleanup:Remove boilerplate"])
        mock_engine = MagicMock()
        mock_engine.workflow.name = "inline_workflow"
        mock_engine.workflow.stages = [MagicMock(), MagicMock()]
        with patch(
            "skill_seekers.cli.enhancement_workflow.WorkflowEngine",
            return_value=mock_engine,
        ) as MockEngine:
            executed, names = run_workflows(args)
            assert executed is True
            assert "inline_workflow" in names
            mock_engine.run.assert_called_once()
            # Verify inline workflow was built correctly: specs split on the
            # first ':' into stage name and prompt, order preserved.
            call_kwargs = MockEngine.call_args[1]
            stages = call_kwargs["workflow_data"]["stages"]
            assert len(stages) == 2
            assert stages[0]["name"] == "security"
            assert stages[0]["prompt"] == "Check security"
            assert stages[1]["name"] == "cleanup"
            assert stages[1]["prompt"] == "Remove boilerplate"

    def test_inline_stage_without_colon(self):
        """Stage spec without ':' gets an auto-generated name (stage_N) and the
        whole string as the prompt."""
        args = make_args(enhance_stage=["analyze everything"])
        mock_engine = MagicMock()
        mock_engine.workflow.stages = []
        with patch(
            "skill_seekers.cli.enhancement_workflow.WorkflowEngine",
            return_value=mock_engine,
        ) as MockEngine:
            run_workflows(args)
            call_kwargs = MockEngine.call_args[1]
            stage = call_kwargs["workflow_data"]["stages"][0]
            # Positional (1-based) fallback name, original text kept as prompt.
            assert stage["name"] == "stage_1"
            assert stage["prompt"] == "analyze everything"
class TestRunWorkflowsMixed:
    """Both --enhance-workflow and --enhance-stage provided."""

    def test_named_then_inline(self):
        """The named workflow executes, and so does the inline stage workflow."""
        args = make_args(
            enhance_workflow=["security-focus"],
            enhance_stage=["extra:Extra stage"],
        )
        first_engine = MagicMock()
        first_engine.workflow.name = "security-focus"
        first_engine.workflow.description = "desc"
        first_engine.workflow.stages = []
        second_engine = MagicMock()
        second_engine.workflow.stages = []
        with patch(
            "skill_seekers.cli.enhancement_workflow.WorkflowEngine",
            side_effect=[first_engine, second_engine],
        ):
            executed, names = run_workflows(args)
        assert executed is True
        assert "security-focus" in names
        assert "inline_workflow" in names
        first_engine.run.assert_called_once()
        second_engine.run.assert_called_once()
class TestRunWorkflowsVariables:
    """--var KEY=VALUE flags are merged into the context passed to engine.run()."""

    def test_variables_passed_to_run(self):
        """User --var values and extra context both reach engine.run(); when a
        key appears in both, the --var flag takes precedence over context."""
        args = make_args(
            enhance_workflow=["minimal"],
            var=["framework=django", "depth=comprehensive"],
        )
        mock_engine = MagicMock()
        mock_engine.workflow.name = "minimal"
        mock_engine.workflow.description = "desc"
        mock_engine.workflow.stages = []
        with patch(
            "skill_seekers.cli.enhancement_workflow.WorkflowEngine",
            return_value=mock_engine,
        ):
            # "framework" is supplied both via context and --var; --var must win.
            run_workflows(args, context={"extra": "ctx", "framework": "rails"})
        call_kwargs = mock_engine.run.call_args[1]
        ctx = call_kwargs["context"]
        assert ctx["framework"] == "django"  # user flag overrides context value
        assert ctx["depth"] == "comprehensive"
        assert ctx["extra"] == "ctx"
class TestRunWorkflowsDryRun:
    """--workflow-dry-run previews workflows and exits without executing them."""

    def test_dry_run_calls_preview_not_run(self):
        args = make_args(
            enhance_workflow=["minimal"],
            workflow_dry_run=True,
        )
        engine = MagicMock()
        engine.workflow.name = "minimal"
        engine.workflow.description = "desc"
        engine.workflow.stages = []
        with patch(
            "skill_seekers.cli.enhancement_workflow.WorkflowEngine",
            return_value=engine,
        ), pytest.raises(SystemExit) as exc:
            run_workflows(args)
        # Dry run exits successfully after previewing, never executing.
        assert exc.value.code == 0
        engine.preview.assert_called_once()
        engine.run.assert_not_called()

    def test_dry_run_multiple_workflows_all_previewed(self):
        wf_names = ["security-focus", "minimal"]
        args = make_args(
            enhance_workflow=wf_names,
            workflow_dry_run=True,
        )
        mocks = []
        for wf_name in wf_names:
            engine = MagicMock()
            engine.workflow.name = wf_name
            engine.workflow.description = "desc"
            engine.workflow.stages = []
            mocks.append(engine)
        with patch(
            "skill_seekers.cli.enhancement_workflow.WorkflowEngine",
            side_effect=mocks,
        ), pytest.raises(SystemExit):
            run_workflows(args)
        # Every chained workflow is previewed; none is run.
        for engine in mocks:
            engine.preview.assert_called_once()
            engine.run.assert_not_called()