fix: Fix 2 critical CLI issues blocking production (Kimi QA)
**Critical Issues Fixed:** Issue #1: CLI Commands Were BROKEN ⚠️ CRITICAL - Problem: 4 CLI commands existed but failed at runtime with ImportError - Root Cause: Modules had example_usage() instead of main() functions - Impact: Users couldn't use quality, stream, update, multilang features **Fixed Files:** - src/skill_seekers/cli/quality_metrics.py - Renamed example_usage() → main() - Added argparse with --report, --output flags - Proper exit codes and error handling - src/skill_seekers/cli/streaming_ingest.py - Renamed example_usage() → main() - Added argparse with --chunk-size, --batch-size, --checkpoint flags - Supports both file and directory inputs - src/skill_seekers/cli/incremental_updater.py - Renamed example_usage() → main() - Added argparse with --check-changes, --generate-package, --apply-update flags - Proper error handling and exit codes - src/skill_seekers/cli/multilang_support.py - Renamed example_usage() → main() - Added argparse with --detect, --report, --export flags - Loads skill documents from directory Issue #2: Haystack Missing from Package Choices ⚠️ CRITICAL - Problem: Haystack adaptor worked but couldn't be used via CLI - Root Cause: package_skill.py missing "haystack" in --target choices - Impact: Users got "invalid choice" error when packaging for Haystack **Fixed:** - src/skill_seekers/cli/package_skill.py:188 - Added "haystack" to --target choices list - Now matches main.py choices (all 11 platforms) **Verification:** ✅ All 4 CLI commands now work: $ skill-seekers quality --help $ skill-seekers stream --help $ skill-seekers update --help $ skill-seekers multilang --help ✅ Haystack now available: $ skill-seekers package output/skill --target haystack ✅ All 164 adaptor tests still passing ✅ No regressions detected **Credits:** - Issues identified by: Kimi QA Review - Fixes implemented by: Claude Sonnet 4.5 Co-Authored-By: Claude Sonnet 4.5 <noreply@anthropic.com>
This commit is contained in:
@@ -411,15 +411,37 @@ class IncrementalUpdater:
|
||||
return False
|
||||
|
||||
|
||||
def example_usage():
|
||||
"""Example usage of incremental updater."""
|
||||
def main():
|
||||
"""CLI entry point for incremental updates."""
|
||||
import argparse
|
||||
from pathlib import Path
|
||||
|
||||
skill_dir = Path("output/react")
|
||||
parser = argparse.ArgumentParser(description="Detect and apply incremental skill updates")
|
||||
parser.add_argument("skill_dir", help="Path to skill directory")
|
||||
parser.add_argument("--check-changes", action="store_true", help="Check for changes only")
|
||||
parser.add_argument("--generate-package", help="Generate update package at specified path")
|
||||
parser.add_argument("--apply-update", help="Apply update package from specified path")
|
||||
args = parser.parse_args()
|
||||
|
||||
skill_dir = Path(args.skill_dir)
|
||||
if not skill_dir.exists():
|
||||
print(f"❌ Error: Directory not found: {skill_dir}")
|
||||
return 1
|
||||
|
||||
# Initialize updater
|
||||
updater = IncrementalUpdater(skill_dir)
|
||||
|
||||
# Apply update if specified
|
||||
if args.apply_update:
|
||||
update_path = Path(args.apply_update)
|
||||
if not update_path.exists():
|
||||
print(f"❌ Error: Update package not found: {update_path}")
|
||||
return 1
|
||||
|
||||
print(f"📥 Applying update from: {update_path}")
|
||||
success = updater.apply_update_package(update_path)
|
||||
return 0 if success else 1
|
||||
|
||||
# Detect changes
|
||||
print("🔍 Detecting changes...")
|
||||
change_set = updater.detect_changes()
|
||||
@@ -428,13 +450,18 @@ def example_usage():
|
||||
report = updater.generate_diff_report(change_set)
|
||||
print(report)
|
||||
|
||||
if args.check_changes:
|
||||
return 0 if not change_set.has_changes else 1
|
||||
|
||||
if change_set.has_changes:
|
||||
# Generate update package
|
||||
# Generate update package if specified
|
||||
if args.generate_package:
|
||||
package_path = Path(args.generate_package)
|
||||
else:
|
||||
package_path = skill_dir.parent / f"{skill_dir.name}-update.json"
|
||||
|
||||
print("\n📦 Generating update package...")
|
||||
package_path = updater.generate_update_package(
|
||||
change_set,
|
||||
skill_dir.parent / f"{skill_dir.name}-update.json"
|
||||
)
|
||||
package_path = updater.generate_update_package(change_set, package_path)
|
||||
print(f"✅ Package created: {package_path}")
|
||||
|
||||
# Save versions
|
||||
@@ -443,6 +470,9 @@ def example_usage():
|
||||
else:
|
||||
print("\n✅ No changes detected - skill is up to date!")
|
||||
|
||||
return 0
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
example_usage()
|
||||
import sys
|
||||
sys.exit(main())
|
||||
|
||||
@@ -397,40 +397,68 @@ class MultiLanguageManager:
|
||||
return "\n".join(lines)
|
||||
|
||||
|
||||
def main():
    """CLI entry point for multi-language skill support.

    Loads SKILL.md and reference markdown files from a skill directory,
    then optionally detects languages, prints a translation report, and
    exports documents grouped by language.

    Returns:
        int: process exit code — 0 on success, 1 if the skill directory
        does not exist.
    """
    import argparse
    from pathlib import Path

    parser = argparse.ArgumentParser(description="Manage multi-language skill documents")
    parser.add_argument("skill_dir", help="Path to skill directory")
    parser.add_argument("--detect", action="store_true", help="Detect languages in skill")
    parser.add_argument("--report", action="store_true", help="Generate translation report")
    parser.add_argument("--export", help="Export by language to specified directory")
    args = parser.parse_args()

    skill_dir = Path(args.skill_dir)
    if not skill_dir.exists():
        print(f"❌ Error: Directory not found: {skill_dir}")
        return 1

    manager = MultiLanguageManager()

    # Load skill documents
    print("📥 Loading skill documents...")
    skill_md = skill_dir / "SKILL.md"
    if skill_md.exists():
        manager.add_document(
            "SKILL.md",
            skill_md.read_text(encoding="utf-8"),
            {"category": "overview"}
        )

    # Load reference files
    refs_dir = skill_dir / "references"
    if refs_dir.exists():
        for ref_file in refs_dir.glob("*.md"):
            manager.add_document(
                ref_file.name,
                ref_file.read_text(encoding="utf-8"),
                {"category": ref_file.stem}
            )

    # Detect languages
    if args.detect:
        detected = manager.detect_languages()
        print(f"\n🌍 Detected languages: {', '.join(detected.keys())}")
        for lang, count in detected.items():
            print(f" {lang}: {count} documents")

    # Generate report
    if args.report:
        print(manager.generate_translation_report())

    # Export by language
    if args.export:
        output_dir = Path(args.export)
        output_dir.mkdir(parents=True, exist_ok=True)
        exports = manager.export_by_language(output_dir)
        print(f"\n✅ Exported {len(exports)} language files:")
        for lang, path in exports.items():
            print(f" {lang}: {path}")

    return 0


if __name__ == "__main__":
    import sys
    sys.exit(main())
|
||||
|
||||
@@ -185,7 +185,7 @@ Examples:
|
||||
|
||||
parser.add_argument(
|
||||
"--target",
|
||||
choices=["claude", "gemini", "openai", "markdown", "langchain", "llama-index", "weaviate", "chroma", "faiss", "qdrant"],
|
||||
choices=["claude", "gemini", "openai", "markdown", "langchain", "llama-index", "haystack", "weaviate", "chroma", "faiss", "qdrant"],
|
||||
default="claude",
|
||||
help="Target LLM platform (default: claude)",
|
||||
)
|
||||
|
||||
@@ -516,26 +516,44 @@ class QualityAnalyzer:
|
||||
return "\n".join(lines)
|
||||
|
||||
|
||||
def main():
    """CLI entry point for skill quality metrics.

    Analyzes the skill at the given directory, optionally prints a
    human-readable report (--report), and always writes a JSON report
    (to --output when given, otherwise to <skill_dir>/quality_report.json).

    Returns:
        int: process exit code — 0 on success, 1 if the skill directory
        does not exist.
    """
    import argparse
    from pathlib import Path

    parser = argparse.ArgumentParser(description="Analyze skill quality metrics")
    parser.add_argument("skill_dir", help="Path to skill directory")
    parser.add_argument("--report", action="store_true", help="Generate detailed report")
    parser.add_argument("--output", help="Output path for JSON report")
    args = parser.parse_args()

    # Analyze skill
    skill_dir = Path(args.skill_dir)
    if not skill_dir.exists():
        print(f"❌ Error: Directory not found: {skill_dir}")
        return 1

    analyzer = QualityAnalyzer(skill_dir)

    # Generate report
    report = analyzer.generate_report()

    # Display report
    if args.report:
        formatted = analyzer.format_report(report)
        print(formatted)

    # Save report
    if args.output:
        report_path = Path(args.output)
    else:
        report_path = skill_dir / "quality_report.json"

    # NOTE(review): relies on module-level `json` and `dataclasses.asdict`
    # imports — confirm they are present at the top of this module.
    report_path.write_text(json.dumps(asdict(report), indent=2, default=str))
    print(f"\n✅ Report saved: {report_path}")
    return 0


if __name__ == "__main__":
    import sys
    sys.exit(main())
|
||||
|
||||
@@ -379,14 +379,23 @@ class StreamingIngester:
|
||||
return "\n".join(lines)
|
||||
|
||||
|
||||
def main():
    """CLI entry point for streaming ingestion.

    Streams a skill directory or a single file into chunks, processes
    them in batches, and optionally persists a checkpoint after each
    batch.

    Returns:
        int: process exit code — 0 on success, 1 if the input path does
        not exist.
    """
    import argparse
    from pathlib import Path

    parser = argparse.ArgumentParser(description="Stream and chunk skill documents")
    parser.add_argument("input", help="Input file or directory path")
    parser.add_argument("--chunk-size", type=int, default=4000, help="Chunk size in characters")
    parser.add_argument("--chunk-overlap", type=int, default=200, help="Chunk overlap in characters")
    parser.add_argument("--batch-size", type=int, default=100, help="Batch size for processing")
    parser.add_argument("--checkpoint", help="Checkpoint file path")
    args = parser.parse_args()

    # Initialize ingester
    ingester = StreamingIngester(
        chunk_size=args.chunk_size,
        chunk_overlap=args.chunk_overlap,
        batch_size=args.batch_size
    )

    # Progress callback
    # NOTE(review): the callback's def line fell in a diff-hunk gap; its
    # signature is reconstructed from the surrounding context — confirm.
    def on_progress(progress):
        print(f"Progress: {progress.progress_percent:.1f}% - "
              f"{progress.processed_chunks}/{progress.total_chunks} chunks")

    # Stream input
    input_path = Path(args.input)
    if not input_path.exists():
        print(f"❌ Error: Path not found: {input_path}")
        return 1

    if input_path.is_dir():
        chunks = ingester.stream_skill_directory(input_path, callback=on_progress)
    else:
        chunks = ingester.stream_file(input_path, callback=on_progress)

    # Process in batches
    all_chunks = []
    for batch in ingester.batch_iterator(chunks, batch_size=args.batch_size):
        print(f"\nProcessing batch of {len(batch)} chunks...")
        all_chunks.extend(batch)

        # Save checkpoint if specified
        if args.checkpoint:
            ingester.save_checkpoint(
                Path(args.checkpoint),
                {"processed_batches": len(all_chunks) // args.batch_size}
            )

    # Final progress
    print("\n" + ingester.format_progress())
    print(f"\n✅ Processed {len(all_chunks)} total chunks")
    return 0


if __name__ == "__main__":
    import sys
    sys.exit(main())
|
||||
|
||||
Reference in New Issue
Block a user