fix: resolve CI failures across all GitHub Actions workflows
- Fix ruff format issue in doc_scraper.py
- Add pytest skip markers for browser renderer tests when Playwright is not installed in CI
- Replace broken Python heredocs in 4 workflow YAML files (scheduled-updates, vector-db-export, quality-metrics, test-vector-dbs) with `python3 -c` calls to fix YAML parsing errors

Co-Authored-By: Claude Opus 4.6 (1M context) <noreply@anthropic.com>
This commit is contained in:
61
.github/workflows/quality-metrics.yml
vendored
61
.github/workflows/quality-metrics.yml
vendored
@@ -88,48 +88,43 @@ jobs:
|
||||
echo "🔍 Analyzing $SKILL_NAME..."
|
||||
|
||||
# Run quality analysis
|
||||
python3 << 'EOF' "$skill_dir" "$THRESHOLD" "$SKILL_NAME"
|
||||
import sys
|
||||
from pathlib import Path
|
||||
sys.path.insert(0, 'src')
|
||||
python3 -c "
|
||||
import sys
|
||||
from pathlib import Path
|
||||
|
||||
from skill_seekers.cli.quality_metrics import QualityAnalyzer
|
||||
from skill_seekers.cli.quality_metrics import QualityAnalyzer
|
||||
|
||||
skill_dir = Path(sys.argv[1])
|
||||
threshold = float(sys.argv[2])
|
||||
skill_name = sys.argv[3]
|
||||
skill_dir = Path('$skill_dir')
|
||||
threshold = float('$THRESHOLD')
|
||||
skill_name = '$SKILL_NAME'
|
||||
|
||||
analyzer = QualityAnalyzer(skill_dir)
|
||||
report = analyzer.generate_report()
|
||||
analyzer = QualityAnalyzer(skill_dir)
|
||||
report = analyzer.generate_report()
|
||||
|
||||
# Print formatted report
|
||||
formatted = analyzer.format_report(report)
|
||||
print(formatted)
|
||||
formatted = analyzer.format_report(report)
|
||||
print(formatted)
|
||||
|
||||
# Save individual report
|
||||
with open(f'quality_{skill_name}.txt', 'w') as f:
|
||||
f.write(formatted)
|
||||
with open(f'quality_{skill_name}.txt', 'w') as f:
|
||||
f.write(formatted)
|
||||
|
||||
# Add to summary
|
||||
score = report.overall_score.total_score
|
||||
grade = report.overall_score.grade
|
||||
status = "✅" if score >= threshold else "❌"
|
||||
score = report.overall_score.total_score
|
||||
grade = report.overall_score.grade
|
||||
status = 'PASS' if score >= threshold else 'FAIL'
|
||||
|
||||
summary_line = f"{status} **{skill_name}**: {grade} ({score:.1f}/100)"
|
||||
print(f"\n{summary_line}")
|
||||
summary_line = f'{status} **{skill_name}**: {grade} ({score:.1f}/100)'
|
||||
print(f'\n{summary_line}')
|
||||
|
||||
with open('quality_summary.md', 'a') as f:
|
||||
f.write(f"{summary_line}\n")
|
||||
with open('quality_summary.md', 'a') as f:
|
||||
f.write(f'{summary_line}\n')
|
||||
|
||||
# Set metrics as annotations
|
||||
if score < threshold:
|
||||
print(f"::error file={skill_dir}/SKILL.md::Quality score {score:.1f} is below threshold {threshold}")
|
||||
sys.exit(1)
|
||||
elif score < 80:
|
||||
print(f"::warning file={skill_dir}/SKILL.md::Quality score {score:.1f} could be improved")
|
||||
else:
|
||||
print(f"::notice file={skill_dir}/SKILL.md::Quality score {score:.1f} - Excellent!")
|
||||
EOF
|
||||
if score < threshold:
|
||||
print(f'::error file={skill_dir}/SKILL.md::Quality score {score:.1f} is below threshold {threshold}')
|
||||
sys.exit(1)
|
||||
elif score < 80:
|
||||
print(f'::warning file={skill_dir}/SKILL.md::Quality score {score:.1f} could be improved')
|
||||
else:
|
||||
print(f'::notice file={skill_dir}/SKILL.md::Quality score {score:.1f} - Excellent!')
|
||||
"
|
||||
|
||||
if [ $? -ne 0 ]; then
|
||||
ALL_PASSED=false
|
||||
|
||||
Reference in New Issue
Block a user