Release v1.9.0: Add video-comparer skill and enhance transcript-fixer
## New Skill: video-comparer v1.0.0 - Compare original and compressed videos with interactive HTML reports - Calculate quality metrics (PSNR, SSIM) for compression analysis - Generate frame-by-frame visual comparisons (slider, side-by-side, grid) - Extract video metadata (codec, resolution, bitrate, duration) - Multi-platform FFmpeg support with security features ## transcript-fixer Enhancements - Add async AI processor for parallel processing - Add connection pool management for database operations - Add concurrency manager and rate limiter - Add audit log retention and database migrations - Add health check and metrics monitoring - Add comprehensive test suite (8 new test files) - Enhance security with domain and path validators ## Marketplace Updates - Update marketplace version from 1.8.0 to 1.9.0 - Update skills count from 15 to 16 - Update documentation (README.md, CLAUDE.md, CHANGELOG.md) 🤖 Generated with [Claude Code](https://claude.com/claude-code) Co-Authored-By: Claude <noreply@anthropic.com>
This commit is contained in:
@@ -2,14 +2,26 @@
|
||||
"""
|
||||
Logging Configuration for Transcript Fixer
|
||||
|
||||
CRITICAL FIX: Enhanced with structured logging and error tracking
|
||||
ISSUE: Critical-4 in Engineering Excellence Plan
|
||||
|
||||
Provides structured logging with rotation, levels, and audit trails.
|
||||
Added: Error rate monitoring, performance tracking, context enrichment
|
||||
|
||||
Author: Chief Engineer
|
||||
Date: 2025-10-28
|
||||
Priority: P0 - Critical
|
||||
"""
|
||||
|
||||
import json
import logging
import logging.handlers
import sys
import time
from collections import deque
from contextlib import contextmanager
from datetime import datetime
from pathlib import Path
from typing import Optional
from typing import Optional, Dict, Any
|
||||
|
||||
|
||||
def setup_logging(
|
||||
@@ -114,6 +126,156 @@ def get_audit_logger() -> logging.Logger:
|
||||
return logging.getLogger('audit')
|
||||
|
||||
|
||||
class ErrorCounter:
    """
    Track error rates for failure threshold monitoring.

    Keeps a rolling window of the most recent operation outcomes plus
    all-time totals, so a caller can abort a batch when the recent
    failure rate exceeds a configured threshold.

    Usage:
        counter = ErrorCounter(threshold=0.3)
        for item in items:
            try:
                process(item)
                counter.success()
            except Exception:
                counter.failure()
                if counter.should_abort():
                    logger.error("Error rate too high, aborting")
                    break
    """

    # Minimum number of recorded outcomes before should_abort() may
    # trigger; avoids aborting on a statistically meaningless sample.
    MIN_SAMPLES = 10

    def __init__(self, threshold: float = 0.3, window_size: int = 100):
        """
        Initialize error counter.

        Args:
            threshold: Failure rate threshold (0.3 = 30%)
            window_size: Number of recent operations to track
        """
        self.threshold = threshold
        self.window_size = window_size
        # deque(maxlen=...) drops the oldest entry automatically when
        # full, replacing the original list + pop(0) trim (O(n) per
        # record). True = success, False = failure.
        self.results: deque[bool] = deque(maxlen=window_size)
        self.total_successes = 0
        self.total_failures = 0

    def _record(self, ok: bool) -> None:
        """Append one outcome to the rolling window and totals."""
        self.results.append(ok)
        if ok:
            self.total_successes += 1
        else:
            self.total_failures += 1

    def success(self) -> None:
        """Record a successful operation"""
        self._record(True)

    def failure(self) -> None:
        """Record a failed operation"""
        self._record(False)

    def failure_rate(self) -> float:
        """Calculate current failure rate (rolling window)"""
        if not self.results:
            return 0.0
        failures = sum(1 for r in self.results if not r)
        return failures / len(self.results)

    def should_abort(self) -> bool:
        """Check if failure rate exceeds threshold"""
        # Need minimum sample size before aborting.
        if len(self.results) < self.MIN_SAMPLES:
            return False
        return self.failure_rate() > self.threshold

    def get_stats(self) -> Dict[str, Any]:
        """Get error statistics (rolling window plus all-time totals)."""
        window_total = len(self.results)
        window_failures = sum(1 for r in self.results if not r)
        window_successes = window_total - window_failures

        return {
            "window_total": window_total,
            "window_successes": window_successes,
            "window_failures": window_failures,
            "window_failure_rate": self.failure_rate(),
            "total_successes": self.total_successes,
            "total_failures": self.total_failures,
            "threshold": self.threshold,
            "should_abort": self.should_abort(),
        }

    def reset(self) -> None:
        """Reset counters"""
        self.results.clear()
        self.total_successes = 0
        self.total_failures = 0
|
||||
|
||||
|
||||
class TimedLogger:
    """
    Logger wrapper with automatic performance tracking.

    Wraps a standard :class:`logging.Logger` and reports how long each
    wrapped operation took, logging at INFO on success and ERROR on
    failure (re-raising the exception).

    Usage:
        logger = TimedLogger(logging.getLogger(__name__))
        with logger.timed("chunk_processing", chunk_id=5):
            process_chunk()
        # Automatically logs: "chunk_processing completed in 123ms"
    """

    def __init__(self, logger: logging.Logger):
        """
        Initialize with a logger instance.

        Args:
            logger: Logger to wrap
        """
        self.logger = logger

    @contextmanager
    def timed(self, operation_name: str, **context: Any):
        """
        Context manager for timing operations.

        Args:
            operation_name: Name of operation
            **context: Additional context to log

        Yields:
            None

        Example:
            >>> with logger.timed("api_call", chunk_id=5):
            ...     call_api()
            # Logs: "api_call completed in 123ms (chunk_id=5)"
        """
        started = time.time()

        # Render any extra context as a " (k=v, ...)" suffix.
        pairs = [f"{key}={value}" for key, value in context.items()]
        suffix = f" ({', '.join(pairs)})" if pairs else ""

        self.logger.info(f"{operation_name} started{suffix}")

        try:
            yield
        except Exception as exc:
            elapsed_ms = (time.time() - started) * 1000
            self.logger.error(
                f"{operation_name} failed in {elapsed_ms:.1f}ms{suffix}: {exc}"
            )
            raise
        else:
            elapsed_ms = (time.time() - started) * 1000
            self.logger.info(
                f"{operation_name} completed in {elapsed_ms:.1f}ms{suffix}"
            )
|
||||
|
||||
|
||||
# Example usage
|
||||
if __name__ == "__main__":
|
||||
setup_logging(level="DEBUG")
|
||||
@@ -127,3 +289,21 @@ if __name__ == "__main__":
|
||||
|
||||
audit_logger = get_audit_logger()
|
||||
audit_logger.info("User 'admin' added correction: '错误' → '正确'")
|
||||
|
||||
# Test ErrorCounter
|
||||
print("\n--- Testing ErrorCounter ---")
|
||||
counter = ErrorCounter(threshold=0.3)
|
||||
for i in range(20):
|
||||
if i % 4 == 0:
|
||||
counter.failure()
|
||||
else:
|
||||
counter.success()
|
||||
|
||||
stats = counter.get_stats()
|
||||
print(f"Stats: {json.dumps(stats, indent=2)}")
|
||||
|
||||
# Test TimedLogger
|
||||
print("\n--- Testing TimedLogger ---")
|
||||
timed_logger = TimedLogger(logger)
|
||||
with timed_logger.timed("test_operation", item_count=100):
|
||||
time.sleep(0.1)
|
||||
|
||||
Reference in New Issue
Block a user