From 35872b9e771384243b12c866ae40c19ac1ed3f06 Mon Sep 17 00:00:00 2001 From: daymade Date: Thu, 22 Jan 2026 22:05:14 +0800 Subject: [PATCH] Release v1.24.0: Add claude-skills-troubleshooting - Add claude-skills-troubleshooting v1.0.0 - Plugin installation and enablement debugging - Diagnostic scripts for installed vs enabled mismatch - Known GitHub issues tracking (#17832, #19696, #17089, #13543, #16260) - Skills vs Commands architecture documentation - Batch enable script for missing plugins - Add i18n-expert v1.0.0 - Complete i18n/l10n setup for React/Next.js/Vue - Key architecture and locale file organization - Translation generation strategy (AI, professional, manual) - Key parity validation between en-US and zh-CN - Bundled i18n_audit.py script - Update marketplace to v1.24.0 (32 skills) - Update README.md badges (skills count, version) - Update README.zh-CN.md badges (skills count, version) - Update CLAUDE.md skills count and Available Skills list - Update youtube-downloader with PO token enhancements Co-Authored-By: Claude --- .claude-plugin/marketplace.json | 49 +- CHANGELOG.md | 51 + CLAUDE.md | 23 +- README.md | 165 ++- README.zh-CN.md | 167 ++- claude-skills-troubleshooting/SKILL.md | 148 +++ .../references/architecture.md | 198 ++++ .../references/known_issues.md | 72 ++ .../scripts/diagnose_plugins.py | 182 ++++ .../scripts/enable_all_plugins.py | 102 ++ i18n-expert/.security-scan-passed | 4 + i18n-expert/SKILL.md | 128 +++ i18n-expert/scripts/i18n_audit.py | 163 +++ youtube-downloader/SKILL.md | 167 ++- .../references/po-token-setup.md | 51 +- youtube-downloader/scripts/download_video.py | 962 +++++++++++++++++- 16 files changed, 2577 insertions(+), 55 deletions(-) create mode 100644 claude-skills-troubleshooting/SKILL.md create mode 100644 claude-skills-troubleshooting/references/architecture.md create mode 100644 claude-skills-troubleshooting/references/known_issues.md create mode 100755 claude-skills-troubleshooting/scripts/diagnose_plugins.py 
create mode 100755 claude-skills-troubleshooting/scripts/enable_all_plugins.py create mode 100644 i18n-expert/.security-scan-passed create mode 100644 i18n-expert/SKILL.md create mode 100644 i18n-expert/scripts/i18n_audit.py diff --git a/.claude-plugin/marketplace.json b/.claude-plugin/marketplace.json index 29f45c9..1570d6d 100644 --- a/.claude-plugin/marketplace.json +++ b/.claude-plugin/marketplace.json @@ -5,8 +5,8 @@ "email": "daymadev89@gmail.com" }, "metadata": { - "description": "Professional Claude Code skills for GitHub operations, document conversion, diagram generation, statusline customization, Teams communication, repomix utilities, skill creation, CLI demo generation, LLM icon access, Cloudflare troubleshooting, UI design system extraction, professional presentation creation, YouTube video downloading, secure repomix packaging, ASR transcription correction, video comparison quality analysis, comprehensive QA testing infrastructure, prompt optimization with EARS methodology, session history recovery, documentation cleanup, PDF generation with Chinese font support, CLAUDE.md progressive disclosure optimization, CCPM skill registry search and management, Promptfoo LLM evaluation framework, iOS app development with XcodeGen and SwiftUI, fact-checking with automated corrections, Twitter/X content fetching, intelligent macOS disk space recovery, skill quality review and improvement, and GitHub contribution strategy", - "version": "1.22.0", + "description": "Professional Claude Code skills for GitHub operations, document conversion, diagram generation, statusline customization, Teams communication, repomix utilities, skill creation, CLI demo generation, LLM icon access, Cloudflare troubleshooting, UI design system extraction, professional presentation creation, YouTube video downloading, secure repomix packaging, ASR transcription correction, video comparison quality analysis, comprehensive QA testing infrastructure, prompt optimization with EARS 
methodology, session history recovery, documentation cleanup, PDF generation with Chinese font support, CLAUDE.md progressive disclosure optimization, CCPM skill registry search and management, Promptfoo LLM evaluation framework, iOS app development with XcodeGen and SwiftUI, fact-checking with automated corrections, Twitter/X content fetching, intelligent macOS disk space recovery, skill quality review and improvement, GitHub contribution strategy, complete internationalization/localization setup, and plugin/skill troubleshooting with diagnostic tools", + "version": "1.24.0", "homepage": "https://github.com/daymade/claude-code-skills" }, "plugins": [ @@ -619,6 +619,51 @@ "skills": [ "./github-contributor" ] + }, + { + "name": "i18n-expert", + "description": "Complete internationalization/localization setup and auditing for UI codebases. Configure i18n frameworks, replace hard-coded strings with translation keys, ensure locale parity between en-US and zh-CN, and validate pluralization and formatting. Use when setting up i18n for React/Next.js/Vue apps, auditing existing implementations, replacing hard-coded strings, ensuring proper error code mapping, or validating pluralization and date/time/number formatting across locales", + "source": "./", + "strict": false, + "version": "1.0.0", + "category": "developer-tools", + "keywords": [ + "i18n", + "internationalization", + "localization", + "translation", + "react-i18next", + "next-intl", + "vue-i18n", + "locale", + "multilingual", + "globalization" + ], + "skills": [ + "./i18n-expert" + ] + }, + { + "name": "claude-skills-troubleshooting", + "description": "Diagnose and resolve Claude Code plugin and skill configuration issues. Debug plugin installation, enablement, and activation problems with systematic workflows. 
Use when plugins are installed but not showing in available skills list, skills are not activating as expected, troubleshooting enabledPlugins configuration in settings.json, debugging 'plugin not working' or 'skill not showing' issues, or understanding plugin state architecture and lifecycle", + "source": "./", + "strict": false, + "version": "1.0.0", + "category": "utilities", + "keywords": [ + "troubleshooting", + "debugging", + "plugins", + "skills", + "diagnostics", + "configuration", + "enabledPlugins", + "settings", + "marketplace" + ], + "skills": [ + "./claude-skills-troubleshooting" + ] } ] } diff --git a/CHANGELOG.md b/CHANGELOG.md index 97929c6..932bcc2 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -10,6 +10,57 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0 ### Added - None +## [1.24.0] - 2026-01-22 + +### Added +- **New Skill**: claude-skills-troubleshooting - Diagnose and resolve Claude Code plugin and skill configuration issues + - Plugin installation and enablement debugging + - installed_plugins.json vs settings.json enabledPlugins diagnosis + - Marketplace cache freshness detection + - Plugin state architecture documentation + - Bundled diagnostic script (diagnose_plugins.py) + - Batch enable script for missing plugins (enable_all_plugins.py) + - Known GitHub issues tracking (#17832, #19696, #17089, #13543, #16260) + - Skills vs Commands architecture explanation + +### Changed +- Updated marketplace skills count from 31 to 32 +- Updated marketplace version from 1.23.0 to 1.24.0 +- Updated README.md badges (skills count, version) +- Updated README.md to include claude-skills-troubleshooting in skills listing +- Updated README.zh-CN.md badges (skills count, version) +- Updated README.zh-CN.md to include claude-skills-troubleshooting in skills listing +- Updated CLAUDE.md skills count from 31 to 32 +- Added claude-skills-troubleshooting use case section to README.md +- Added claude-skills-troubleshooting use 
case section to README.zh-CN.md + +## [1.23.0] - 2026-01-22 + +### Added +- **New Skill**: i18n-expert - Complete internationalization/localization setup and auditing for UI codebases + - Library selection and setup (react-i18next, next-intl, vue-i18n) + - Key architecture and locale file organization (JSON, YAML, PO, XLIFF) + - Translation generation strategy (AI, professional, manual) + - Routing and language detection/switching + - SEO and metadata localization + - RTL support for applicable locales + - Key parity validation between en-US and zh-CN + - Pluralization and formatting validation + - Error code mapping to localized messages + - Bundled i18n_audit.py script for key usage extraction + - Scope inputs: framework, existing i18n state, target locales, translation quality needs + +### Changed +- Updated marketplace skills count from 30 to 31 +- Updated marketplace version from 1.22.0 to 1.23.0 +- Updated README.md badges (skills count, version) +- Updated README.md to include i18n-expert in skills listing +- Updated README.zh-CN.md badges (skills count, version) +- Updated README.zh-CN.md to include i18n-expert in skills listing +- Updated CLAUDE.md skills count from 30 to 31 +- Added i18n-expert use case section to README.md +- Added i18n-expert use case section to README.zh-CN.md + ### Changed - None diff --git a/CLAUDE.md b/CLAUDE.md index 6b97861..3b7cbfc 100644 --- a/CLAUDE.md +++ b/CLAUDE.md @@ -4,7 +4,7 @@ This file provides guidance to Claude Code (claude.ai/code) when working with co ## Repository Overview -This is a Claude Code skills marketplace containing 30 production-ready skills organized in a plugin marketplace structure. Each skill is a self-contained package that extends Claude's capabilities with specialized knowledge, workflows, and bundled resources. +This is a Claude Code skills marketplace containing 32 production-ready skills organized in a plugin marketplace structure. 
Each skill is a self-contained package that extends Claude's capabilities with specialized knowledge, workflows, and bundled resources. **Essential Skill**: `skill-creator` is the most important skill in this marketplace - it's a meta-skill that enables users to create their own skills. Always recommend it first for users interested in extending Claude Code. @@ -134,7 +134,7 @@ Skills for public distribution must NOT contain: ## Marketplace Configuration The marketplace is configured in `.claude-plugin/marketplace.json`: -- Contains 30 plugins, each mapping to one skill +- Contains 32 plugins, each mapping to one skill - Each plugin has: name, description, version, category, keywords, skills array - Marketplace metadata: name, owner, version, homepage @@ -144,7 +144,7 @@ The marketplace is configured in `.claude-plugin/marketplace.json`: 1. **Marketplace Version** (`.claude-plugin/marketplace.json` → `metadata.version`) - Tracks the marketplace catalog as a whole - - Current: v1.22.0 + - Current: v1.24.0 - Bump when: Adding/removing skills, major marketplace restructuring - Semantic versioning: MAJOR.MINOR.PATCH @@ -172,7 +172,7 @@ The marketplace is configured in `.claude-plugin/marketplace.json`: 10. **cloudflare-troubleshooting** - API-driven Cloudflare diagnostics and debugging 11. **ui-designer** - Design system extraction from UI mockups 12. **ppt-creator** - Professional presentation creation with dual-path PPTX generation -13. **youtube-downloader** - YouTube video and audio downloading with yt-dlp error handling +13. **youtube-downloader** - YouTube video/audio downloads with PO token handling, cookies, and proxy-aware retries 14. **repomix-safe-mixer** - Secure repomix packaging with automatic credential detection 15. **transcript-fixer** - ASR/STT transcription error correction with dictionary and AI learning 16. 
**video-comparer** - Video comparison and quality analysis with interactive HTML reports @@ -190,9 +190,24 @@ The marketplace is configured in `.claude-plugin/marketplace.json`: 28. **macos-cleaner** - Intelligent macOS disk space analysis and cleanup with safety-first philosophy, risk categorization, and interactive confirmation 29. **skill-reviewer** - Reviews and improves Claude Code skills against official best practices with self-review, external review, and auto-PR modes 30. **github-contributor** - Strategic guide for becoming an effective GitHub contributor with opportunity discovery, project selection, and reputation building +31. **i18n-expert** - Complete internationalization/localization setup and auditing for UI codebases with framework support, key architecture, and parity validation +32. **claude-skills-troubleshooting** - Diagnose and resolve Claude Code plugin and skill configuration issues with diagnostic scripts and architecture documentation **Recommendation**: Always suggest `skill-creator` first for users interested in creating skills or extending Claude Code. +## YouTube Downloader SOP (Internal) + +Use this SOP to avoid common yt-dlp failures and confusion: + +1. Quote YouTube URLs in shell commands (zsh treats `?` as glob). Example: `'https://www.youtube.com/watch?v=VIDEO_ID'`. +2. Ensure proxy is active for both yt-dlp and PO Token providers (HTTP_PROXY/HTTPS_PROXY/ALL_PROXY). +3. If you see “Sign in to confirm you’re not a bot”, request cookie permission and use browser cookies. +4. Start the PO Token provider before downloading. Prefer Docker bgutil; fall back to browser-based WPC when Docker is unavailable or fails. +5. Use `web_safari` client when cookies are present; otherwise use `mweb` for PO tokens. +6. Keep the browser window open while WPC is minting tokens and make sure it can reach YouTube through the same proxy. +7. 
If you see “Only images are available” or “Requested format is not available”, treat it as PO token failure and retry after fixing provider/browser state. +8. If you see SSL EOF or fragment errors, treat it as proxy instability. Retry with progressive formats or switch to a more stable proxy. + ## Python Development All Python scripts in this repository: diff --git a/README.md b/README.md index 75c6b42..647c52a 100644 --- a/README.md +++ b/README.md @@ -6,15 +6,15 @@ [![简体中文](https://img.shields.io/badge/语言-简体中文-red)](./README.zh-CN.md) [![License: MIT](https://img.shields.io/badge/License-MIT-yellow.svg)](https://opensource.org/licenses/MIT) -[![Skills](https://img.shields.io/badge/skills-30-blue.svg)](https://github.com/daymade/claude-code-skills) -[![Version](https://img.shields.io/badge/version-1.22.0-green.svg)](https://github.com/daymade/claude-code-skills) +[![Skills](https://img.shields.io/badge/skills-32-blue.svg)](https://github.com/daymade/claude-code-skills) +[![Version](https://img.shields.io/badge/version-1.24.0-green.svg)](https://github.com/daymade/claude-code-skills) [![Claude Code](https://img.shields.io/badge/Claude%20Code-2.0.13+-purple.svg)](https://claude.com/code) [![PRs Welcome](https://img.shields.io/badge/PRs-welcome-brightgreen.svg)](./CONTRIBUTING.md) [![Maintenance](https://img.shields.io/badge/Maintained%3F-yes-green.svg)](https://github.com/daymade/claude-code-skills/graphs/commit-activity) -Professional Claude Code skills marketplace featuring 30 production-ready skills for enhanced development workflows. +Professional Claude Code skills marketplace featuring 32 production-ready skills for enhanced development workflows. 
## 📑 Table of Contents @@ -526,12 +526,12 @@ Download YouTube videos and audio using yt-dlp with robust error handling and au - Working with YouTube content in regions with access restrictions **Key features:** -- Android client workaround for nsig extraction issues (automatic) +- Auto PO Token provider (Docker-first, browser fallback) for high-quality access +- Browser-cookie verification for “not a bot” prompts (privacy-friendly) - Audio-only download with MP3 conversion - Format listing and custom format selection - Output directory customization -- Network error handling for proxy/restricted environments -- Availability check for yt-dlp dependency +- Proxy-aware downloads for restricted environments **🎬 Live Demo** @@ -1153,7 +1153,52 @@ Recommendation: Start with 🟢 Safe items (95 GB), then review 🟡 items toget --- -### 27. **skill-reviewer** - Skill Quality Review & Improvement +### 27. **fact-checker** - Document Fact-Checking + +Verify factual claims in documents using web search and official sources, then propose corrections with user confirmation. 
+ +**When to use:** +- Fact-checking documents for accuracy +- Verifying AI model specifications and technical documentation +- Updating outdated information in documents +- Validating statistical claims and benchmarks +- Checking API capabilities and version numbers + +**Key features:** +- Web search integration with authoritative sources +- AI model specification verification +- Technical documentation accuracy checks +- Statistical data validation +- Automated correction reports with user confirmation +- Supports general factual statements and technical claims + +**Example usage:** +```bash +# Install the skill +claude plugin install fact-checker@daymade-skills + +# Fact-check a document +"Please fact-check this section about AI model capabilities" + +# Verify technical specs +"Check if these Claude model specifications are still accurate" + +# Update outdated info +"Verify and update the version numbers in this documentation" +``` + +**🎬 Live Demo** + +*Coming soon* + +📚 **Documentation**: See [fact-checker/SKILL.md](./fact-checker/SKILL.md) for full workflow and claim types. + +**Requirements**: +- Web search access (via Claude Code) + +--- + +### 28. **skill-reviewer** - Skill Quality Review & Improvement Review and improve Claude Code skills against official best practices with three powerful modes. @@ -1198,7 +1243,7 @@ claude plugin install skill-reviewer@daymade-skills --- -### 28. **github-contributor** - GitHub Contribution Strategy +### 29. **github-contributor** - GitHub Contribution Strategy Strategic guide for becoming an effective GitHub contributor and building your open-source reputation. @@ -1255,6 +1300,99 @@ claude plugin install github-contributor@daymade-skills --- +### 31. **i18n-expert** - Internationalization & Localization + +Complete internationalization/localization setup and auditing for UI codebases. 
Configure i18n frameworks, replace hard-coded strings with translation keys, ensure locale parity between en-US and zh-CN, and validate pluralization and formatting. + +**When to use:** +- Setting up i18n for new React/Next.js/Vue applications +- Auditing existing i18n implementations for key parity and completeness +- Replacing hard-coded strings with translation keys +- Ensuring proper error code mapping to localized messages +- Validating pluralization, date/time/number formatting across locales +- Implementing language switching and SEO metadata localization + +**Key features:** +- Library selection and setup (react-i18next, next-intl, vue-i18n) +- Key architecture and locale file organization (JSON, YAML, PO, XLIFF) +- Translation generation strategy (AI, professional, manual) +- Routing and language detection/switching +- SEO and metadata localization +- RTL support for applicable locales +- Key parity validation between en-US and zh-CN +- Pluralization and formatting validation +- Error code mapping to localized messages +- Bundled i18n_audit.py script for key usage extraction + +**Example usage:** +```bash +# Install the skill +claude plugin install i18n-expert@daymade-skills + +# Setup i18n for a new project +"Set up i18n for my React app with English and Chinese support" + +# Audit existing i18n implementation +"Audit the i18n setup and find missing translation keys" + +# Replace hard-coded strings +"Replace all hard-coded strings in this component with i18n keys" +``` + +**🎬 Live Demo** + +*Coming soon* + +📚 **Documentation**: See [i18n-expert/SKILL.md](./i18n-expert/SKILL.md) for complete workflow and architecture guidance. + +**Requirements**: +- **Python 3.6+** (for audit script) +- **React/Next.js/Vue** (framework-specific i18n library) + +--- + +### 32. **claude-skills-troubleshooting** - Plugin & Skill Troubleshooting + +Diagnose and resolve Claude Code plugin and skill configuration issues. 
Debug plugin installation, enablement, and activation problems with systematic workflows. + +**When to use:** +- Plugins installed but not showing in available skills list +- Skills not activating as expected despite installation +- Troubleshooting enabledPlugins configuration in settings.json +- Debugging "plugin not working" or "skill not showing" issues +- Understanding plugin state architecture and lifecycle + +**Key features:** +- Quick diagnosis via diagnostic script (detects installed vs enabled mismatch) +- Plugin state architecture documentation (installed_plugins.json vs settings.json) +- Marketplace cache freshness detection and update guidance +- Known GitHub issues tracking (#17832, #19696, #17089, #13543, #16260) +- Batch enable script for missing plugins from a marketplace +- Skills vs Commands architecture explanation +- Comprehensive diagnostic commands reference + +**Example usage:** +```bash +# Install the skill +claude plugin install claude-skills-troubleshooting@daymade-skills + +# Run diagnostic +python3 scripts/diagnose_plugins.py + +# Batch enable missing plugins +python3 scripts/enable_all_plugins.py daymade-skills +``` + +**🎬 Live Demo** + +*Coming soon* + +📚 **Documentation**: See [claude-skills-troubleshooting/SKILL.md](./claude-skills-troubleshooting/SKILL.md) for complete troubleshooting workflow and architecture guidance. + +**Requirements**: None (uses Claude Code built-in Python) + +--- + ## 🎬 Interactive Demo Gallery Want to see all demos in one place with click-to-enlarge functionality? Check out our [interactive demo gallery](./demos/index.html) or browse the [demos directory](./demos/). @@ -1324,6 +1462,12 @@ Use **twitter-reader** to fetch tweet content without JavaScript rendering or au ### For Skill Quality & Open-Source Contributions Use **skill-reviewer** to validate your own skills against best practices before publishing, or to review and improve others' skill repositories. 
Combine with **github-contributor** to find high-impact open-source projects, create professional PRs, and build your contributor reputation. Perfect for developers who want to contribute to the Claude Code ecosystem or any GitHub project systematically. +### For Internationalization & Localization +Use **i18n-expert** to set up complete i18n infrastructure for React/Next.js/Vue applications, audit existing implementations for missing translation keys, and ensure locale parity between en-US and zh-CN. Perfect for teams launching products to global markets, maintaining multi-language UIs, or replacing hard-coded strings with proper i18n keys. Combine with **skill-creator** to create locale-aware skills, or with **docs-cleaner** to consolidate documentation across multiple languages. + +### For Plugin & Skill Troubleshooting +Use **claude-skills-troubleshooting** to diagnose and resolve Claude Code plugin and skill configuration issues. Debug why plugins appear installed but don't show in available skills, understand the installed_plugins.json vs settings.json enabledPlugins architecture, and batch-enable missing plugins from a marketplace. Essential for marketplace maintainers debugging installation issues, developers troubleshooting skill activation, or anyone confused by the GitHub #17832 auto-enable bug. 
+ ## 📚 Documentation Each skill includes: @@ -1363,6 +1507,9 @@ Each skill includes: - **macos-cleaner**: See `macos-cleaner/references/cleanup_targets.md` for detailed cleanup target explanations, `macos-cleaner/references/mole_integration.md` for Mole visual tool integration, and `macos-cleaner/references/safety_rules.md` for comprehensive safety guidelines - **skill-reviewer**: See `skill-reviewer/references/evaluation_checklist.md` for complete evaluation criteria, `skill-reviewer/references/pr_template.md` for PR templates, and `skill-reviewer/references/marketplace_template.json` for marketplace configuration - **github-contributor**: See `github-contributor/references/pr_checklist.md` for PR quality checklist, `github-contributor/references/project_evaluation.md` for project evaluation criteria, and `github-contributor/references/communication_templates.md` for issue/PR templates +- **i18n-expert**: See `i18n-expert/SKILL.md` for complete i18n setup workflow, key architecture guidance, and audit procedures +- **claude-skills-troubleshooting**: See `claude-skills-troubleshooting/SKILL.md` for plugin troubleshooting workflow and architecture +- **fact-checker**: See `fact-checker/SKILL.md` for fact-checking workflow and claim verification process ## 🛠️ Requirements @@ -1467,4 +1614,4 @@ If you find these skills useful, please: **Built with ❤️ using the skill-creator skill for Claude Code** -Last updated: 2026-01-15 | Marketplace version 1.22.0 +Last updated: 2026-01-22 | Marketplace version 1.24.0 diff --git a/README.zh-CN.md index 9b82e41..659ad44 100644 --- a/README.zh-CN.md +++ b/README.zh-CN.md @@ -6,15 +6,15 @@ [![简体中文](https://img.shields.io/badge/语言-简体中文-red)](./README.zh-CN.md) [![License: MIT](https://img.shields.io/badge/License-MIT-yellow.svg)](https://opensource.org/licenses/MIT) -[![Skills](https://img.shields.io/badge/skills-30-blue.svg)](https://github.com/daymade/claude-code-skills) 
-[![Version](https://img.shields.io/badge/version-1.22.0-green.svg)](https://github.com/daymade/claude-code-skills) +[![Skills](https://img.shields.io/badge/skills-32-blue.svg)](https://github.com/daymade/claude-code-skills) +[![Version](https://img.shields.io/badge/version-1.24.0-green.svg)](https://github.com/daymade/claude-code-skills) [![Claude Code](https://img.shields.io/badge/Claude%20Code-2.0.13+-purple.svg)](https://claude.com/code) [![PRs Welcome](https://img.shields.io/badge/PRs-welcome-brightgreen.svg)](./CONTRIBUTING.md) [![Maintenance](https://img.shields.io/badge/Maintained%3F-yes-green.svg)](https://github.com/daymade/claude-code-skills/graphs/commit-activity) -专业的 Claude Code 技能市场,提供 30 个生产就绪的技能,用于增强开发工作流。 +专业的 Claude Code 技能市场,提供 32 个生产就绪的技能,用于增强开发工作流。 ## 📑 目录 @@ -561,12 +561,12 @@ CC-Switch 支持以下中国 AI 服务提供商: - 在受限环境中下载视频 **主要功能:** -- 视频和播放列表下载 +- 自动 PO Token 提供器(优先 Docker,失败自动切换浏览器方案) +- 通过浏览器 Cookie 处理“不是机器人”验证(更友好) - 仅音频下载并转换为 MP3 -- Android 客户端绕过 nsig 提取问题(自动) - 格式列表和自定义格式选择 -- 代理/受限环境的网络错误处理 -- 常见 yt-dlp 问题的综合故障排除 +- 输出目录自定义 +- 代理/受限环境的下载支持 **示例用法:** ```bash @@ -1195,7 +1195,52 @@ claude plugin install macos-cleaner@daymade-skills --- -### 27. **skill-reviewer** - 技能质量审查与改进 +### 27. **fact-checker** - 文档事实核查 + +使用网络搜索和权威来源验证文档中的事实声明,然后提议更正并等待用户确认。 + +**使用场景:** +- 核实文档准确性 +- 验证 AI 模型规格和技术文档 +- 更新文档中的过时信息 +- 验证统计声明和基准测试 +- 检查 API 功能和版本号 + +**主要功能:** +- 集成权威来源的网络搜索 +- AI 模型规格验证 +- 技术文档准确性检查 +- 统计数据验证 +- 自动更正报告(需用户确认) +- 支持一般事实声明和技术声明 + +**示例用法:** +```bash +# 安装技能 +claude plugin install fact-checker@daymade-skills + +# 核实文档 +"请核查这部分关于 AI 模型功能的内容" + +# 验证技术规格 +"检查这些 Claude 模型规格是否仍然准确" + +# 更新过时信息 +"验证并更新此文档中的版本号" +``` + +**🎬 实时演示** + +*即将推出* + +📚 **文档**:参见 [fact-checker/SKILL.md](./fact-checker/SKILL.md) 了解完整的工作流程和声明类型。 + +**要求**: +- 网络搜索访问(通过 Claude Code) + +--- + +### 28. **skill-reviewer** - 技能质量审查与改进 以三种强大模式审查和改进 Claude Code 技能,确保符合官方最佳实践。 @@ -1240,7 +1285,7 @@ claude plugin install skill-reviewer@daymade-skills --- -### 28. 
**github-contributor** - GitHub 贡献策略 +### 29. **github-contributor** - GitHub 贡献策略 成为高效 GitHub 贡献者并建立开源声誉的战略指南。 @@ -1297,6 +1342,99 @@ claude plugin install github-contributor@daymade-skills --- +### 31. **i18n-expert** - 国际化与本地化 + +为 UI 代码库提供完整的国际化/本地化设置和审计。配置 i18n 框架、将硬编码字符串替换为翻译键、确保 en-US 和 zh-CN 之间的语言环境一致性,并验证复数形式和格式设置。 + +**使用场景:** +- 为新的 React/Next.js/Vue 应用程序设置 i18n +- 审计现有 i18n 实现的键一致性和完整性 +- 将硬编码字符串替换为翻译键 +- 确保错误代码正确映射到本地化消息 +- 验证跨语言环境的复数形式、日期/时间/数字格式设置 +- 实现语言切换和 SEO 元数据本地化 + +**主要功能:** +- 库选择和设置(react-i18next、next-intl、vue-i18n) +- 键架构和语言环境文件组织(JSON、YAML、PO、XLIFF) +- 翻译生成策略(AI、专业、手动) +- 路由和语言检测/切换 +- SEO 和元数据本地化 +- 适用语言环境的 RTL 支持 +- en-US 和 zh-CN 之间的键一致性验证 +- 复数形式和格式设置验证 +- 错误代码映射到本地化消息 +- 捆绑的 i18n_audit.py 脚本用于键使用提取 + +**示例用法:** +```bash +# 安装技能 +claude plugin install i18n-expert@daymade-skills + +# 为新项目设置 i18n +"为我的 React 应用设置支持英文和中文的 i18n" + +# 审计现有 i18n 实现 +"审计 i18n 设置并查找缺失的翻译键" + +# 替换硬编码字符串 +"将此组件中的所有硬编码字符串替换为 i18n 键" +``` + +**🎬 实时演示** + +*即将推出* + +📚 **文档**:参见 [i18n-expert/SKILL.md](./i18n-expert/SKILL.md) 了解完整的工作流程和架构指导。 + +**要求**: +- **Python 3.6+**(用于审计脚本) +- **React/Next.js/Vue**(框架特定的 i18n 库) + +--- + +### 32. 
**claude-skills-troubleshooting** - 插件与技能故障排除 + +诊断和解决 Claude Code 插件和技能配置问题。通过系统化工作流程调试插件安装、启用和激活问题。 + +**使用场景:** +- 插件已安装但未显示在可用技能列表中 +- 尽管已安装,技能仍未按预期激活 +- 调试 settings.json 中的 enabledPlugins 配置 +- 调试"插件不工作"或"技能未显示"问题 +- 了解插件状态架构和生命周期 + +**主要功能:** +- 通过诊断脚本快速诊断(检测已安装但未启用的不匹配) +- 插件状态架构文档(installed_plugins.json vs settings.json) +- 市场缓存新鲜度检测和更新指导 +- 已知 GitHub 问题跟踪(#17832、#19696、#17089、#13543、#16260) +- 用于批量启用市场缺失插件的脚本 +- 技能与命令架构解释 +- 全面的诊断命令参考 + +**示例用法:** +```bash +# 安装技能 +claude plugin install claude-skills-troubleshooting@daymade-skills + +# 运行诊断 +python3 scripts/diagnose_plugins.py + +# 批量启用缺失的插件 +python3 scripts/enable_all_plugins.py daymade-skills +``` + +**🎬 实时演示** + +*即将推出* + +📚 **文档**:参见 [claude-skills-troubleshooting/SKILL.md](./claude-skills-troubleshooting/SKILL.md) 了解完整的故障排除工作流程和架构指导。 + +**要求**:无(使用 Claude Code 内置 Python) + +--- + ## 🎬 交互式演示画廊 想要在一个地方查看所有演示并具有点击放大功能?访问我们的[交互式演示画廊](./demos/index.html)或浏览[演示目录](./demos/)。 @@ -1366,6 +1504,12 @@ claude plugin install github-contributor@daymade-skills ### 技能质量与开源贡献 使用 **skill-reviewer** 在发布前验证你的技能是否符合最佳实践,或审查并改进他人的技能仓库。与 **github-contributor** 结合使用,寻找高影响力的开源项目、创建专业的 PR,并系统性地建立贡献者声誉。非常适合希望为 Claude Code 生态系统或任何 GitHub 项目做出贡献的开发者。 +### 国际化与本地化 +使用 **i18n-expert** 为 React/Next.js/Vue 应用程序设置完整的 i18n 基础设施、审计现有实现中缺失的翻译键,并确保 en-US 和 zh-CN 之间的语言环境一致性。非常适合向全球市场推出产品的团队、维护多语言 UI,或将硬编码字符串替换为正确的 i18n 键。与 **skill-creator** 结合使用可创建支持语言环境的技能,或与 **docs-cleaner** 结合使用可整合多种语言的文档。 + +### 插件与技能故障排除 +使用 **claude-skills-troubleshooting** 诊断和解决 Claude Code 插件和技能配置问题。调试为什么插件显示已安装但未显示在可用技能列表中、了解 installed_plugins.json 与 settings.json enabledPlugins 架构,以及批量启用市场中缺失的插件。非常适合市场维护者调试安装问题、开发者调试技能激活,或任何对 GitHub #17832 自动启用 bug 感到困惑的人。 + ## 📚 文档 每个技能包括: @@ -1405,6 +1549,9 @@ claude plugin install github-contributor@daymade-skills - **macos-cleaner**:参见 `macos-cleaner/references/cleanup_targets.md` 了解详细清理目标说明、`macos-cleaner/references/mole_integration.md` 了解 Mole 可视化工具集成、`macos-cleaner/references/safety_rules.md` 了解全面安全指南 - 
**skill-reviewer**:参见 `skill-reviewer/references/evaluation_checklist.md` 了解完整评估标准、`skill-reviewer/references/pr_template.md` 了解 PR 模板、`skill-reviewer/references/marketplace_template.json` 了解 marketplace 配置 - **github-contributor**:参见 `github-contributor/references/pr_checklist.md` 了解 PR 质量清单、`github-contributor/references/project_evaluation.md` 了解项目评估标准、`github-contributor/references/communication_templates.md` 了解 issue/PR 沟通模板 +- **i18n-expert**:参见 `i18n-expert/SKILL.md` 了解完整的 i18n 设置工作流程、键架构指导和审计程序 +- **claude-skills-troubleshooting**:参见 `claude-skills-troubleshooting/SKILL.md` 了解插件故障排除工作流程和架构 +- **fact-checker**:参见 `fact-checker/SKILL.md` 了解事实核查工作流程和声明验证过程 ## 🛠️ 系统要求 @@ -1506,4 +1653,4 @@ claude plugin install skill-name@daymade-skills **使用 skill-creator 技能为 Claude Code 精心打造 ❤️** -最后更新:2026-01-15 | 市场版本 1.22.0 +最后更新:2026-01-22 | 市场版本 1.24.0 diff --git a/claude-skills-troubleshooting/SKILL.md new file mode 100644 index 0000000..d8c51d6 --- /dev/null +++ b/claude-skills-troubleshooting/SKILL.md @@ -0,0 +1,148 @@ +--- +name: claude-skills-troubleshooting +description: Diagnose and resolve Claude Code plugin and skill issues. This skill should be used when plugins are installed but not showing in available skills list, skills are not activating as expected, or when troubleshooting enabledPlugins configuration in settings.json. Triggers include "plugin not working", "skill not showing", "installed but disabled", or "enabledPlugins" issues. +--- + +# Claude Skills Troubleshooting + +## Overview + +Diagnose and resolve common Claude Code plugin and skill configuration issues. This skill provides systematic debugging workflows for plugin installation, enablement, and activation problems. 
+ +## Quick Diagnosis + +Run the diagnostic script to identify common issues: + +```bash +python3 scripts/diagnose_plugins.py +``` + +The script checks: +- Installed vs enabled plugins mismatch +- Missing enabledPlugins entries in settings.json +- Stale marketplace cache +- Invalid plugin configurations + +## Common Issues + +### Issue 1: Plugin Installed But Not Showing in Available Skills + +**Symptoms:** +- `/plugin` shows plugin as installed +- Skill not appearing in Skill tool's available list +- Plugin metadata exists in `installed_plugins.json` + +**Root Cause:** Known bug ([GitHub #17832](https://github.com/anthropics/claude-code/issues/17832)) - plugins are added to `installed_plugins.json` but NOT automatically added to `enabledPlugins` in `settings.json`. + +**Diagnosis:** +```bash +# Check if plugin is in installed_plugins.json +cat ~/.claude/plugins/installed_plugins.json | grep "plugin-name" + +# Check if plugin is enabled in settings.json +cat ~/.claude/settings.json | grep "plugin-name" +``` + +**Solution:** +```bash +# Option 1: Use CLI to enable +claude plugin enable plugin-name@marketplace-name + +# Option 2: Manually edit settings.json +# Add to enabledPlugins section: +# "plugin-name@marketplace-name": true +``` + +### Issue 2: Understanding Plugin State Architecture + +**Key files:** + +| File | Purpose | +|------|---------| +| `~/.claude/plugins/installed_plugins.json` | Registry of ALL plugins (installed + disabled) | +| `~/.claude/settings.json` → `enabledPlugins` | Controls which plugins are ACTIVE | +| `~/.claude/plugins/known_marketplaces.json` | Registered marketplace sources | +| `~/.claude/plugins/cache/` | Actual plugin files | + +**A plugin is active ONLY when:** +1. Exists in `installed_plugins.json` (registered) +2. 
Listed in `settings.json` → `enabledPlugins` with value `true` + +### Issue 3: Marketplace Cache Stale + +**Symptoms:** +- GitHub has latest changes +- Install finds plugin but gets old version +- Newly added plugins not visible + +**Solution:** +```bash +# Update marketplace cache +claude plugin marketplace update marketplace-name + +# Or clear and re-fetch +rm -rf ~/.claude/plugins/cache/marketplace-name +claude plugin marketplace update marketplace-name +``` + +### Issue 4: Plugin Not Found in Marketplace + +**Common causes (in order of likelihood):** + +1. **Local changes not pushed to GitHub** - Most common! + ```bash + git status + git push + claude plugin marketplace update marketplace-name + ``` + +2. **marketplace.json configuration error** + ```bash + python3 -m json.tool .claude-plugin/marketplace.json + ``` + +3. **Skill directory missing** + ```bash + ls -la skill-name/SKILL.md + ``` + +## Diagnostic Commands Reference + +| Purpose | Command | +|---------|---------| +| List marketplaces | `claude plugin marketplace list` | +| Update marketplace | `claude plugin marketplace update {name}` | +| Install plugin | `claude plugin install {plugin}@{marketplace}` | +| Enable plugin | `claude plugin enable {plugin}@{marketplace}` | +| Disable plugin | `claude plugin disable {plugin}@{marketplace}` | +| Uninstall plugin | `claude plugin uninstall {plugin}@{marketplace}` | +| Check installed | `cat ~/.claude/plugins/installed_plugins.json \| jq '.plugins \| keys'` | +| Check enabled | `cat ~/.claude/settings.json \| jq '.enabledPlugins'` | + +## Batch Enable Missing Plugins + +To enable all installed but disabled plugins from a marketplace: + +```bash +python3 scripts/enable_all_plugins.py marketplace-name +``` + +## Skills vs Commands Architecture + +Claude Code has two types of user-invocable extensions: + +1. **Skills** (in `skills/` directory) + - Auto-activated based on description matching + - Loaded when user request matches skill description + +2. 
**Commands** (in `commands/` directory) + - Explicitly invocable via `/command-name` + - Appears in Skill tool's available list + - Requires command file (e.g., `commands/seer.md`) + +If a skill should be explicitly invocable, add a corresponding command file. + +## References + +- See `references/known_issues.md` for GitHub issue tracking +- See `references/architecture.md` for detailed plugin architecture diff --git a/claude-skills-troubleshooting/references/architecture.md b/claude-skills-troubleshooting/references/architecture.md new file mode 100644 index 0000000..4c2e131 --- /dev/null +++ b/claude-skills-troubleshooting/references/architecture.md @@ -0,0 +1,198 @@ +# Claude Code Plugin Architecture + +## Directory Structure + +``` +~/.claude/ +├── settings.json # User settings including enabledPlugins +├── plugins/ +│ ├── installed_plugins.json # Registry of ALL plugins (enabled + disabled) +│ ├── known_marketplaces.json # Registered marketplace sources +│ ├── marketplaces/ # Marketplace git clones +│ │ ├── marketplace-name/ +│ │ │ └── .claude-plugin/ +│ │ │ └── marketplace.json # Plugin definitions +│ │ └── ... +│ └── cache/ # Installed plugin files +│ └── marketplace-name/ +│ └── plugin-name/ +│ └── version/ +│ └── skill-name/ +│ ├── SKILL.md +│ ├── scripts/ +│ └── references/ +└── skills/ # Personal skills (not from marketplace) +``` + +## Plugin Lifecycle + +### Installation Flow + +``` +1. User runs: claude plugin install plugin@marketplace + ↓ +2. CLI reads marketplace.json from marketplace directory + ↓ +3. Plugin files copied to cache: + ~/.claude/plugins/cache/marketplace/plugin/version/ + ↓ +4. Entry added to installed_plugins.json: + { "plugin@marketplace": [{ "version": "1.0.0", ... }] } + ↓ +5. ⚠️ BUG: Entry NOT automatically added to settings.json enabledPlugins + ↓ +6. User must manually enable: + claude plugin enable plugin@marketplace + ↓ +7. 
Entry added to settings.json: + { "enabledPlugins": { "plugin@marketplace": true } } +``` + +### Activation Flow + +``` +1. Claude Code starts + ↓ +2. Reads settings.json → enabledPlugins + ↓ +3. For each enabled plugin: + - Loads skill metadata (name + description) + - Metadata added to system prompt + ↓ +4. User sends message + ↓ +5. Claude matches message against skill descriptions + ↓ +6. Matching skill's SKILL.md loaded into context + ↓ +7. Claude uses skill instructions to respond +``` + +## Key Files Explained + +### installed_plugins.json + +**Purpose:** Registry of all plugins ever installed (NOT just active ones). + +**Structure:** +```json +{ + "version": 2, + "plugins": { + "plugin-name@marketplace": [ + { + "scope": "user", + "installPath": "~/.claude/plugins/cache/...", + "version": "1.0.0", + "installedAt": "2025-01-01T00:00:00.000Z" + } + ] + } +} +``` + +**Note:** A plugin listed here is NOT necessarily active. Check `settings.json` for actual enabled state. + +### settings.json + +**Purpose:** User preferences and enabled plugins. + +**Relevant section:** +```json +{ + "enabledPlugins": { + "plugin-name@marketplace": true, + "another-plugin@marketplace": true + } +} +``` + +**Important:** Only plugins with `true` value are loaded at startup. + +### known_marketplaces.json + +**Purpose:** Registry of marketplace sources. + +**Structure:** +```json +{ + "marketplace-name": { + "source": { + "source": "github", + "repo": "owner/repo" + }, + "installLocation": "~/.claude/plugins/marketplaces/marketplace-name", + "lastUpdated": "2025-01-01T00:00:00.000Z" + } +} +``` + +### marketplace.json (in marketplace repo) + +**Purpose:** Defines available plugins in a marketplace. + +**Location:** `.claude-plugin/marketplace.json` + +**Structure:** +```json +{ + "name": "marketplace-name", + "metadata": { + "version": "1.0.0", + "description": "..." 
+ }, + "plugins": [ + { + "name": "plugin-name", + "description": "...", + "version": "1.0.0", + "skills": ["./skill-directory"] + } + ] +} +``` + +## Plugin vs Skill vs Command + +### Plugin + +- Distribution unit that packages one or more skills +- Defined in marketplace.json +- Installed via `claude plugin install` + +### Skill + +- Functional unit with SKILL.md and optional resources +- Auto-activates based on description matching +- Located in `skills/` directory + +### Command + +- Explicit slash command (e.g., `/seer`) +- Defined in `commands/` directory +- Appears in Skill tool's available list +- Must be explicitly invoked by user + +## Scopes + +Plugins can be installed in different scopes: + +| Scope | Location | Visibility | +|-------|----------|------------| +| user | `~/.claude/settings.json` | All projects for current user | +| project | `.claude/settings.json` | Team members via git | +| local | `.claude/settings.local.json` | Only local machine | + +## Common Misconceptions + +1. **installed_plugins.json = active plugins** + - Reality: It's a registry of ALL plugins, including disabled ones + +2. **Plugins auto-enable after install** + - Reality: Bug prevents auto-enable; manual step required + +3. **Updating local files updates the plugin** + - Reality: Must push to GitHub, then update marketplace cache + +4. **Cache is just for performance** + - Reality: Cache IS where plugins live; deleting it uninstalls plugins diff --git a/claude-skills-troubleshooting/references/known_issues.md b/claude-skills-troubleshooting/references/known_issues.md new file mode 100644 index 0000000..68012ec --- /dev/null +++ b/claude-skills-troubleshooting/references/known_issues.md @@ -0,0 +1,72 @@ +# Known Claude Code Plugin Issues + +This document tracks known bugs and issues related to Claude Code plugins. 
+ +## Open Issues + +### GitHub #17832 - Plugins Not Auto-Enabled After Install + +**Status:** OPEN +**URL:** https://github.com/anthropics/claude-code/issues/17832 + +**Problem:** When installing a plugin from a marketplace, Claude Code adds the plugin to `installed_plugins.json` but does NOT add it to `settings.json` `enabledPlugins`. + +**Impact:** Plugins appear "installed" but don't function. Skills silently fail to load. + +**Workaround:** Manually enable via CLI or edit settings.json: +```bash +claude plugin enable plugin-name@marketplace +``` + +--- + +### GitHub #19696 - installed_plugins.json Naming Misleading + +**Status:** OPEN +**URL:** https://github.com/anthropics/claude-code/issues/19696 + +**Problem:** The file `installed_plugins.json` contains ALL plugins ever registered, including disabled ones. The actual enabled state is tracked separately in `settings.json`. + +**Impact:** Confusing for developers - file shows many plugins but only some are active. + +**Note:** This is a naming/documentation issue, not a functional bug. + +--- + +### GitHub #17089 - Local Plugins Breaking After 2.1.x Update + +**Status:** Reported +**URL:** https://github.com/anthropics/claude-code/issues/17089 + +**Problem:** Local plugins no longer persist after the 2.1.x update. + +**Workaround:** Create a marketplace wrapper structure for local plugins. + +--- + +### GitHub #13543 - MCP Servers from Marketplace Not Available + +**Status:** Reported +**URL:** https://github.com/anthropics/claude-code/issues/13543 + +**Problem:** After updating to Claude Code 2.0.64, MCP servers defined in marketplace plugins were no longer available. + +--- + +### GitHub #16260 - Contradictory Scope Error Messages + +**Status:** Reported +**URL:** https://github.com/anthropics/claude-code/issues/16260 + +**Problem:** CLI gives contradictory error messages about plugin scope. + +**Workaround:** Manually edit settings.json to fix scope issues. 
+ +## Resolved Issues + +(Add resolved issues here as they are fixed) + +## Related Documentation + +- [Plugins Reference](https://code.claude.com/docs/en/plugins-reference) +- [Claude Code Settings](https://code.claude.com/docs/en/settings) diff --git a/claude-skills-troubleshooting/scripts/diagnose_plugins.py b/claude-skills-troubleshooting/scripts/diagnose_plugins.py new file mode 100755 index 0000000..c20e1aa --- /dev/null +++ b/claude-skills-troubleshooting/scripts/diagnose_plugins.py @@ -0,0 +1,182 @@ +#!/usr/bin/env python3 +""" +Diagnose Claude Code plugin and skill configuration issues. + +This script checks for common problems: +- Installed plugins not enabled in settings.json +- Stale marketplace cache +- Missing plugin files +- Configuration inconsistencies +""" + +import json +import os +from pathlib import Path +from datetime import datetime + + +def get_claude_dir(): + """Get the Claude configuration directory.""" + return Path.home() / ".claude" + + +def load_json_file(path): + """Load a JSON file, return None if not found.""" + try: + with open(path, 'r') as f: + return json.load(f) + except (FileNotFoundError, json.JSONDecodeError) as e: + return None + + +def check_installed_plugins(): + """Check installed_plugins.json.""" + claude_dir = get_claude_dir() + installed_path = claude_dir / "plugins" / "installed_plugins.json" + + data = load_json_file(installed_path) + if not data: + print("❌ Cannot read installed_plugins.json") + return {} + + plugins = data.get("plugins", {}) + print(f"📦 Found {len(plugins)} registered plugins in installed_plugins.json") + return plugins + + +def check_enabled_plugins(): + """Check enabledPlugins in settings.json.""" + claude_dir = get_claude_dir() + settings_path = claude_dir / "settings.json" + + data = load_json_file(settings_path) + if not data: + print("❌ Cannot read settings.json") + return {} + + enabled = data.get("enabledPlugins", {}) + enabled_count = sum(1 for v in enabled.values() if v) + print(f"✅ 
Found {enabled_count} enabled plugins in settings.json") + return enabled + + +def check_marketplaces(): + """Check registered marketplaces.""" + claude_dir = get_claude_dir() + marketplaces_path = claude_dir / "plugins" / "known_marketplaces.json" + + data = load_json_file(marketplaces_path) + if not data: + print("❌ Cannot read known_marketplaces.json") + return {} + + print(f"🏪 Found {len(data)} registered marketplaces:") + for name, info in data.items(): + last_updated = info.get("lastUpdated", "unknown") + print(f" - {name} (updated: {last_updated[:10] if len(last_updated) > 10 else last_updated})") + return data + + +def find_missing_enabled(installed, enabled): + """Find plugins that are installed but not enabled.""" + missing = [] + + for plugin_name in installed.keys(): + if plugin_name not in enabled or not enabled.get(plugin_name): + missing.append(plugin_name) + + return missing + + +def check_cache_freshness(marketplaces): + """Check if marketplace caches are stale.""" + claude_dir = get_claude_dir() + cache_dir = claude_dir / "plugins" / "cache" + + stale = [] + for name, info in marketplaces.items(): + marketplace_cache = cache_dir / name + if marketplace_cache.exists(): + # Check modification time + mtime = datetime.fromtimestamp(marketplace_cache.stat().st_mtime) + age_days = (datetime.now() - mtime).days + if age_days > 7: + stale.append((name, age_days)) + + return stale + + +def main(): + print("=" * 60) + print("Claude Code Plugin Diagnostics") + print("=" * 60) + print() + + # Check installed plugins + installed = check_installed_plugins() + print() + + # Check enabled plugins + enabled = check_enabled_plugins() + print() + + # Check marketplaces + marketplaces = check_marketplaces() + print() + + # Find missing enabled + missing = find_missing_enabled(installed, enabled) + if missing: + print("=" * 60) + print(f"⚠️ WARNING: {len(missing)} plugins installed but NOT enabled!") + print("=" * 60) + print() + print("These plugins exist in 
installed_plugins.json but are missing") + print("from enabledPlugins in settings.json:") + print() + for plugin in sorted(missing): + print(f" - {plugin}") + print() + print("To enable, run:") + print(" claude plugin enable ") + print() + print("Or add to ~/.claude/settings.json under enabledPlugins:") + print(' "plugin-name@marketplace": true') + print() + else: + print("✅ All installed plugins are enabled!") + print() + + # Check cache freshness + stale = check_cache_freshness(marketplaces) + if stale: + print("=" * 60) + print("⚠️ Stale marketplace caches detected:") + print("=" * 60) + for name, days in stale: + print(f" - {name}: {days} days old") + print() + print("To update, run:") + print(" claude plugin marketplace update ") + print() + + # Summary + print("=" * 60) + print("Summary") + print("=" * 60) + print(f" Registered plugins: {len(installed)}") + print(f" Enabled plugins: {sum(1 for v in enabled.values() if v)}") + print(f" Missing enabled: {len(missing)}") + print(f" Marketplaces: {len(marketplaces)}") + print() + + if missing: + print("🔧 Action needed: Enable missing plugins to make them available") + return 1 + else: + print("✅ No issues detected!") + return 0 + + +if __name__ == "__main__": + exit(main()) diff --git a/claude-skills-troubleshooting/scripts/enable_all_plugins.py b/claude-skills-troubleshooting/scripts/enable_all_plugins.py new file mode 100755 index 0000000..149f0b1 --- /dev/null +++ b/claude-skills-troubleshooting/scripts/enable_all_plugins.py @@ -0,0 +1,102 @@ +#!/usr/bin/env python3 +""" +Enable all installed but disabled plugins from a specific marketplace. 
#!/usr/bin/env python3
"""
Enable all installed but disabled plugins from a specific marketplace.

Usage:
    python3 enable_all_plugins.py <marketplace-name>

Example:
    python3 enable_all_plugins.py daymade-skills
"""

import json
import sys
from pathlib import Path


def get_claude_dir():
    """Return the Claude configuration directory (~/.claude)."""
    return Path.home() / ".claude"


def load_json_file(path):
    """Load and return the JSON content of *path*.

    Raises FileNotFoundError or json.JSONDecodeError on failure; callers
    are expected to handle both.
    """
    with open(path, "r") as f:
        return json.load(f)


def save_json_file(path, data):
    """Write *data* to *path* as pretty-printed JSON with a trailing newline."""
    with open(path, "w") as f:
        json.dump(data, f, indent=2)
        f.write("\n")


def main():
    if len(sys.argv) < 2:
        print("Usage: python3 enable_all_plugins.py <marketplace-name>")
        print("Example: python3 enable_all_plugins.py daymade-skills")
        return 1

    marketplace = sys.argv[1]
    claude_dir = get_claude_dir()

    # Registry of ALL plugins ever installed (enabled or not).
    installed_path = claude_dir / "plugins" / "installed_plugins.json"
    try:
        installed_data = load_json_file(installed_path)
    except FileNotFoundError:
        print(f"❌ Cannot find {installed_path}")
        return 1
    except json.JSONDecodeError as err:
        print(f"❌ Cannot parse {installed_path}: {err}")
        return 1

    # settings.json holds the authoritative enabled state.
    settings_path = claude_dir / "settings.json"
    try:
        settings = load_json_file(settings_path)
    except FileNotFoundError:
        print(f"❌ Cannot find {settings_path}")
        return 1
    except json.JSONDecodeError as err:
        print(f"❌ Cannot parse {settings_path}: {err}")
        return 1

    enabled = settings.get("enabledPlugins", {})

    # Plugins from this marketplace that are registered but not enabled.
    plugins_to_enable = [
        name for name in installed_data.get("plugins", {})
        if name.endswith(f"@{marketplace}") and not enabled.get(name)
    ]

    if not plugins_to_enable:
        print(f"✅ All plugins from {marketplace} are already enabled!")
        return 0

    print(f"Found {len(plugins_to_enable)} plugins to enable from {marketplace}:")
    for plugin in sorted(plugins_to_enable):
        print(f"  - {plugin}")

    # Confirm before mutating settings.json.
    print()
    response = input("Enable all these plugins? [y/N] ")
    if response.strip().lower() not in ("y", "yes"):
        print("Cancelled.")
        return 0

    settings.setdefault("enabledPlugins", {})
    for plugin in plugins_to_enable:
        settings["enabledPlugins"][plugin] = True

    save_json_file(settings_path, settings)

    print()
    print(f"✅ Enabled {len(plugins_to_enable)} plugins!")
    print()
    print("⚠️  Restart Claude Code for changes to take effect.")
    return 0


if __name__ == "__main__":
    exit(main())
+- RTL support (only if RTL locales are in scope). + +## Scope Inputs (ask if unclear) + +- Framework and routing style. +- Existing i18n state (none, partial, legacy). +- Target locales (default: en-US + zh-CN). +- Translation quality needs (AI vs professional vs manual). +- Locale formats in use (JSON, YAML, PO, XLIFF). +- Formality/cultural requirements (if any). + +## Workflow (Audit -> Fix -> Validate) + +1) Confirm scope and locale targets +- Identify the i18n framework and locale locations. +- Confirm locales; default to en-US + zh-CN when specified. + +2) Setup i18n baseline (if missing) +- Choose a framework-appropriate library (e.g., React: react-i18next; Next.js: next-intl; Vue: vue-i18n). +- Install packages and create the i18n entry/config file. +- Wire the provider at the app root and load locale resources. +- Add a language switcher and persistence (route/param/localStorage) as appropriate. +- Establish locale file layout and key namespaces. +- If routing is locale-aware, define the locale segment strategy early (subpath, subdomain, query param). + - If metadata is user-facing, include translation for titles/descriptions. + +3) Audit key usage and locale parity +- Run: + ```bash + python scripts/i18n_audit.py --src --locale --locale + ``` +- Treat missing keys/parity gaps as blockers. +- Manually verify dynamic keys (`t(var)`). + +4) Find raw user-facing strings +- Search: + ```bash + rg -n --glob '/**/*.{ts,tsx,js,jsx}' "<[^>]+>[^<{]*[A-Za-z][^<{]*<" + rg -n --glob '/**/*.{ts,tsx,js,jsx}' "aria-label=\"[^\"]+\"|title=\"[^\"]+\"|placeholder=\"[^\"]+\"" + ``` +- Localize accessibility labels. + +5) Replace strings with keys +- Use `t('namespace.key')` for UI text. +- For plurals use `t('key', { count })` + `_one/_other` keys. +- Use Intl/app formatters for time/date/number. + +6) Localize error handling (critical) +- Map error codes to localized keys; show localized UI only. +- Log raw error details only. 
+- Provide localized fallback for unknown codes. + +7) Update locale files +- Add missing keys in both locales. +- Keep placeholders consistent; avoid renames unless requested. +- Generate translations using the agreed method; preserve placeholders and plural rules. + +8) Validate +- Re-run the audit until missing/parity issues are zero. +- Validate JSON (e.g., `python -m json.tool `). +- Update tests asserting visible text. + +## Guardrails + +- Never expose raw `error.message` to UI; show localized strings only. +- Do not add extra locales unless explicitly requested. +- Prefer structured namespaces (e.g., `errors.*`, `buttons.*`, `workspace.*`). +- Keep translations concise and consistent. +- Some technical/brand terms should remain untranslated (e.g., product name, API, MCP, Bash). + +## Deliverables (expected outputs) + +- i18n config/provider wiring. +- Locale files for each target language. +- Replaced UI strings with stable keys. +- Language switcher and persistence (if applicable). +- Updated tests for visible text. + +## Architecture Guidance (keep concise) + +- Key structure: prefer nested namespaces by area (e.g., `common.buttons.save`, `pricing.tier.pro`). +- File layout: one file per locale or per-locale namespaces; keep keys in sync across locales. +- Placeholders: preserve `{name}`/`{{name}}` exactly; validate plurals by locale rules. +- Formatting: use Intl/app helpers for date, time, number, and list formatting. +- SEO/metadata: localize titles and descriptions if the app exposes them. +- RTL: only needed for RTL locales; use logical CSS properties and test layout. +- Non-web surfaces (Electron main-process dialogs, CLI prompts, native menus) need localization too. + +## Performance Notes (short) + +- Lazy-load locale bundles when the app supports it. +- Split large locale files by namespace. + +## Failure Modes (watchlist) + +- Missing translations: fall back to default locale and log warnings. 
+- RTL layout issues: verify logical CSS and test pages. +- SEO missing: ensure alternates and metadata are localized when applicable. + +## Validation Checklist (short) + +- No missing keys and no raw UI strings. +- Locale switching works and persists. +- Plurals and formatting verified in both locales. + - Fallback locale configured. + +## Resources + +### scripts/ +- `scripts/i18n_audit.py`: Extracts `t('key')` usage and compares against locale JSON files. diff --git a/i18n-expert/scripts/i18n_audit.py b/i18n-expert/scripts/i18n_audit.py new file mode 100644 index 0000000..ad3f83a --- /dev/null +++ b/i18n-expert/scripts/i18n_audit.py @@ -0,0 +1,163 @@ +#!/usr/bin/env python3 +"""Audit i18n key usage vs locale JSON files. + +Usage: + python scripts/i18n_audit.py --src ./src --locale ./locales/en-US/common.json --locale ./locales/zh-CN/common.json +""" + +from __future__ import annotations + +import argparse +import json +import re +import sys +from pathlib import Path +from typing import Dict, Iterable, List, Set + +KEY_PATTERNS = [ + re.compile(r"(? 
def iter_source_files(root: Path, exts: Iterable[str]) -> Iterable[Path]:
    """Yield files under *root* whose extension (sans dot) is in *exts*.

    Skips node_modules and .git trees entirely.
    """
    wanted = set(exts)  # O(1) membership per file
    for path in root.rglob("*"):
        if path.is_dir():
            continue
        if "node_modules" in path.parts or ".git" in path.parts:
            continue
        if path.suffix.lstrip(".") in wanted:
            yield path


def extract_keys(paths: Iterable[Path]) -> Set[str]:
    """Return every translation key matched by KEY_PATTERNS in *paths*.

    Files that are not valid UTF-8 are silently skipped.
    """
    keys: Set[str] = set()
    for path in paths:
        try:
            text = path.read_text(encoding="utf-8")
        except UnicodeDecodeError:
            continue
        for pattern in KEY_PATTERNS:
            for match in pattern.finditer(text):
                keys.add(match.group(1))
    return keys


def flatten_json(obj, prefix: str = "") -> Dict[str, object]:
    """Flatten nested dicts into dot-separated keys (leaf values kept)."""
    keys: Dict[str, object] = {}
    if isinstance(obj, dict):
        for key, value in obj.items():
            path = f"{prefix}.{key}" if prefix else key
            keys.update(flatten_json(value, path))
    else:
        keys[prefix] = obj
    return keys


def locale_name(path: Path) -> str:
    """Derive a locale label from a locale file path.

    For generic file names (common.json etc.) the parent directory name
    is used (e.g. locales/en-US/common.json -> "en-US"); otherwise the
    file stem (e.g. locales/en-US.json -> "en-US").
    """
    name = path.stem
    if path.name in {"common.json", "translation.json", "messages.json"}:
        parent = path.parent.name
        if parent:
            return parent
    return name


def has_plural_variant(key: str, key_set: Set[str]) -> bool:
    """True if any pluralized form of *key* (key + suffix) exists in *key_set*."""
    return any(f"{key}{suffix}" in key_set for suffix in PLURAL_SUFFIXES)


def load_locale(path: Path) -> Set[str]:
    """Load a locale JSON file and return its flattened key set."""
    with path.open(encoding="utf-8") as handle:
        data = json.load(handle)
    return set(flatten_json(data).keys())


def compute_missing(used: Set[str], locale_keys: Set[str]) -> List[str]:
    """Return used keys absent from *locale_keys* (plural variants count as present)."""
    missing = []
    for key in sorted(used):
        if key in locale_keys:
            continue
        if has_plural_variant(key, locale_keys):
            continue
        missing.append(key)
    return missing


def main() -> int:
    """CLI entry point; returns a process exit status (always 0)."""
    parser = argparse.ArgumentParser(description="Audit i18n keys against locale JSON files.")
    parser.add_argument("--src", default=".", help="Source root to scan")
    parser.add_argument(
        "--locale",
        action="append",
        required=True,
        help="Path to locale JSON (repeatable)",
    )
    # NOTE: default must be None here, not a list -- with action="append"
    # argparse appends user-supplied values onto a list default instead of
    # replacing it, so "--ext py" would have scanned ts,tsx,js,jsx,py.
    parser.add_argument(
        "--ext",
        action="append",
        default=None,
        help="File extensions to scan (repeatable; default: ts tsx js jsx)",
    )
    parser.add_argument("--json", action="store_true", help="Emit JSON output")
    args = parser.parse_args()

    exts = args.ext if args.ext else ["ts", "tsx", "js", "jsx"]

    src_root = Path(args.src).resolve()
    locale_paths = [Path(p).resolve() for p in args.locale]

    source_files = list(iter_source_files(src_root, exts))
    used_keys = extract_keys(source_files)

    locale_key_map: Dict[str, Set[str]] = {}
    for path in locale_paths:
        locale_key_map[locale_name(path)] = load_locale(path)

    # Keys used in source but absent from each locale.
    missing_by_locale = {
        name: compute_missing(used_keys, keys) for name, keys in locale_key_map.items()
    }

    # Keys defined in a locale but never referenced in source.
    unused_by_locale = {
        name: sorted(keys - used_keys) for name, keys in locale_key_map.items()
    }

    # Keys present in some other locale but missing from this one.
    parity_missing: Dict[str, List[str]] = {}
    locale_names = list(locale_key_map.keys())
    for name in locale_names:
        other_names = [n for n in locale_names if n != name]
        other_keys = set().union(*(locale_key_map[n] for n in other_names))
        parity_missing[name] = sorted(other_keys - locale_key_map[name])

    output = {
        "used_keys": sorted(used_keys),
        "missing_by_locale": missing_by_locale,
        "unused_by_locale": unused_by_locale,
        "parity_missing": parity_missing,
    }

    if args.json:
        json.dump(output, sys.stdout, indent=2, ensure_ascii=False)
        sys.stdout.write("\n")
        return 0

    print(f"Scanned {len(source_files)} files")
    print(f"Used keys: {len(used_keys)}")
    for name in locale_names:
        missing = missing_by_locale[name]
        unused = unused_by_locale[name]
        parity = parity_missing[name]
        print(f"\nLocale: {name}")
        print(f"  Missing: {len(missing)}")
        for key in missing[:50]:
            print(f"    - {key}")
        if len(missing) > 50:
            print("    ...")
        print(f"  Unused: {len(unused)}")
        print(f"  Parity missing: {len(parity)}")
    return 0


if __name__ == "__main__":
    raise SystemExit(main())
from YouTube and HLS streaming platfor - HLS stream downloads with authentication headers - Handling protected content and troubleshooting common download failures +## Non-Technical User Experience (Default) + +Assume the user is non-technical. Do not ask them to run commands. Execute everything yourself and report progress in plain language. Avoid mentioning tooling unless the user asks. + +**Default flow:** +1. Ask for the URL (if not provided). +2. Fetch video metadata (title/uploader/duration/thumbnail) and confirm it matches the user's intent. + - If yt-dlp is blocked by “confirm you’re not a bot”, fall back to YouTube oEmbed for title/uploader/thumbnail (duration may be unknown). +3. Offer simple choices (video vs. audio-only, quality, subtitles, save location). +4. Proceed with sensible defaults if the user does not specify: + - Video download at best quality + - MP4 merged output + - Single video only (no playlists) +5. Download and report the final file path, file size, and resolution (if video). + +**Offer choices in user-friendly terms:** +- “Download the video in best quality (default)” +- “Download audio only (MP3)” +- “Pick a quality: 1080p / 720p / 480p / 360p” +- “Include subtitles (if available)” +- “Save to the Downloads folder (default) or tell me another folder” + +**Always render the thumbnail when available:** +- If metadata includes a thumbnail URL, include it using Markdown image syntax: `![Thumbnail](URL)`. + +**Ask before doing extra work:** +- Confirm playlist downloads (can be large). +- Confirm installing/upgrading dependencies if missing. +- Ask before extracting browser cookies. +- If using cookies, never mention cookie counts or raw cookie details in user-facing responses. Say “used your Chrome login session”. +- If verification is required, automatically set up a local PO Token helper (no user actions). If Docker is missing or fails, do **not** attempt to install Docker—switch to the browser-based PO Token provider instead. 
+ +**Legal/Safety reminder (brief):** +- Proceed only if the user has the rights or permission to download the content. + +**Response template (use plain language, no commands):** +``` +![Thumbnail](THUMBNAIL_URL) + +Title: … +Channel: … +Duration: … + +I can help you: +1) Download the video (best quality, MP4) +2) Download audio only (MP3) +3) Pick a specific quality (1080p/720p/480p/360p) +4) Include subtitles (if available) + +Where should I save it? (Default: Downloads folder) +``` + +**If the user says “just download”:** +- Proceed with defaults and confirm when the download finishes. + - If blocked by a 403, automatically set up the verification helper and retry. + +## Reliable Download SOP (Internal) + +Follow this SOP to avoid common failures and confusion: + +1. Quote URLs in shell commands (zsh treats `?` as a glob). Example: `'https://www.youtube.com/watch?v=VIDEO_ID'`. +2. Ensure proxy is active for both yt-dlp and PO Token providers (HTTP_PROXY/HTTPS_PROXY/ALL_PROXY). +3. If you see “Sign in to confirm you’re not a bot”, request permission and use browser cookies. Do not proceed without cookies. +4. Start a PO Token provider before downloading (fail fast if it cannot start). + - Use Docker bgutil provider when available. + - If Docker is missing or fails, switch to browser-based WPC provider. +5. If cookies are in use, prefer the `web_safari` player client. Otherwise prefer `mweb` for PO tokens. +6. Keep the browser window open while WPC is minting tokens. Ensure Chrome can reach YouTube through the same proxy. +7. If you get “Only images are available” or “Requested format is not available”, treat it as a PO Token failure and retry after fixing token provider/browser state. +8. If you get SSL EOF or fragment errors, treat it as a proxy/network issue. Retry with progressive formats and/or a better proxy. 
+ +## Agent Execution Checklist (Internal) + +- Run `scripts/download_video.py URL --info` (add `--cookies-from-browser chrome` if permission granted) to fetch metadata and thumbnail. +- If yt-dlp metadata fails, rely on the script’s oEmbed fallback for title/uploader/thumbnail and note that duration may be unavailable. +- If a thumbnail URL is present, render it in the response with Markdown image syntax. +- Ask the user to choose video vs. audio-only and (optionally) a quality preset. +- Use a friendly default save location (Downloads folder) unless the user specifies a folder. +- For subtitles, run with `--subtitles` and the requested `--sub-lang`. +- After download, report file name, size, and resolution (if video) in plain language. +- If download fails with 403/fragment errors, retry once with non-m3u8 progressive formats. +- If “Sign in to confirm you’re not a bot” appears, request cookie access and retry with cookies + `web_safari`. +- If “Only images are available” appears, treat it as PO Token failure and retry after fixing provider/browser state. +- Start the PO Token provider before downloads (`--auto-po-token` default). Fail fast if it cannot start. +- If Docker-based provider fails (common in China), automatically fall back to the browser-based WPC provider (it may briefly open a browser window). +- If the WPC provider is used, keep the browser window open until download starts. If the browser fails to launch, set the Chrome path explicitly. +- If the PO Token provider times out, restart it once and retry. +- If a system proxy is configured, pass it into the provider container. If the proxy points to 127.0.0.1/localhost, rewrite it to `host.docker.internal` for Docker. + ## When to Use This Skill This skill should be invoked when users: @@ -27,7 +116,7 @@ This skill should be invoked when users: ## Prerequisites -### 1. Verify yt-dlp Installation +### 1. 
Verify yt-dlp Installation (Run this yourself) ```bash which yt-dlp @@ -44,7 +133,7 @@ pip install --upgrade yt-dlp # Cross-platform **Critical**: Outdated yt-dlp versions cause nsig extraction failures and missing formats. -### 2. Check Current Quality Access +### 2. Check Current Quality Access (Run this yourself) Before downloading, check available formats: @@ -61,11 +150,11 @@ yt-dlp -F "https://youtu.be/VIDEO_ID" For 1080p/1440p/4K access, install a PO token provider plugin into yt-dlp's Python environment: ```bash -# Find yt-dlp's Python path +# Find yt-dlp's Python path (interpreter used by yt-dlp) head -1 $(which yt-dlp) -# Install plugin (adjust path to match yt-dlp version) -/opt/homebrew/Cellar/yt-dlp/$(yt-dlp --version)/libexec/bin/python -m pip install bgutil-ytdlp-pot-provider +# Install plugin using the interpreter from the line above + -m pip install bgutil-ytdlp-pot-provider ``` **Verification**: Run `yt-dlp -F "VIDEO_URL"` again. Look for formats 137 (1080p), 271 (1440p), or 313 (4K). @@ -111,11 +200,14 @@ yt-dlp --cookies-from-browser chrome -f "bestvideo[height<=1080]+bestaudio/best" ``` **Benefits**: Access to age-restricted and members-only content. -**Requirement**: Must be logged into YouTube in the specified browser. +**Requirements**: +- Must be logged into YouTube in the specified browser. +- Browser and yt-dlp must use the same IP/proxy. +- Do not use Android client with cookies (Android client does not support cookies). 
## Common Tasks -### Audio-Only Download +### Audio-Only Download (Run this yourself) Extract audio as MP3: @@ -123,25 +215,25 @@ Extract audio as MP3: yt-dlp -x --audio-format mp3 "VIDEO_URL" ``` -### Custom Output Directory +### Custom Output Directory (Run this yourself) ```bash yt-dlp -P ~/Downloads/YouTube "VIDEO_URL" ``` -### Download with Subtitles +### Download with Subtitles (Run this yourself) ```bash yt-dlp --write-subs --sub-lang en "VIDEO_URL" ``` -### Playlist Download +### Playlist Download (Run this yourself) ```bash yt-dlp -f "bestvideo[height<=1080]+bestaudio/best" "PLAYLIST_URL" ``` -### Convert WebM to MP4 +### Convert WebM to MP4 (Run this yourself) YouTube high-quality downloads often use WebM format (VP9 codec). Convert to MP4 for wider compatibility: @@ -173,6 +265,24 @@ ffmpeg -i "video.webm" -c:v libx264 -preset medium -crf 23 -c:a aac -b:a 128k "v 2. Install PO token provider (see Step 1 above) 3. Or use browser cookies method +### Sign in to Confirm You’re Not a Bot + +**Cause**: YouTube requires authentication to proceed. + +**Solution**: +1. Request permission and use browser cookies (`--cookies-from-browser chrome`). +2. Ensure the browser and yt-dlp use the same IP/proxy. +3. Retry with `web_safari` client if needed. + +### Only Images Available / Requested Format Not Available + +**Cause**: PO tokens not applied or provider/browser verification failed. + +**Solution**: +1. Verify PO Token provider is running before download. +2. Keep the browser window open if using WPC. +3. If cookies are in use, prefer `web_safari` client and retry. + ### nsig Extraction Failed **Symptoms**: @@ -183,7 +293,16 @@ WARNING: [youtube] nsig extraction failed: Some formats may be missing **Solution**: 1. Update yt-dlp to latest version 2. Install PO token provider -3. If still failing, use Android client: `yt-dlp --extractor-args "youtube:player_client=android" "VIDEO_URL"` +3. 
If still failing and PO tokens are disabled, use Android client: `yt-dlp --extractor-args "youtube:player_client=android" "VIDEO_URL"` + +### SSL EOF / Fragment Errors + +**Cause**: Proxy or network instability. + +**Solution**: +1. Retry with progressive formats (non-m3u8). +2. Switch to a more stable proxy/node. +3. Avoid closing the PO token browser window during download. ### Slow Downloads or Network Errors @@ -204,7 +323,7 @@ WARNING: android client https formats require a GVS PO Token ### scripts/download_video.py -A convenience wrapper that applies Android client workaround by default: +Use this convenience wrapper to auto-start a PO Token provider by default for high-quality downloads. Use it yourself and report results to the user without asking them to run commands. **Basic usage:** ```bash @@ -214,20 +333,34 @@ scripts/download_video.py "VIDEO_URL" **Arguments:** - `url` - YouTube video URL (required) - `-o, --output-dir` - Output directory +- `--output-template` - Output filename template (yt-dlp syntax) - `-f, --format` - Format specification +- `-q, --quality` - Quality preset (best, 1080p, 720p, 480p, 360p, worst). 
Default: best (skipped for `--audio-only`) - `-a, --audio-only` - Extract audio as MP3 +- `--subtitles` - Download subtitles if available +- `--sub-lang` - Subtitle languages (comma-separated, default: en) +- `--cookies-from-browser` - Load cookies from a browser (e.g., chrome, firefox) +- `--cookies-file` - Load cookies from a cookies.txt file +- `--player-client` - Use a specific YouTube player client (e.g., web_safari) +- `--auto-po-token` - Auto-start PO Token provider (default; uses Docker if available, otherwise switches to browser-based provider) +- `--no-auto-po-token` - Disable auto PO Token setup +- `--proxy` - Proxy URL for yt-dlp and the PO Token provider (e.g., http://127.0.0.1:1082) +- `--wpc-browser-path` - Browser executable path for WPC provider - `-F, --list-formats` - List available formats -- `--no-android-client` - Disable Android client workaround +- `--merge-format` - Merge output container (e.g., mp4, mkv). Default: mp4 +- `--playlist` - Allow playlist downloads (default: single video only) +- `--info` - Print title/uploader/duration/thumbnail and exit +- `--no-android-client` - Disable Android client fallback -**Note**: This script uses Android client (360p only without PO tokens). For high quality, use yt-dlp directly with PO token provider. +**Note**: Use the Android client only when PO tokens are disabled. Keep PO tokens enabled for high quality. 
## Quality Expectations | Setup | 360p | 720p | 1080p | 1440p | 4K | |-------|------|------|-------|-------|-----| -| No setup (default) | ✗ | ✗ | ✗ | ✗ | ✗ | +| **Auto PO token (default)** | ✓ | ✓ | ✓ | ✓ | ✓ | | Android client only | ✓ | ✗ | ✗ | ✗ | ✗ | -| **PO token provider** | ✓ | ✓ | ✓ | ✓ | ✓ | +| PO token provider (manual) | ✓ | ✓ | ✓ | ✓ | ✓ | | Browser cookies | ✓ | ✓ | ✓ | ✓ | ✓ | ## HLS Stream Downloads (m3u8) diff --git a/youtube-downloader/references/po-token-setup.md b/youtube-downloader/references/po-token-setup.md index b33544a..efed7f9 100644 --- a/youtube-downloader/references/po-token-setup.md +++ b/youtube-downloader/references/po-token-setup.md @@ -16,6 +16,41 @@ With PO token provider: **Full access** to all quality levels including 4K. ## Recommended Solution: PO Token Provider Plugin +## Operational SOP (Internal) + +Use this checklist to prevent common failures: + +1. Quote URLs in shell commands to avoid zsh globbing (`'https://www.youtube.com/watch?v=VIDEO_ID'`). +2. Ensure proxy is active for yt-dlp and token providers (HTTP_PROXY/HTTPS_PROXY/ALL_PROXY). +3. If YouTube asks to confirm you’re not a bot, use browser cookies. Do not proceed without cookies. +4. Start the PO token provider before downloading. + - Prefer Docker bgutil when available. + - Fall back to WPC (browser) if Docker is missing or fails. +5. Use `web_safari` when cookies are present; use `mweb` otherwise for PO tokens. +6. Keep the browser window open during WPC token minting. +7. If you see “Only images are available” or “Requested format is not available”, treat it as PO token failure and retry after fixing provider/browser state. +8. If you see SSL EOF/fragment errors, treat it as proxy instability and retry with progressive formats or a better proxy. + +### Automatic Setup (Preferred for non-technical users) + +If Docker is available, you can start the PO token provider automatically: + +1. 
Install the plugin into yt-dlp's Python environment (one-time): +```bash + -m pip install bgutil-ytdlp-pot-provider +``` +In China, prefer a local PyPI mirror: +```bash + -m pip install bgutil-ytdlp-pot-provider -i https://pypi.tuna.tsinghua.edu.cn/simple +``` + +2. Start the provider (Docker): +```bash +docker run -d --name bgutil-pot-provider -p 4416:4416 --init brainicism/bgutil-ytdlp-pot-provider +``` + +3. Retry yt-dlp downloads using a web client (e.g., `mweb`) so PO tokens apply. + ### Installation Install a PO token provider plugin to handle token generation automatically. The plugin must be installed into yt-dlp's own Python environment. @@ -31,7 +66,7 @@ head -1 $(which yt-dlp) ```bash # For Homebrew-installed yt-dlp (macOS) -/opt/homebrew/Cellar/yt-dlp/$(yt-dlp --version)/libexec/bin/python -m pip install bgutil-ytdlp-pot-provider + -m pip install bgutil-ytdlp-pot-provider # For pip-installed yt-dlp python3 -m pip install bgutil-ytdlp-pot-provider --user @@ -61,7 +96,19 @@ Look for high-quality formats (137, 248, 271, 313) in the output. If present, th - **Method**: Launches browser to mint tokens - **Best for**: Users comfortable with browser automation -### 3. yt-dlp-get-pot-rustypipe +### 3. yt-dlp-getpot-wpc (Browser-based, no Docker) + +- **Installation**: `pip install yt-dlp-getpot-wpc` +- **Requires**: yt-dlp 2025.09.26 or above +- **Method**: Uses a browser window to mint tokens +- **Best for**: Environments without Docker or restricted networks + +In China, prefer a local PyPI mirror: +```bash +pip install yt-dlp-getpot-wpc -i https://pypi.tuna.tsinghua.edu.cn/simple +``` + +### 4. 
yt-dlp-get-pot-rustypipe - **Installation**: `pip install yt-dlp-get-pot-rustypipe` - **Method**: Uses rustypipe-botguard diff --git a/youtube-downloader/scripts/download_video.py b/youtube-downloader/scripts/download_video.py index 9041af4..33cd4e1 100755 --- a/youtube-downloader/scripts/download_video.py +++ b/youtube-downloader/scripts/download_video.py @@ -13,26 +13,691 @@ Requirements: Usage: scripts/download_video.py "https://youtu.be/VIDEO_ID" scripts/download_video.py "https://youtu.be/VIDEO_ID" --audio-only + scripts/download_video.py "https://youtu.be/VIDEO_ID" --quality 1080p scripts/download_video.py "https://youtu.be/VIDEO_ID" -o ~/Downloads Note: - This script uses Android client workaround, which provides 360p quality only. - For 1080p/4K quality, use yt-dlp directly with PO token provider installed. + This script auto-starts a PO Token provider for high-quality downloads. + If PO tokens are disabled, it can fall back to the Android client (360p only). """ import argparse +import json import subprocess import sys +import shutil +import time +import os from pathlib import Path +from typing import Iterable, Optional +from urllib.parse import quote, urlparse, urlunparse +from urllib.request import urlopen +from urllib.error import URLError + + +PYPI_MIRROR = "https://pypi.tuna.tsinghua.edu.cn/simple" + +QUALITY_PRESETS = { + "best": "bestvideo+bestaudio/best", + "1080p": "bestvideo[height<=1080]+bestaudio/best", + "720p": "bestvideo[height<=720]+bestaudio/best", + "480p": "bestvideo[height<=480]+bestaudio/best", + "360p": "bestvideo[height<=360]+bestaudio/best", + "worst": "worstvideo+worstaudio/worst", +} + + +def build_output_template(output_dir: str, template: Optional[str]) -> str: + if template: + template_path = Path(template) + if template_path.is_absolute(): + return template + return str(Path(output_dir).expanduser().resolve() / template) + + return str(Path(output_dir).expanduser().resolve() / "%(title)s.%(ext)s") + + +def list_files(root: 
Path) -> set: + return {path for path in root.rglob("*") if path.is_file()} + + +def human_size(num_bytes: int) -> str: + if num_bytes < 1024: + return f"{num_bytes} B" + for unit in ["KB", "MB", "GB", "TB"]: + num_bytes /= 1024.0 + if num_bytes < 1024: + return f"{num_bytes:.1f} {unit}" + return f"{num_bytes:.1f} PB" + + +def pick_primary_file(files: Iterable[Path], audio_only: bool) -> Optional[Path]: + video_exts = {".mp4", ".webm", ".mkv", ".mov", ".m4v"} + audio_exts = {".mp3", ".m4a", ".opus", ".aac", ".flac", ".wav"} + candidates = [] + for path in files: + if path.suffix.lower() in {".part", ".ytdl", ".tmp"}: + continue + if audio_only: + if path.suffix.lower() in audio_exts: + candidates.append(path) + else: + if path.suffix.lower() in video_exts: + candidates.append(path) + + if not candidates: + candidates = [path for path in files if path.suffix.lower() not in {".part", ".ytdl", ".tmp"}] + + if not candidates: + return None + + return max(candidates, key=lambda p: p.stat().st_size) + + +def get_video_resolution(path: Path) -> Optional[str]: + check = subprocess.run(["which", "ffprobe"], capture_output=True, text=True) + if check.returncode != 0: + return None + + cmd = [ + "ffprobe", + "-v", + "error", + "-select_streams", + "v:0", + "-show_entries", + "stream=width,height", + "-of", + "csv=p=0:s=x", + str(path), + ] + result = subprocess.run(cmd, capture_output=True, text=True) + if result.returncode != 0: + return None + value = result.stdout.strip() + return value or None + + +def filter_cookie_lines(text: str) -> str: + if not text: + return "" + filtered = [] + for line in text.splitlines(): + lowered = line.lower() + if "extracting cookies" in lowered: + continue + if "extracted" in lowered and "cookies" in lowered: + continue + filtered.append(line) + return "\n".join(filtered) + + +def run_yt_dlp(cmd: list, hide_cookie_logs: bool = False) -> subprocess.CompletedProcess: + if not hide_cookie_logs: + return subprocess.run(cmd) + + result = 
subprocess.run(cmd, capture_output=True, text=True) + stdout = filter_cookie_lines(result.stdout) + stderr = filter_cookie_lines(result.stderr) + if stdout: + print(stdout) + if stderr: + print(stderr, file=sys.stderr) + return result + + +def has_403_error(result: subprocess.CompletedProcess) -> bool: + text = "" + if hasattr(result, "stdout") and result.stdout: + text += result.stdout + if hasattr(result, "stderr") and result.stderr: + text += result.stderr + text = text.lower() + return "http error 403" in text or "403: forbidden" in text or "fragment 1 not found" in text + + +def has_pot_error(result: subprocess.CompletedProcess) -> bool: + text = "" + if hasattr(result, "stdout") and result.stdout: + text += result.stdout + if hasattr(result, "stderr") and result.stderr: + text += result.stderr + text = text.lower() + return "pot" in text and "error" in text + + +def has_wpc_error(result: subprocess.CompletedProcess) -> bool: + text = "" + if hasattr(result, "stdout") and result.stdout: + text += result.stdout + if hasattr(result, "stderr") and result.stderr: + text += result.stderr + text = text.lower() + return "pot:wpc" in text or "webpoclient" in text + + +def with_player_client(cmd: list, client: str) -> list: + rebuilt = [] + skip_next = False + for token in cmd: + if skip_next: + skip_next = False + continue + if token == "--extractor-args": + skip_next = True + continue + rebuilt.append(token) + rebuilt.extend(["--extractor-args", f"youtube:player_client={client}"]) + return rebuilt + + +def get_proxy_settings(proxy_arg: Optional[str]) -> tuple[Optional[str], Optional[str]]: + if proxy_arg: + proxy = proxy_arg + else: + proxy = ( + os.environ.get("ALL_PROXY") + or os.environ.get("all_proxy") + or os.environ.get("HTTPS_PROXY") + or os.environ.get("https_proxy") + or os.environ.get("HTTP_PROXY") + or os.environ.get("http_proxy") + ) + no_proxy = os.environ.get("NO_PROXY") or os.environ.get("no_proxy") + return proxy, no_proxy + + +def 
normalize_proxy_for_docker(proxy_url: str) -> str: + parsed = urlparse(proxy_url) + if parsed.hostname in {"127.0.0.1", "localhost"}: + host = "host.docker.internal" + netloc = "" + if parsed.username or parsed.password: + userinfo = parsed.username or "" + if parsed.password: + userinfo += f":{parsed.password}" + netloc = f"{userinfo}@" + if parsed.port: + netloc += f"{host}:{parsed.port}" + else: + netloc += host + parsed = parsed._replace(netloc=netloc) + return urlunparse(parsed) + return proxy_url + + +def is_localhost_proxy(proxy_url: str) -> bool: + parsed = urlparse(proxy_url) + return parsed.hostname in {"127.0.0.1", "localhost"} + + +def find_chrome_path() -> Optional[str]: + candidates = [ + "/Applications/Google Chrome.app/Contents/MacOS/Google Chrome", + "/Applications/Chromium.app/Contents/MacOS/Chromium", + ] + for candidate in candidates: + if Path(candidate).exists(): + return candidate + for name in ["google-chrome", "chromium", "chromium-browser", "chrome"]: + path = shutil.which(name) + if path: + return path + return None + + +def with_wpc_browser(cmd: list, browser_path: Optional[str]) -> list: + if not browser_path: + return cmd + return cmd + ["--extractor-args", f"youtubepot-wpc:browser_path={browser_path}"] + + +def provider_ping(url: str = "http://127.0.0.1:4416/ping") -> bool: + try: + with urlopen(url, timeout=3) as response: + return response.status == 200 + except (URLError, ConnectionResetError, TimeoutError): + return False + + +def docker_available() -> bool: + result = subprocess.run(["docker", "--version"], capture_output=True, text=True) + return result.returncode == 0 + + +def docker_daemon_ready() -> bool: + result = subprocess.run(["docker", "info"], capture_output=True, text=True) + return result.returncode == 0 + + +def wait_for_provider(timeout: int = 10) -> bool: + deadline = time.time() + timeout + while time.time() < deadline: + if provider_ping(): + return True + time.sleep(1) + return False + + +def 
container_exists(name: str) -> bool: + result = subprocess.run( + ["docker", "ps", "-a", "--filter", f"name={name}", "--format", "{{.Names}}"], + capture_output=True, + text=True, + ) + return result.returncode == 0 and name in result.stdout.split() + + +def parse_yt_dlp_version() -> Optional[str]: + result = subprocess.run(["yt-dlp", "--version"], capture_output=True, text=True) + if result.returncode != 0: + return None + return result.stdout.strip() + + +def version_at_least(version: str, minimum: str) -> bool: + def parse(value: str) -> list: + return [int(part) for part in value.split(".") if part.isdigit()] + + current = parse(version) + required = parse(minimum) + if not current or not required: + return False + while len(current) < len(required): + current.append(0) + while len(required) < len(current): + required.append(0) + return current >= required + + +def yt_dlp_python() -> Optional[str]: + yt_dlp_path = shutil.which("yt-dlp") + if not yt_dlp_path: + return None + try: + with open(yt_dlp_path, "r", encoding="utf-8") as handle: + first = handle.readline().strip() + except OSError: + return None + if not first.startswith("#!"): + return None + shebang = first[2:].strip() + if shebang.endswith("env python3") or shebang.endswith("env python"): + return "python3" + return shebang + + +def ensure_pot_plugin_installed(proxy_url: Optional[str]) -> bool: + version = parse_yt_dlp_version() + if not version or not version_at_least(version, "2025.05.22"): + print("⚠️ yt-dlp needs to be updated before enabling PO Token provider.") + return False + + python_bin = yt_dlp_python() + if not python_bin: + print("⚠️ Unable to locate yt-dlp's Python interpreter for plugin install.") + return False + + check = subprocess.run( + [python_bin, "-m", "pip", "show", "bgutil-ytdlp-pot-provider"], + capture_output=True, + text=True, + ) + if check.returncode == 0: + return True + + print("⚠️ Installing PO Token provider plugin (one-time setup)...") + install_cmd = [python_bin, 
"-m", "pip", "install", "bgutil-ytdlp-pot-provider", "-i", PYPI_MIRROR] + if proxy_url: + install_cmd.extend(["--proxy", proxy_url]) + install = subprocess.run(install_cmd, capture_output=True, text=True) + return install.returncode == 0 + + +def ensure_wpc_provider(proxy_url: Optional[str]) -> bool: + version = parse_yt_dlp_version() + if not version or not version_at_least(version, "2025.09.26"): + print("⚠️ yt-dlp needs to be updated before enabling the WPC PO Token provider.") + return False + + python_bin = yt_dlp_python() + if not python_bin: + print("⚠️ Unable to locate yt-dlp's Python interpreter for WPC provider install.") + return False + + check = subprocess.run( + [python_bin, "-m", "pip", "show", "yt-dlp-getpot-wpc"], + capture_output=True, + text=True, + ) + if check.returncode == 0: + return True + + print("⚠️ Installing WPC PO Token provider (one-time setup)...") + install_cmd = [python_bin, "-m", "pip", "install", "-U", "yt-dlp-getpot-wpc", "-i", PYPI_MIRROR] + if proxy_url: + install_cmd.extend(["--proxy", proxy_url]) + install = subprocess.run(install_cmd, capture_output=True, text=True) + return install.returncode == 0 + + +def ensure_po_token_provider(proxy_url: Optional[str], no_proxy: Optional[str]) -> Optional[str]: + if not ensure_pot_plugin_installed(proxy_url): + return "wpc" if ensure_wpc_provider(proxy_url) else None + + if provider_ping(): + return "bgutil" + + if not docker_available(): + print("⚠️ Docker is not available. Switching to browser-based PO Token provider...") + return "wpc" if ensure_wpc_provider(proxy_url) else None + + if not docker_daemon_ready(): + print("⚠️ Docker daemon is not running. Switching to browser-based PO Token provider...") + return "wpc" if ensure_wpc_provider(proxy_url) else None + + name = "bgutil-pot-provider" + if container_exists(name): + start = subprocess.run(["docker", "start", name], capture_output=True, text=True) + if start.returncode != 0: + print("⚠️ Docker container failed to start. 
Switching to browser-based PO Token provider...") + return "wpc" if ensure_wpc_provider(proxy_url) else None + else: + env_args = [] + use_host_network = False + docker_proxy = None + if proxy_url: + use_host_network = is_localhost_proxy(proxy_url) + docker_proxy = proxy_url if use_host_network else normalize_proxy_for_docker(proxy_url) + env_args.extend( + [ + "-e", + f"HTTP_PROXY={docker_proxy}", + "-e", + f"HTTPS_PROXY={docker_proxy}", + "-e", + f"ALL_PROXY={docker_proxy}", + "-e", + f"http_proxy={docker_proxy}", + "-e", + f"https_proxy={docker_proxy}", + "-e", + f"all_proxy={docker_proxy}", + ] + ) + if no_proxy: + env_args.extend( + [ + "-e", + f"NO_PROXY={no_proxy}", + "-e", + f"no_proxy={no_proxy}", + ] + ) + run_cmd = ["docker", "run", "-d", "--name", name] + if use_host_network: + run_cmd.extend(["--network", "host"]) + run_cmd.extend( + [ + "-p", + "4416:4416", + *env_args, + "--init", + "brainicism/bgutil-ytdlp-pot-provider", + ] + ) + + run = subprocess.run(run_cmd, capture_output=True, text=True) + if run.returncode != 0: + if use_host_network and proxy_url: + # Retry without host network using host.docker.internal proxy + docker_proxy = normalize_proxy_for_docker(proxy_url) + env_args = [ + "-e", + f"HTTP_PROXY={docker_proxy}", + "-e", + f"HTTPS_PROXY={docker_proxy}", + "-e", + f"ALL_PROXY={docker_proxy}", + "-e", + f"http_proxy={docker_proxy}", + "-e", + f"https_proxy={docker_proxy}", + "-e", + f"all_proxy={docker_proxy}", + ] + if no_proxy: + env_args.extend( + [ + "-e", + f"NO_PROXY={no_proxy}", + "-e", + f"no_proxy={no_proxy}", + ] + ) + retry = subprocess.run( + [ + "docker", + "run", + "-d", + "--name", + name, + "-p", + "4416:4416", + *env_args, + "--init", + "brainicism/bgutil-ytdlp-pot-provider", + ], + capture_output=True, + text=True, + ) + if retry.returncode == 0: + run = retry + else: + print("⚠️ Docker provider failed to start. 
Switching to browser-based PO Token provider...") + return "wpc" if ensure_wpc_provider(proxy_url) else None + else: + print("⚠️ Docker provider failed to start. Switching to browser-based PO Token provider...") + return "wpc" if ensure_wpc_provider(proxy_url) else None + + if wait_for_provider(): + print("✓ PO Token provider is running.") + return "bgutil" + + # If container started but not responding, recreate with proxy settings + if restart_po_token_provider(proxy_url, no_proxy) and provider_ping(): + print("✓ PO Token provider is running.") + return "bgutil" + + print("⚠️ Docker-based provider failed. Switching to browser-based PO Token provider...") + return "wpc" if ensure_wpc_provider(proxy_url) else None + + +def restart_po_token_provider(proxy_url: Optional[str], no_proxy: Optional[str]) -> bool: + name = "bgutil-pot-provider" + if not docker_available() or not docker_daemon_ready(): + return False + if container_exists(name): + subprocess.run(["docker", "rm", "-f", name], capture_output=True, text=True) + env_args = [] + use_host_network = False + docker_proxy = None + if proxy_url: + use_host_network = is_localhost_proxy(proxy_url) + docker_proxy = proxy_url if use_host_network else normalize_proxy_for_docker(proxy_url) + env_args.extend( + [ + "-e", + f"HTTP_PROXY={docker_proxy}", + "-e", + f"HTTPS_PROXY={docker_proxy}", + "-e", + f"ALL_PROXY={docker_proxy}", + "-e", + f"http_proxy={docker_proxy}", + "-e", + f"https_proxy={docker_proxy}", + "-e", + f"all_proxy={docker_proxy}", + ] + ) + if no_proxy: + env_args.extend( + [ + "-e", + f"NO_PROXY={no_proxy}", + "-e", + f"no_proxy={no_proxy}", + ] + ) + run_cmd = ["docker", "run", "-d", "--name", name] + if use_host_network: + run_cmd.extend(["--network", "host"]) + run_cmd.extend( + [ + "-p", + "4416:4416", + *env_args, + "--init", + "brainicism/bgutil-ytdlp-pot-provider", + ] + ) + + run = subprocess.run(run_cmd, capture_output=True, text=True) + if run.returncode != 0 and use_host_network: + docker_proxy 
= normalize_proxy_for_docker(proxy_url) + env_args = [ + "-e", + f"HTTP_PROXY={docker_proxy}", + "-e", + f"HTTPS_PROXY={docker_proxy}", + "-e", + f"ALL_PROXY={docker_proxy}", + "-e", + f"http_proxy={docker_proxy}", + "-e", + f"https_proxy={docker_proxy}", + "-e", + f"all_proxy={docker_proxy}", + ] + if no_proxy: + env_args.extend( + [ + "-e", + f"NO_PROXY={no_proxy}", + "-e", + f"no_proxy={no_proxy}", + ] + ) + subprocess.run( + [ + "docker", + "run", + "-d", + "--name", + name, + "-p", + "4416:4416", + *env_args, + "--init", + "brainicism/bgutil-ytdlp-pot-provider", + ], + capture_output=True, + text=True, + ) + return wait_for_provider() + + +def fallback_format_for_quality(quality: Optional[str]) -> str: + if not quality or quality == "best": + return "best[protocol!*=m3u8][ext=mp4]/best[protocol!*=m3u8]/best" + if quality.endswith("p") and quality[:-1].isdigit(): + height = quality[:-1] + return ( + f"best[height<={height}][protocol!*=m3u8][ext=mp4]/" + f"best[height<={height}][protocol!*=m3u8]/best[protocol!*=m3u8]" + ) + return "best[protocol!*=m3u8][ext=mp4]/best[protocol!*=m3u8]/best" + + +def print_video_info(cmd_base: list, url: str, hide_cookie_logs: bool = False) -> int: + info_cmd = cmd_base + ["--skip-download", "--dump-json", "--no-playlist", url] + result = subprocess.run(info_cmd, capture_output=True, text=True) + if result.returncode != 0: + print("✗ Failed to fetch video metadata") + if result.stderr: + print(filter_cookie_lines(result.stderr).strip()) + fallback = fetch_oembed_info(url) + if fallback: + print("\n✓ Retrieved metadata via YouTube oEmbed (limited fields).") + render_oembed_info(fallback) + return 0 + return result.returncode + + first_line = result.stdout.strip().splitlines()[0] if result.stdout else "" + if not first_line: + print("✗ No metadata returned") + return 1 + + try: + info = json.loads(first_line) + title = info.get("title", "Unknown title") + uploader = info.get("uploader") or info.get("channel") or "Unknown uploader" + 
duration = info.get("duration") + duration_text = f"{duration}s" if isinstance(duration, int) else "Unknown" + thumbnail = info.get("thumbnail") + print(f"Title: {title}") + print(f"Uploader: {uploader}") + print(f"Duration: {duration_text}") + if thumbnail: + print(f"Thumbnail: {thumbnail}") + except json.JSONDecodeError: + print(first_line) + + return 0 + + +def fetch_oembed_info(url: str) -> Optional[dict]: + oembed_url = f"https://www.youtube.com/oembed?url={quote(url, safe='')}&format=json" + try: + with urlopen(oembed_url, timeout=15) as response: + payload = response.read().decode("utf-8") + return json.loads(payload) + except Exception: + return None + + +def render_oembed_info(info: dict) -> None: + title = info.get("title", "Unknown title") + uploader = info.get("author_name", "Unknown uploader") + thumbnail = info.get("thumbnail_url") + print(f"Title: {title}") + print(f"Uploader: {uploader}") + print("Duration: Unknown") + if thumbnail: + print(f"Thumbnail: {thumbnail}") def download_video( url: str, output_dir: str = ".", format_spec: str = None, + quality: str = None, + output_template: str = None, + merge_format: str = "mp4", + subtitles: bool = False, + subtitle_lang: str = "en", + cookies_from_browser: str = None, + cookies_file: str = None, + player_client: str = None, + auto_po_token: bool = True, + proxy: str = None, + wpc_browser_path: str = None, + allow_playlist: bool = False, use_android_client: bool = True, audio_only: bool = False, list_formats: bool = False, + info_only: bool = False, ) -> int: """ Download a YouTube video using yt-dlp. 
@@ -41,9 +706,22 @@ def download_video( url: YouTube video URL output_dir: Directory to save the downloaded file format_spec: Format specification (e.g., "bestvideo+bestaudio/best") + quality: Quality preset (best, 1080p, 720p, 480p, 360p, worst) + output_template: Output filename template (yt-dlp template syntax) + merge_format: Merge output container format (e.g., mp4, mkv) + subtitles: Download subtitles if available + subtitle_lang: Subtitle languages (comma-separated) + cookies_from_browser: Load cookies from browser (e.g., chrome, firefox) + cookies_file: Load cookies from a cookies.txt file + player_client: Use a specific YouTube player client (e.g., web_safari) + auto_po_token: Attempt to auto-start PO Token provider on 403 errors + proxy: Proxy URL for yt-dlp and PO Token provider + wpc_browser_path: Browser path for WPC PO Token provider + allow_playlist: Allow playlist downloads (default: False) use_android_client: Use Android client to avoid nsig extraction issues audio_only: Download audio only list_formats: List available formats instead of downloading + info_only: Print video info before exiting Returns: Exit code (0 for success, non-zero for failure) @@ -60,14 +738,64 @@ def download_video( # Build yt-dlp command cmd = ["yt-dlp"] - # Use Android client by default to avoid nsig extraction issues - if use_android_client: + if cookies_from_browser and cookies_file: + print("✗ Error: Use either --cookies-from-browser or --cookies-file, not both.") + return 2 + + proxy_value, no_proxy = get_proxy_settings(proxy) + + use_po_token = auto_po_token and not info_only + provider_type = None + wpc_available = False + wpc_retried = False + if use_po_token: + provider_type = ensure_po_token_provider(proxy_value, no_proxy) + if not provider_type: + print("✗ PO Token provider could not be started. 
Aborting download.") + return 2 + wpc_available = provider_type == "wpc" + + if not wpc_browser_path and use_po_token: + wpc_browser_path = find_chrome_path() + + # Use Android client by default only when PO tokens are disabled and no custom client/cookies + use_android = use_android_client and not ( + use_po_token or cookies_from_browser or cookies_file or player_client + ) + if use_android_client and not use_android: + if use_po_token: + print("ℹ︎ Note: Disabling Android client because PO Token provider is enabled.") + else: + print("ℹ︎ Note: Disabling Android client because cookies or player client are in use.") + if use_android: cmd.extend(["--extractor-args", "youtube:player_client=android"]) + if cookies_from_browser: + cmd.extend(["--cookies-from-browser", cookies_from_browser]) + elif cookies_file: + cmd.extend(["--cookies", cookies_file]) + + if proxy_value: + cmd.extend(["--proxy", proxy_value]) + + po_token_client = None + if use_po_token: + po_token_client = "mweb" + if cookies_from_browser or cookies_file: + po_token_client = "web_safari" + + if player_client: + cmd.extend(["--extractor-args", f"youtube:player_client={player_client}"]) + elif po_token_client: + cmd.extend(["--extractor-args", f"youtube:player_client={po_token_client}"]) + + if not allow_playlist: + cmd.append("--no-playlist") + # List formats if requested if list_formats: cmd.extend(["-F", url]) - result = subprocess.run(cmd) + result = run_yt_dlp(cmd, hide_cookie_logs=bool(cookies_from_browser)) # Check if PO token provider might be needed if result.returncode == 0 and use_android_client: @@ -77,10 +805,29 @@ def download_video( return result.returncode + if info_only: + return print_video_info(cmd, url, hide_cookie_logs=bool(cookies_from_browser)) + + if format_spec and quality: + print("✗ Error: Use either --format or --quality, not both.") + return 2 + + if not format_spec and not quality and not audio_only: + quality = "best" + + format_from_quality = False + if quality: + 
format_spec = QUALITY_PRESETS.get(quality) + if not format_spec: + print(f"✗ Error: Unsupported quality preset: {quality}") + return 2 + format_from_quality = True + # Set output directory - output_path = Path(output_dir).expanduser().resolve() - output_path.mkdir(parents=True, exist_ok=True) - cmd.extend(["-P", str(output_path)]) + output_root = Path(output_dir).expanduser().resolve() + output_root.mkdir(parents=True, exist_ok=True) + output_template_final = build_output_template(str(output_root), output_template) + cmd.extend(["-o", output_template_final]) # Handle audio-only downloads if audio_only: @@ -88,17 +835,131 @@ def download_video( elif format_spec: cmd.extend(["-f", format_spec]) + if subtitles: + cmd.extend(["--write-subs", "--write-auto-subs", "--sub-lang", subtitle_lang]) + + if merge_format: + cmd.extend(["--merge-output-format", merge_format]) + # Add URL cmd.append(url) + def finalize_download(before_snapshot: set) -> None: + after_files = list_files(output_root) + new_files = sorted(after_files - before_snapshot) + primary = pick_primary_file(new_files, audio_only=audio_only) + if primary: + size = human_size(primary.stat().st_size) + print(f"\n✓ Download completed successfully!") + print(f" File: {primary}") + print(f" Size: {size}") + if audio_only: + print(" Resolution: N/A (audio-only)") + else: + resolution = get_video_resolution(primary) + if resolution: + print(f" Resolution: {resolution}") + else: + print(" Resolution: Not available") + else: + print(f"\n✓ Download completed successfully!") + print(f" Location: {output_root}") + + retry_client = player_client or po_token_client + + if wpc_available and wpc_browser_path: + cmd = with_wpc_browser(cmd, wpc_browser_path) + # Execute download + before_files = list_files(output_root) print(f"Executing: {' '.join(cmd)}") - result = subprocess.run(cmd) + result = run_yt_dlp(cmd, hide_cookie_logs=bool(cookies_from_browser)) if result.returncode == 0: - print(f"\n✓ Download completed 
successfully!") - print(f" Location: {output_path}") + finalize_download(before_files) else: + if use_po_token and provider_type == "bgutil" and has_pot_error(result): + print("\n⚠️ PO Token provider did not respond. Restarting it and retrying...") + if restart_po_token_provider(proxy_value, no_proxy): + retry_cmd = with_player_client(cmd, retry_client or "mweb") + before_retry = list_files(output_root) + print(f"Executing: {' '.join(retry_cmd)}") + retry_result = run_yt_dlp(retry_cmd, hide_cookie_logs=bool(cookies_from_browser)) + if retry_result.returncode == 0: + finalize_download(before_retry) + return 0 + result = retry_result + if has_pot_error(result): + print("\n⚠️ Docker provider still failing. Switching to browser-based PO Token provider...") + if ensure_wpc_provider(proxy_value): + retry_cmd = with_player_client(cmd, retry_client or "mweb") + retry_cmd = with_wpc_browser(retry_cmd, wpc_browser_path) + before_retry = list_files(output_root) + print(f"Executing: {' '.join(retry_cmd)}") + retry_result = run_yt_dlp(retry_cmd, hide_cookie_logs=bool(cookies_from_browser)) + if retry_result.returncode == 0: + finalize_download(before_retry) + return 0 + result = retry_result + wpc_retried = True + + if use_po_token and has_wpc_error(result): + print("\n⚠️ Browser verification not ready. Keeping Chrome open and retrying once...") + time.sleep(3) + retry_cmd = with_player_client(cmd, retry_client or "mweb") + retry_cmd = with_wpc_browser(retry_cmd, wpc_browser_path) + before_retry = list_files(output_root) + print(f"Executing: {' '.join(retry_cmd)}") + retry_result = run_yt_dlp(retry_cmd, hide_cookie_logs=bool(cookies_from_browser)) + if retry_result.returncode == 0: + finalize_download(before_retry) + return 0 + result = retry_result + wpc_retried = True + + if use_po_token and has_pot_error(result) and not wpc_retried: + print("\n⚠️ PO Token provider failed. 
Switching to browser-based PO Token provider...") + if ensure_wpc_provider(proxy_value): + retry_cmd = with_player_client(cmd, retry_client or "mweb") + retry_cmd = with_wpc_browser(retry_cmd, wpc_browser_path) + before_retry = list_files(output_root) + print(f"Executing: {' '.join(retry_cmd)}") + retry_result = run_yt_dlp(retry_cmd, hide_cookie_logs=bool(cookies_from_browser)) + if retry_result.returncode == 0: + finalize_download(before_retry) + return 0 + result = retry_result + + if cookies_from_browser and not player_client and has_403_error(result): + print("\n⚠️ Download failed with 403 errors. Retrying with web_safari client...") + retry_cmd = with_player_client(cmd, "web_safari") + before_retry = list_files(output_root) + print(f"Executing: {' '.join(retry_cmd)}") + retry_result = run_yt_dlp(retry_cmd, hide_cookie_logs=bool(cookies_from_browser)) + if retry_result.returncode == 0: + finalize_download(before_retry) + return 0 + result = retry_result + + if not audio_only and format_from_quality: + print("\n⚠️ Download failed. 
Retrying with non-m3u8 progressive formats...") + retry_cmd = cmd[:] + retry_format = fallback_format_for_quality(quality) + if "-f" in retry_cmd: + format_index = retry_cmd.index("-f") + 1 + if format_index < len(retry_cmd): + retry_cmd[format_index] = retry_format + else: + retry_cmd.extend(["-f", retry_format]) + before_retry = list_files(output_root) + print(f"Executing: {' '.join(retry_cmd)}") + retry_result = run_yt_dlp(retry_cmd, hide_cookie_logs=bool(cookies_from_browser)) + if retry_result.returncode == 0: + finalize_download(before_retry) + return 0 + print(f"\n✗ Download failed with exit code {retry_result.returncode}") + return retry_result.returncode + print(f"\n✗ Download failed with exit code {result.returncode}") return result.returncode @@ -115,9 +976,70 @@ def main(): default=".", help="Output directory (default: current directory)", ) + parser.add_argument( + "--output-template", + help="Output template (e.g., '%%(title)s.%%(ext)s')", + ) parser.add_argument( "-f", "--format", help="Format specification (e.g., 'bestvideo+bestaudio/best')" ) + parser.add_argument( + "-q", + "--quality", + choices=sorted(QUALITY_PRESETS.keys()), + help="Quality preset (best, 1080p, 720p, 480p, 360p, worst)", + ) + parser.add_argument( + "--merge-format", + default="mp4", + help="Merge output container format (default: mp4)", + ) + parser.add_argument( + "--subtitles", + action="store_true", + help="Download subtitles if available", + ) + parser.add_argument( + "--sub-lang", + default="en", + help="Subtitle languages (comma-separated, default: en)", + ) + parser.add_argument( + "--cookies-from-browser", + help="Load cookies from browser (e.g., chrome, firefox)", + ) + parser.add_argument( + "--cookies-file", + help="Load cookies from a cookies.txt file", + ) + parser.add_argument( + "--player-client", + help="Use a specific YouTube player client (e.g., web_safari)", + ) + parser.add_argument( + "--proxy", + help="Proxy URL for yt-dlp and PO Token provider (e.g., 
http://127.0.0.1:1082)", + ) + parser.add_argument( + "--wpc-browser-path", + help="Browser executable path for WPC PO Token provider", + ) + auto_group = parser.add_mutually_exclusive_group() + auto_group.add_argument( + "--auto-po-token", + action="store_true", + help="Automatically start a PO Token provider (default)", + ) + auto_group.add_argument( + "--no-auto-po-token", + action="store_true", + help="Disable automatic PO Token provider setup", + ) + parser.add_argument( + "--playlist", + action="store_true", + help="Allow playlist downloads (default: single video only)", + ) parser.add_argument( "--no-android-client", action="store_true", @@ -129,6 +1051,11 @@ def main(): parser.add_argument( "-F", "--list-formats", action="store_true", help="List available formats" ) + parser.add_argument( + "--info", + action="store_true", + help="Print video metadata (title/uploader/duration) and exit", + ) args = parser.parse_args() @@ -136,9 +1063,22 @@ def main(): url=args.url, output_dir=args.output_dir, format_spec=args.format, + quality=args.quality, + output_template=args.output_template, + merge_format=args.merge_format, + subtitles=args.subtitles, + subtitle_lang=args.sub_lang, + cookies_from_browser=args.cookies_from_browser, + cookies_file=args.cookies_file, + player_client=args.player_client, + auto_po_token=not args.no_auto_po_token, + proxy=args.proxy, + wpc_browser_path=args.wpc_browser_path, + allow_playlist=args.playlist, use_android_client=not args.no_android_client, audio_only=args.audio_only, list_formats=args.list_formats, + info_only=args.info, ) sys.exit(exit_code)